Dataset schema (one row per source file; ⌀ marks columns that contain null values):

- hexsha: string (length 40)
- size: int64 (3 to 1.03M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (3 to 972)
- max_stars_repo_name: string (6 to 130)
- max_stars_repo_head_hexsha: string (40 to 78)
- max_stars_repo_licenses: list (1 to 10 entries)
- max_stars_count: int64 (1 to 191k) ⌀
- max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
- max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
- max_issues_repo_path: string (3 to 972)
- max_issues_repo_name: string (6 to 130)
- max_issues_repo_head_hexsha: string (40 to 78)
- max_issues_repo_licenses: list (1 to 10 entries)
- max_issues_count: int64 (1 to 116k) ⌀
- max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
- max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
- max_forks_repo_path: string (3 to 972)
- max_forks_repo_name: string (6 to 130)
- max_forks_repo_head_hexsha: string (40 to 78)
- max_forks_repo_licenses: list (1 to 10 entries)
- max_forks_count: int64 (1 to 105k) ⌀
- max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
- max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
- content: string (3 to 1.03M)
- avg_line_length: float64 (1.13 to 941k)
- max_line_length: int64 (2 to 941k)
- alphanum_fraction: float64 (0 to 1)

hexsha: 3e981c832b18fefb820a4ad2210a6419e8c67d3a | size: 13,129 | ext: py | lang: Python
max_stars_repo: heat/db/api.py @ maestro-hybrid-cloud/heat (91a4bb3170bd81b1c67a896706851e55709c9b5a) | licenses: ["Apache-2.0"] | max_stars_count: null | stars events: null / null
max_issues_repo: heat/db/api.py @ maestro-hybrid-cloud/heat (91a4bb3170bd81b1c67a896706851e55709c9b5a) | licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null / null
max_forks_repo: heat/db/api.py @ maestro-hybrid-cloud/heat (91a4bb3170bd81b1c67a896706851e55709c9b5a) | licenses: ["Apache-2.0"] | max_forks_count: null | forks events: null / null
content:
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interface for database access.
Usage:
>>> from heat import db
>>> db.event_get(context, event_id)
# Event object received
The underlying driver is loaded from the configured backend mapping. SQLAlchemy is currently the only
supported backend.
"""
from oslo_config import cfg
from oslo_db import api
CONF = cfg.CONF
_BACKEND_MAPPING = {'sqlalchemy': 'heat.db.sqlalchemy.api'}
IMPL = api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)
def get_engine():
return IMPL.get_engine()
def get_session():
return IMPL.get_session()
def raw_template_get(context, template_id):
return IMPL.raw_template_get(context, template_id)
def raw_template_create(context, values):
return IMPL.raw_template_create(context, values)
def raw_template_update(context, template_id, values):
return IMPL.raw_template_update(context, template_id, values)
def raw_template_delete(context, template_id):
return IMPL.raw_template_delete(context, template_id)
def resource_data_get_all(resource, data=None):
return IMPL.resource_data_get_all(resource, data)
def resource_data_get(resource, key):
return IMPL.resource_data_get(resource, key)
def resource_data_set(resource, key, value, redact=False):
return IMPL.resource_data_set(resource, key, value, redact=redact)
def resource_data_get_by_key(context, resource_id, key):
return IMPL.resource_data_get_by_key(context, resource_id, key)
def resource_data_delete(resource, key):
"""Remove a resource_data element associated to a resource."""
return IMPL.resource_data_delete(resource, key)
def stack_tags_set(context, stack_id, tags):
return IMPL.stack_tags_set(context, stack_id, tags)
def stack_tags_delete(context, stack_id):
return IMPL.stack_tags_delete(context, stack_id)
def stack_tags_get(context, stack_id):
return IMPL.stack_tags_get(context, stack_id)
def resource_get(context, resource_id):
return IMPL.resource_get(context, resource_id)
def resource_get_all(context):
return IMPL.resource_get_all(context)
def resource_update(context, resource_id, values, atomic_key,
expected_engine_id=None):
return IMPL.resource_update(context, resource_id, values, atomic_key,
expected_engine_id)
def resource_create(context, values):
return IMPL.resource_create(context, values)
def resource_exchange_stacks(context, resource_id1, resource_id2):
return IMPL.resource_exchange_stacks(context, resource_id1, resource_id2)
def resource_get_all_by_stack(context, stack_id, key_id=False):
return IMPL.resource_get_all_by_stack(context, stack_id, key_id)
def resource_get_by_name_and_stack(context, resource_name, stack_id):
return IMPL.resource_get_by_name_and_stack(context,
resource_name, stack_id)
def resource_get_by_physical_resource_id(context, physical_resource_id):
return IMPL.resource_get_by_physical_resource_id(context,
physical_resource_id)
def stack_get(context, stack_id, show_deleted=False, tenant_safe=True,
eager_load=False):
return IMPL.stack_get(context, stack_id, show_deleted=show_deleted,
tenant_safe=tenant_safe,
eager_load=eager_load)
def stack_get_by_name_and_owner_id(context, stack_name, owner_id):
return IMPL.stack_get_by_name_and_owner_id(context, stack_name,
owner_id=owner_id)
def stack_get_by_name(context, stack_name):
return IMPL.stack_get_by_name(context, stack_name)
def stack_get_all(context, limit=None, sort_keys=None, marker=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
return IMPL.stack_get_all(context, limit, sort_keys,
marker, sort_dir, filters, tenant_safe,
show_deleted, show_nested, show_hidden,
tags, tags_any, not_tags, not_tags_any)
def stack_get_all_by_owner_id(context, owner_id):
return IMPL.stack_get_all_by_owner_id(context, owner_id)
def stack_count_all(context, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
return IMPL.stack_count_all(context, filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags,
tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any)
def stack_create(context, values):
return IMPL.stack_create(context, values)
def stack_update(context, stack_id, values, exp_trvsl=None):
return IMPL.stack_update(context, stack_id, values, exp_trvsl=exp_trvsl)
def stack_delete(context, stack_id):
return IMPL.stack_delete(context, stack_id)
def stack_lock_create(stack_id, engine_id):
return IMPL.stack_lock_create(stack_id, engine_id)
def stack_lock_get_engine_id(stack_id):
return IMPL.stack_lock_get_engine_id(stack_id)
def stack_lock_steal(stack_id, old_engine_id, new_engine_id):
return IMPL.stack_lock_steal(stack_id, old_engine_id, new_engine_id)
def stack_lock_release(stack_id, engine_id):
return IMPL.stack_lock_release(stack_id, engine_id)
def persist_state_and_release_lock(context, stack_id, engine_id, values):
return IMPL.persist_state_and_release_lock(context, stack_id,
engine_id, values)
def stack_get_root_id(context, stack_id):
return IMPL.stack_get_root_id(context, stack_id)
def stack_count_total_resources(context, stack_id):
return IMPL.stack_count_total_resources(context, stack_id)
def user_creds_create(context):
return IMPL.user_creds_create(context)
def user_creds_delete(context, user_creds_id):
return IMPL.user_creds_delete(context, user_creds_id)
def user_creds_get(context_id):
return IMPL.user_creds_get(context_id)
def event_get(context, event_id):
return IMPL.event_get(context, event_id)
def event_get_all(context):
return IMPL.event_get_all(context)
def event_get_all_by_tenant(context, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
return IMPL.event_get_all_by_tenant(context,
limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters)
def event_get_all_by_stack(context, stack_id, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
return IMPL.event_get_all_by_stack(context, stack_id,
limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir,
filters=filters)
def event_count_all_by_stack(context, stack_id):
return IMPL.event_count_all_by_stack(context, stack_id)
def event_create(context, values):
return IMPL.event_create(context, values)
def watch_rule_get(context, watch_rule_id):
return IMPL.watch_rule_get(context, watch_rule_id)
def watch_rule_get_by_name(context, watch_rule_name):
return IMPL.watch_rule_get_by_name(context, watch_rule_name)
def watch_rule_get_all(context):
return IMPL.watch_rule_get_all(context)
def watch_rule_get_all_by_stack(context, stack_id):
return IMPL.watch_rule_get_all_by_stack(context, stack_id)
def watch_rule_create(context, values):
return IMPL.watch_rule_create(context, values)
def watch_rule_update(context, watch_id, values):
return IMPL.watch_rule_update(context, watch_id, values)
def watch_rule_delete(context, watch_id):
return IMPL.watch_rule_delete(context, watch_id)
def watch_data_create(context, values):
return IMPL.watch_data_create(context, values)
def watch_data_get_all(context):
return IMPL.watch_data_get_all(context)
def watch_data_get_all_by_watch_rule_id(context, watch_rule_id):
return IMPL.watch_data_get_all_by_watch_rule_id(context, watch_rule_id)
def software_config_create(context, values):
return IMPL.software_config_create(context, values)
def software_config_get(context, config_id):
return IMPL.software_config_get(context, config_id)
def software_config_get_all(context, limit=None, marker=None,
tenant_safe=True):
return IMPL.software_config_get_all(context,
limit=limit,
marker=marker,
tenant_safe=tenant_safe)
def software_config_delete(context, config_id):
return IMPL.software_config_delete(context, config_id)
def software_deployment_create(context, values):
return IMPL.software_deployment_create(context, values)
def software_deployment_get(context, deployment_id):
return IMPL.software_deployment_get(context, deployment_id)
def software_deployment_get_all(context, server_id=None):
return IMPL.software_deployment_get_all(context, server_id)
def software_deployment_update(context, deployment_id, values):
return IMPL.software_deployment_update(context, deployment_id, values)
def software_deployment_delete(context, deployment_id):
return IMPL.software_deployment_delete(context, deployment_id)
def snapshot_create(context, values):
return IMPL.snapshot_create(context, values)
def snapshot_get(context, snapshot_id):
return IMPL.snapshot_get(context, snapshot_id)
def snapshot_get_by_stack(context, snapshot_id, stack):
return IMPL.snapshot_get_by_stack(context, snapshot_id, stack)
def snapshot_update(context, snapshot_id, values):
return IMPL.snapshot_update(context, snapshot_id, values)
def snapshot_delete(context, snapshot_id):
return IMPL.snapshot_delete(context, snapshot_id)
def snapshot_get_all(context, stack_id):
return IMPL.snapshot_get_all(context, stack_id)
def service_create(context, values):
return IMPL.service_create(context, values)
def service_update(context, service_id, values):
return IMPL.service_update(context, service_id, values)
def service_delete(context, service_id, soft_delete=True):
return IMPL.service_delete(context, service_id, soft_delete)
def service_get(context, service_id):
return IMPL.service_get(context, service_id)
def service_get_all(context):
return IMPL.service_get_all(context)
def service_get_all_by_args(context, host, binary, hostname):
return IMPL.service_get_all_by_args(context, host, binary, hostname)
def sync_point_delete_all_by_stack_and_traversal(context, stack_id,
traversal_id):
return IMPL.sync_point_delete_all_by_stack_and_traversal(context,
stack_id,
traversal_id)
def sync_point_create(context, values):
return IMPL.sync_point_create(context, values)
def sync_point_get(context, entity_id, traversal_id, is_update):
return IMPL.sync_point_get(context, entity_id, traversal_id, is_update)
def sync_point_update_input_data(context, entity_id,
traversal_id, is_update, atomic_key,
input_data):
return IMPL.sync_point_update_input_data(context, entity_id,
traversal_id, is_update,
atomic_key, input_data)
def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(engine, version=version)
def db_version(engine):
"""Display the current database version."""
return IMPL.db_version(engine)
avg_line_length: 30.964623 | max_line_length: 78 | alphanum_fraction: 0.69396
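The heat/db/api.py entry above is a pure facade: every function simply forwards to IMPL, which oslo_db builds from the backend mapping at import time. Below is a minimal sketch of that dispatch pattern; it is not part of the dataset row, and the package name mypkg and the widget_get function are hypothetical placeholders.

```python
# Sketch of the oslo_db facade pattern used in heat/db/api.py (hypothetical names).
from oslo_config import cfg
from oslo_db import api as db_api

CONF = cfg.CONF

# Map a backend label to the module that actually implements the DB calls.
_BACKEND_MAPPING = {'sqlalchemy': 'mypkg.db.sqlalchemy.api'}

# DBAPI.from_config loads the mapped module; attribute lookups are forwarded to it.
IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING)


def widget_get(context, widget_id):
    # Callers import this facade and never touch the backend module directly.
    return IMPL.widget_get(context, widget_id)
```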
hexsha: f5553f7742cea0988b799b25614a8a7db8f847a8 | size: 691 | ext: py | lang: Python
max_stars_repo: Python web/test_cgi.py @ Vendriix/Stuff (4ac80bae3c3136e83cf05e4cef043c4d8939bef8) | licenses: ["Unlicense"] | max_stars_count: null | stars events: null / null
max_issues_repo: Python web/test_cgi.py @ Vendriix/Stuff (4ac80bae3c3136e83cf05e4cef043c4d8939bef8) | licenses: ["Unlicense"] | max_issues_count: null | issues events: null / null
max_forks_repo: Python web/test_cgi.py @ Vendriix/Stuff (4ac80bae3c3136e83cf05e4cef043c4d8939bef8) | licenses: ["Unlicense"] | max_forks_count: null | forks events: null / null
content:
import os
import cgi
import json
import cgitb
cgitb.enable(1,None,5,"text")
import pprint
pp = pprint.PrettyPrinter(indent=4)
fields = cgi.FieldStorage()
print "Content-type: text/html"
print
print "<title>Test CGI</title>"
print "<p>Hello World!</p>"
print "<pre>"
#pp.pprint(fields.value)
#pp.pprint(os.environ)
try:
data = json.loads(fields.value)
#print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
pp.pprint(data)
pp.pprint(data['release']['draft'])
pp.pprint(data['release']['prerelease'])
pp.pprint(data['release']['tag_name'])
pp.pprint(data['release']['url'])
except ValueError:
print "malformed JSON"
print "</pre>"
avg_line_length: 19.194444 | max_line_length: 75 | alphanum_fraction: 0.670043

hexsha: 6fb5932f2b64aab81c9e93c4e86b020644e0e70a | size: 361 | ext: py | lang: Python
max_stars_repo: feeds/templatetags/feeds.py @ isergart/gradient (837d882f5ab07f2a9847d0212698cdc2d9312125) | licenses: ["MIT"] | max_stars_count: null | stars events: null / null
max_issues_repo: feeds/templatetags/feeds.py @ isergart/gradient (837d882f5ab07f2a9847d0212698cdc2d9312125) | licenses: ["MIT"] | max_issues_count: null | issues events: null / null
max_forks_repo: feeds/templatetags/feeds.py @ isergart/gradient (837d882f5ab07f2a9847d0212698cdc2d9312125) | licenses: ["MIT"] | max_forks_count: null | forks events: null / null
content:
# -*- coding: utf-8 -*-
from django import template
from ..models import *
register = template.Library()
@register.inclusion_tag('feeds/tpl_news.html')
def news():
news = Post.objects.all()
return {'news': news}
@register.inclusion_tag('feeds/tpl_project.html')
def project():
projects = Project.objects.all()
return {'projects': projects}
avg_line_length: 20.055556 | max_line_length: 49 | alphanum_fraction: 0.686981

hexsha: 3b0d223a02d43b1c144b0603bee285df37d2f0a9 | size: 2,023 | ext: py | lang: Python
max_stars_repo: __main__.py @ Axelandre42/garry-travis (701dc3e977a904aef279e0da7d9ab46855b8da48) | licenses: ["MIT"] | max_stars_count: null | stars events: null / null
max_issues_repo: __main__.py @ Axelandre42/garry-travis (701dc3e977a904aef279e0da7d9ab46855b8da48) | licenses: ["MIT"] | max_issues_count: 9 | issues events: 2021-08-09T20:17:37.000Z / 2022-03-25T20:16:58.000Z
max_forks_repo: __main__.py @ Axelandre42/garry-travis (701dc3e977a904aef279e0da7d9ab46855b8da48) | licenses: ["MIT"] | max_forks_count: null | forks events: null / null
content:
import os
import platform
import signal
from discord.ext import tasks
from dotenv import load_dotenv
import discord
import sys
import logging.handlers
import mysql.connector
import utils
import commands
import ticker
logger = logging.getLogger('garry-travis')
def init_loggers():
logger.setLevel(logging.INFO)
handler = logging.handlers.SysLogHandler(address='/dev/log')
logger.addHandler(handler)
sys.stdout = utils.StreamLogger(logger, logging.DEBUG)
sys.stderr = utils.StreamLogger(logger, logging.WARNING)
client = discord.Client(intents=discord.Intents.default())
global db
global cmd_inst
@tasks.loop(minutes=15)
async def repeat():
await ticker.check_tick(client, db)
async def stop_bot():
await client.change_presence(status=discord.Status.offline)
await client.close()
db.close()
print('Goodbye.')
@client.event
async def on_ready():
if not repeat.is_running():
repeat.start()
cmd_inst.setup(client)
await client.change_presence(status=discord.Status.online, activity=discord.Game(name='Version 0.3.1'))
print('Ready!')
if __name__ == '__main__':
if platform.system() == 'Linux':
init_loggers()
load_dotenv('/etc/garry-travis/.env')
load_dotenv()
db = mysql.connector.connect(host=os.getenv('DATABASE_HOST'),
port=os.getenv('DATABASE_PORT'),
user=os.getenv('DATABASE_USER'),
password=os.getenv('DATABASE_PASSWORD'),
database=os.getenv('DATABASE_NAME'))
cmd_inst = commands.Commands(db)
if platform.system() == 'Linux':
client.loop.add_signal_handler(signal.SIGINT, lambda: client.loop.create_task(stop_bot()))
client.loop.add_signal_handler(signal.SIGTERM, lambda: client.loop.create_task(stop_bot()))
client.loop.add_signal_handler(signal.SIGHUP, lambda: client.loop.create_task(cmd_inst.reload()))
client.run(os.getenv('DISCORD_KEY'))
avg_line_length: 28.492958 | max_line_length: 107 | alphanum_fraction: 0.683144

hexsha: 0a3018183859a182914802d0d2d129e53b4c72a5 | size: 1,426 | ext: py | lang: Python
max_stars_repo: setup.py @ ElissonRodrigues/mimetypes-magic (9ffcb6913752ad06815bccf2cda2d4a9e1e16bea) | licenses: ["MIT"] | max_stars_count: null | stars events: null / null
max_issues_repo: setup.py @ ElissonRodrigues/mimetypes-magic (9ffcb6913752ad06815bccf2cda2d4a9e1e16bea) | licenses: ["MIT"] | max_issues_count: null | issues events: null / null
max_forks_repo: setup.py @ ElissonRodrigues/mimetypes-magic (9ffcb6913752ad06815bccf2cda2d4a9e1e16bea) | licenses: ["MIT"] | max_forks_count: null | forks events: null / null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
import io
import os
def read(file_name):
"""Read a text file and return the content as a string."""
with io.open(os.path.join(os.path.dirname(__file__), file_name),
encoding='utf-8') as f:
return f.read()
setuptools.setup(
name='mimetypes-magic',
description='File type identification using libmagic',
author='Adam Hupp',
author_email='adam@hupp.org',
url="https://github.com/ElissonRodrigues/mimetypes-magic",
version='0.4.30',
long_description=read('README.md'),
long_description_content_type='text/markdown',
packages=['magic'],
package_data={
'magic': ['py.typed'],
},
keywords="mime magic file",
license="MIT",
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
],
)
avg_line_length: 31 | max_line_length: 73 | alphanum_fraction: 0.597475

hexsha: b896e4b6c688c41ffbd92a9b02ee4bf15ac6ce35 | size: 17,984 | ext: py | lang: Python
max_stars_repo: census2text2010.py @ ikding/census-parser (3b736af83fcd8000ffda8eab49567617fb37deb1) | licenses: ["0BSD"] | max_stars_count: 1 | stars events: 2018-05-20T01:45:21.000Z / 2018-05-20T01:45:21.000Z
max_issues_repo: census2text2010.py @ ikding/census-parser (3b736af83fcd8000ffda8eab49567617fb37deb1) | licenses: ["0BSD"] | max_issues_count: null | issues events: null / null
max_forks_repo: census2text2010.py @ ikding/census-parser (3b736af83fcd8000ffda8eab49567617fb37deb1) | licenses: ["0BSD"] | max_forks_count: null | forks events: null / null
content:
#!/usr/bin/env python
""" Convert remote U.S. Census 2010 data to local tab-separated text files.
Run with --help flag for usage instructions.
"""
from sys import stdout, stderr, argv
from os import SEEK_SET, SEEK_CUR, SEEK_END
from time import time
from csv import reader, DictWriter, DictReader
from os.path import basename, dirname, join
from datetime import timedelta
from optparse import OptionParser
from urlparse import urlparse, urljoin
from cStringIO import StringIO
from httplib import HTTPConnection
from urllib import urlopen
from zipfile import ZipFile
from itertools import izip
import re
class RemoteFileObject:
""" Implement enough of this to be useful:
http://docs.python.org/release/2.5.2/lib/bltin-file-objects.html
Pull data from a remote URL with HTTP range headers.
"""
def __init__(self, url, verbose=False, block_size=(16 * 1024)):
self.verbose = verbose
# scheme://host/path;parameters?query#fragment
(scheme, host, path, parameters, query, fragment) = urlparse(url)
self.host = host
self.rest = path + (query and ('?' + query) or '')
self.offset = 0
self.length = self.get_length()
self.chunks = {}
self.block_size = block_size
self.start_time = time()
def get_length(self):
"""
"""
conn = HTTPConnection(self.host)
conn.request('GET', self.rest, headers={'Range': '0-1'})
length = int(conn.getresponse().getheader('content-length'))
if self.verbose:
print >> stderr, length, 'bytes in', basename(self.rest)
return length
def get_range(self, start, end):
"""
"""
headers = {'Range': 'bytes=%(start)d-%(end)d' % locals()}
conn = HTTPConnection(self.host)
conn.request('GET', self.rest, headers=headers)
return conn.getresponse().read()
def read(self, count=None):
""" Read /count/ bytes from the resource at the current offset.
"""
if count is None:
# to the end
count = self.length - self.offset
out = StringIO()
while count:
chunk_offset = self.block_size * (self.offset / self.block_size)
if chunk_offset not in self.chunks:
range = chunk_offset, min(self.length, self.offset + self.block_size) - 1
self.chunks[chunk_offset] = StringIO(self.get_range(*range))
if self.verbose:
loaded = float(self.block_size) * len(self.chunks) / self.length
expect = (time() - self.start_time) / loaded
remain = max(0, int(expect * (1 - loaded)))
print >> stderr, '%.1f%%' % min(100, 100 * loaded),
print >> stderr, 'of', basename(self.rest),
print >> stderr, 'with', timedelta(seconds=remain), 'to go'
chunk = self.chunks[chunk_offset]
in_chunk_offset = self.offset % self.block_size
in_chunk_count = min(count, self.block_size - in_chunk_offset)
chunk.seek(in_chunk_offset, SEEK_SET)
out.write(chunk.read(in_chunk_count))
count -= in_chunk_count
self.offset += in_chunk_count
out.seek(0)
return out.read()
def seek(self, offset, whence=SEEK_SET):
""" Seek to the specified offset.
/whence/ behaves as with other file-like objects:
http://docs.python.org/lib/bltin-file-objects.html
"""
if whence == SEEK_SET:
self.offset = offset
elif whence == SEEK_CUR:
self.offset += offset
elif whence == SEEK_END:
self.offset = self.length + offset
def tell(self):
return self.offset
def file_choice(tables, verbose):
"""
Choose the right summary file component for the given Census table
"""
# code originally in readcsv.py by Peter Gao
    # join() keeps this working when the script is invoked from another directory.
    datareader = DictReader(open(join(dirname(argv[0]), "sf1_data_field_descriptors_2010.csv")))
data = []
entry = None
prevCol = None
current_table = ""
for line in datareader:
new_table_number = line['TABLE NUMBER']
if new_table_number != current_table:
# save the old one
if entry != None:
data.append(entry)
entry = {}
current_table = new_table_number
entry['Matrix Number'] = line['TABLE NUMBER']
entry['File Name'] = line['SEGMENT']
next_line = datareader.next()
entry['Universe'] = (next_line['FIELD NAME'][9:].lstrip())
entry['Name'] = line['FIELD NAME'][:line['FIELD NAME'].index('[')-1]
entry['Cell Count'] = 0
entry['Field Names'] = []
# Increment the cell count iff there's actually data, rather than this being a descriptive row,
# and save the column name
if len(line['FIELD CODE']) > 0:
entry['Cell Count'] += 1
entry['Field Names'].append(line['FIELD CODE'])
# sanity check: ensure the columns are stored in order
if entry['Cell Count'] == 1:
assert int(re.sub('[A-Z]', '', line['FIELD CODE'][-4:])) == 1,\
'Field names not stored in order for matrix %s: first column is %s' % (entry['Matrix Number'], line['FIELD CODE'])
else:
assert int(re.sub('[A-Z]', '', line['FIELD CODE'][-4:])) == int(re.sub('[A-Z]', '', prevCol[-4:])) + 1,\
'Field names are not stored in order for matrix %s: column %s follows column %s' %\
(entry['Matrix Number'], line['FIELD CODE'], prevCol)
prevCol = line['FIELD CODE']
files = []
for table in tables:
file_name, column_offset = None, 5
for row in data:
curr_file, curr_table, cell_count = row.get('File Name'), row.get('Matrix Number'), int(row.get('Cell Count'))
if curr_file != file_name:
file_name, column_offset = curr_file, 5
if curr_table == table:
if verbose:
print >> stderr, table, '-', row.get('Name'), 'in', row.get('Universe')
files.append((table, file_name, column_offset, cell_count, row.get('Field Names')))
break
column_offset += cell_count
return files
def file_paths(state, files):
"""
Convert File 3 California into ca000032010.sf1
"""
return '%sgeo2010.sf1' % states[state].lower(), dict([(f, '%s%05d2010.sf1' % (states[state].lower(), int(f))) for f in files])
def column_names(wide):
"""
Column names for geographic header file
"""
if wide is True:
return ['Summary Level', 'Geographic Component', 'State FIPS', 'Place FIPS', 'County FIPS', 'Tract', 'Zip', 'Block Group', 'Block', 'Name', 'Latitude', 'Longitude', 'Land Area', 'Water Area', 'Population', 'Housing Units']
elif wide is False:
return ['State FIPS', 'Place FIPS', 'County FIPS', 'Tract', 'Zip', 'Block Group', 'Block']
else:
return ['Summary Level', 'Geographic Component', 'State FIPS', 'Place FIPS', 'County FIPS', 'Tract', 'Zip', 'Block Group', 'Block', 'Name', 'Latitude', 'Longitude']
def key_names(wide):
"""
Key names for geographic header file
"""
if wide is True:
return ('SUMLEV', 'GEOCOMP', 'STATE', 'PLACE', 'COUNTY', 'TRACT', 'ZCTA5', 'BLOCKGROUP', 'BLOCK', 'NAME', 'LATITUDE', 'LONGITUDE', 'AREALAND', 'AREAWATER', 'POP100', 'HU100')
elif wide is False:
return ('STATE', 'PLACE', 'COUNTY', 'TRACT', 'ZCTA5', 'BLOCKGROUP', 'BLOCK')
else:
return ('SUMLEV', 'GEOCOMP', 'STATE', 'PLACE', 'COUNTY', 'TRACT', 'ZCTA5', 'BLOCKGROUP', 'BLOCK', 'NAME', 'LATITUDE', 'LONGITUDE')
def get_file_in_zipfile(url, fname, verbose):
"""
Return a file-like object for a file in a remote zipfile
"""
f = RemoteFileObject(url, verbose, 256 * 1024)
z = ZipFile(f)
assert fname in z.namelist(), 'Filename %s not found in ZIP %s' % (fname, url)
return z.open(fname)
def geo_lines(url, fname, verbose):
"""
Get the appropriate geographic header
"""
# make sure it is a geographic header
assert fname[2:] == 'geo2010.sf1', 'Not a geographic header file: %s' % fname
inp = get_file_in_zipfile(url, fname, verbose)
# The column offsets and widths are recorded here for the 2010 geographic header
# Offsets here are one-based to match the documentation on page 19 of the SF1 documentation
# Note that AREAWATER is called AREAWATR in the docs; despite dropping penultimate e's being
# all the rage in cool web 2.0 apps (e.g. Flickr), we're going to restore it.
cols = [('LATITUDE', 337, 11), ('LONGITUDE', 348, 12),
('LOGRECNO', 19, 7), ('SUMLEV', 9, 3), ('GEOCOMP', 12, 2),
('STATE', 28, 2), ('PLACE', 46, 5), ('COUNTY', 30, 3), ('TRACT', 55, 6),
('BLOCKGROUP', 61, 1), ('BLOCK', 62, 4), ('NAME', 227, 90), ('ZCTA5', 172, 5),
('AREALAND', 199, 14), ('AREAWATER', 213, 14),
('POP100', 319, 9), ('HU100', 328, 9)]
for line in inp:
data = dict( [(key, line[s-1:s-1+l].strip()) for (key, s, l) in cols] )
# Census Bureau represents positive latitude and longitude as +number, get rid of the plus
# There is positive longitude in the US, check out Attu Station CDP, Alaska
for key in ('LATITUDE', 'LONGITUDE'):
data[key] = data[key].lstrip('+')
yield data
def data_lines(url, fname, verbose):
"""
Get all the lines in data file fname from zip file path
"""
data = get_file_in_zipfile(url, fname, verbose)
for row in reader(data):
yield row
# Updated for 2010 census
summary_levels = {'state': '040', 'county': '050', 'tract': '140', 'zip': '871', 'blockgroup': '150', 'block': '101', 'place': '160'}
states = {'Alabama': 'AL', 'Alaska': 'AK', 'American Samoa': 'AS', 'Arizona': 'AZ',
'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO', 'Connecticut': 'CT', 'Delaware': 'DE',
'District of Columbia': 'DC', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID',
'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY',
'Louisiana': 'LA', 'Maine': 'ME', 'Marshall Islands': 'MH', 'Maryland': 'MD',
'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS',
'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH',
'New Jersey': 'NJ', 'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC',
'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK', 'Oregon': 'OR', 'Pennsylvania': 'PA',
'Puerto Rico': 'PR', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD',
'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA',
'Washington': 'WA', 'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY'}
parser = OptionParser(usage="""%%prog [options] [list of table IDs]
Convert remote U.S. Census 2010 data to local tab-separated text files.
Examples:
Housing basics for counties in Rhode Island
census2text.py --state 'Rhode Island' H1 H3 H4
Age breakdowns for census tracts around Oakland, CA
census2text.py --state California --bbox 37.86 -122.35 37.70 -122.10 --geography tract P12
Complete documentation of Summary File data is dense but helpful:
http://www.census.gov/prod/cen2010/doc/sf1.pdf
Column descriptions are start on page 183.
Available summary files: SF1.
Available summary levels: %s.
See also numeric summary levels in the SF1 documentation, page 107.
""".rstrip() % ', '.join(summary_levels.keys()))
parser.set_defaults(summary_level='county', table='P1', verbose=None, wide=None)
parser.add_option('-s', '--state', dest='state',
help='State, e.g. "Alaska", "District of Columbia." Required.',
type='choice', choices=states.keys())
parser.add_option('-o', '--output', dest='output',
help='Optional output filename, stdout if omitted.')
parser.add_option('-g', '--geography', dest='summary_level',
help='Geographic summary level, e.g. "state", "040". Some available summary levels are %s.' % ', '.join(summary_levels.keys()),
type='choice', choices=summary_levels.keys() + summary_levels.values())
parser.add_option('-c', '--county', dest='county',
help='County FIPS code (3 digits). e.g. --state California --county 083 would yield data for Santa Barbara County, CA',
type='string')
parser.add_option('-b', '--bbox', dest='bbox',
help='Optional geographic bounds: north west south east.',
type='float', nargs=4)
parser.add_option('-n', '--narrow', dest='wide',
help='Output fewer columns than normal',
action='store_false')
parser.add_option('-w', '--wide', dest='wide',
help='Output more columns than normal',
action='store_true')
parser.add_option('-q', '--quiet', dest='verbose',
help='Be quieter than normal',
action='store_false')
parser.add_option('-v', '--verbose', dest='verbose',
help='Be louder than normal',
action='store_true')
if __name__ == '__main__':
options, tables = parser.parse_args()
if options.state == None:
parser.error('Please specify a state; the 2010 Census no longer provides nation-level files')
if options.summary_level in summary_levels:
options.summary_level = summary_levels[options.summary_level]
# There may be multiple summary levels; if not, fix up
if type(options.summary_level) is not tuple:
options.summary_level = (options.summary_level, )
# Figure out what files we need to fetch
files = file_choice(tables, options.verbose is not False)
# set up the path to the zipfile
src_file = 'http://www2.census.gov/census_2010/04-Summary_File_1/%s/%s2010.sf1.zip' % (options.state.replace(' ', '_'), states[options.state].lower())
if options.verbose is not False:
print >> stderr, 'Fetching from %s' % src_file
print >> stderr, ', '.join(options.summary_level), options.state, '-',
print >> stderr, ', '.join(['%s: file %s (%d @%d)' % (tbl, fn, cc, co) for (tbl, fn, co, cc, flds) in files])
print >> stderr, '-' * 32
file_names = set([file_name for (tbl, file_name, co, cc, flds) in files])
geo_path, data_paths = file_paths(options.state, file_names)
# Be forgiving about the bounding box
if options.bbox is not None:
north = max(options.bbox[0], options.bbox[2])
south = min(options.bbox[0], options.bbox[2])
east = max(options.bbox[1], options.bbox[3])
west = min(options.bbox[1], options.bbox[3])
# Get the header for the geo columns
row = column_names(options.wide)
pat = re.compile(r'^([A-Z]+)(\d+)([A-Z]*)$')
# Write the header for the data columns
for (table, fn, co, cell_count, field_names) in files:
row += field_names
out = options.output and open(options.output, 'w') or stdout
out = DictWriter(out, dialect='excel-tab', fieldnames=row)
out.writeheader()
# Get iterables for all of the files
file_iters = {}
for (tbl, file_name, co, cc, flds) in files:
if file_name not in file_iters:
file_iters[file_name] = data_lines(src_file, data_paths[file_name], options.verbose)
file_names = sorted(file_iters.keys())
# get rows from geographic header
geo_iter = geo_lines(src_file, geo_path, options.verbose)
for geo in geo_iter:
if geo['SUMLEV'] not in options.summary_level:
# This is not the summary level you're looking for.
continue
if geo['GEOCOMP'] != '00':
# Geographic Component "00" means the whole thing,
# not e.g. "01" for urban or "43" for rural parts.
continue
if options.county != None and geo['COUNTY'] != options.county:
# This is not the county you're looking for
continue
if options.bbox is not None:
lat, lon = float(geo['LATITUDE']), float(geo['LONGITUDE'])
if lat < south or north < lat or lon < west or east < lon:
# This geography is outside the bounding box
continue
vals = [geo[key] for key in key_names(options.wide)]
# name the columns appropriately
row = dict(zip(column_names(options.wide), vals))
# Iterate over every line in each of the necessary files
# It is possible that there won't be an entry for some variable in some file,
# so we can't iterate over them all at once as was done in the 2000 version of this script
for fname in file_iters.keys():
for line in file_iters[fname]:
if line[4] == geo['LOGRECNO']:
# We found a match, grab every matrix in this file at once
# matrix is in the form (matrix/table name, file, offset, cell count, field names)
for matrix in [i for i in files if i[1] == fname]:
names = matrix[4]
values = line[matrix[2]:matrix[2]+matrix[3]]
row.update(zip(names, values))
# done
break
out.writerow(row)
stdout.flush()
avg_line_length: 40.32287 | max_line_length: 230 | alphanum_fraction: 0.58274
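census2text2010.py above is Python 2 code (httplib, urlparse, print statements). For orientation only, here is a small Python 3 sketch of the byte-range fetch that RemoteFileObject.get_range performs; the URL in the comment is a placeholder, and none of this is part of the original script.

```python
# Python 3 sketch of an HTTP Range request, mirroring RemoteFileObject.get_range.
from http.client import HTTPConnection
from urllib.parse import urlparse


def get_range(url, start, end):
    """Return bytes start..end (inclusive) of the resource at url."""
    parts = urlparse(url)
    path = parts.path + ('?' + parts.query if parts.query else '')
    conn = HTTPConnection(parts.netloc)
    conn.request('GET', path, headers={'Range': 'bytes=%d-%d' % (start, end)})
    resp = conn.getresponse()
    # A server that honors the header answers 206 Partial Content.
    return resp.read()

# Example with a placeholder URL: fetch the first kilobyte.
# chunk = get_range('http://example.com/data.zip', 0, 1023)
```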
hexsha: a5d1deae246a3b49a729b067b844be49269d261e | size: 2,706 | ext: py | lang: Python
max_stars_repo: uuoskit/test_helper.py @ uuosio/python-contract-demos (7d56ba371f2115b0ab895fca3e71092c2523f25d) | licenses: ["MIT"] | max_stars_count: 2 | stars events: 2020-12-08T13:15:06.000Z / 2020-12-29T10:06:44.000Z
max_issues_repo: uuoskit/test_helper.py @ uuosio/python-contract-demos (7d56ba371f2115b0ab895fca3e71092c2523f25d) | licenses: ["MIT"] | max_issues_count: null | issues events: null / null
max_forks_repo: uuoskit/test_helper.py @ uuosio/python-contract-demos (7d56ba371f2115b0ab895fca3e71092c2523f25d) | licenses: ["MIT"] | max_forks_count: null | forks events: null / null
content:
load_code = None
run = None
from . import config
from . import wallet
test_account1 = 'helloworld11'
test_account2 = 'helloworld12'
def init(network='EOS_TESTNET', deploy_type=1):
#def init(network='UUOS_TESTNET', deploy_type=1):
global test_account1
global test_account2
config.network = network
wallet.create('test')
# import active key for hello
wallet.import_key('test', '5JRYimgLBrRLCBAcjHUWCYRv3asNedTYYzVgmiU4q2ZVxMBiJXL')
# import active key for helloworld11
wallet.import_key('test', '5Jbb4wuwz8MAzTB9FJNmrVYGXo4ABb7wqPVoWGcZ6x8V2FwNeDo')
# import active key for helloworld12
wallet.import_key('test', '5JHRxntHapUryUetZgWdd3cg6BrpZLMJdqhhXnMaZiiT4qdJPhv')
# active key of ceyelqpjeeia
wallet.import_key('test', '5JfZz1kXF8TXsxQgwfsvZCUBeTQefYSsCLDSbSPmnbKQfFmtBny')
# active key of ebvjmdibybgq
wallet.import_key('test', '5KiVDjfHMHXzxrcLqZxGENrhCcCXBMSXP7paPbJWiMCDRMbStsF')
if network == 'EOS_TESTNET':
config.setup_eos_test_network()
test_account1 = 'ceyelqpjeeia'
test_account2 = 'ebvjmdibybgq'
elif network == 'UUOS_TESTNET':
config.setup_uuos_test_network()
test_account1 = 'helloworld11'
test_account2 = 'helloworld12'
else:
raise Exception(f'unknown network: {network}')
config.contract_deploy_type = deploy_type
# config.setup_local_test_network()
# config.setup_local_uuos_test_network()
def init1():
init(network='EOS_TESTNET', deploy_type=1)
def init2():
init(network='UUOS_TESTNET', deploy_type=2)
def init3():
init(network='UUOS_TESTNET', deploy_type=1)
def print_console(r):
print('\n===================== CONSOLE OUTPUT BEGIN =====================\n')
print(r['processed']['action_traces'][0]['console'])
print('\n===================== CONSOLE OUTPUT END =====================\n')
try:
from browser import window, aio
editor = window.ace.edit("editor")
editor_abi = window.ace.edit("editor_abi")
def _load_code():
abi = editor_abi.getValue()
src = editor.getValue()
return src, abi
def _run(task):
aio.run(task)
load_code = _load_code
run = _run
except Exception as e:
import os
import sys
import asyncio
def _load_code():
with open('code.py', 'r') as f:
code = f.read()
with open('abi.py', 'r') as f:
abi = f.read()
return code, abi
def _run(future):
loop = asyncio.get_event_loop()
loop.run_until_complete(future)
load_code = _load_code
run = _run
sys.path.append('..')
if os.path.exists('test.wallet'):
os.remove('test.wallet')
avg_line_length: 27.333333 | max_line_length: 84 | alphanum_fraction: 0.656319

hexsha: 1b7e02dab029f134a7d52b5e961793e246f1a837 | size: 458 | ext: py | lang: Python
max_stars_repo: Class and Statick Methods/04_gym/project/subscription.py @ milenpenev/SoftUni-OOP (90f730cb37713f7ca93b1c0ecd0d12aa351247d2) | licenses: ["MIT"] | max_stars_count: null | stars events: null / null
max_issues_repo: Class and Statick Methods/04_gym/project/subscription.py @ milenpenev/SoftUni-OOP (90f730cb37713f7ca93b1c0ecd0d12aa351247d2) | licenses: ["MIT"] | max_issues_count: null | issues events: null / null
max_forks_repo: Class and Statick Methods/04_gym/project/subscription.py @ milenpenev/SoftUni-OOP (90f730cb37713f7ca93b1c0ecd0d12aa351247d2) | licenses: ["MIT"] | max_forks_count: null | forks events: null / null
content:
class Subscription:
id = 1
def __init__(self, date, customer_id, trainer_id, exercise_id):
self.date = date
self.customer_id = customer_id
self.trainer_id = trainer_id
self.exercise_id = exercise_id
self.id = Subscription.id
Subscription.id += 1
def __repr__(self):
return f"Subscription <{self.id}> on {self.date}"
@staticmethod
def get_next_id():
return Subscription.id
avg_line_length: 25.444444 | max_line_length: 67 | alphanum_fraction: 0.633188
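A short usage sketch for the Subscription class above, showing how the class-level counter hands out sequential ids; the dates and ids below are illustrative and not part of the repository.

```python
# Assumes the Subscription class from subscription.py is in scope.
first = Subscription("14.05.2024", customer_id=1, trainer_id=2, exercise_id=3)
second = Subscription("15.05.2024", customer_id=4, trainer_id=5, exercise_id=6)

print(first)                         # Subscription <1> on 14.05.2024
print(second)                        # Subscription <2> on 15.05.2024
print(Subscription.get_next_id())    # 3, the id the next instance will receive
```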
hexsha: 703a47071871a4a0cd940debf272d06bb9efa744 | size: 816 | ext: py | lang: Python
max_stars_repo: source/OCRlab_2.py @ Hairaa-1026/HCI-course-project (8c80daf4ec248fd057868582204de0248f415c52) | licenses: ["MIT"] | max_stars_count: null | stars events: null / null
max_issues_repo: source/OCRlab_2.py @ Hairaa-1026/HCI-course-project (8c80daf4ec248fd057868582204de0248f415c52) | licenses: ["MIT"] | max_issues_count: null | issues events: null / null
max_forks_repo: source/OCRlab_2.py @ Hairaa-1026/HCI-course-project (8c80daf4ec248fd057868582204de0248f415c52) | licenses: ["MIT"] | max_forks_count: null | forks events: null / null
content:
from aip import AipOcr
"""APPID AK SK """
APP_ID = '22849136'
API_KEY = 'tMuFasuy6juP1oYyNxU1vsu3'
SECRET_KEY = 'UPD9kEe5PNR3eNantpF7GyfsmmG5ZTKp'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
def get_file_content(filepath):
with open(filepath, 'rb') as fp:
return fp.read()
path = r'../data/ocr/'
L = [path + 'ads.jpg', path + 'ads2.jpg', path + 'ads3.jpg',
path + 'ChineseClearText.png', path + 'EnglishClearText.png',
path + 'photo.JPG', path + 'screenshot.jpg']
for i in range(len(L)):
image = get_file_content(L[i])
    options = {"language_type": "CHN_ENG", "probability": "true"}
    Result = client.basicAccurate(image, options)
    show = Result['words_result']
    # Use a distinct loop variable so the outer index `i` is not shadowed.
    for item in show:
        print(item['words'])
print("——————这-里-是-分-割-线——————")
avg_line_length: 28.137931 | max_line_length: 66 | alphanum_fraction: 0.644608

hexsha: 583c67033d8df0ab7508c15a290df3c92ad91073 | size: 1,406 | ext: py | lang: Python
max_stars_repo: utils/processors/tokenization.py @ sy-wada/blue_benchmark_with_transformers (fbf6236db5a4fb7affde94a05a5c875cc5ee948b) | licenses: ["Apache-2.0"] | max_stars_count: 17 | stars events: 2020-05-18T06:40:26.000Z / 2022-03-23T08:34:27.000Z
max_issues_repo: utils/processors/tokenization.py @ sy-wada/blue_benchmark_with_transformers (fbf6236db5a4fb7affde94a05a5c875cc5ee948b) | licenses: ["Apache-2.0"] | max_issues_count: 3 | issues events: 2020-05-18T23:24:13.000Z / 2021-05-27T07:12:14.000Z
max_forks_repo: utils/processors/tokenization.py @ sy-wada/blue_benchmark_with_transformers (fbf6236db5a4fb7affde94a05a5c875cc5ee948b) | licenses: ["Apache-2.0"] | max_forks_count: 2 | forks events: 2020-05-18T20:26:15.000Z / 2021-11-09T14:21:11.000Z
content:
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originates from: https://github.com/google-research/bert/blob/master/tokenization.py
import six
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
avg_line_length: 39.055556 | max_line_length: 86 | alphanum_fraction: 0.673542
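A brief usage sketch for convert_to_unicode from the tokenization module above (Python 3 branch); it is not part of the original file.

```python
# str values pass through unchanged; bytes are decoded as UTF-8 (errors ignored).
print(convert_to_unicode("already text"))   # -> 'already text'
print(convert_to_unicode(b"caf\xc3\xa9"))   # -> 'café'
```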
hexsha: c4897e210d5c2ffb90ffd5d5e176d2f2ec1daf70 | size: 16,130 | ext: py | lang: Python
max_stars_repo: official/benchmark/keras_cifar_benchmark.py @ vincentcheny/models (afb1a59fc1bc792ac72d1a3e22e2469020529788) | licenses: ["Apache-2.0"] | max_stars_count: 1 | stars events: 2019-09-11T09:41:11.000Z / 2019-09-11T09:41:11.000Z
max_issues_repo: official/benchmark/keras_cifar_benchmark.py @ vincentcheny/models (afb1a59fc1bc792ac72d1a3e22e2469020529788) | licenses: ["Apache-2.0"] | max_issues_count: null | issues events: null / null
max_forks_repo: official/benchmark/keras_cifar_benchmark.py @ vincentcheny/models (afb1a59fc1bc792ac72d1a3e22e2469020529788) | licenses: ["Apache-2.0"] | max_forks_count: null | forks events: null / null
content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from absl import flags
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.benchmark import keras_benchmark
from official.vision.image_classification import resnet_cifar_main
MIN_TOP_1_ACCURACY = 0.929
MAX_TOP_1_ACCURACY = 0.938
FLAGS = flags.FLAGS
CIFAR_DATA_DIR_NAME = 'cifar-10-batches-bin'
class Resnet56KerasAccuracy(keras_benchmark.KerasBenchmark):
"""Accuracy tests for ResNet56 Keras CIFAR-10."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
"""A benchmark class.
Args:
output_dir: directory where to output e.g. log files
root_data_dir: directory under which to look for dataset
**kwargs: arbitrary named arguments. This is needed to make the
constructor forward compatible in case PerfZero provides more
named arguments before updating the constructor.
"""
self.data_dir = os.path.join(root_data_dir, CIFAR_DATA_DIR_NAME)
flag_methods = [resnet_cifar_main.define_cifar_flags]
super(Resnet56KerasAccuracy, self).__init__(
output_dir=output_dir, flag_methods=flag_methods)
def benchmark_graph_1_gpu(self):
"""Test keras based model with Keras fit and distribution strategies."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
FLAGS.dtype = 'fp32'
self._run_and_report_benchmark()
def benchmark_1_gpu(self):
"""Test keras based model with eager and distribution strategies."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
self._run_and_report_benchmark()
def benchmark_cpu(self):
"""Test keras based model on CPU."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_cpu')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
def benchmark_cpu_no_dist_strat(self):
"""Test keras based model on CPU without distribution strategies."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_cpu_no_dist_strat')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
def benchmark_cpu_no_dist_strat_run_eagerly(self):
"""Test keras based model on CPU w/forced eager and no dist_strat."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir(
'benchmark_cpu_no_dist_strat_run_eagerly')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat(self):
"""Test keras based model with eager and no dist strat."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.explicit_gpu_placement = True
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
"""Test keras based model w/forced eager and no dist_strat."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
self._run_and_report_benchmark()
def benchmark_graph_1_gpu_no_dist_strat(self):
"""Test keras based model with Keras fit but not distribution strategies."""
self._setup()
FLAGS.distribution_strategy = 'off'
FLAGS.num_gpus = 1
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
FLAGS.dtype = 'fp32'
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_force_v1_path(self):
"""No dist strat forced v1 execution path."""
self._setup()
FLAGS.distribution_strategy = 'off'
FLAGS.num_gpus = 1
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_force_v1_path')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.force_v2_in_keras_compile = False
self._run_and_report_benchmark()
def benchmark_2_gpu(self):
"""Test keras based model with eager and distribution strategies."""
self._setup()
FLAGS.num_gpus = 2
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_2_gpu')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
self._run_and_report_benchmark()
def benchmark_graph_2_gpu(self):
"""Test keras based model with Keras fit and distribution strategies."""
self._setup()
FLAGS.num_gpus = 2
FLAGS.data_dir = self.data_dir
FLAGS.batch_size = 128
FLAGS.train_epochs = 182
FLAGS.model_dir = self._get_model_dir('benchmark_graph_2_gpu')
FLAGS.dtype = 'fp32'
self._run_and_report_benchmark()
def _run_and_report_benchmark(self):
start_time_sec = time.time()
stats = resnet_cifar_main.run(FLAGS)
wall_time_sec = time.time() - start_time_sec
super(Resnet56KerasAccuracy, self)._report_benchmark(
stats,
wall_time_sec,
top_1_min=MIN_TOP_1_ACCURACY,
top_1_max=MAX_TOP_1_ACCURACY,
total_batch_size=FLAGS.batch_size,
log_steps=100)
class Resnet56KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
"""Short performance tests for ResNet56 via Keras and CIFAR-10."""
def __init__(self, output_dir=None, default_flags=None):
flag_methods = [resnet_cifar_main.define_cifar_flags]
super(Resnet56KerasBenchmarkBase, self).__init__(
output_dir=output_dir,
flag_methods=flag_methods,
default_flags=default_flags)
def _run_and_report_benchmark(self):
start_time_sec = time.time()
stats = resnet_cifar_main.run(FLAGS)
wall_time_sec = time.time() - start_time_sec
super(Resnet56KerasBenchmarkBase, self)._report_benchmark(
stats,
wall_time_sec,
total_batch_size=FLAGS.batch_size,
log_steps=FLAGS.log_steps)
def benchmark_1_gpu(self):
"""Test 1 gpu."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_xla(self):
"""Test 1 gpu with xla enabled."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.run_eagerly = False
FLAGS.enable_xla = True
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_xla')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_force_v1_path(self):
"""Test 1 gpu using forced v1 execution path."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_force_v1_path')
FLAGS.batch_size = 128
FLAGS.force_v2_in_keras_compile = False
self._run_and_report_benchmark()
def benchmark_graph_1_gpu(self):
"""Test 1 gpu graph."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = False
FLAGS.run_eagerly = False
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat(self):
"""Test 1 gpu without distribution strategies."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_graph_1_gpu_no_dist_strat(self):
"""Test 1 gpu graph mode without distribution strategies."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.enable_eager = False
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
FLAGS.batch_size = 128
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
"""Test 1 gpu without distribution strategy and forced eager."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = 128
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_run_eagerly')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_force_v1_path(self):
"""No dist strat but forced v1 execution path."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = 128
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_force_v1_path')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
FLAGS.force_v2_in_keras_compile = False
self._run_and_report_benchmark()
def benchmark_1_gpu_no_dist_strat_force_v1_path_run_eagerly(self):
"""Forced v1 execution path and forced eager."""
self._setup()
FLAGS.num_gpus = 1
FLAGS.batch_size = 128
FLAGS.model_dir = self._get_model_dir(
'benchmark_1_gpu_no_dist_strat_force_v1_path_run_eagerly')
FLAGS.dtype = 'fp32'
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.distribution_strategy = 'off'
FLAGS.force_v2_in_keras_compile = False
self._run_and_report_benchmark()
def benchmark_2_gpu(self):
"""Test 2 gpu."""
self._setup()
FLAGS.num_gpus = 2
FLAGS.enable_eager = True
FLAGS.run_eagerly = False
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_2_gpu')
FLAGS.batch_size = 128 * 2 # 2 GPUs
self._run_and_report_benchmark()
def benchmark_graph_2_gpu(self):
"""Test 2 gpu graph mode."""
self._setup()
FLAGS.num_gpus = 2
FLAGS.enable_eager = False
FLAGS.run_eagerly = False
FLAGS.distribution_strategy = 'default'
FLAGS.model_dir = self._get_model_dir('benchmark_graph_2_gpu')
FLAGS.batch_size = 128 * 2 # 2 GPUs
self._run_and_report_benchmark()
def benchmark_cpu(self):
"""Test cpu."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.enable_eager = True
FLAGS.model_dir = self._get_model_dir('benchmark_cpu')
FLAGS.batch_size = 128
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
def benchmark_graph_cpu(self):
"""Test cpu graph mode."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.enable_eager = False
FLAGS.model_dir = self._get_model_dir('benchmark_graph_cpu')
FLAGS.batch_size = 128
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
def benchmark_cpu_no_dist_strat_run_eagerly(self):
"""Test cpu without distribution strategy and forced eager."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.distribution_strategy = 'off'
FLAGS.enable_eager = True
FLAGS.run_eagerly = True
FLAGS.model_dir = self._get_model_dir(
'benchmark_cpu_no_dist_strat_run_eagerly')
FLAGS.batch_size = 128
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
def benchmark_cpu_no_dist_strat(self):
"""Test cpu without distribution strategies."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir('benchmark_cpu_no_dist_strat')
FLAGS.batch_size = 128
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
def benchmark_cpu_no_dist_strat_force_v1_path(self):
"""Test cpu without dist strat and force v1 in model.compile."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.enable_eager = True
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir(
'benchmark_cpu_no_dist_strat_force_v1_path')
FLAGS.batch_size = 128
FLAGS.data_format = 'channels_last'
FLAGS.force_v2_in_keras_compile = False
self._run_and_report_benchmark()
def benchmark_graph_cpu_no_dist_strat(self):
"""Test cpu graph mode without distribution strategies."""
self._setup()
FLAGS.num_gpus = 0
FLAGS.enable_eager = False
FLAGS.distribution_strategy = 'off'
FLAGS.model_dir = self._get_model_dir('benchmark_graph_cpu_no_dist_strat')
FLAGS.batch_size = 128
FLAGS.data_format = 'channels_last'
self._run_and_report_benchmark()
class Resnet56KerasBenchmarkSynth(Resnet56KerasBenchmarkBase):
"""Synthetic benchmarks for ResNet56 and Keras."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
default_flags = {}
default_flags['skip_eval'] = True
default_flags['use_synthetic_data'] = True
default_flags['train_steps'] = 110
default_flags['log_steps'] = 10
super(Resnet56KerasBenchmarkSynth, self).__init__(
output_dir=output_dir, default_flags=default_flags)
class Resnet56KerasBenchmarkReal(Resnet56KerasBenchmarkBase):
"""Real data benchmarks for ResNet56 and Keras."""
def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
default_flags = {}
default_flags['skip_eval'] = True
default_flags['data_dir'] = os.path.join(root_data_dir, CIFAR_DATA_DIR_NAME)
default_flags['train_steps'] = 110
default_flags['log_steps'] = 10
super(Resnet56KerasBenchmarkReal, self).__init__(
output_dir=output_dir, default_flags=default_flags)
if __name__ == '__main__':
tf.test.main()
avg_line_length: 34.989154 | max_line_length: 81 | alphanum_fraction: 0.701612

hexsha: 028dfbf40481e6124bb8ca326c255392b11dbd93 | size: 23,329 | ext: py | lang: Python
max_stars_repo: acct_mgr/model.py @ SpamExperts/AccountManagerPlugin (7c9676fbe656da899de36117ca0987bc775adcdd) | licenses: ["Beerware"] | max_stars_count: 1 | stars events: 2017-06-17T13:02:52.000Z / 2017-06-17T13:02:52.000Z
max_issues_repo: acct_mgr/model.py @ SpamExperts/AccountManagerPlugin (7c9676fbe656da899de36117ca0987bc775adcdd) | licenses: ["Beerware"] | max_issues_count: null | issues events: null / null
max_forks_repo: acct_mgr/model.py @ SpamExperts/AccountManagerPlugin (7c9676fbe656da899de36117ca0987bc775adcdd) | licenses: ["Beerware"] | max_forks_count: null | forks events: null / null
content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Steffen Hoffmann <hoff.st@web.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Steffen Hoffmann <hoff.st@web.de>
import re
from trac.core import TracError
from trac.db.api import DatabaseManager
from trac.util.text import to_unicode
from acct_mgr.api import GenericUserIdChanger
from acct_mgr.compat import as_int, exception_to_unicode
from acct_mgr.hashlib_compat import md5
_USER_KEYS = {
'auth_cookie': 'name',
'permission': 'username',
}
def _db_exc(env):
"""Return an object (typically a module) containing all the
backend-specific exception types as attributes, named
according to the Python Database API
(http://www.python.org/dev/peps/pep-0249/).
This is derived from code found in trac.env.Environment.db_exc (Trac 1.0).
"""
try:
module = DatabaseManager(env).get_exceptions()
except AttributeError:
        module = None
        # The database URI from trac.ini is needed to pick the matching
        # DB-API module for the configured backend.
        dburi = env.config.get('trac', 'database')
if dburi.startswith('sqlite:'):
try:
import pysqlite2.dbapi2 as sqlite
module = sqlite
except ImportError:
try:
import sqlite3 as sqlite
module = sqlite
except ImportError:
pass
elif dburi.startswith('postgres:'):
try:
import psycopg2 as psycopg
module = psycopg
except ImportError:
pass
elif dburi.startswith('mysql:'):
try:
import MySQLdb
module = MySQLdb
except ImportError:
pass
    # No more alternatives are needed, because otherwise we won't get here.
return module
def _get_cc_list(cc_value):
"""Parse cc list.
    Derived from trac.ticket.model._fixup_cc_list (Trac-1.0).
"""
cclist = []
for cc in re.split(r'[;,\s]+', cc_value):
if cc and cc not in cclist:
cclist.append(cc)
return cclist
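# Minimal usage sketch for _get_cc_list: mixed separators are split and
# duplicates are dropped while order is preserved (the input value below is
# hypothetical).
def _example_get_cc_list():
    cc = _get_cc_list('user1, user2; user2 user3')
    assert cc == ['user1', 'user2', 'user3']
    return cc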
def _get_db_exc(env):
return (_db_exc(env).InternalError, _db_exc(env).OperationalError,
_db_exc(env).ProgrammingError)
class PrimitiveUserIdChanger(GenericUserIdChanger):
"""Handle the simple owner-column replacement case."""
abstract = True
column = 'author'
table = None
# IUserIdChanger method
def replace(self, old_uid, new_uid, db):
result = 0
cursor = db.cursor()
try:
cursor.execute("SELECT COUNT(*) FROM %s WHERE %s=%%s"
% (self.table, self.column), (old_uid,))
exists = cursor.fetchone()
if exists[0]:
cursor.execute("UPDATE %s SET %s=%%s WHERE %s=%%s"
% (self.table, self.column, self.column),
(new_uid, old_uid))
result = int(exists[0])
self.log.debug(self.msg(old_uid, new_uid, self.table,
self.column, result='%s time(s)' % result))
except (_get_db_exc(self.env)), e:
result = exception_to_unicode(e)
self.log.debug(self.msg(old_uid, new_uid, self.table,
self.column, result='failed: %s'
% exception_to_unicode(e, traceback=True)))
return dict(error={(self.table, self.column, None): result})
return {(self.table, self.column, None): result}
class UniqueUserIdChanger(PrimitiveUserIdChanger):
"""Handle columns, where user IDs are an unique key or part of it."""
abstract = True
column = 'sid'
# IUserIdChanger method
def replace(self, old_uid, new_uid, db):
cursor = db.cursor()
try:
cursor.execute("DELETE FROM %s WHERE %s=%%s"
% (self.table, self.column), (new_uid,))
except (_get_db_exc(self.env)), e:
result = exception_to_unicode(e)
self.log.debug(self.msg(old_uid, new_uid, self.table,
self.column, result='failed: %s'
% exception_to_unicode(e, traceback=True)))
return dict(error={(self.table, self.column, None): result})
return super(UniqueUserIdChanger,
self).replace(old_uid, new_uid, db)
class AttachmentUserIdChanger(PrimitiveUserIdChanger):
"""Change user IDs in attachments."""
table = 'attachment'
class AuthCookieUserIdChanger(UniqueUserIdChanger):
"""Change user IDs for authentication cookies."""
column = 'name'
table = 'auth_cookie'
class ComponentUserIdChanger(PrimitiveUserIdChanger):
"""Change user IDs in components."""
column = 'owner'
table = 'component'
class PermissionUserIdChanger(UniqueUserIdChanger):
"""Change user IDs for permissions."""
column = 'username'
table = 'permission'
class ReportUserIdChanger(PrimitiveUserIdChanger):
"""Change user IDs in reports."""
table = 'report'
class RevisionUserIdChanger(PrimitiveUserIdChanger):
"""Change user IDs in changesets."""
table = 'revision'
class TicketUserIdChanger(PrimitiveUserIdChanger):
"""Change all user IDs in tickets."""
table = 'ticket'
# IUserIdChanger method
def replace(self, old_uid, new_uid, db):
results=dict()
self.column = 'owner'
result = super(TicketUserIdChanger,
self).replace(old_uid, new_uid, db)
if 'error' in result:
return result
results.update(result)
self.column = 'reporter'
result = super(TicketUserIdChanger,
self).replace(old_uid, new_uid, db)
if 'error' in result:
return result
results.update(result)
# Replace user ID in Cc ticket column.
cursor = db.cursor()
cursor.execute("SELECT id,cc FROM ticket WHERE cc %s" % db.like(),
('%' + db.like_escape(old_uid) + '%',))
result = 0
for row in cursor.fetchall():
cc = _get_cc_list(row[1])
for i in [i for i,r in enumerate(cc) if r == old_uid]:
cc[i] = new_uid
try:
cursor.execute("UPDATE ticket SET cc=%s WHERE id=%s",
(', '.join(cc), int(row[0])))
result += 1
except (_get_db_exc(self.env)), e:
result = exception_to_unicode(e)
self.log.debug(self.msg(old_uid, new_uid, self.table, 'cc',
result='failed: %s'
% exception_to_unicode(e, traceback=True)))
return dict(error={(self.table, 'cc', None): result})
self.log.debug(self.msg(old_uid, new_uid, self.table, 'cc',
result='%s time(s)' % result))
results.update({(self.table, 'cc', None): result})
table = 'ticket_change'
self.column = 'author'
self.table = table
result = super(TicketUserIdChanger,
self).replace(old_uid, new_uid, db)
if 'error' in result:
return result
results.update(result)
constraint = "field='owner'|'reporter'"
cursor = db.cursor()
for column in ('oldvalue', 'newvalue'):
cursor.execute("""
SELECT COUNT(*)
FROM %s
WHERE %s=%%s
AND (field='owner'
OR field='reporter')
""" % (table, column), (old_uid,))
exists = cursor.fetchone()
result = int(exists[0])
if exists[0]:
try:
cursor.execute("""
UPDATE %s
SET %s=%%s
WHERE %s=%%s
AND (field='owner'
OR field='reporter')
""" % (table, column, column), (new_uid, old_uid))
except (_get_db_exc(self.env)), e:
result = exception_to_unicode(e)
self.log.debug(
self.msg(old_uid, new_uid, table, column,
constraint, result='failed: %s'
% exception_to_unicode(e, traceback=True)))
return dict(error={(self.table, column,
constraint): result})
self.log.debug(self.msg(old_uid, new_uid, table, column,
constraint, result='%s time(s)' % result))
results.update({(table, column, constraint): result})
# Replace user ID in Cc ticket field changes too.
constraint = "field='cc'"
for column in ('oldvalue', 'newvalue'):
cursor.execute("""
SELECT ticket,time,%s
FROM %s
WHERE field='cc'
AND %s %s
""" % (column, table, column, db.like()),
('%' + db.like_escape(old_uid) + '%',))
result = 0
for row in cursor.fetchall():
cc = _get_cc_list(row[2])
for i in [i for i,r in enumerate(cc) if r == old_uid]:
cc[i] = new_uid
try:
cursor.execute("""
UPDATE %s
SET %s=%%s
WHERE ticket=%%s
AND time=%%s
""" % (table, column),
(', '.join(cc), int(row[0]), int(row[1])))
result += 1
except (_get_db_exc(self.env)), e:
result = exception_to_unicode(e)
self.log.debug(
self.msg(old_uid, new_uid, table, column,
constraint, result='failed: %s'
% exception_to_unicode(e, traceback=True)
))
return dict(error={(self.table, column,
constraint): result})
self.log.debug(self.msg(old_uid, new_uid, table, column,
constraint, result='%s time(s)' % result))
results.update({(table, column, constraint): result})
return results
class WikiUserIdChanger(PrimitiveUserIdChanger):
"""Change user IDs in wiki pages."""
table = 'wiki'
# Public functions
def email_associated(env, email):
"""Returns whether an authenticated user account with that email address
exists.
"""
with env.db_query as db:
cursor = db.cursor()
cursor.execute("""
SELECT value
FROM session_attribute
WHERE authenticated=1 AND name='email' AND value=%s
""", (email,))
for row in cursor:
return True
return False
def email_verified(env, user, email):
"""Returns whether the account and email has been verified.
Use with care, as it returns the private token string,
if verification is pending.
"""
if not user_known(env, user) or not email:
# Nothing more to check here.
return None
with env.db_query as db:
cursor = db.cursor()
cursor.execute("""
SELECT value
FROM session_attribute
WHERE sid=%s AND name='email_verification_sent_to'
""", (user,))
for row in cursor:
env.log.debug('AcctMgr:model:email_verified for user \"' + \
user + '\", email \"' + str(email) + '\": ' + str(row[0]))
if row[0] != email:
# verification has been sent to different email address
return None
cursor.execute("""
SELECT value
FROM session_attribute
WHERE sid=%s AND name='email_verification_token'
""", (user,))
for row in cursor:
# verification token still unverified
env.log.debug('AcctMgr:model:email_verified for user \"' + \
user + '\", email \"' + str(email) + '\": ' + str(row[0]))
return row[0]
return True
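# Minimal usage sketch for email_verified: None means nothing could be checked,
# a string is the still-pending verification token, and True means the address
# is verified. The user name and email address below are hypothetical.
def _example_email_verified(env):
    status = email_verified(env, 'jdoe', 'jdoe@example.org')
    if status is True:
        return 'verified'
    elif status is None:
        return 'unknown user or mismatched address'
    return 'pending, token: %s' % status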
def user_known(env, user):
"""Returns whether the user has ever been authenticated before."""
with env.db_query as db:
cursor = db.cursor()
cursor.execute("""
SELECT 1
FROM session
WHERE authenticated=1 AND sid=%s
""", (user,))
for row in cursor:
return True
return False
# Utility functions
def change_uid(env, old_uid, new_uid, changers, attr_overwrite):
"""Handle user ID transition for all supported Trac realms."""
with env.db_transaction as db:
# Handle the single unique Trac user ID reference first.
cursor = db.cursor()
sql = """
DELETE
FROM session
WHERE authenticated=1 AND sid=%s
"""
cursor.execute(sql, (new_uid,))
cursor.execute("""
INSERT INTO session
(sid,authenticated,last_visit)
VALUES (%s,1,(SELECT last_visit FROM session WHERE sid=%s))
""", (new_uid, old_uid))
# Process related attributes.
attr_count = copy_user_attributes(env, old_uid, new_uid, attr_overwrite)
# May want to keep attributes, if not copied completely.
if attr_overwrite:
del_user_attribute(env, old_uid)
results = dict()
results.update({('session_attribute', 'sid', None): attr_count})
for changer in changers:
result = changer.replace(old_uid, new_uid)
if 'error' in result:
# Explicit transaction termination is required here to do clean-up
# before leaving this context.
db.rollback()
with env.db_transaction as db:
cursor = db.cursor()
cursor.execute(sql, (new_uid,))
return result
results.update(result)
# Finally delete old user ID reference after moving everything else.
cursor.execute(sql, (old_uid,))
results.update({('session', 'sid', None): 1})
return results
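# Minimal usage sketch for change_uid, assuming the enabled IUserIdChanger
# components have already been collected; both user ids below are hypothetical.
def _example_change_uid(env, changers):
    results = change_uid(env, 'old_login', 'new_login', changers,
                         attr_overwrite=True)
    if 'error' in results:
        raise TracError('User id change failed')
    return results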
def copy_user_attributes(env, username, new_uid, overwrite):
"""Duplicate attributes for another user, optionally preserving existing
values.
Returns the number of changed attributes.
"""
count = 0
attrs = get_user_attribute(env, username)
if attrs and username in attrs and attrs[username].get(1):
attrs_new = get_user_attribute(env, new_uid)
with env.db_transaction as db:
if not (attrs_new and new_uid in attrs_new and \
attrs_new[new_uid].get(1)):
# No attributes found.
attrs_new = None
# Remove value id hashes.
attrs[username][1].pop('id')
cursor = db.cursor()
for attribute, value in attrs[username][1].iteritems():
if not (attrs_new and attribute in attrs_new[new_uid][1]):
cursor.execute("""
INSERT INTO session_attribute
(sid,authenticated,name,value)
VALUES (%s,1,%s,%s)
""", (new_uid, attribute, value))
count += 1
elif overwrite:
cursor.execute("""
UPDATE session_attribute
SET value=%s
WHERE sid=%s
AND authenticated=1
AND name=%s
""", (value, new_uid, attribute))
count += 1
return count
def get_user_attribute(env, username=None, authenticated=1, attribute=None,
value=None):
"""Return user attributes."""
ALL_COLS = ('sid', 'authenticated', 'name', 'value')
columns = []
constraints = []
if username is not None:
columns.append('sid')
constraints.append(username)
if authenticated is not None:
columns.append('authenticated')
constraints.append(as_int(authenticated, 0, min=0, max=1))
if attribute is not None:
columns.append('name')
constraints.append(attribute)
if value is not None:
columns.append('value')
constraints.append(to_unicode(value))
sel_columns = [col for col in ALL_COLS if col not in columns]
if len(sel_columns) == 0:
        # No columns left to select, so COUNTing is the only sensible task here.
sel_stmt = 'COUNT(*)'
else:
if 'sid' not in sel_columns:
sel_columns.append('sid')
sel_stmt = ','.join(sel_columns)
if len(columns) > 0:
where_stmt = ''.join(['WHERE ', '=%s AND '.join(columns), '=%s'])
else:
where_stmt = ''
sql = """
SELECT %s
FROM session_attribute
%s
""" % (sel_stmt, where_stmt)
sql_args = tuple(constraints)
with env.db_query as db:
cursor = db.cursor()
cursor.execute(sql, sql_args)
rows = cursor.fetchall()
if rows is None:
return {}
res = {}
for row in rows:
if sel_stmt == 'COUNT(*)':
return [row[0]]
res_row = {}
res_row.update(zip(sel_columns, row))
            # Merge with the constraints, which are constants for this SQL query.
res_row.update(zip(columns, constraints))
account = res_row.pop('sid')
authenticated = res_row.pop('authenticated')
# Create single unique attribute ID.
m = md5()
m.update(''.join([account, str(authenticated),
res_row.get('name')]).encode('utf-8'))
row_id = m.hexdigest()
if account in res:
if authenticated in res[account]:
res[account][authenticated].update({
res_row['name']: res_row['value']
})
res[account][authenticated]['id'].update({
res_row['name']: row_id
})
else:
res[account][authenticated] = {
res_row['name']: res_row['value'],
'id': {res_row['name']: row_id}
}
# Create account ID for additional authentication state.
m = md5()
m.update(''.join([account,
str(authenticated)]).encode('utf-8'))
res[account]['id'][authenticated] = m.hexdigest()
else:
# Create account ID for authentication state.
m = md5()
m.update(''.join([account, str(authenticated)]).encode('utf-8'))
res[account] = {authenticated: {res_row['name']: res_row['value'],
'id': {res_row['name']: row_id}},
'id': {authenticated: m.hexdigest()}}
return res
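# Minimal sketch of the nested structure returned by get_user_attribute for a
# single authenticated user; the sid and the attribute name/value below are
# hypothetical:
#   {'jdoe': {1: {'email': 'jdoe@example.org',
#                 'id': {'email': '<md5 of sid + authenticated + name>'}},
#             'id': {1: '<md5 of sid + authenticated>'}}}
def _example_get_user_attribute(env):
    attrs = get_user_attribute(env, username='jdoe', authenticated=1)
    return attrs.get('jdoe', {}).get(1, {})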
def prime_auth_session(env, username):
"""Prime session for registered users before initial login.
    These days there's no distinct user object in Trac; users exist only as
    anonymous or authenticated sessions and their related session attributes.
    So INSERT the new sid, which is needed later on as a foreign key in some
    db schemata, at least for PostgreSQL.
"""
with env.db_transaction as db:
cursor = db.cursor()
cursor.execute("""
SELECT COUNT(*)
FROM session
WHERE sid=%s
AND authenticated=1
""", (username,))
exists = cursor.fetchone()
if not exists[0]:
cursor.execute("""
INSERT INTO session
(sid,authenticated,last_visit)
VALUES (%s,1,0)
""", (username,))
def set_user_attribute(env, username, attribute, value):
"""Set or update a Trac user attribute within an atomic db transaction."""
with env.db_transaction as db:
cursor = db.cursor()
sql = """
WHERE sid=%s
AND authenticated=1
AND name=%s
"""
cursor.execute("""
UPDATE session_attribute
SET value=%s
""" + sql, (value, username, attribute))
cursor.execute("""
SELECT value
FROM session_attribute
""" + sql, (username, attribute))
if cursor.fetchone() is None:
cursor.execute("""
INSERT INTO session_attribute
(sid,authenticated,name,value)
VALUES (%s,1,%s,%s)
""", (username, attribute, value))
def del_user_attribute(env, username=None, authenticated=1, attribute=None):
"""Delete one or more Trac user attributes for one or more users."""
columns = []
constraints = []
if username is not None:
columns.append('sid')
constraints.append(username)
if authenticated is not None:
columns.append('authenticated')
constraints.append(as_int(authenticated, 0, min=0, max=1))
if attribute is not None:
columns.append('name')
constraints.append(attribute)
if len(columns) > 0:
where_stmt = ''.join(['WHERE ', '=%s AND '.join(columns), '=%s'])
else:
where_stmt = ''
sql = """
DELETE
FROM session_attribute
%s
""" % where_stmt
sql_args = tuple(constraints)
with env.db_transaction as db:
cursor = db.cursor()
cursor.execute(sql, sql_args)
def delete_user(env, user):
# Delete session attributes, session and any custom permissions
# set for the user.
with env.db_transaction as db:
cursor = db.cursor()
for table in ['auth_cookie', 'session_attribute', 'session', 'permission']:
            # Build the SQL string beforehand, since variable table and column
            # names aren't allowed as SQL arguments (a measure against SQL
            # injection).
sql = """
DELETE
FROM %s
WHERE %s=%%s
""" % (table, _USER_KEYS.get(table, 'sid'))
cursor.execute(sql, (user,))
env.log.debug("Purged session data and permissions for user '%s'" % user)
def last_seen(env, user=None):
with env.db_query as db:
cursor = db.cursor()
sql = """
SELECT sid,last_visit
FROM session
WHERE authenticated=1
"""
if user:
sql += " AND sid=%s"
cursor.execute(sql, (user,))
else:
cursor.execute(sql)
        # Don't pass the cursor itself around (it goes out of scope), only its content.
return [row for row in cursor]
| 36.113003
| 83
| 0.526041
|
ef76c204d8df10b32e6a4619fd8d066e6a88a7ce
| 2,783
|
py
|
Python
|
modules/auto_split/compiler/tensorflow_compiler.py
|
sophon-ai-algo/sophon-inference
|
f923413b76615e265af28fd1dd2b43e5eb303dcd
|
[
"Apache-2.0"
] | 18
|
2020-02-21T03:06:33.000Z
|
2022-03-21T03:41:56.000Z
|
modules/auto_split/compiler/tensorflow_compiler.py
|
sophon-ai-algo/sophon-inference
|
f923413b76615e265af28fd1dd2b43e5eb303dcd
|
[
"Apache-2.0"
] | null | null | null |
modules/auto_split/compiler/tensorflow_compiler.py
|
sophon-ai-algo/sophon-inference
|
f923413b76615e265af28fd1dd2b43e5eb303dcd
|
[
"Apache-2.0"
] | 6
|
2020-07-10T08:55:38.000Z
|
2021-12-28T01:36:04.000Z
|
""" Copyright 2016-2022 by Bitmain Technologies Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import subprocess
import sys
from ..common.base_compiler import Compiler
PY_COMAND = 'python'
if sys.version > '3':
PY_COMAND = 'python3'
class TensorflowCompiler(Compiler):
""" Compile tf graphs into bmodels.
"""
def check_init(self):
assert self.platform == 'tensorflow'
assert self.layout == 'NHWC'
def generate_compiling_script(self, compile_info):
model = os.path.join(self.folder, compile_info['model_info']['model_path'])
outdir = os.path.join(self.folder, compile_info['context_dir'])
outdir_ = outdir.split('/')
input_names = compile_info['input_names']
shapes = compile_info['input_shapes']
output_names = compile_info['output_names']
ret = "import bmnett as bm\n"
ret = ret + "import os\n\n"
ret = ret + "model='{0}'\n".format(compile_info['model_info']['model_path'])
ret = ret + "outdir='{0}'\n".format(compile_info['context_dir'])
ret = ret + "target='{0}'\n".format(self.target)
ret = ret + "input_names=[\n"
for i in input_names:
ret = ret + "'{0}',\n".format(i)
ret = ret + "]\n"
ret = ret + "output_names=[\n"
for o_name in output_names:
ret = ret + "'{0}',\n".format(o_name)
ret = ret + "]\n"
ret = ret + "shapes=[\n"
for i in shapes:
ret = ret + str(i) + ",\n"
ret = ret + "]\n"
ret = ret + \
"net_name='auto_tf_{0}_{1}'\n\n".format(outdir_[-2], outdir_[-1])
ret = ret + "bm.compile(model, outdir, target, input_names, " + \
"output_names, shapes, net_name, dyn={0})\n\n".format(self.dynamic)
ret = ret + "# os.remove('bm_multi_engine_stas_0.dat')\n\n"
with open(os.path.join(self.folder, \
'compile_to_{0}.py'.format(compile_info['context_dir'])), \
'w+') as save_stream:
save_stream.write(ret)
def compile_model_using_bmcompiler(self, compile_info):
ret = subprocess.call([PY_COMAND, \
'compile_to_{0}.py'.format(compile_info['context_dir'])], \
cwd=self.folder, close_fds=True)
if ret != 0:
raise RuntimeError("compile failed: {}".format\
('compile_to_{0}.py'.format(compile_info['context_dir'])))
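# For reference, a compile_info dict shaped the way generate_compiling_script
# expects it; every path, tensor name and shape below is hypothetical.
_EXAMPLE_COMPILE_INFO = {
    'model_info': {'model_path': 'frozen_graph.pb'},
    'context_dir': 'context_0',
    'input_names': ['input_tensor'],
    'input_shapes': [[1, 224, 224, 3]],
    'output_names': ['output_tensor'],
}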
| 37.608108
| 80
| 0.654689
|
9a4867738e58685a7a048b32590f4bdb562de4b9
| 11,201
|
py
|
Python
|
data_helper.py
|
ShimShim46/HFT-CNN
|
c068db94dffbcbfab0406e29d81abcb4eb399458
|
[
"MIT"
] | 99
|
2018-10-19T12:52:22.000Z
|
2022-02-26T13:16:14.000Z
|
data_helper.py
|
CharlotteSean/HFT-CNN
|
c068db94dffbcbfab0406e29d81abcb4eb399458
|
[
"MIT"
] | 2
|
2019-06-19T11:01:38.000Z
|
2020-07-22T09:50:36.000Z
|
data_helper.py
|
CharlotteSean/HFT-CNN
|
c068db94dffbcbfab0406e29d81abcb4eb399458
|
[
"MIT"
] | 23
|
2019-01-26T14:47:25.000Z
|
2022-02-26T13:16:17.000Z
|
import os
import pdb
import pickle
import re
from collections import defaultdict
from itertools import chain
import chakin
import numpy as np
import scipy.sparse as sp
from gensim.models import KeyedVectors
from gensim.models.wrappers.fasttext import FastText
from sklearn.metrics import classification_report, f1_score
from sklearn.preprocessing import MultiLabelBinarizer
from tqdm import tqdm
# sequence operation
# =========================================================
def clean_str(string):
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
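# Minimal sketch of the clean_str tokenization (the input sentence is made up):
def _example_clean_str():
    assert clean_str("Hello, World! It's great") == "hello , world ! it 's great"
    return clean_str("Hello, World! It's great")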
# read data from text file
# =========================================================
def make_data_list(data, kind_of_data, tree_info, max_sen_len, vocab, catgy, article_id, useWords):
data_list = []
for line in tqdm(data,desc="Loading " + kind_of_data + " data"):
tmp_dict = dict()
line = line[:-1]
tmp_dict['text'] = ' '.join(clean_str(' '.join(line.split("\t")[1].split(" "))).split(" ")[:useWords])
[vocab[word] for word in tmp_dict['text'].split(" ")]
tmp_dict['num_words'] = len(tmp_dict['text'].split(" "))
max_sen_len = max(max_sen_len, tmp_dict['num_words'])
tmp_dict['split'] = kind_of_data
tmp_dict['hie_info'] = list(set([tree_info[cat] for cat in line.split("\t")[0].split(",")]))
tmp_dict['catgy'] = [cat for cat in line.split("\t")[0].split(",")]
[catgy[cat] for cat in line.split("\t")[0].split(",")]
tmp_dict['id'] = str(article_id)
article_id += 1
data_list.append(tmp_dict)
del tmp_dict
return data_list, max_sen_len, vocab, catgy, article_id
# read data
# =========================================================
def data_load(train, valid, test, tree_info, use_words):
vocab = defaultdict( lambda: len(vocab) )
catgy = defaultdict( lambda: len(catgy) )
article_id = 0
max_sen_len = 0
train_list, max_sen_len, vocab, catgy, article_id = make_data_list(train, 'train', tree_info, max_sen_len, vocab, catgy, article_id, use_words)
valid_list, max_sen_len, vocab, catgy, article_id = make_data_list(valid, 'valid', tree_info, max_sen_len, vocab, catgy, article_id, use_words)
test_list, max_sen_len, vocab, catgy, article_id = make_data_list(test, 'test', tree_info, max_sen_len, vocab, catgy, article_id, use_words)
class_dim = len(catgy)
data = {}
data['train'] = train_list
data['test'] = test_list
data['valid'] = valid_list
data['vocab'] = vocab
data['catgy'] = catgy
data['max_sen_len'] = max_sen_len
data['class_dim'] = class_dim
return data
# read word embedding
# =========================================================
def embedding_weights_load(words_map,embedding_weights_path):
pre_trained_embedding = None
try:
model = FastText.load_fasttext_format(embedding_weights_path)
pre_trained_embedding = "bin"
    except Exception:
        print ("fastText binary file (.bin) was not found!")
if os.path.exists("./Word_embedding/wiki.en.vec"):
print ("Using wikipedia(en) pre-trained word vectors.")
else:
print ("Downloading wikipedia(en) pre-trained word vectors.")
chakin.download(number=2, save_dir="./Word_embedding")
print ("Loading vectors...")
if os.path.exists("./Word_embedding_model.pkl"):
with open("./Word_embedding_model.pkl", mode="rb") as f:
model = pickle.load(f)
else:
model = KeyedVectors.load_word2vec_format('./Word_embedding/wiki.en.vec')
with open("Word_embedding_model.pkl", mode="wb") as f:
pickle.dump(model, f)
pre_trained_embedding = "txt"
vocab_size = len(words_map)
word_dimension = model['a'].shape[0]
w = np.zeros((vocab_size,word_dimension),dtype=np.float32)
for k,v in words_map.items():
word = k
word_number = v
try:
w[word_number][:] = model[word]
except KeyError as e:
if pre_trained_embedding == "bin":
w[word_number][:] = model.seeded_vector(word)
else:
np.random.seed(word_number)
w[word_number][:] = np.random.uniform(-0.25, 0.25, word_dimension)
return w
# Conversion from network output to label
# =========================================================
def get_catgy_mapping(network_output_order_list, test_labels, prediction,current_depth):
predict_result = []
grand_labels = []
for i in range(len(test_labels)):
predict_result.append([])
grand_labels.append([])
class_dim = prediction.shape[1]
row_idx, col_idx, val_idx = [], [], []
for i in range(len(test_labels)):
l_list = list(set(test_labels[i]))
for y in l_list:
row_idx.append(i)
col_idx.append(y)
val_idx.append(1)
m = max(row_idx) + 1
n = max(col_idx) + 1
n = max(class_dim, n)
test_labels = sp.csr_matrix((val_idx, (row_idx, col_idx)), shape=(m, n), dtype=np.int8).todense()
np_orderList = np.array(network_output_order_list)
for i,j in tqdm(enumerate(prediction), desc="Generating predict labels..."):
one_hots = np.where(j == 1)[0]
if len(one_hots) >= 1:
predict_result[i] = np_orderList[one_hots].tolist()
output_grand_truth_file_name = "CNN/RESULT/grand_truth_" + current_depth + ".csv"
with open(output_grand_truth_file_name, 'w') as f:
f.write(','.join(network_output_order_list)+"\n")
with open(output_grand_truth_file_name, 'a') as f:
for i,j in tqdm(enumerate(test_labels), desc="Generating grand truth labels..."):
one_hots = np.where(j == 1)[1]
if len(one_hots) >= 1:
grand_labels[i] = np_orderList[one_hots].tolist()
f.write(",".join(grand_labels[i])+"\n")
else:
f.write("\n")
return grand_labels,predict_result
# Write results to a file
# =========================================================
def write_out_prediction(GrandLabels, PredResult, input_data_dic):
# Writing out prediction
# ===================================================
print ("-"*50)
print ("Writing out prediction...")
test_data = input_data_dic['test']
result_file = open("./CNN/RESULT/Prediction.txt", mode="w")
result_file.write("Grand-truth-label\tPrediction-labels\tInput-text\n")
for g,p,t in zip(GrandLabels, PredResult, test_data):
result_file.write("{}\t{}\t{}\n".format(','.join(sorted(g)), ','.join(sorted(p)), t['text']))
result_file.close()
# conversion of data
#========================================================
# conversion from text data to ndarray
# =========================================================
def build_input_sentence_data(sentences):
x = np.array(sentences)
return x
# conversion from sequence label to the number
# =========================================================
def build_input_label_data(labels, class_order):
from sklearn.preprocessing import MultiLabelBinarizer
from itertools import chain
bml = MultiLabelBinarizer(classes=class_order, sparse_output=True)
indexes = sp.find(bml.fit_transform(labels))
y = []
for i in range(len(labels)):
y.append([])
for i,j in zip(indexes[0], indexes[1]):
y[i].append(j)
return y
# padding operation
# =========================================================
def pad_sentences(sentences, padding_word=-1, max_length=50):
sequence_length = max(max(len(x) for x in sentences), max_length)
padded_sentences = []
for i in range(len(sentences)):
sentence = sentences[i]
if len(sentence) < max_length:
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
else:
new_sentence = sentence[:max_length]
padded_sentences.append(new_sentence)
return padded_sentences
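# Minimal sketch of the padding behaviour (the token ids below are made up):
def _example_pad_sentences():
    padded = pad_sentences([[1, 2], [3, 4, 5]], padding_word=-1, max_length=3)
    assert padded == [[1, 2, -1], [3, 4, 5]]
    return padded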
# conversion from documents and labels to the numbers
# =========================================================
def build_problem(learning_categories, depth, input_data_dic):
train_data = input_data_dic['train']
validation_data = input_data_dic['valid']
test_data = input_data_dic['test']
vocab = input_data_dic['vocab']
max_sen_len = input_data_dic['max_sen_len']
if depth == "flat":
trn_text = [[vocab[word] for word in doc['text'].split()] for doc in train_data]
trn_labels = [doc['catgy'] for doc in train_data]
val_text = [[vocab[word] for word in doc['text'].split()] for doc in validation_data]
val_labels = [doc['catgy'] for doc in validation_data]
tst_text = [[vocab[word] for word in doc['text'].split()] for doc in test_data]
tst_labels = [doc['catgy'] for doc in test_data]
else:
layer = int(depth[:-2])
trn_text = [[vocab[word] for word in doc['text'].split()] for doc in train_data if (layer in doc['hie_info']) or ((layer-1) in doc['hie_info'])]
trn_labels = [list( set(doc['catgy']) & set(learning_categories)) for doc in train_data if (layer in doc['hie_info']) or ((layer-1) in doc['hie_info'])]
val_text = [[vocab[word] for word in doc['text'].split()] for doc in validation_data if (layer in doc['hie_info']) or ((layer-1) in doc['hie_info'])]
val_labels = [list( set(doc['catgy']) & set(learning_categories)) for doc in validation_data if (layer in doc['hie_info']) or ((layer-1) in doc['hie_info'])]
tst_text = [[vocab[word] for word in doc['text'].split()] for doc in test_data]
tst_labels = [list( set(doc['catgy']) & set(learning_categories)) if layer in doc['hie_info'] else [] for doc in test_data]
trn_padded = pad_sentences(trn_text, max_length=max_sen_len)
val_padded = pad_sentences(val_text, max_length=max_sen_len)
tst_padded = pad_sentences(tst_text, max_length=max_sen_len)
x_trn = build_input_sentence_data(trn_padded)
x_val = build_input_sentence_data(val_padded)
x_tst = build_input_sentence_data(tst_padded)
y_trn = build_input_label_data(trn_labels,learning_categories)
y_val = build_input_label_data(val_labels, learning_categories)
y_tst = build_input_label_data(tst_labels, learning_categories)
return x_trn, y_trn, x_val, y_val, x_tst, y_tst
# conversion from the number to an ordinal number
# =========================================================
def order_n(i): return {1:"1st", 2:"2nd", 3:"3rd"}.get(i) or "%dth"%i
| 41.485185
| 165
| 0.600482
|
a2ec94cdb2a9686dc60c25af7cf916f930172b8e
| 7,306
|
py
|
Python
|
plot_templates_manju.py
|
Matammanjunath/template_codes
|
606e127cd0f2da2499c972a65a76ddb51e0a6ebf
|
[
"MIT"
] | null | null | null |
plot_templates_manju.py
|
Matammanjunath/template_codes
|
606e127cd0f2da2499c972a65a76ddb51e0a6ebf
|
[
"MIT"
] | 1
|
2021-12-05T06:23:22.000Z
|
2021-12-05T06:23:22.000Z
|
plot_templates_manju.py
|
Matammanjunath/template_codes
|
606e127cd0f2da2499c972a65a76ddb51e0a6ebf
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 4 20:39:23 2021
@author: MMatam
"""
import matplotlib.pyplot as plt
#https://matplotlib.org/stable/gallery/color/named_colors.html
import matplotlib.colors as mcolors
def oscilloscope_plot(df,x='x',y=['y1','y2'],xlbl = ['xlabel','xuni'],
ylbl = [['ylabel1','yuni1'],['ylabel2','yuni2']],linestyle='-',
saveformat='Oscilloscope_plot.jpg',fontsize=12):
color_list = list(mcolors.TABLEAU_COLORS)
n = len(y)
fig, axes = plt.subplots(nrows=n,ncols=1,sharex=True)
for i in range(n):
# print('%s'%(y[i][0]))
axes[i] = plt.subplot((n*100)+10+(i+1))
plt.plot(df[x], df[y[i]],
'%s'%(linestyle),color=color_list[i])
axes[i].grid(axis="y")
axes[i].legend(['%s'%(ylbl[i][0])],fontsize=fontsize)
axes[i].spines['left'].set_color(color_list[i])
axes[i].tick_params(axis='y', color=color_list[i],
labelcolor=color_list[i],size=fontsize)
plt.ylabel('(in %s)'%(ylbl[i][1]), color=color_list[i],fontsize=fontsize)
if i==0:
axes[i].spines['bottom'].set_visible(False)
elif i!=(n-1):
plt.setp(axes[i].get_xaxis(), visible=False)
axes[i].spines['top'].set_visible(False)
axes[i].spines['bottom'].set_visible(False)
else:
plt.setp(axes[i].get_xaxis(), visible=True)
axes[i].spines['top'].set_visible(False)
axes[i].spines['bottom'].set_visible(True)
axes[i].set_xlabel('%s (in %s)'%(xlbl[0],xlbl[1]),fontsize=fontsize)
axes[i].tick_params(axis='x',size=fontsize)
plt.gcf().autofmt_xdate()
plt.subplots_adjust(hspace=0.01)
plt.subplots_adjust(left=0.12, right=0.97, top=0.95, bottom=0.15)
plt.savefig(saveformat,bbox_inches='tight',pad_inches=0.1, dpi=250)
plt.show()
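# Minimal usage sketch for oscilloscope_plot, assuming a pandas DataFrame with
# one shared time column and two signal columns; every column name, label and
# unit below is hypothetical.
def _example_oscilloscope_plot():
    import numpy as np
    import pandas as pd
    t = np.linspace(0, 1, 200)
    df = pd.DataFrame({'t': t,
                       'v': np.sin(2 * np.pi * 5 * t),
                       'i': np.cos(2 * np.pi * 5 * t)})
    oscilloscope_plot(df, x='t', y=['v', 'i'],
                      xlbl=['Time', 's'],
                      ylbl=[['Voltage', 'V'], ['Current', 'A']],
                      saveformat='example_oscilloscope.jpg')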
def dataframe_scatter_polyfit_plot(df,xcol,ycols,xlim=None,ylim=None,
xlbl='X-axis',ylbl='Y-axis',
polfit_deg=1,scatter_alpha=1,
fig_name = 'samples_polyfit.png'):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
clr_list = mcolors.TABLEAU_COLORS
# # Access limits
    if xlim is None:
xlim = [df[xcol[0]].min()*0.95,df[xcol[0]].max()*1.1]
    if ylim is None:
if len(ycols)>=2:
ylim = [df[ycols].min().min()*0.95,df[ycols].max().max()*1.1]
else:
ylim = [df[ycols].min()*0.95,df[ycols].max()*1.1]
# print(xlim)
# print(ylim)
# convert clr_list of keys, values into a list
clr_list = list(dict(clr_list).values())
# Find the columns where each value is null
empty_cols = [col for col in df[ycols].columns if df[col].isnull().all()]
# Drop these columns from the dataframe
# print(ycols)
ycols = [x for x in ycols if x not in empty_cols]
# print(ycols)
# Perform polyfit, predict and plot the samples and prediction
for i in range(len(ycols)):
pfit = np.polyfit(df[xcol[0]],df[ycols[i]],polfit_deg)
pred = np.poly1d(pfit)
plt.scatter(df[xcol[0]],df[ycols[i]],marker='.',color=clr_list[i],alpha=0.2,label='%s'%(ycols[i]))
plt.plot(df[xcol[0]].tolist()+[xlim[1]],pred(df[xcol[0]].tolist()+[xlim[1]]),'-',color=clr_list[i],label='%s_fit'%(ycols[i]))
plt.axis([xlim[0],xlim[1],ylim[0],ylim[1]])
## Plot settings
plt.legend(loc='best',fontsize=12)
plt.xlabel('%s'%(xlbl))
plt.ylabel('%s'%(ylbl))
plt.subplots_adjust(left=0.12, right=0.97, top=0.95, bottom=0.15)
plt.savefig(fig_name,bbox_inches='tight',pad_inches=0.1, dpi=250)
plt.show()
def dataframe_html_3dscatterplot(df,xcol,ycol,zcol,
clr_col,clr_labels=[],
# clr_type=
size_col=None,
xrange=None,yrange=None,zrange=None):
import plotly.express as px
from plotly.offline import plot
import plotly
# for camera view position to save as png
# Refer: https://nbviewer.org/github/etpinard/plotly-misc-nbs/blob/master/3d-camera-controls.ipynb
    if size_col is not None:
df[size_col] = df[size_col].astype(float)
fig = px.scatter_3d(df, x=xcol, y=ycol, z=zcol,
color=clr_col,opacity=1,
color_continuous_scale = plotly.colors.sequential.Viridis,
size=size_col)
# print(clr_labels)
if len(clr_labels) > 2:
cat_labels = clr_labels
fig.update_coloraxes(colorbar=dict(ticktext=cat_labels,
tickvals=list(range(1, len(cat_labels)+1)),
thickness=20,
ticklabelposition='outside',
orientation='v',
x=0.7),
colorbar_tickfont=dict(size=24))
    if size_col is None:
fig.update_traces(marker_size = 4)
fig.update_layout(
# title="Plot Title",
# xaxis_title="X Axis Title",
# yaxis_title="Y Axis Title",
# legend_title="Legend Title",
font=dict(
# # family="Aerial",
size=14,
# # color="RebeccaPurple"
),
margin=dict(t=30, r=30, l=30, b=30
)
)
# xaxis.backgroundcolor is used to set background color
fig.update_layout(scene = dict(
xaxis = dict(
backgroundcolor="rgb(200, 200, 230)",
gridcolor="white",
showbackground=True,
zerolinecolor="white",
range=xrange),
yaxis = dict(
backgroundcolor="rgb(230, 200,230)",
gridcolor="white",
showbackground=True,
zerolinecolor="white",
range=yrange),
zaxis = dict(
backgroundcolor="rgb(230, 230,200)",
gridcolor="white",
showbackground=True,
zerolinecolor="white",
range=zrange),))
# fig.update_layout(font=dict(size=20))
fig.update_xaxes(tickfont_size=20)
fig.update_scenes(xaxis_title_font=dict(size=24),
yaxis_title_font=dict(size=24),
zaxis_title_font=dict(size=24))
# fig.update_yaxes(title_standoff = 25)
# fig.update_layout(
# # # title='Mt Bruno Elevation',
# # width=400, height=400,
# margin=dict(t=0, r=0, l=0, b=0
# ))
name = 'eye = (x:0., y:2.5, z:0.)'
camera = dict(
up=dict(x=0, y=0, z=1),
center=dict(x=0, y=0, z=0),
eye=dict(x=1.94, y=0.1, z=0.1)
)
fig.update_layout(scene_camera=camera,
# title=name
)
## Render the html 3d plot
fig.show(renderer='browser')
| 40.588889
| 133
| 0.525185
|
404365de1232f3e257d98b1365a64b7cd1f1774c
| 156
|
py
|
Python
|
create_view.py
|
mediaProduct2017/logs-analysis
|
337d562e210227af4014ff66d66e1517f33c0b94
|
[
"MIT"
] | null | null | null |
create_view.py
|
mediaProduct2017/logs-analysis
|
337d562e210227af4014ff66d66e1517f33c0b94
|
[
"MIT"
] | null | null | null |
create_view.py
|
mediaProduct2017/logs-analysis
|
337d562e210227af4014ff66d66e1517f33c0b94
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# coding: utf-8
from reporting import Reporting
DBNAME = 'news'
analyst = Reporting(DBNAME)
analyst.create_view()
analyst.close()
| 14.181818
| 31
| 0.74359
|
421e1015965a228894eb70ba1dce2eaf5dad86e6
| 45
|
py
|
Python
|
python/testData/intentions/SpecifyTypeInPy3AnnotationsIntentionTest/caretOnParamUsage_after.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/intentions/SpecifyTypeInPy3AnnotationsIntentionTest/caretOnParamUsage_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/intentions/SpecifyTypeInPy3AnnotationsIntentionTest/caretOnParamUsage_after.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def foo(var: object):
print(var)
pass
| 15
| 21
| 0.6
|
1d6986d82a0835101dd9cd16acc12291c8f1d442
| 621
|
py
|
Python
|
oops_fhir/r4/value_set/performer_role_codes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/performer_role_codes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
oops_fhir/r4/value_set/performer_role_codes.py
|
Mikuana/oops_fhir
|
77963315d123756b7d21ae881f433778096a1d25
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.performer_role_codes import (
PerformerRoleCodes as PerformerRoleCodes_,
)
__all__ = ["PerformerRoleCodes"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class PerformerRoleCodes(PerformerRoleCodes_):
"""
Performer Role Codes
This value set includes sample Performer Role codes.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/consent-performer
"""
class Meta:
resource = _resource
| 20.032258
| 69
| 0.745572
|
cc9d648b49cf8ea9cdb2485f16bd318bf8695125
| 642
|
py
|
Python
|
icevision/utils/indexable_dict.py
|
ai-fast-track/mantisshrimp
|
cc6d6a4a048f6ddda2782b6593dcd6b083a673e4
|
[
"Apache-2.0"
] | 580
|
2020-09-10T06:29:57.000Z
|
2022-03-29T19:34:54.000Z
|
icevision/utils/indexable_dict.py
|
ai-fast-track/mantisshrimp
|
cc6d6a4a048f6ddda2782b6593dcd6b083a673e4
|
[
"Apache-2.0"
] | 691
|
2020-09-05T03:08:34.000Z
|
2022-03-31T23:47:06.000Z
|
icevision/utils/indexable_dict.py
|
lgvaz/mantisshrimp2
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 105
|
2020-09-09T10:41:35.000Z
|
2022-03-25T17:16:49.000Z
|
__all__ = ["IndexableDict"]
import collections
class IndexableDictValuesView(collections.abc.ValuesView):
def __getitem__(self, index):
return self._mapping._list[index]
class IndexableDict(collections.UserDict):
def __init__(self, *args, **kwargs):
self._list = []
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
super().__setitem__(key, value)
self._list.append(value)
def __delitem__(self, key):
super().__delitem__(key)
self._list.remove(key)
def values(self) -> IndexableDictValuesView:
return IndexableDictValuesView(self)
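# Minimal usage sketch: values stay addressable both by key and by insertion
# index through .values(); the keys and values below are hypothetical.
def _example_indexable_dict():
    d = IndexableDict()
    d["first"] = 10
    d["second"] = 20
    assert d["second"] == 20
    assert d.values()[0] == 10
    return d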
| 24.692308
| 58
| 0.67134
|
72bfe63994be1ef8cd27e49130f0d572993b3184
| 4,553
|
py
|
Python
|
tests/api/api/test_functions.py
|
daniels290813/mlrun
|
c95d90c3b4ce78d9b71456333ccd201f932d60ea
|
[
"Apache-2.0"
] | null | null | null |
tests/api/api/test_functions.py
|
daniels290813/mlrun
|
c95d90c3b4ce78d9b71456333ccd201f932d60ea
|
[
"Apache-2.0"
] | null | null | null |
tests/api/api/test_functions.py
|
daniels290813/mlrun
|
c95d90c3b4ce78d9b71456333ccd201f932d60ea
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import unittest.mock
from http import HTTPStatus
import httpx
import kubernetes.client.rest
import pytest
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
import mlrun.api.api.endpoints.functions
import mlrun.api.crud
import mlrun.api.schemas
import mlrun.api.utils.singletons.db
import mlrun.api.utils.singletons.k8s
import mlrun.artifacts.dataset
import mlrun.artifacts.model
import mlrun.errors
import tests.conftest
def test_build_status_pod_not_found(db: Session, client: TestClient):
function = {
"kind": "job",
"metadata": {
"name": "function-name",
"project": "project-name",
"tag": "latest",
},
"status": {"build_pod": "some-pod-name"},
}
response = client.post(
f"func/{function['metadata']['project']}/{function['metadata']['name']}",
json=function,
)
assert response.status_code == HTTPStatus.OK.value
mlrun.api.utils.singletons.k8s.get_k8s().v1api = unittest.mock.Mock()
mlrun.api.utils.singletons.k8s.get_k8s().v1api.read_namespaced_pod = (
unittest.mock.Mock(
side_effect=kubernetes.client.rest.ApiException(
status=HTTPStatus.NOT_FOUND.value
)
)
)
response = client.get(
"build/status",
params={
"project": function["metadata"]["project"],
"name": function["metadata"]["name"],
"tag": function["metadata"]["tag"],
},
)
assert response.status_code == HTTPStatus.NOT_FOUND.value
@pytest.mark.asyncio
async def test_multiple_store_function_race_condition(
db: Session, async_client: httpx.AsyncClient
):
"""
    This tests the case that the retry_on_conflict decorator is meant to solve; see its docstring for more details.
"""
project = {
"metadata": {
"name": "project-name",
}
}
response = await async_client.post(
"projects",
json=project,
)
assert response.status_code == HTTPStatus.CREATED.value
    # Make the get-function method return None on the first two calls, and then fall back to the original function
get_function_mock = tests.conftest.MockSpecificCalls(
mlrun.api.utils.singletons.db.get_db()._get_class_instance_by_uid, [1, 2], None
).mock_function
mlrun.api.utils.singletons.db.get_db()._get_class_instance_by_uid = (
unittest.mock.Mock(side_effect=get_function_mock)
)
function = {
"kind": "job",
"metadata": {
"name": "function-name",
"project": "project-name",
"tag": "latest",
},
}
request1_task = asyncio.create_task(
async_client.post(
f"func/{function['metadata']['project']}/{function['metadata']['name']}",
json=function,
)
)
request2_task = asyncio.create_task(
async_client.post(
f"func/{function['metadata']['project']}/{function['metadata']['name']}",
json=function,
)
)
response1, response2 = await asyncio.gather(
request1_task,
request2_task,
)
assert response1.status_code == HTTPStatus.OK.value
assert response2.status_code == HTTPStatus.OK.value
# 2 times for two store function requests + 1 time on retry for one of them
assert (
mlrun.api.utils.singletons.db.get_db()._get_class_instance_by_uid.call_count
== 3
)
def test_build_function_with_mlrun_bool(db: Session, client: TestClient):
function_dict = {
"kind": "job",
"metadata": {
"name": "function-name",
"project": "project-name",
"tag": "latest",
},
}
original_build_function = mlrun.api.api.endpoints.functions._build_function
for with_mlrun in [True, False]:
request_body = {
"function": function_dict,
"with_mlrun": with_mlrun,
}
function = mlrun.new_function(runtime=function_dict)
mlrun.api.api.endpoints.functions._build_function = unittest.mock.Mock(
return_value=(function, True)
)
response = client.post(
"build/function",
json=request_body,
)
assert response.status_code == HTTPStatus.OK.value
assert (
mlrun.api.api.endpoints.functions._build_function.call_args[0][3]
== with_mlrun
)
mlrun.api.api.endpoints.functions._build_function = original_build_function
| 31.4
| 120
| 0.62662
|
24dba23a08e6bb051fd55cb73b94c70b11f3fab9
| 3,558
|
py
|
Python
|
tests/features/steps/basics.py
|
Lreus/python-behave
|
d8eb530185ee28d0288028f506d6ff29b2d81cd9
|
[
"MIT"
] | null | null | null |
tests/features/steps/basics.py
|
Lreus/python-behave
|
d8eb530185ee28d0288028f506d6ff29b2d81cd9
|
[
"MIT"
] | null | null | null |
tests/features/steps/basics.py
|
Lreus/python-behave
|
d8eb530185ee28d0288028f506d6ff29b2d81cd9
|
[
"MIT"
] | null | null | null |
from behave import *
"""basics.py
Commented test examples based on behave documentation
"""
@given('we have behave installed')
def empty_method(context):
"""
Even if it may not be used, context argument is required for step
implementation methods. It is an instance of behave.runner.Context
"""
pass
@when('we implement a test')
def assert_obvious(context):
"""
A failed assertion will raise an exception and therefore make the test fail
if it has not been caught
"""
assert True is not False
@then('behave will test it for that')
def check_context(context):
assert context.failed is False
@given('we store the following text')
def set_text_block_to_response(context):
"""
The decorator will allocate the content of a following text block
(delimited by three double quotes) to context.text attribute
"""
context.response = context.text
@given('we set the context response to the word "{word}"')
def set_string_to_response(context, word):
"""
    The "{name}" placeholder syntax parsed by behave lets us capture a variable
    string from the step text and assign it to an attribute.
context.response is the recommended attribute but it is not mandatory
https://behave.readthedocs.io/en/latest/api.html#behave.runner.Context
lists the attribute managed by behave. It is not advised to overwrite
them.
Examples:
- context.table
- context.text
- context.failed
- ...
"""
context.response = word
context.random = word
@then('the context response should be "{text}"')
def is_equal_to_response(context, text):
"""
a previously allocated context attribute is accessible through multiple steps.
"""
assert text == context.response
assert text == context.random
@then('the context response length should be {number:d}')
def response_length(context, number):
"""
    The '{name:type}' placeholder syntax declares a typed variable and restricts
    the step to that type.
This step won't be triggered if number is not an integer
"""
assert number == len(context.response)
@then('the context text attribute should contain the word "{text}"')
def does_response_contain(context, text):
"""
Search for the given word in context response
"""
assert text in context.response
@given('a set of frameworks')
def store_framework_table(context):
"""
A two dimensional array can be provided as a variable. It will be available
in the context.table attribute.
"""
context.response = context.table
@then('the number of framework should be {number:d}')
def is_equal_to_total_framework(context, number):
"""
A context.table attribute is a behave.model.Table.
It contains rows of key/value elements.
The keys are defined in the first row.
Table.rows method returns a list of behave.model.Row
"""
assert len(context.response.rows) == number
@then('the number of "{language}" frameworks should be {number:d}')
def time_language_appears(context, language, number):
"""
    The behave.model.Row class supports the syntax data = row[key],
    but rows are not dictionaries!
"""
occurrences = {}
for row in context.response.rows:
lang = row['language']
if lang in occurrences:
occurrences[lang] = occurrences[lang] + 1
else:
occurrences[lang] = 1
assert occurrences[language] == number
| 29.404959
| 86
| 0.669477
|
4a72aec50a8a2b122b84a75f027457fa16ecfc64
| 489
|
py
|
Python
|
setup.py
|
olinger/flask_accepts
|
c1bba213531e614129391f7ea8c45b0198600e6d
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
olinger/flask_accepts
|
c1bba213531e614129391f7ea8c45b0198600e6d
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
olinger/flask_accepts
|
c1bba213531e614129391f7ea8c45b0198600e6d
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright Alan (AJ) Pryor, Jr. 2018
from setuptools import setup, find_packages
setup(
name="flask_accepts",
author='Alan "AJ" Pryor, Jr.',
author_email="apryor6@gmail.com",
version="0.17.6",
description="Easy, opinionated Flask input/output handling with Flask-restx and Marshmallow",
ext_modules=[],
packages=find_packages(),
install_requires=[
"marshmallow>=3.0.1",
"flask-restx>=0.2.0",
"Werkzeug"
],
)
| 25.736842
| 98
| 0.621677
|
ad61915ab8850f4b64e0906f44cd769dc70c452f
| 8,527
|
py
|
Python
|
api/anubis/utils/http/decorators.py
|
synoet/Anubis
|
051888a88e37c67e5e772245604c79ceb4db8764
|
[
"MIT"
] | 2
|
2022-02-24T17:39:27.000Z
|
2022-02-25T02:14:06.000Z
|
api/anubis/utils/http/decorators.py
|
synoet/Anubis
|
051888a88e37c67e5e772245604c79ceb4db8764
|
[
"MIT"
] | null | null | null |
api/anubis/utils/http/decorators.py
|
synoet/Anubis
|
051888a88e37c67e5e772245604c79ceb4db8764
|
[
"MIT"
] | null | null | null |
from functools import wraps
from typing import Union, List, Tuple
from flask import request
from anubis.utils.auth import current_user
from anubis.utils.data import jsonify, _verify_data_shape
from anubis.utils.exceptions import AuthenticationError
from anubis.utils.http.https import error_response
def load_from_id(model, verify_owner=False):
"""
This flask decorator loads the id kwarg passed in by flask
and uses it to pull the sqlalchemy object corresponding to that id
>>> @app.route('/assignment/<string:id>')
>>> @require_user
>>> @load_from_id(Assignment)
>>> def view_function(assignment: Assignment):
>>> pass
    If verify_owner is true, then the sqlalchemy object's owner
    relationship (assuming it has one) will be checked against the
    currently logged in user.
:param model:
:param verify_owner:
:return:
"""
def wrapper(func):
@wraps(func)
def decorator(id, *args, **kwargs):
# Use the id from the view functions params to query for
# the object.
r = model.query.filter_by(id=id).first()
# If the sqlalchemy object was not found, then return a 400
if r is None:
return error_response("Unable to find"), 400
# If the verify_owner option is on, then
# check the object's owner against the currently
# logged in user.
if verify_owner and current_user.id != r.owner.id:
raise AuthenticationError()
return func(r, *args, **kwargs)
return decorator
return wrapper
def json_response(func):
"""
Wrap a route so that it always converts data
response to proper json.
@app.route('/')
    @json_response
def test():
return {
'success': True
}
"""
@wraps(func)
def json_wrap(*args, **kwargs):
data = func(*args, **kwargs)
status_code = 200
if isinstance(data, tuple):
data, status_code = data
return jsonify(data, status_code)
return json_wrap
def json_endpoint(
required_fields: Union[List[str], List[Tuple], None] = None,
only_required: bool = False,
):
"""
Wrap a route so that it always converts data response to proper
json. This decorator will save a whole lot of time verifying
json body data.
The required fields should be a list of either strings or tuples.
If the required fields is a list of strings, then each of the
strings will be verified in the json body, and passed to the
view function as a kwarg.
>>> @app.route('/')
    >>> @json_endpoint(['name'])
>>> def test(name, **_):
>>> return {
>>> 'success': True
>>> }
If the required fields are a list of tuples, then the first item
should be the string name of the field, then its type. When you
specify the type in a tuple, then that fields type will also
be verified in the json body.
>>> @app.route('/')
>>> @json_endpoint([('name', str)])
>>> def test(name: str, **_):
>>> return {
>>> 'success': True
>>> }
"""
def wrapper(func):
@wraps(func)
def json_wrap(*args, **kwargs):
# Get the content type header
content_type = request.headers.get("Content-Type", default="")
# Verify that the content type header was application json.
# If the content type header is not application/json, then
# flask will not parse the body of the request.
if not content_type.startswith("application/json"):
# If the content-type was not set properly, then we
# should hand back a 406 not acceptable error code.
return error_response("Content-Type header is not application/json"), 406
# After verifying that the content type header was set,
# then we can access the request json body
json_body: dict = request.json
# Build a list of the required field string values
_required_fields: List[str] = []
# If the required fields was set, then we
# need to verify that they exist in the json
# body, along with type checks if they were
# specified.
if required_fields is not None:
# Check required fields
for index, field in enumerate(required_fields):
# If field was a tuple, extract field name and required type.
required_type = None
if isinstance(field, tuple):
                    # If the tuple has more than two items, then
                    # we don't know how to handle it.
if len(field) != 2:
pass
# Pull the field apart into the field and required type
field, required_type = field
# At this point, the tuple will have been parsed if it had one,
# so the field will always be a string. Add it to the running
# (fresh) list of required field string objects.
_required_fields.append(field)
# Make sure that the field is in the json body.
# If this condition is not met, then we will return
# a 406 not acceptable.
if field not in json_body:
# field missing, return error
# Not Acceptable
return error_response(f"Malformed requests. Missing field {field}."), 406
# If a type was specified, verify it
if required_type is not None:
# Do a type check on the json body field
if not isinstance(json_body[field], required_type):
# Not Acceptable
return error_response("Malformed requests. Invalid field type."), 406
# Give the positional args first,
# then the json data (in the order of
# the required fields), and lastly
# the kwargs that were passed in.
if required_fields is not None:
# We can optionally specify only_required to
# skip this step. Here we are adding the key
# values from the posted json to the kwargs
# of the function. This is potentially destructive
# as it will overwrite any keys already in the
# kwargs with the values in the json.
if not only_required:
for key, value in json_body.items():
if key not in _required_fields:
kwargs[key] = value
# Call the function while trying to maintain a
# logical order to the arguments
return func(
*args,
**{field: json_body[field] for field in _required_fields},
**kwargs,
)
# If there was no required fields specified, then we can just call the
# view function with the first argument being the json body.
return func(json_body, *args, **kwargs)
return json_wrap
return wrapper
def verify_shape(*shapes):
"""
This is the decorator form of the data shape verification function. It will validate the
arguments of a function before calling it. You can just sequentially provide the expected shapes
of the arguments. It will return error_response's if there was a problem validating something.
:param shapes: sequence of argument shapes
:return: error_response on error
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# We should reject if we're not able to use all our shapes
if len(args) < len(shapes):
return error_response("Missing fields"), 406
# Verify our argument shapes
for data, shape in zip(args, shapes):
r, e = _verify_data_shape(data, shape)
if not r:
return error_response("Shape invalid {}".format(e)), 406
# Shapes pass, run function
return func(*args, **kwargs)
return wrapper
return decorator
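# Minimal usage sketch for verify_shape. The shape value passed below is purely
# illustrative -- the accepted shape syntax is whatever _verify_data_shape
# understands, which is not shown in this module.
@verify_shape({"name": str})
def _example_create_item(data, **kwargs):
    return {"created": data.get("name")}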
| 35.381743
| 100
| 0.574059
|
2fe6ee9b5fa830be2c92a078f217497e478392dd
| 372
|
py
|
Python
|
djangoapp/api/serializers.py
|
kevinha298/microservice
|
4de46929a802afa7997436c781338532be6c2cb9
|
[
"MIT"
] | null | null | null |
djangoapp/api/serializers.py
|
kevinha298/microservice
|
4de46929a802afa7997436c781338532be6c2cb9
|
[
"MIT"
] | null | null | null |
djangoapp/api/serializers.py
|
kevinha298/microservice
|
4de46929a802afa7997436c781338532be6c2cb9
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from api.models import Member
class MemberSerializer(serializers.ModelSerializer):
mrn = serializers.IntegerField(required=False)
name = serializers.CharField(required=False)
dob = serializers.DateField(required=False)
class Meta:
model = Member
# fields = ('name', 'mrn')
fields = '__all__'
| 31
| 52
| 0.712366
|
e1229a3da8083df90a3aba45d210fe85bca452cc
| 17,284
|
py
|
Python
|
teste.py
|
pedralmeida22/IA-Bomberman
|
548cf3e6c1c138698854fd1d9e22274d9d086fb0
|
[
"MIT"
] | null | null | null |
teste.py
|
pedralmeida22/IA-Bomberman
|
548cf3e6c1c138698854fd1d9e22274d9d086fb0
|
[
"MIT"
] | null | null | null |
teste.py
|
pedralmeida22/IA-Bomberman
|
548cf3e6c1c138698854fd1d9e22274d9d086fb0
|
[
"MIT"
] | 1
|
2020-01-13T19:57:29.000Z
|
2020-01-13T19:57:29.000Z
|
import sys
import json
import asyncio
import websockets
import getpass
import os
from defs2 import *
from mapa import Map
from Node import *
from path import *
from bomb import *
async def agent_loop(server_address="localhost:8000", agent_name="student"):
async with websockets.connect(f"ws://{server_address}/player") as websocket:
# Receive information about static game properties
await websocket.send(json.dumps({"cmd": "join", "name": agent_name}))
msg = await websocket.recv()
game_properties = json.loads(msg)
# You can create your own map representation or use the game representation:
mapa = Map(size=game_properties["size"], mapa=game_properties["map"])
previous_key = ""
calc_hide_pos = False
previous_level = None
previous_lives = None
previos_pos = None
samePosCounter = 0
positions = []
history = []
limite = 0
got_powerup = False
powerup = [0,0]
detonador = False
wallpass = False
bombpass = False
change=False
enemyCloseCounter = 0
goal = []
samePosBomba = 0
corner = None
while True:
try:
state = json.loads(
await websocket.recv()
) # receive game state, this must be called timely or your game will get out of sync with the server
# Next lines are only for the Human Agent, the key values are nonetheless the correct ones!
if state['level'] == 15 and state['enemies'] == []:
return 0
if state['lives'] == 0:
return 0
key = ""
print(state)
                # update the map
mapa.walls = state['walls']
level = state['level']
if previous_level != None and previous_lives != None:
                    # reset the global variables when dying or moving to the next level
if previous_level != state['level']:
got_powerup = False
powerup = [0,0]
previos_pos = None
samePosCounter = 0
if previous_level != state['level'] or previous_lives != state['lives']:
print('RESET')
calc_hide_pos = False
previous_level = state['level']
previous_lives = state['lives']
positions = []
history = []
goal = []
enemyCloseCounter = 0
if corner == None:
corner = find_corner(mapa)
                # ignore powerups that are not used
if level == 2 or level == 5 or level == 6 or level == 10 or level == 11 or level == 12 or level == 13 or level == 14 or level==15:
got_powerup = True
if detonador:
if level == 8 or level == 13:
got_powerup = True
my_pos = state['bomberman']
ways = get_possible_ways(mapa, my_pos)
print('ways: ', end='')
print(ways)
                # check whether we have the detonator
if my_pos == powerup:
got_powerup = True
if level == 3 or level == 8 or level == 13:
detonador = True
if level == 9:
bombpass = True
                # recursive escape
if state['bombs'] != [] and not calc_hide_pos:
print("calcurar hide pos")
goal, calc_hide_pos = choose_hide_pos2(my_pos, state['bombs'][0], mapa, '', 0, 60, state['enemies'],detonador)
print('my pos:', my_pos)
print('hide pos calculado:',goal)
print('hide pos: ' + str(calc_hide_pos))
key = choose_move(my_pos, ways, goal)
# key = choose_key(mapa, my_pos, positions, goal, True)
print('key hide pos in cacl:', key)
change = False
elif state['bombs'] != [] and calc_hide_pos:
print('já sabe a hide pos!')
if detonador:
if samePosBomba >=3:
change = True
if my_pos == previos_pos:
samePosBomba += 1
else:
samePosBomba = 0
print('change: ' , change)
'''
if len(history) > 11:
for i in range(0,10):
if history[i] != history[i+1]:
change= False
'''
if not change:
if dist_to(my_pos, goal) != 0:
print("ir para hide pos")
key = choose_move(my_pos, ways, goal)
print('hide pos: ', goal)
# key = choose_key(mapa, my_pos, positions, goal, True)
print('key hide pos :', key)
                        else: # it is safe, wait for the bomb to explode
if detonador:
print('Usar detonador')
key = 'A'
ways.append('A')
else:
print("Esperar que a bomba rebente...")
key = ''
else:
print("A ir para o [1,1]! ZWA")
change = False
goal, calc_hide_pos = choose_hide_pos2(my_pos, state['bombs'][0], mapa, '', 0, 60, state['enemies'],detonador)
print('nova hide pos: ',goal)
key=choose_move(my_pos,ways,goal)
                elif state['bombs'] == []: # there are no bombs
calc_hide_pos = False
                    # while we don't have the detonator, don't go after balloons
if detonador == True:
enemies = state['enemies']
else:
enemies = [e for e in state['enemies'] if e['name'] in ['Oneal','Minvo','Kondoria','Ovapi','Pass']]
enemies = state['enemies']
                    # only enemies are left, go after them
if state['walls'] == [] and state['enemies'] != [] and state['powerups'] == []:
enemies = [e for e in state['enemies'] if e['name'] in ['Oneal','Minvo','Kondoria','Ovapi','Pass']]
if enemies !=[]:
enemies.sort(key=lambda x: dist_to(my_pos, x['pos']))
distToClosestEnemy = dist_to(my_pos, enemies[0]['pos'])
print('DisToClosestEnemy: ' + str(distToClosestEnemy))
print('enemy_pos: ' + str(enemies[0]['pos']))
key = pathToEnemy(mapa, my_pos, enemies[0]['pos'])
goal = enemies[0]['pos']
                            # if close to the enemy, increment the counter
if distToClosestEnemy < 2.5:
print('Perto do inimigo!')
enemyCloseCounter += 1
elif enemyCloseCounter > 20:
print('Ciclo infinito encontrado!!!'.center(50, '-'))
                                # go to a wall
#print('Encontrar caminho até à parede alvo: ' + str(wall))
goal = list(mapa.bomberman_spawn)
key = choose_move(my_pos,ways,goal)
enemyCloseCounter = 0
print('goal: ',goal)
elif dist_to(my_pos, corner) == 0:
print("going to kill enemies")
enemies = state['enemies']
enemies.sort(key=lambda x: dist_to(my_pos, x['pos']))
if dist_to(list(corner), enemies[0]['pos']) < 6:
key = 'B'
ways.append('B')
else:
pass
else:
key, positions = choose_key(mapa, ways, my_pos, positions, list(corner), True)
goal = list(corner)
                    # pick up powerups
elif state['powerups'] != []:
print("going to powerups")
#key = choose_move(my_pos,ways,state['powerups'][0][0])
#key,positions = choose_key(mapa, ways, my_pos, positions, state['powerups'][0][0], True)
powerup = state['powerups'][0][0]
key, positions, goal = goTo(mapa, my_pos, ways, positions, powerup, True)
print('positions: ' + str(positions))
print('key from goTo (powerup): ' + key)
print('goal' + str(goal))
if state['walls']:
parede = min(state['walls'], key=lambda x: dist_to(my_pos, x))
if dist_to(my_pos, parede) <= 1:
key = 'B'
ways.append('B')
                    # go to the 'exit'
elif got_powerup and state['enemies'] == [] and state['exit'] != []:
print("going to exit")
key, positions, goal = goTo(mapa, my_pos, ways, positions, state['exit'], True)
if key == '':
key, positions, goal = goTo(mapa, my_pos, ways, positions, state['exit'], True)
print('positions: ' + str(positions))
print('key from goTo (exit): ' + key)
print('goal' + str(goal))
if state['walls']:
parede = min(state['walls'], key=lambda x: dist_to(my_pos, x))
if dist_to(my_pos, parede) <= 1:
key = 'B'
ways.append('B')
                    # there are walls
elif state['walls'] != []:
print("Escolher parede alvo...")
print('my' + str(my_pos))
'''if positions == [] or positions is None:
print("Escolher nova parede: ")'''
wall = next_wall(my_pos, state['walls'])
print('parede: ', wall)
print('dist to wall: ', end='')
print(dist_to(my_pos, wall))
                        if len(enemies) == 1 and not got_powerup: # to pick up the powerup
enemies = []
                        # place a bomb if close to the wall
if dist_to(my_pos, wall) <= 1:
print('Cheguei à parede! Pôr bomba!')
key = 'B'
ways.append('B')
                        # there are enemies
elif enemies !=[] :
enemies.sort(key=lambda x: dist_to(my_pos, x['pos']))
distToClosestEnemy = dist_to(my_pos, enemies[0]['pos'])
print('DisToClosestEnemy: ' + str(distToClosestEnemy))
                            # if close to the enemy, increment the counter
if distToClosestEnemy < 2.5:
print('Perto do inimigo!')
enemyCloseCounter += 1
                            # check for a loop with the enemy
if enemyCloseCounter > 20:
print('Ciclo infinito com inimigo encontrado!!!'.center(50, '-'))
                                # go destroy the nearest wall
key, positions, goal = goTo(mapa, my_pos, ways, positions, wall, False)
print('Encontrar caminho até à parede alvo: ' + str(wall))
enemyCloseCounter = 0
print('positions: ' + str(positions))
print('key from ciclo enimie: ' + key)
print('goal: ',goal)
                            # find a path to the enemy and to the wall
else:
                                # find a path to the enemy
key = pathToEnemy(mapa, my_pos, enemies[0]['pos'])
if key == '':
key, positions, goal = goTo(mapa, my_pos, ways, positions, wall,False)
print('positions: ' + str(positions))
print('key from gotoWall: ' + key)
print('goal' + str(goal))
else:
print('Encontrar caminho até à parede alvo: ' + str(wall))
key, positions, goal = goTo(mapa, my_pos, ways, positions, wall,False)
print('positions: ' + str(positions))
print('key from gotoWall: ' + key)
print('goal' + str(goal))
if state['enemies'] != [] and state['bombs'] == []:
                    ## 17/10 - flee from the enemies
enemies = state['enemies']
enemies.sort(key=lambda x: dist_to(my_pos, x['pos']))
if key in ['w','s','d','a']:
if in_range(mapa.calc_pos(my_pos,key), 1, enemies[0]['pos'], mapa):
print('Enemie close! Pôr bomba! (Calculado)')
key = 'B'
ways.append('B')
if in_range(my_pos, 1, enemies[0]['pos'], mapa):
print('Enemie close! Pôr bomba!')
key = 'B'
ways.append('B')
if my_pos == previos_pos:
samePosCounter += 1
if samePosCounter >= 20:
print('Suicidio'.center(80, '/'))
if state['bombs'] != []:
if detonador:
key = 'A'
ways.append('A')
else:
key = choose_random_move(ways)
print ('key random:'+ key)
samePosCounter = 0
else:
print('Reset samePosCounter!')
samePosCounter = 0
                # make sure key is valid
if key != '' or key == None:
if not key in ways:
print('Caminho impossivel... escolhendo novo')
                        print('goal: ',goal) # when going to kill enemies and stuck in an infinite loop, the goal passed is [], not sure why
if goal:
key = choose_move(my_pos, ways, goal)
else:
key = choose_move(my_pos,ways,next_wall(my_pos,state['walls']))
history.append(my_pos)
previous_level = state['level']
previous_lives = state['lives']
previous_key = key
previos_pos = my_pos
print('Sending key: ' + key + '\n\n')
print("got_powerup: ",got_powerup)
print('Detonador: ', detonador)
print('Bombpass: ', bombpass)
print('corner: ', str(corner))
await websocket.send(
json.dumps({"cmd": "key", "key": key})
) # send key command to server - you must implement this send in the AI agent
# break
except websockets.exceptions.ConnectionClosedOK:
print("Server has cleanly disconnected us")
return
# DO NOT CHANGE THE LINES BELOW
# You can change the default values using the command line, example:
# $ NAME='bombastico' python3 client.py
loop = asyncio.get_event_loop()
SERVER = os.environ.get("SERVER", "localhost")
PORT = os.environ.get("PORT", "8000")
NAME = os.environ.get("NAME", getpass.getuser())
loop.run_until_complete(agent_loop(f"{SERVER}:{PORT}", NAME))
| 43.86802
| 146
| 0.418132
|
8af21b960366ed26d414023a78b547a11a3f367f
| 1,228
|
py
|
Python
|
pyranges/methods/call.py
|
jergosh/pyranges
|
a401fd5bf1f6aa1a2546354a22d81e8c59a82114
|
[
"MIT"
] | null | null | null |
pyranges/methods/call.py
|
jergosh/pyranges
|
a401fd5bf1f6aa1a2546354a22d81e8c59a82114
|
[
"MIT"
] | null | null | null |
pyranges/methods/call.py
|
jergosh/pyranges
|
a401fd5bf1f6aa1a2546354a22d81e8c59a82114
|
[
"MIT"
] | null | null | null |
import pandas as pd
import pyranges as pr
def _handle_eval_return(self, result, col, as_pyranges, subset):
"""Handle return from eval.
    If col is set, add/update that column. If subset is True and the function
    returned a boolean Series, use it to subset the PyRanges.
    Otherwise return a PyRanges or a dict of data."""
if as_pyranges:
if not result:
return pr.PyRanges()
first_hit = list(result.values())[0]
if isinstance(first_hit, pd.Series):
if first_hit.dtype == bool and subset:
return self[result]
elif col:
self.__setattr__(col, result)
return self
else:
raise Exception(
"Cannot return PyRanges when function returns a Series! Use as_pyranges=False."
)
return pr.PyRanges(result)
else:
return result
def _call(self, f, col=None, strand=None, subset=True, as_pyranges=True):
if strand is None:
strand = self.stranded
if self.stranded and not strand:
self = self.unstrand()
result = self.apply(f, strand=strand, as_pyranges=False)
result = _handle_eval_return(self, result, col, as_pyranges, subset)
return result
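# Illustrative usage sketch (not part of the module): how _call behaves with a
# boolean-returning function (subset) versus a value-returning one (new column).
# The example data is made up and assumes pyranges' from_dict constructor; the
# lambda signature assumed here is the per-chromosome DataFrame plus kwargs.
if __name__ == "__main__":
    gr = pr.from_dict({"Chromosome": ["chr1", "chr1"], "Start": [1, 10],
                       "End": [5, 20], "Score": [0.1, 0.9]})
    # Boolean Series + subset=True: keep only the rows where Score > 0.5.
    high = _call(gr, lambda df, **kwargs: df.Score > 0.5)
    # Non-boolean return + col: store the result as a new column named "Length".
    with_len = _call(gr, lambda df, **kwargs: df.End - df.Start, col="Length")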
| 27.288889
| 99
| 0.611564
|
1d9c39ebb7c4d3cf927baf68fe18879e8876d01c
| 2,431
|
py
|
Python
|
tensorflow_datasets/testing/__init__.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | 14
|
2019-03-30T02:11:29.000Z
|
2021-11-16T12:06:32.000Z
|
tensorflow_datasets/testing/__init__.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/testing/__init__.py
|
kmh4321/datasets
|
286d7a8a5eb3e073f18f8fee4f774bafc23fb445
|
[
"Apache-2.0"
] | 10
|
2019-03-31T08:35:29.000Z
|
2021-09-01T06:28:43.000Z
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing utilities."""
from tensorflow_datasets.testing.dataset_builder_testing import DatasetBuilderTestCase
from tensorflow_datasets.testing.mocking import mock_data
from tensorflow_datasets.testing.test_case import TestCase
from tensorflow_datasets.testing.test_utils import DummyDatasetSharedGenerator
from tensorflow_datasets.testing.test_utils import DummyMnist
from tensorflow_datasets.testing.test_utils import DummyParser
from tensorflow_datasets.testing.test_utils import DummySerializer
from tensorflow_datasets.testing.test_utils import fake_examples_dir
from tensorflow_datasets.testing.test_utils import FeatureExpectationItem
from tensorflow_datasets.testing.test_utils import FeatureExpectationsTestCase
from tensorflow_datasets.testing.test_utils import make_tmp_dir
from tensorflow_datasets.testing.test_utils import mock_kaggle_api
from tensorflow_datasets.testing.test_utils import RaggedConstant
from tensorflow_datasets.testing.test_utils import rm_tmp_dir
from tensorflow_datasets.testing.test_utils import run_in_graph_and_eager_modes
from tensorflow_datasets.testing.test_utils import SubTestCase
from tensorflow_datasets.testing.test_utils import test_main
from tensorflow_datasets.testing.test_utils import tmp_dir
__all__ = [
"DatasetBuilderTestCase",
"DummyDatasetSharedGenerator",
"DummyMnist",
"fake_examples_dir",
"FeatureExpectationItem",
"FeatureExpectationsTestCase",
"SubTestCase",
"TestCase",
"RaggedConstant",
"run_in_graph_and_eager_modes",
"test_main",
"tmp_dir", # TODO(afrozm): rm from here and add as methods to TestCase
"make_tmp_dir", # TODO(afrozm): rm from here and add as methods to TestCase
"mock_kaggle_api",
"mock_data",
"rm_tmp_dir", # TODO(afrozm): rm from here and add as methods to TestCase
]
| 44.2
| 86
| 0.816125
|
7a28f34470415037666a90e90e9029e28b4c6fec
| 2,239
|
py
|
Python
|
flask_app/app.py
|
FancyFun/dogflask
|
255f7c19d2741b9c3b2e5cc24cac2748be3d6305
|
[
"MIT"
] | null | null | null |
flask_app/app.py
|
FancyFun/dogflask
|
255f7c19d2741b9c3b2e5cc24cac2748be3d6305
|
[
"MIT"
] | null | null | null |
flask_app/app.py
|
FancyFun/dogflask
|
255f7c19d2741b9c3b2e5cc24cac2748be3d6305
|
[
"MIT"
] | 1
|
2020-09-18T03:13:11.000Z
|
2020-09-18T03:13:11.000Z
|
from flask import Flask, render_template, request
import requests
from flask_sqlalchemy import SQLAlchemy
def create_app():
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///Dog.sqlite3'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
@app.route('/')
def home():
return 'success the flask app is running!'
@app.route('/render')
def render():
return render_template('home.html')
@app.route('/render_with_insert/<insert>')
def render_insert(insert):
return render_template('insert.html',insertion=insert)
@app.route('/puppy')
def puppy():
json = requests.get('https://dog.ceo/api/breeds/image/random').json()
image = json['message']
return render_template('dog.html', picture=image, blob=json)
@app.route('/reset')
def reset():
DB.drop_all()
DB.create_all()
return 'DB reset'
@app.route('/save_dog')
def save_dog():
json = requests.get('https://dog.ceo/api/breeds/image/random').json()
image = json['message']
return render_template('save_dog.html', picture=image)
@app.route('/saved_dog', methods=['POST'])
def saved_dog():
image = request.values['doglink']
name = request.values['dogname']
dog = Dog(dog=image,name=name)
DB.session.add(dog)
DB.session.commit()
return render_template('saved_dog.html', picture=image, name=name)
@app.route('/dog_list')
def dog_list():
dogs = Dog.query.all()
names = [dog.name for dog in dogs]
return render_template('dog_list.html', names=names)
@app.route('/view_dog', methods=['POST'])
def view_dog():
name = request.values['dogname']
dog = Dog.query.filter_by(name=name).all()[0]
return render_template('saved_dog.html', picture=dog.dog, name=dog.name)
return app
##########################################################
DB = SQLAlchemy()
class Dog(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
dog = DB.Column(DB.Text)
name = DB.Column(DB.Text)
def __repr__(self):
return f'{self.name} is dog number {self.id}'
| 28.341772
| 80
| 0.608307
|
46a4b6fcf9d3d83250af7953b1345e401870ade4
| 317
|
py
|
Python
|
sp/quadratic.py
|
Breccia/s-py
|
4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d
|
[
"MIT"
] | null | null | null |
sp/quadratic.py
|
Breccia/s-py
|
4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d
|
[
"MIT"
] | null | null | null |
sp/quadratic.py
|
Breccia/s-py
|
4fc5fcd0efbfcaa6574a81ee922c1083ed0ef57d
|
[
"MIT"
] | null | null | null |
#!/usr/local/anaconda3/bin/python
import matplotlib.pylab as plt
# 0 = -(x**2) -2x - 1
def plot_quad():
x = []
y = []
for i in range(-50, 50):
x.append(i)
y.append(+(i ** 2) + (2*(i**1)) + 1)
plt.plot(x, y, marker='*')
plt.show()
if __name__ == '__main__':
plot_quad()
| 14.409091
| 44
| 0.495268
|
c1e9c1289b265e9394990044c232149cc9d30651
| 1,477
|
py
|
Python
|
bot.py
|
changapuzharamanan/message-view-counter-bot
|
40cca3ab354b30e4191edab565fa6645f6691024
|
[
"Unlicense"
] | null | null | null |
bot.py
|
changapuzharamanan/message-view-counter-bot
|
40cca3ab354b30e4191edab565fa6645f6691024
|
[
"Unlicense"
] | null | null | null |
bot.py
|
changapuzharamanan/message-view-counter-bot
|
40cca3ab354b30e4191edab565fa6645f6691024
|
[
"Unlicense"
] | null | null | null |
from pyrogram import Client, filters
from pyrogram.types import Message, Poll, InlineKeyboardMarkup, InlineKeyboardButton, CallbackQuery
import os
api_id = int(os.environ.get("API_ID"))
api_hash = os.environ.get("API_HASH")
token = os.environ.get("TOKEN")
app = Client(':memory:', api_id, api_hash, bot_token=token)
non_anonymous_poll = filters.create(lambda *_: _[2].poll is not None and not _[2].poll.is_anonymous)
forwardchannel = -1001200739050
startmsg = """
STARTED
"""
@app.on_message(filters.command("start") & filters.private)
async def start(client, message):
    await message.reply(startmsg)
@app.on_message(~filters.service & ~filters.game & ~filters.channel & ~filters.edited & ~filters.linked_channel & ~non_anonymous_poll)
async def viewcounter(client, message):
forward = await message.forward(forwardchannel)
await forward.forward(message.chat.id)
@app.on_message((filters.service | filters.game | filters.channel | non_anonymous_poll) & ~filters.edited)
async def notsupported(client, message):
await message.reply("sorry but this type of message not supported (non anonymous polls or games (like @gamebot or @gamee) or message from channels or service messages)", reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("delete this message", "deleterrormessage")]]))
@app.on_callback_query(filters.regex("^deleterrormessage"))
async def delerrmsg(client: Client, cquery: CallbackQuery):
await cquery.message.delete()
app.run()
| 36.925
| 275
| 0.761002
|
fa448a649f65e4e85832c6895840a50221c8d87d
| 14,750
|
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/terraform.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/terraform.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/terraform.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ryan Scott Brown <ryansb@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: terraform
short_description: Manages a Terraform deployment (and plans)
description:
- Provides support for deploying resources with Terraform and pulling
resource information back into Ansible.
options:
state:
choices: ['planned', 'present', 'absent']
description:
- Goal state of given stage/project
required: false
default: present
binary_path:
description:
- The path of a terraform binary to use, relative to the 'service_path'
unless you supply an absolute path.
required: false
project_path:
description:
- The path to the root of the Terraform directory with the
vars.tf/main.tf/etc to use.
required: true
workspace:
description:
- The terraform workspace to work with.
required: false
default: default
purge_workspace:
description:
- Only works with state = absent
- If true, the workspace will be deleted after the "terraform destroy" action.
- The 'default' workspace will not be deleted.
required: false
default: false
type: bool
plan_file:
description:
- The path to an existing Terraform plan file to apply. If this is not
specified, Ansible will build a new TF plan and execute it.
Note that this option is required if 'state' has the 'planned' value.
required: false
state_file:
description:
- The path to an existing Terraform state file to use when building plan.
If this is not specified, the default `terraform.tfstate` will be used.
- This option is ignored when plan is specified.
required: false
variables_file:
description:
- The path to a variables file for Terraform to fill into the TF
configurations.
required: false
variables:
description:
- A group of key-values to override template variables or those in
variables files.
required: false
targets:
description:
- A list of specific resources to target in this plan/application. The
resources selected here will also auto-include any dependencies.
required: false
lock:
description:
- Enable statefile locking, if you use a service that accepts locks (such
as S3+DynamoDB) to store your statefile.
required: false
type: bool
lock_timeout:
description:
- How long to maintain the lock on the statefile, if you use a service
that accepts locks (such as S3+DynamoDB).
required: false
force_init:
description:
- To avoid duplicating infra, if a state file can't be found this will
force a `terraform init`. Generally, this should be turned off unless
you intend to provision an entirely new Terraform deployment.
default: false
required: false
type: bool
backend_config:
description:
- A group of key-values to provide at init stage to the -backend-config parameter.
required: false
notes:
- To just run a `terraform plan`, use check mode.
requirements: [ "terraform" ]
author: "Ryan Scott Brown (@ryansb)"
'''
EXAMPLES = """
# Basic deploy of a service
- terraform:
project_path: '{{ project_dir }}'
state: present
# Define the backend configuration at init
- terraform:
project_path: 'project/'
state: "{{ state }}"
force_init: true
backend_config:
region: "eu-west-1"
bucket: "some-bucket"
key: "random.tfstate"
"""
RETURN = """
outputs:
type: complex
description: A dictionary of all the TF outputs by their assigned name. Use `.outputs.MyOutputName.value` to access the value.
returned: on success
sample: '{"bukkit_arn": {"sensitive": false, "type": "string", "value": "arn:aws:s3:::tf-test-bukkit"}'
contains:
sensitive:
type: bool
returned: always
description: Whether Terraform has marked this value as sensitive
type:
type: str
returned: always
description: The type of the value (string, int, etc)
value:
returned: always
description: The value of the output as interpolated by Terraform
stdout:
type: str
description: Full `terraform` command stdout, in case you want to display it or examine the event log
returned: always
sample: ''
command:
type: str
description: Full `terraform` command built by this module, in case you want to re-run the command outside the module or debug a problem.
returned: always
sample: terraform apply ...
"""
import os
import json
import tempfile
import traceback
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.basic import AnsibleModule
DESTROY_ARGS = ('destroy', '-no-color', '-force')
APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve=true')
module = None
def preflight_validation(bin_path, project_path, variables_args=None, plan_file=None):
if project_path in [None, ''] or '/' not in project_path:
module.fail_json(msg="Path for Terraform project can not be None or ''.")
if not os.path.exists(bin_path):
module.fail_json(msg="Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please.".format(bin_path))
if not os.path.isdir(project_path):
module.fail_json(msg="Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please.".format(project_path))
rc, out, err = module.run_command([bin_path, 'validate'] + variables_args, cwd=project_path, use_unsafe_shell=True)
if rc != 0:
module.fail_json(msg="Failed to validate Terraform configuration files:\r\n{0}".format(err))
def _state_args(state_file):
if state_file and os.path.exists(state_file):
return ['-state', state_file]
if state_file and not os.path.exists(state_file):
module.fail_json(msg='Could not find state_file "{0}", check the path and try again.'.format(state_file))
return []
def init_plugins(bin_path, project_path, backend_config):
command = [bin_path, 'init', '-input=false']
if backend_config:
for key, val in backend_config.items():
command.extend([
'-backend-config',
shlex_quote('{0}={1}'.format(key, val))
])
rc, out, err = module.run_command(command, cwd=project_path)
if rc != 0:
module.fail_json(msg="Failed to initialize Terraform modules:\r\n{0}".format(err))
def get_workspace_context(bin_path, project_path):
workspace_ctx = {"current": "default", "all": []}
command = [bin_path, 'workspace', 'list', '-no-color']
rc, out, err = module.run_command(command, cwd=project_path)
if rc != 0:
module.warn("Failed to list Terraform workspaces:\r\n{0}".format(err))
for item in out.split('\n'):
stripped_item = item.strip()
if not stripped_item:
continue
elif stripped_item.startswith('* '):
workspace_ctx["current"] = stripped_item.replace('* ', '')
else:
workspace_ctx["all"].append(stripped_item)
return workspace_ctx
def _workspace_cmd(bin_path, project_path, action, workspace):
command = [bin_path, 'workspace', action, workspace, '-no-color']
rc, out, err = module.run_command(command, cwd=project_path)
if rc != 0:
module.fail_json(msg="Failed to {0} workspace:\r\n{1}".format(action, err))
return rc, out, err
def create_workspace(bin_path, project_path, workspace):
_workspace_cmd(bin_path, project_path, 'new', workspace)
def select_workspace(bin_path, project_path, workspace):
_workspace_cmd(bin_path, project_path, 'select', workspace)
def remove_workspace(bin_path, project_path, workspace):
_workspace_cmd(bin_path, project_path, 'delete', workspace)
def build_plan(command, project_path, variables_args, state_file, targets, state, plan_path=None):
if plan_path is None:
f, plan_path = tempfile.mkstemp(suffix='.tfplan')
plan_command = [command[0], 'plan', '-input=false', '-no-color', '-detailed-exitcode', '-out', plan_path]
for t in (module.params.get('targets') or []):
plan_command.extend(['-target', t])
plan_command.extend(_state_args(state_file))
rc, out, err = module.run_command(plan_command + variables_args, cwd=project_path, use_unsafe_shell=True)
if rc == 0:
# no changes
return plan_path, False, out, err, plan_command if state == 'planned' else command
elif rc == 1:
# failure to plan
module.fail_json(msg='Terraform plan could not be created\r\nSTDOUT: {0}\r\n\r\nSTDERR: {1}'.format(out, err))
elif rc == 2:
# changes, but successful
return plan_path, True, out, err, plan_command if state == 'planned' else command
module.fail_json(msg='Terraform plan failed with unexpected exit code {0}. \r\nSTDOUT: {1}\r\n\r\nSTDERR: {2}'.format(rc, out, err))
def main():
global module
module = AnsibleModule(
argument_spec=dict(
project_path=dict(required=True, type='path'),
binary_path=dict(type='path'),
workspace=dict(required=False, type='str', default='default'),
purge_workspace=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent', 'planned']),
variables=dict(type='dict'),
variables_file=dict(type='path'),
plan_file=dict(type='path'),
state_file=dict(type='path'),
targets=dict(type='list', default=[]),
lock=dict(type='bool', default=True),
lock_timeout=dict(type='int',),
force_init=dict(type='bool', default=False),
backend_config=dict(type='dict', default=None),
),
required_if=[('state', 'planned', ['plan_file'])],
supports_check_mode=True,
)
project_path = module.params.get('project_path')
bin_path = module.params.get('binary_path')
workspace = module.params.get('workspace')
purge_workspace = module.params.get('purge_workspace')
state = module.params.get('state')
variables = module.params.get('variables') or {}
variables_file = module.params.get('variables_file')
plan_file = module.params.get('plan_file')
state_file = module.params.get('state_file')
force_init = module.params.get('force_init')
backend_config = module.params.get('backend_config')
if bin_path is not None:
command = [bin_path]
else:
command = [module.get_bin_path('terraform', required=True)]
if force_init:
init_plugins(command[0], project_path, backend_config)
workspace_ctx = get_workspace_context(command[0], project_path)
if workspace_ctx["current"] != workspace:
if workspace not in workspace_ctx["all"]:
create_workspace(command[0], project_path, workspace)
else:
select_workspace(command[0], project_path, workspace)
if state == 'present':
command.extend(APPLY_ARGS)
elif state == 'absent':
command.extend(DESTROY_ARGS)
variables_args = []
for k, v in variables.items():
variables_args.extend([
'-var',
'{0}={1}'.format(k, v)
])
if variables_file:
variables_args.extend(['-var-file', variables_file])
preflight_validation(command[0], project_path, variables_args)
if module.params.get('lock') is not None:
if module.params.get('lock'):
command.append('-lock=true')
else:
command.append('-lock=false')
if module.params.get('lock_timeout') is not None:
command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))
for t in (module.params.get('targets') or []):
command.extend(['-target', t])
# we aren't sure if this plan will result in changes, so assume yes
needs_application, changed = True, False
out, err = '', ''
if state == 'absent':
command.extend(variables_args)
elif state == 'present' and plan_file:
if any([os.path.isfile(project_path + "/" + plan_file), os.path.isfile(plan_file)]):
command.append(plan_file)
else:
module.fail_json(msg='Could not find plan_file "{0}", check the path and try again.'.format(plan_file))
else:
plan_file, needs_application, out, err, command = build_plan(command, project_path, variables_args, state_file,
module.params.get('targets'), state, plan_file)
command.append(plan_file)
if needs_application and not module.check_mode and not state == 'planned':
rc, out, err = module.run_command(command, cwd=project_path)
# checks out to decide if changes were made during execution
if '0 added, 0 changed' not in out and not state == "absent" or '0 destroyed' not in out:
changed = True
if rc != 0:
module.fail_json(
msg="Failure when executing Terraform command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
command=' '.join(command)
)
outputs_command = [command[0], 'output', '-no-color', '-json'] + _state_args(state_file)
rc, outputs_text, outputs_err = module.run_command(outputs_command, cwd=project_path)
if rc == 1:
module.warn("Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}".format(outputs_text, outputs_err))
outputs = {}
elif rc != 0:
module.fail_json(
msg="Failure when getting Terraform outputs. "
"Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, outputs_text, outputs_err),
command=' '.join(outputs_command))
else:
outputs = json.loads(outputs_text)
# Restore the Terraform workspace found when running the module
if workspace_ctx["current"] != workspace:
select_workspace(command[0], project_path, workspace_ctx["current"])
if state == 'absent' and workspace != 'default' and purge_workspace is True:
remove_workspace(command[0], project_path, workspace)
module.exit_json(changed=changed, state=state, workspace=workspace, outputs=outputs, stdout=out, stderr=err, command=' '.join(command))
if __name__ == '__main__':
main()
| 37.341772
| 158
| 0.659593
|
e81c30fbe3d8e0f136d77c98305e1a8cc2ed7660
| 3,004
|
py
|
Python
|
sample.py
|
Changyoon-Lee/stockauto
|
5029b95d8055d6780b9e902109c4ae43525d7b49
|
[
"MIT"
] | 1
|
2020-12-18T13:52:39.000Z
|
2020-12-18T13:52:39.000Z
|
sample.py
|
Changyoon-Lee/stockauto
|
5029b95d8055d6780b9e902109c4ae43525d7b49
|
[
"MIT"
] | null | null | null |
sample.py
|
Changyoon-Lee/stockauto
|
5029b95d8055d6780b9e902109c4ae43525d7b49
|
[
"MIT"
] | null | null | null |
import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event
g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
StopEvent = win32event.CreateEvent(None, 0, 0, None)
class CpEvent:
def set_params(self, client, name, caller):
        self.client = client  # CP real-time communication object
        self.name = name  # name used to distinguish events between services
        self.caller = caller  # kept for the callback
def OnReceived(self):
        # real-time handling - current price / order execution
if self.name == 'stockmst':
print('recieved')
win32event.SetEvent(StopEvent)
return
class CpCurReply:
def __init__(self, objEvent):
self.name = "stockmst"
self.obj = objEvent
def Subscribe(self):
handler = win32com.client.WithEvents(self.obj, CpEvent)
handler.set_params(self.obj, self.name, None)
def MessagePump(timeout):
waitables = [StopEvent]
while 1:
rc = win32event.MsgWaitForMultipleObjects(
waitables,
0, # Wait for all = false, so it waits for anyone
timeout, #(or win32event.INFINITE)
win32event.QS_ALLEVENTS) # Accepts all input
if rc == win32event.WAIT_OBJECT_0:
# Our first event listed, the StopEvent, was triggered, so we must exit
print('stop event')
break
elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
# A windows message is waiting - take care of it. (Don't ask me
# why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
# This message-serving MUST be done for COM, DDE, and other
# Windowsy things to work properly!
print('pump')
if pythoncom.PumpWaitingMessages():
break # we received a wm_quit message
elif rc == win32event.WAIT_TIMEOUT:
print('timeout')
return
pass
else:
print('exception')
raise RuntimeError("unexpected win32wait return value")
code = 'A005930'
##############################################################
#1. BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('BlockRequest 로 수신 받은 데이터')
item = {}
item['종목명']= g_objCodeMgr.CodeToName(code)
item['현재가'] = objStockMst.GetHeaderValue(11) # closing price
item['대비'] = objStockMst.GetHeaderValue(12) # change vs. previous day
print(item)
print('')
##############################################################
# 2. Request ==> message pump ==> receive the OnReceived event
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()
code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)
item = {}
item['종목명']= g_objCodeMgr.CodeToName(code)
item['현재가'] = objStockMst.GetHeaderValue(11) # closing price
item['대비'] = objStockMst.GetHeaderValue(12) # change vs. previous day
print(item)
| 30.969072
| 83
| 0.592876
|
f78c04b18a3db96eb00d7c5881a78d62cc9495f2
| 3,413
|
py
|
Python
|
customization/aws_secrets.py
|
wahabakhtar/aws-limit-monitor
|
76e2ed0def21061d66d62fdcf74b0bcf0e8793a0
|
[
"Apache-2.0"
] | null | null | null |
customization/aws_secrets.py
|
wahabakhtar/aws-limit-monitor
|
76e2ed0def21061d66d62fdcf74b0bcf0e8793a0
|
[
"Apache-2.0"
] | null | null | null |
customization/aws_secrets.py
|
wahabakhtar/aws-limit-monitor
|
76e2ed0def21061d66d62fdcf74b0bcf0e8793a0
|
[
"Apache-2.0"
] | null | null | null |
'''
Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and
limitations under the License.
'''
import boto3
import base64
import json
import sys
import logging
from botocore.exceptions import ClientError
from tempfile import mkstemp
def get_secret(secret_name, region_name):
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
# We rethrow the exception by default.
try:
# print ("******************* Gonna Get Secrests *********")
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
# print ("++++++++++++++++++++")
# print (get_secret_value_response)
# print ("+++++++++++END+++++++")
except ClientError as e:
logging.error("******** Error1 Occured *********")
logging.error(e)
logging.error("******** Error1 Finished ********")
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# You provided a parameter value that is not valid for the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
    else:
        # Decrypts secret using the associated KMS CMK.
        # Depending on whether the secret is a string or binary, one of these fields will be populated.
        if 'SecretString' in get_secret_value_response:
            secret = get_secret_value_response['SecretString']
        else:
            secret = base64.b64decode(
                get_secret_value_response['SecretBinary'])
        return secret
def get_secret_key(secret_name, region_name, username):
secret = get_secret(secret_name, region_name)
secretjson = json.loads(secret)
secretvar = secretjson[username]
return secretvar
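# Illustrative usage (not part of the original file): the secret name, region and
# key below are hypothetical and must match an existing Secrets Manager secret.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    db_password = get_secret_key('limit-monitor/credentials', 'us-east-1', 'db_password')
    logging.info('Fetched value for db_password (%d characters)', len(db_password))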
| 42.6625
| 118
| 0.655142
|
e95821821e104616204fb900acce987d0377b5f5
| 936
|
py
|
Python
|
personal_portfolio/urls.py
|
aminyaraghi/portfolio
|
dabef787f36eb80563b6a0246a55d3495b126cb5
|
[
"MIT"
] | null | null | null |
personal_portfolio/urls.py
|
aminyaraghi/portfolio
|
dabef787f36eb80563b6a0246a55d3495b126cb5
|
[
"MIT"
] | null | null | null |
personal_portfolio/urls.py
|
aminyaraghi/portfolio
|
dabef787f36eb80563b6a0246a55d3495b126cb5
|
[
"MIT"
] | null | null | null |
"""personal_portfolio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('projects/', include('projects.urls')),
path('blog/', include('blog.urls')),
]
| 34.666667
| 77
| 0.712607
|
5a451a4c359b1049f65a0c073265c9c130a8dfd9
| 4,247
|
py
|
Python
|
brave/evaluate/evaluate_video_embedding.py
|
deepmind/brave
|
0ae20d9afcf6b1fa4d31d70c906d711901b56e9c
|
[
"Apache-2.0"
] | 26
|
2021-10-14T19:06:56.000Z
|
2022-03-02T18:22:45.000Z
|
brave/evaluate/evaluate_video_embedding.py
|
deepmind/brave
|
0ae20d9afcf6b1fa4d31d70c906d711901b56e9c
|
[
"Apache-2.0"
] | 1
|
2022-01-31T23:23:31.000Z
|
2022-02-08T01:07:15.000Z
|
brave/evaluate/evaluate_video_embedding.py
|
deepmind/brave
|
0ae20d9afcf6b1fa4d31d70c906d711901b56e9c
|
[
"Apache-2.0"
] | 1
|
2022-02-04T10:54:53.000Z
|
2022-02-04T10:54:53.000Z
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implement the standard evaluation procedure for video embeddings."""
from typing import Sequence
import chex
import tensorflow as tf
import tensorflow_datasets as tfds
from brave.datasets import augmentations
from brave.datasets import datasets
from brave.datasets import media_sequences
from brave.evaluate import eval_datasets
from brave.evaluate import evaluate
DEFAULT_EVAL_BATCH_SIZE = 1
DEFAULT_EVAL_NUM_TRAIN_EPOCHS = 10
DEFAULT_TRAIN_MIN_CROP_WINDOW_AREA = 0.3
DEFAULT_TRAIN_MAX_CROP_WINDOW_AREA = 1.0
DEFAULT_TRAIN_MIN_CROP_WINDOW_ASPECT_RATIO = 0.5
DEFAULT_TRAIN_MAX_CROP_WINDOW_ASPECT_RATIO = 2.0
DEFAULT_TEST_INITIAL_RESIZE = 256
DEFAULT_TEST_NUM_TEMPORAL_CROPS = 10
DEFAULT_TEST_NUM_SPATIAL_CROPS = 3
@chex.dataclass
class VideoConfig:
num_frames: int
image_size: int
video_step: int
def evaluate_video_embedding(
train_dataset_shards: Sequence[str],
test_dataset_shards: Sequence[str],
embedding_fn: evaluate.EmbeddingFn,
config: VideoConfig,
svm_regularization: float,
batch_size: int = DEFAULT_EVAL_BATCH_SIZE,
shard_reader: media_sequences.ShardReaderFn = media_sequences
.tf_record_shard_reader,
) -> evaluate.EvaluationResults:
"""Standardized evaluation for embeddings."""
train_ds = eval_datasets.random_sampling_dataset(
train_dataset_shards,
image_size=config.image_size,
num_video_frames=config.num_frames,
video_step=config.video_step,
min_crop_window_area=DEFAULT_TRAIN_MIN_CROP_WINDOW_AREA,
max_crop_window_area=DEFAULT_TRAIN_MAX_CROP_WINDOW_AREA,
min_crop_window_aspect_ratio=DEFAULT_TRAIN_MIN_CROP_WINDOW_ASPECT_RATIO,
max_crop_window_aspect_ratio=DEFAULT_TRAIN_MAX_CROP_WINDOW_ASPECT_RATIO,
shuffle=True,
shard_reader=shard_reader)
train_ds = train_ds.map(_transform_train, num_parallel_calls=tf.data.AUTOTUNE)
train_ds = train_ds.repeat(DEFAULT_EVAL_NUM_TRAIN_EPOCHS)
train_ds = train_ds.batch(batch_size)
train_ds = tfds.as_numpy(train_ds)
test_ds = eval_datasets.multiple_crop_dataset(
test_dataset_shards,
num_temporal_crops=DEFAULT_TEST_NUM_TEMPORAL_CROPS,
num_spatial_crops=DEFAULT_TEST_NUM_SPATIAL_CROPS,
num_video_frames=config.num_frames,
video_step=config.video_step,
initial_resize=DEFAULT_TEST_INITIAL_RESIZE,
center_crop_size=config.image_size,
shuffle=False,
shard_reader=shard_reader)
test_ds = test_ds.map(_transform_test, num_parallel_calls=tf.data.AUTOTUNE)
test_ds = test_ds.batch(batch_size)
test_ds = tfds.as_numpy(test_ds)
group_size = DEFAULT_TEST_NUM_TEMPORAL_CROPS * DEFAULT_TEST_NUM_SPATIAL_CROPS
return evaluate.linear_svm_classifier(
train_ds,
test_ds,
embedding_fn,
test_predictions_group_size=group_size,
svm_regularization=svm_regularization)
def _transform_train(batch: datasets.MiniBatch) -> datasets.MiniBatch:
"""Transform the train set."""
def augment(view):
view = augmentations.normalize_video(view)
view = augmentations.random_horizontal_flip_video(view)
view = augmentations.random_color_augment_video(
view, prob_color_augment=0.8, prob_color_drop=0.2)
return view
return datasets.MiniBatch(views={
view_name: augment(view) for view_name, view in batch.views.items()
})
def _transform_test(batch: datasets.MiniBatch) -> datasets.MiniBatch:
"""Transform the test set."""
return datasets.MiniBatch(
views={
view_name: augmentations.normalize_video(view)
for view_name, view in batch.views.items()
})
| 33.976
| 80
| 0.768778
|
9be9ed9d7497e74a9e18a64f93b2d98a6e9f5ebf
| 91
|
py
|
Python
|
tests/test_A001220.py
|
danielsimonney/oeis
|
16c1dd6e058e49b629f695acb82ec55dd5f052f9
|
[
"MIT"
] | null | null | null |
tests/test_A001220.py
|
danielsimonney/oeis
|
16c1dd6e058e49b629f695acb82ec55dd5f052f9
|
[
"MIT"
] | null | null | null |
tests/test_A001220.py
|
danielsimonney/oeis
|
16c1dd6e058e49b629f695acb82ec55dd5f052f9
|
[
"MIT"
] | null | null | null |
from oeis import A001220
def test_A001220():
assert A001220(limit=2) == [1093, 3511]
| 15.166667
| 43
| 0.692308
|
a038817ef91d3f8b36a9876a743338ba1967fea0
| 11,007
|
py
|
Python
|
SpectralClustering/src/clustering.py
|
efikalti/computational-intelligence
|
1d5e3ebd0648e1b0d25dea42c1e4d16ce872489b
|
[
"MIT"
] | null | null | null |
SpectralClustering/src/clustering.py
|
efikalti/computational-intelligence
|
1d5e3ebd0648e1b0d25dea42c1e4d16ce872489b
|
[
"MIT"
] | null | null | null |
SpectralClustering/src/clustering.py
|
efikalti/computational-intelligence
|
1d5e3ebd0648e1b0d25dea42c1e4d16ce872489b
|
[
"MIT"
] | null | null | null |
import logging
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn import cluster
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.neighbors import radius_neighbors_graph
from scipy.sparse import csgraph
class Cluster:
"""
Constructor
Initializes the class variables necessary for preprocessing the data
"""
def __init__(self):
self.lle = None
self.n_clusters = None
self.size = None
self.iterations = None
self.results = None
self.n_vectors = 5
self.affinities = ['rbf', 'nearest_neighbors']
self.laplacians = ['custom', 'csgraph']
self.eigvectors = [5, 15]
self.clusters = [3, 5, 7, 8]
#self.eigvectors = [5, 10, 15, 20]
"""
Run Locally Linear Embedding and Spectral Clustering on the provided data
LLE reduces the data to 2D
"""
def train(self, x_train, y_train, multiple=False, binary=False):
# Set number of clusters
self.n_clusters = 2
# Set the size to the training set size
self.size = len(x_train)
# Create list with numbers from 1 to number of training items
self.iterations = np.zeros(self.size)
for i in range(0, self.size):
self.iterations[i] = i+1
# Apply Locally Linear Embedding on training and testing data
x_train = self.LLE(x_train)
# Plot training data
self.filenale_ = 'multiclass'
if binary is True:
self.filenale_ = 'binary'
self.visualize2D(x_train[:, 0], x_train[:, 1], c=y_train, title='Training data ' + self.filenale_,
filename='logs/plots/training_data_' + self.filenale_)
# Change y_train labels for binary
for i in range(0, len(y_train)):
if y_train[i] == -1:
y_train[i] = 0
# Run SpectralClustering
if multiple is True:
for affinity in self.affinities:
for laplacian in self.laplacians:
for vector in self.eigvectors:
self.n_vectors = vector
if binary is True:
self.SpectralClustering(x_train, y_train, affinity=affinity, laplacian=laplacian)
else:
for n in self.clusters:
self.n_clusters = n
self.SpectralClustering(x_train, y_train, affinity=affinity, laplacian=laplacian)
else:
if binary is not True:
self.n_clusters = 8
self.n_vectors = 8
self.SpectralClustering(x_train, y_train)
if multiple is True:
for affinity in self.affinities:
# Run with sklearns Spectral Clustering
sklearn_predicted = self.SklearnSP(x_train, affinity=affinity)
title = 'SKLearn SpectralClustering Results for ' + self.filenale_ + ", " + 'affinity=' + affinity
filename='logs/plots/' + affinity + '_sklearn_' + self.filenale_
self.visualize2D(x_train[:, 0], x_train[:, 1], c=sklearn_predicted, title=title, filename=filename)
else:
# Run with sklearns Spectral Clustering
sklearn_predicted = self.SklearnSP(x_train)
self.logResults(y_train, sklearn_predicted, sklearn=True, affinity=affinity, laplacian=laplacian)
title = 'SKLearn SpectralClustering Results for ' + self.filenale_ + ", " + 'affinity=rbf'
filename='logs/plots/rbf_sklearn_' + self.filenale_
self.visualize2D(x_train[:, 0], x_train[:, 1], c=sklearn_predicted, title=title, filename=filename)
"""
Run Spectral Clustering for these data with these parameters
affinity=['rbf', 'nearest_neighbors'], laplacian=['custom', 'csgraph']
Default is nearest_neighbors kernel for similarity matrix, custom for laplacian matrix
"""
def SpectralClustering(self, x_train, y_train, affinity='nearest_neighbors', laplacian='custom'):
# Get similarity matrix for train data
if affinity == 'nearest_neighbors':
similarity_matrix = self.NNGraph(x_train)
else:
similarity_matrix = self.SimilarityMatrix(x_train)
# Get laplacian matrix from similarity matrix
if laplacian == 'csgraph':
laplacian_matrix = csgraph.laplacian(similarity_matrix, normed=False)
else:
laplacian_matrix = self.LaplacianMatrix(similarity_matrix=similarity_matrix)
# Transform data using the laplacian matrix
transormed_data = self.transformDataToLaplacian(laplacian_matrix)
# Cluster transormed data with kmeans
model = cluster.KMeans(n_clusters=self.n_clusters, precompute_distances='auto', random_state=0)
predicted = model.fit(transormed_data).labels_
self.logResults(y_train, predicted, affinity=affinity, laplacian=laplacian)
title = 'Custom SpectralClustering Results ' + self.filenale_ + ", " + 'affinity=' + affinity + ", laplacian=" + laplacian + ", vectors=" + str(self.n_vectors)
filename='logs/plots/' + affinity + '_' + laplacian + "_" + str(self.n_vectors) + "_" + str(self.n_clusters) + '_custom_' + self.filenale_
self.visualize2D(x_train[:, 0], x_train[:, 1], c=predicted, title=title, filename=filename)
"""
Create the new data using the laplacian matrix and its eigenvalues and eigenvectors
"""
def transformDataToLaplacian(self, laplacian_matrix):
# Get eigenvalues and eigenvectors from the laplacian matrix
eigval, eigvec = np.linalg.eig(laplacian_matrix)
# Keep the n_clusters smaller eigenvalues
sort_ind = np.argsort(eigval)[: self.n_vectors]
# Sort and plot eigenvalues
#eigval = np.sort(eigval)
# Initialize new array for the transormed data
transormed_data = np.zeros((len(laplacian_matrix), self.n_vectors-1), dtype=np.float64)
# Create transformed data
for i in range(0, len(laplacian_matrix)):
# Ignore first eigenvalue as it is close or equal to 0
for j in range(1, self.n_vectors):
transormed_data[i][j-1] = eigvec[i, np.asscalar(sort_ind[j])]
return transormed_data
"""
Transform and return data to 2D using LocallyLinearEmbedding
"""
def LLE(self, data):
if self.lle is None:
self.lle = LocallyLinearEmbedding(n_components=2)
self.lle.fit(data)
return self.lle.transform(data)
"""
Calculate and return the nearest neighbors graph which depicts the distances between each point to another
The graph connects only the items with at most limit distance between them and everything else is zero resulting in a sparse matrix
Default limit is 0.4
"""
def NNGraph(self, data, limit=0.4):
# Create the nearest neighbors graph
graph = radius_neighbors_graph(data, limit, mode='distance', metric='minkowski', p=2, metric_params=None, include_self=False)
graph = graph.toarray()
return graph
"""
Calculate and return the similarity matrix using the rbf kernel
"""
def SimilarityMatrix(self, data, limit=0.4):
size = len(data)
# Initialize array of size x size with zeros
similarity_matrix = np.zeros((size, size), dtype=np.float64)
for i in range(0, size):
for j in range(0, size):
if i != j:
value = self.rbf(data[i], data[j], 0.5)
#if value <= limit:
#similarity_matrix[i][j] = value
similarity_matrix[i][j] = value
return similarity_matrix
"""
Calculate and return the Laplacian matrix
"""
def LaplacianMatrix(self, similarity_matrix):
D = np.zeros(similarity_matrix.shape)
w = np.sum(similarity_matrix, axis=0)
D.flat[::len(w) + 1] = w ** (-0.5) # set the diag of D to w
return D.dot(similarity_matrix).dot(D)
"""
Run sklearn's Spectral Cluster method for comparison
"""
def SklearnSP(self, x_train, affinity='rbf'):
model = cluster.SpectralClustering(n_clusters=self.n_clusters, affinity=affinity)
model.fit(x_train)
y_predict = model.fit_predict(x_train)
return y_predict
"""
Return exp(−||a − b||^2/s^2) where s = sigma
"""
def rbf(self, a, b, sigma):
result = math.exp( -math.pow( self.VectorLength( self.VectorSub(a, b) ) , 2) / math.pow(sigma, 2) )
return result
"""
Return the legth of vector v
"""
def VectorLength(self, v):
sum = 0
for item in v:
sum += item * item
return math.sqrt(sum)
"""
Return the result of the subtraction a - b where a and b are vectors of the
same length
"""
def VectorSub(self, a, b):
if (len(a) != len(b)):
return None
v = np.zeros(len(a), dtype=np.float64)
for i in range(0, len(a)):
v[i] = a[i] - b[i]
return v
"""
Visualize 2D data
"""
def visualize2D(self, x, y, c=None, title='', filename=None):
fig, ax = plt.subplots(figsize=(13, 6))
ax.set_title(title, fontsize=16)
cmap = 'viridis'
dot_size=50
# Check if there are different colored items in the plot
if c is not None:
for i in range(0, self.n_clusters-1) :
temp_c = c[ (i*self.size) : (i+1) * self.size]
ax.scatter(x, y, c=temp_c, s=dot_size, cmap=cmap)
else:
ax.scatter(x, y, s=dot_size)
# Save to file or display plot
if filename is not None:
plt.savefig(filename + '.png')
plt.clf()
plt.close()
else:
plt.show()
"""
Log results
"""
def logResults(self, y_test, prediction, sklearn=False, affinity='rbf', laplacian='custom'):
if sklearn is True:
algorithm = 'SKLearn Spectral Clustering'
else:
algorithm = 'Custom Spectral Clustering'
# Calculate precision, recall, f1
result = metrics.precision_recall_fscore_support(y_test, prediction, average='macro')
self.results = self.results.append({ 'Algorithm': algorithm, 'Affinity': affinity,
'N_Vectors': str(self.n_vectors),
'Laplacian': laplacian, 'Precision': float("%0.3f"%result[0]),
'Recall': float("%0.3f"%result[1]), 'F1': float("%0.3f"%result[2])}, ignore_index=True)
"""
Setup results dataframe object
"""
def setupResults(self):
self.results = pd.DataFrame(columns=['Algorithm', 'Affinity', 'Laplacian', 'N_Vectors', 'Precision', 'Recall', 'F1'])
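# Illustrative sketch (not part of the class): run the spectral steps used inside
# SpectralClustering on a small synthetic sample, without writing any plots.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    sample = rng.rand(30, 2)
    clusterer = Cluster()
    clusterer.n_clusters = 3
    clusterer.n_vectors = 4
    similarity = clusterer.SimilarityMatrix(sample)            # rbf affinities
    laplacian = clusterer.LaplacianMatrix(similarity)          # normalized matrix
    embedded = clusterer.transformDataToLaplacian(laplacian)   # spectral embedding
    labels = cluster.KMeans(n_clusters=3, random_state=0).fit_predict(embedded)
    print(labels)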
| 37.185811
| 167
| 0.605615
|
1ae6f40eda2bb4a1ef579c903301fd86a1de34ce
| 1,194
|
py
|
Python
|
src/worker/tool_config_parser.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 1
|
2019-06-17T17:01:17.000Z
|
2019-06-17T17:01:17.000Z
|
src/worker/tool_config_parser.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | 7
|
2021-02-08T20:46:15.000Z
|
2021-09-08T02:12:59.000Z
|
src/worker/tool_config_parser.py
|
mehsoy/jaws
|
b79723c1fc549741494ebf5d948e94a44e971f2a
|
[
"MIT"
] | null | null | null |
import os
import configparser
from exceptions.configuration_exception import ConfigurationException
from helpers import Singleton
from worker.copytool.rsync import Rsync
from worker.copytool.shiftc import Shiftc
from worker.copytool.tar import Tar
class ToolConfigParser(metaclass=Singleton):
def __init__(self, config_path=None):
self._config_path = config_path
if not config_path or not os.path.exists(config_path):
raise ConfigurationException(' '.join(['File', config_path, 'doesn\'t exist']))
cp = configparser.ConfigParser()
cp.read(config_path)
if 'tools' in cp.sections():
self._tools = cp['tools']
else:
raise ConfigurationException('No section \'tools\' in' + config_path)
def get_executable_path(self, identifier: str):
if identifier in self._tools:
return self._tools.get(identifier)
else:
raise ConfigurationException(' '.join(['Field', identifier, 'doesn\'t exist in', self._config_path]))
def get_copytool_class(self, name):
i = dict(tar=Tar,
rsync=Rsync,
shiftc=Shiftc)
return i[name]
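# Illustrative usage (not part of the original file). The parser expects an INI
# file with a [tools] section mapping tool names to executable paths; the path
# and entries below are hypothetical:
#   [tools]
#   rsync = /usr/bin/rsync
#   tar = /bin/tar
if __name__ == '__main__':
    parser = ToolConfigParser(config_path='/etc/jaws/tools.ini')
    print(parser.get_executable_path('rsync'))
    print(parser.get_copytool_class('rsync'))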
| 33.166667
| 113
| 0.665829
|
736337aed35889c16430019dba47a5129e73c668
| 3,667
|
py
|
Python
|
ta_assistant_django/settings.py
|
HelloYeew/ta_assistant_django
|
c72af9ae260c917d4835892811240894602ac454
|
[
"MIT"
] | null | null | null |
ta_assistant_django/settings.py
|
HelloYeew/ta_assistant_django
|
c72af9ae260c917d4835892811240894602ac454
|
[
"MIT"
] | null | null | null |
ta_assistant_django/settings.py
|
HelloYeew/ta_assistant_django
|
c72af9ae260c917d4835892811240894602ac454
|
[
"MIT"
] | null | null | null |
"""
Django settings for ta_assistant_django project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Before production : Make a new key (https://humberto.io/blog/tldr-generate-django-secret-key/)
SECRET_KEY = 'django-insecure-$6sx#%y75q+7pz&!vx%41h30&b*95y-evi+=pwql7@+wbv7u$='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'classroom.apps.ClassroomConfig',
'users.apps.UsersConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ta_assistant_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ta_assistant_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'classroom-home'
LOGIN_URL = 'login'
| 26.381295
| 96
| 0.705481
|
e5d12f9ee0bd93ad5f0bf96abd979868752582f2
| 1,236
|
py
|
Python
|
httpserver/__init__.py
|
evinlort/http-server
|
7db0637da7afbdb3736e595c431477c5242320a9
|
[
"MIT"
] | null | null | null |
httpserver/__init__.py
|
evinlort/http-server
|
7db0637da7afbdb3736e595c431477c5242320a9
|
[
"MIT"
] | null | null | null |
httpserver/__init__.py
|
evinlort/http-server
|
7db0637da7afbdb3736e595c431477c5242320a9
|
[
"MIT"
] | null | null | null |
import os
import httpserver.server.server as srv
class Server:
def __init__(self, *, port: int = 8080, router: str = "router/web", controllers: str = "controllers"):
self._port = int(port)
self._router = router
self.__router_check()
self.__folder_check(controllers)
self._controllers = controllers
def get_port(self) -> int:
return self._port
def get_router(self) -> str:
return self._router
def get_controllers(self) -> str:
return self._controllers
def __router_check(self):
if self._router[-3:] != ".py":
self._router += ".py"
if os.path.exists(self._router):
return
router_list = self._router.split("/")
router_file = router_list[-1]
router_path = "/".join(router_list[:-1])
try:
os.makedirs(router_path)
except FileExistsError:
pass
open(f"{router_path}/{router_file}", "w").close()
@staticmethod
def __folder_check(folder_path):
if os.path.exists(folder_path):
return
os.makedirs(folder_path)
def run(self):
http_server = srv.ThreadingServer(self)
srv.run(http_server)
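# Hedged usage sketch (port and folder names are illustrative): constructing Server
# creates the router file and the controllers folder if they are missing, and run()
# hands the instance to httpserver.server.server.ThreadingServer.
#
#   server = Server(port=8080, router="router/web", controllers="controllers")
#   server.run()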
| 26.869565
| 106
| 0.596278
|
e36da741e759ed69596dbafa050fa61b41b9ac18
| 2,614
|
py
|
Python
|
grammaranalyzer.py
|
JVMartin/grammar-analyzer
|
c6316b10cac6185e7986ae321e2ca3806bf654e5
|
[
"MIT"
] | null | null | null |
grammaranalyzer.py
|
JVMartin/grammar-analyzer
|
c6316b10cac6185e7986ae321e2ca3806bf654e5
|
[
"MIT"
] | null | null | null |
grammaranalyzer.py
|
JVMartin/grammar-analyzer
|
c6316b10cac6185e7986ae321e2ca3806bf654e5
|
[
"MIT"
] | null | null | null |
"""
A class used to test strings against a given grammar to see if the grammar
can generate the string.
An instance of GrammarAnalyzer is initialized with a passed Grammar object
and is capable of testing that grammar against test strings (input strings)
using the test_string method.
"""
class GrammarAnalyzer:
def __init__(self, grammar):
self.verbose = False
self.grammar = grammar
self.stack = []
self.input = []
self.verbalize("Grammar analyzer initialized.")
self.verbalize("Imported grammar: " + grammar.get_desc())
self.verbalize(self.hr())
def test_string(self, string):
"""
Takes a given string and tests if the grammar can generate it.
:param string: The string to test.
:return bool: True if the grammar can generate it; false otherwise.
"""
if not len(string):
return False
self.stack = ["$", "S"]
self.input = list(string)
self.input.reverse()
self.verbalize("Testing string: " + string)
self.verbalize("Starting stack: " + self.stack_to_string())
return self.parse_input()
def parse_input(self):
"""
Use the grammar ruleset to parse the string and determine language
membership.
:return: True if the grammar can generate it; false otherwise.
"""
self.verbalize(self.hr())
self.verbalize("Input stack: " + self.input_to_string())
self.verbalize("Stack: " + self.stack_to_string())
# Pop the next symbol off the stack.
stack_symbol = self.stack.pop()
# If the stack is empty and the input is too, accept.
# Otherwise, reject.
if stack_symbol == "$":
return True if not self.input else False
# If the input is empty, reject.
if not self.input:
return False
# Peek at the next input symbol.
input_symbol = self.input[-1]
# If we have a variable...
if stack_symbol.isalpha() and stack_symbol.isupper():
# Grab the next rule to apply.
rule = self.grammar.get_rule(stack_symbol, input_symbol)
if not rule:
self.verbalize("There was no rule to push.")
return False
# Push the rule onto the stack.
symbols = list(rule)
for symbol in reversed(symbols):
self.stack.append(symbol)
# If we have a terminal and it matches the next input symbol...
elif stack_symbol == input_symbol:
# Turn the peek into a pop.
self.input.pop()
else:
return False
return self.parse_input()
def stack_to_string(self):
return "".join(reversed(self.stack))
def input_to_string(self):
return "".join(reversed(self.input))
def verbalize(self, output):
if self.verbose:
print(output)
def hr(self):
return "--------------------------------------------------"
| 25.378641
| 75
| 0.685539
|
0e6f8bbfab88586b7d3300c4ea107623af500be0
| 3,022
|
py
|
Python
|
joker/cmds/start_funcs.py
|
PuNkYsHuNgRy/joker-blockchain
|
e49d6b9aa46e6097e216561bd7563b50519aae13
|
[
"Apache-2.0"
] | 4
|
2022-03-04T06:08:15.000Z
|
2022-03-17T19:14:22.000Z
|
joker/cmds/start_funcs.py
|
zcomputerwiz/joker-blockchain
|
72cf94708acd49e0cbcc63c74d5ddb1e1045b8a5
|
[
"Apache-2.0"
] | 1
|
2022-03-29T13:20:11.000Z
|
2022-03-29T13:20:11.000Z
|
joker/cmds/start_funcs.py
|
zcomputerwiz/joker-blockchain
|
72cf94708acd49e0cbcc63c74d5ddb1e1045b8a5
|
[
"Apache-2.0"
] | 3
|
2022-03-10T22:26:44.000Z
|
2022-03-15T08:46:15.000Z
|
import asyncio
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional
from joker.cmds.passphrase_funcs import get_current_passphrase
from joker.daemon.client import DaemonProxy, connect_to_daemon_and_validate
from joker.util.keychain import Keychain, KeyringMaxUnlockAttempts
from joker.util.service_groups import services_for_groups
def launch_start_daemon(root_path: Path) -> subprocess.Popen:
os.environ["JOKER_ROOT"] = str(root_path)
# TODO: use startupinfo=subprocess.DETACHED_PROCESS on windows
joker = sys.argv[0]
process = subprocess.Popen(f"{joker} run_daemon --wait-for-unlock".split(), stdout=subprocess.PIPE)
return process
async def create_start_daemon_connection(root_path: Path) -> Optional[DaemonProxy]:
connection = await connect_to_daemon_and_validate(root_path)
if connection is None:
print("Starting daemon")
# launch a daemon
process = launch_start_daemon(root_path)
# give the daemon a chance to start up
if process.stdout:
process.stdout.readline()
await asyncio.sleep(1)
# it prints "daemon: listening"
connection = await connect_to_daemon_and_validate(root_path)
if connection:
passphrase = None
if await connection.is_keyring_locked():
passphrase = Keychain.get_cached_master_passphrase()
if not Keychain.master_passphrase_is_valid(passphrase):
passphrase = get_current_passphrase()
if passphrase:
print("Unlocking daemon keyring")
await connection.unlock_keyring(passphrase)
return connection
return None
async def async_start(root_path: Path, group: str, restart: bool) -> None:
try:
daemon = await create_start_daemon_connection(root_path)
except KeyringMaxUnlockAttempts:
print("Failed to unlock keyring")
return None
if daemon is None:
print("Failed to create the joker daemon")
return None
for service in services_for_groups(group):
if await daemon.is_running(service_name=service):
print(f"{service}: ", end="", flush=True)
if restart:
if not await daemon.is_running(service_name=service):
print("not running")
elif await daemon.stop_service(service_name=service):
print("stopped")
else:
print("stop failed")
else:
print("Already running, use `-r` to restart")
continue
print(f"{service}: ", end="", flush=True)
msg = await daemon.start_service(service_name=service)
success = msg and msg["data"]["success"]
if success is True:
print("started")
else:
error = "no response"
if msg:
error = msg["data"]["error"]
print(f"{service} failed to start. Error: {error}")
await daemon.close()
| 35.139535
| 103
| 0.650232
|
ca63d64884bc09dee95fdf8523f1f35a66109176
| 11,642
|
py
|
Python
|
spyder/plugins/projects/widgets/projectexplorer.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 7,956
|
2015-02-17T01:19:09.000Z
|
2022-03-31T21:52:15.000Z
|
spyder/plugins/projects/widgets/projectexplorer.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 16,326
|
2015-02-16T23:15:21.000Z
|
2022-03-31T23:34:34.000Z
|
spyder/plugins/projects/widgets/projectexplorer.py
|
Earthman100/spyder
|
949ce0f9100a69504c70a5678e8589a05aee7d38
|
[
"MIT"
] | 1,918
|
2015-02-20T19:26:26.000Z
|
2022-03-31T19:03:25.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Project Explorer"""
# pylint: disable=C0103
# Standard library imports
from __future__ import print_function
import os
import os.path as osp
import shutil
# Third party imports
from qtpy.QtCore import QSortFilterProxyModel, Qt, Signal, Slot
from qtpy.QtWidgets import QAbstractItemView, QHeaderView, QMessageBox
# Local imports
from spyder.api.translations import get_translation
from spyder.py3compat import to_text_string
from spyder.utils import misc
from spyder.plugins.explorer.widgets.explorer import DirView
_ = get_translation('spyder')
class ProxyModel(QSortFilterProxyModel):
"""Proxy model to filter tree view."""
PATHS_TO_HIDE = [
# Useful paths
'.spyproject',
'__pycache__',
'.ipynb_checkpoints',
# VCS paths
'.git',
'.hg',
'.svn',
# Others
'.pytest_cache',
'.DS_Store',
'Thumbs.db',
'.directory'
]
PATHS_TO_SHOW = [
'.github'
]
def __init__(self, parent):
"""Initialize the proxy model."""
super(ProxyModel, self).__init__(parent)
self.root_path = None
self.path_list = []
self.setDynamicSortFilter(True)
def setup_filter(self, root_path, path_list):
"""
Setup proxy model filter parameters.
Parameters
----------
root_path: str
Root path of the proxy model.
path_list: list
List with all the paths.
"""
self.root_path = osp.normpath(str(root_path))
self.path_list = [osp.normpath(str(p)) for p in path_list]
self.invalidateFilter()
def sort(self, column, order=Qt.AscendingOrder):
"""Reimplement Qt method."""
self.sourceModel().sort(column, order)
def filterAcceptsRow(self, row, parent_index):
"""Reimplement Qt method."""
if self.root_path is None:
return True
index = self.sourceModel().index(row, 0, parent_index)
path = osp.normcase(osp.normpath(
str(self.sourceModel().filePath(index))))
if osp.normcase(self.root_path).startswith(path):
# This is necessary because parent folders need to be scanned
return True
else:
for p in [osp.normcase(p) for p in self.path_list]:
if path == p or path.startswith(p + os.sep):
if not any([d in path for d in self.PATHS_TO_SHOW]):
if any([d in path for d in self.PATHS_TO_HIDE]):
return False
else:
return True
else:
return True
else:
return False
def data(self, index, role):
"""Show tooltip with full path only for the root directory."""
if role == Qt.ToolTipRole:
root_dir = self.path_list[0].split(osp.sep)[-1]
if index.data() == root_dir:
return osp.join(self.root_path, root_dir)
return QSortFilterProxyModel.data(self, index, role)
def type(self, index):
"""
Returns the type of file for the given index.
Parameters
----------
index: int
Given index to search its type.
"""
return self.sourceModel().type(self.mapToSource(index))
class FilteredDirView(DirView):
"""Filtered file/directory tree view."""
def __init__(self, parent=None):
"""Initialize the filtered dir view."""
super().__init__(parent)
self.proxymodel = None
self.setup_proxy_model()
self.root_path = None
# ---- Model
def setup_proxy_model(self):
"""Setup proxy model."""
self.proxymodel = ProxyModel(self)
self.proxymodel.setSourceModel(self.fsmodel)
def install_model(self):
"""Install proxy model."""
if self.root_path is not None:
self.setModel(self.proxymodel)
def set_root_path(self, root_path):
"""
Set root path.
Parameters
----------
root_path: str
New path directory.
"""
self.root_path = root_path
self.install_model()
index = self.fsmodel.setRootPath(root_path)
self.proxymodel.setup_filter(self.root_path, [])
self.setRootIndex(self.proxymodel.mapFromSource(index))
def get_index(self, filename):
"""
Return index associated with filename.
Parameters
----------
filename: str
String with the filename.
"""
index = self.fsmodel.index(filename)
if index.isValid() and index.model() is self.fsmodel:
return self.proxymodel.mapFromSource(index)
def set_folder_names(self, folder_names):
"""
Set folder names
Parameters
----------
folder_names: list
List with the folder names.
"""
assert self.root_path is not None
path_list = [osp.join(self.root_path, dirname)
for dirname in folder_names]
self.proxymodel.setup_filter(self.root_path, path_list)
def get_filename(self, index):
"""
Return filename from index
Parameters
----------
index: int
Index of the list of filenames
"""
if index:
path = self.fsmodel.filePath(self.proxymodel.mapToSource(index))
return osp.normpath(str(path))
def setup_project_view(self):
"""Setup view for projects."""
for i in [1, 2, 3]:
self.hideColumn(i)
self.setHeaderHidden(True)
# ---- Events
def directory_clicked(self, dirname, index):
if index and index.isValid():
if self.get_conf('single_click_to_open'):
state = not self.isExpanded(index)
else:
state = self.isExpanded(index)
self.setExpanded(index, state)
class ProjectExplorerTreeWidget(FilteredDirView):
"""Explorer tree widget"""
sig_delete_project = Signal()
def __init__(self, parent, show_hscrollbar=True):
FilteredDirView.__init__(self, parent)
self.last_folder = None
self.setSelectionMode(FilteredDirView.ExtendedSelection)
self.show_hscrollbar = show_hscrollbar
# Enable drag & drop events
self.setDragEnabled(True)
self.setDragDropMode(FilteredDirView.DragDrop)
# ------Public API---------------------------------------------------------
@Slot(bool)
def toggle_hscrollbar(self, checked):
"""Toggle horizontal scrollbar"""
self.set_conf('show_hscrollbar', checked)
self.show_hscrollbar = checked
self.header().setStretchLastSection(not checked)
self.header().setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
self.header().setSectionResizeMode(QHeaderView.ResizeToContents)
# ---- Internal drag & drop
def dragMoveEvent(self, event):
"""Reimplement Qt method"""
index = self.indexAt(event.pos())
if index:
dst = self.get_filename(index)
if osp.isdir(dst):
event.acceptProposedAction()
else:
event.ignore()
else:
event.ignore()
def dropEvent(self, event):
"""Reimplement Qt method"""
event.ignore()
action = event.dropAction()
if action not in (Qt.MoveAction, Qt.CopyAction):
return
# QTreeView must not remove the source items even in MoveAction mode:
# event.setDropAction(Qt.CopyAction)
dst = self.get_filename(self.indexAt(event.pos()))
yes_to_all, no_to_all = None, None
src_list = [to_text_string(url.toString())
for url in event.mimeData().urls()]
if len(src_list) > 1:
buttons = (QMessageBox.Yes | QMessageBox.YesToAll |
QMessageBox.No | QMessageBox.NoToAll |
QMessageBox.Cancel)
else:
buttons = QMessageBox.Yes | QMessageBox.No
for src in src_list:
if src == dst:
continue
dst_fname = osp.join(dst, osp.basename(src))
if osp.exists(dst_fname):
if yes_to_all is not None or no_to_all is not None:
if no_to_all:
continue
elif osp.isfile(dst_fname):
answer = QMessageBox.warning(
self,
_('Project explorer'),
_('File <b>%s</b> already exists.<br>'
'Do you want to overwrite it?') % dst_fname,
buttons
)
if answer == QMessageBox.No:
continue
elif answer == QMessageBox.Cancel:
break
elif answer == QMessageBox.YesToAll:
yes_to_all = True
elif answer == QMessageBox.NoToAll:
no_to_all = True
continue
else:
QMessageBox.critical(
self,
_('Project explorer'),
_('Folder <b>%s</b> already exists.') % dst_fname,
QMessageBox.Ok
)
event.setDropAction(Qt.CopyAction)
return
try:
if action == Qt.CopyAction:
if osp.isfile(src):
shutil.copy(src, dst)
else:
shutil.copytree(src, dst)
else:
if osp.isfile(src):
misc.move_file(src, dst)
else:
shutil.move(src, dst)
self.parent_widget.removed.emit(src)
except EnvironmentError as error:
if action == Qt.CopyAction:
action_str = _('copy')
else:
action_str = _('move')
QMessageBox.critical(
self,
_("Project Explorer"),
_("<b>Unable to %s <i>%s</i></b>"
"<br><br>Error message:<br>%s") % (action_str, src,
str(error))
)
@Slot()
def delete(self, fnames=None):
"""Delete files"""
if fnames is None:
fnames = self.get_selected_filenames()
multiple = len(fnames) > 1
yes_to_all = None
for fname in fnames:
if fname == self.proxymodel.path_list[0]:
self.sig_delete_project.emit()
else:
yes_to_all = self.delete_file(fname, multiple, yes_to_all)
if yes_to_all is not None and not yes_to_all:
# Canceled
break
| 33.454023
| 80
| 0.519584
|
39b6f99f14ad91d5ca90da7b9cbe3767b06371b9
| 19,593
|
py
|
Python
|
onnx_coreml/converter.py
|
yukihiko/onnx-coreml
|
08aeb7246fbebad3beeeeef68d861012d232f856
|
[
"MIT"
] | 1
|
2019-11-25T16:38:01.000Z
|
2019-11-25T16:38:01.000Z
|
onnx_coreml/converter.py
|
yukihiko/onnx-coreml
|
08aeb7246fbebad3beeeeef68d861012d232f856
|
[
"MIT"
] | null | null | null |
onnx_coreml/converter.py
|
yukihiko/onnx-coreml
|
08aeb7246fbebad3beeeeef68d861012d232f856
|
[
"MIT"
] | 1
|
2019-06-09T06:19:01.000Z
|
2019-06-09T06:19:01.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from typing import Text, Union, Optional, Dict, Any, Iterable, Sequence, Callable, List
from ._shapeinference import infer_shapes_and_types
import onnx
import numpy as np
from onnx import TensorProto
from coremltools.models.neural_network import NeuralNetworkBuilder #type: ignore
from coremltools.models import datatypes, MLModel #type: ignore
from coremltools.proto import FeatureTypes_pb2 as ft #type: ignore
from typing import Tuple
from ._operators import _convert_node, _SEQUENCE_LAYERS_REGISTRY, _ONNX_NODE_REGISTRY, _add_const_inputs_if_required
from ._graph import Graph, EdgeInfo, Transformer
from ._transformers import ConvAddFuser, DropoutRemover, \
ReshapeInitTensorFuser, BNBroadcastedMulFuser, BNBroadcastedAddFuser, \
PixelShuffleFuser, OutputRenamer, AddModelInputsOutputs, \
ConstantsToInitializers, ImageScalerRemover, UnsqueezeConstantRemover, TransposeConstantRemover, \
ShapeOpRemover, SliceConstantRemover, ConcatConstantRemover
from ._error_utils import ErrorHandling
'''
inputs: list of tuples.
[Tuple]: [(name, type, shape)]
'''
def _make_coreml_input_features(graph): # type: (...) -> Sequence[Tuple[Text, datatypes.Array]]
'''
ONNX shapes to CoreML static shapes mapping
length==1: [C]
length==2: [B,C]
length==3: [C,H,W] or [Seq,B,C]
length==4: [B,C,H,W]
'''
inputs = graph.inputs
op_types = graph.blob_to_op_type
features = []
for input_ in inputs:
shape = input_[2]
if len(shape) == 0:
shape = [1, 1, 1]
elif len(shape) == 1:
# assume [C]
pass
elif len(shape) == 2:
# assume [Batch,C]
shape = [shape[1]]
elif len(shape) == 3:
# assume [C,H,W] unless its connected to recurrent related ops
if input_[0] in op_types and \
len(op_types[input_[0]]) == 1 and \
str(op_types[input_[0]][0]) in _SEQUENCE_LAYERS_REGISTRY:
# onnx shape: (Seq,B,C)
shape = [shape[2]]
elif len(shape) == 4: # (B,C,H,W) --> (C,H,W)
shape = shape[1:]
else:
raise ValueError("Unrecognized input shape %s, for input '%s' " % (str(shape), str(input_[0])))
features.append((str(input_[0]), datatypes.Array(*shape)))
return features
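# Illustrative examples of the mapping above (hedged, the shapes are made up): an ONNX
# input of shape (1, 3, 224, 224) becomes the CoreML array (3, 224, 224); a rank-2
# input (B, 512) becomes (512,); a rank-3 input (Seq, B, 64) feeding a recurrent layer
# becomes (64,), while other rank-3 inputs are kept as (C, H, W).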
'''
outputs: list of tuples.
[Tuple]: [(name, type, shape)]
'''
def _make_coreml_output_features(graph): # type: (...) -> Sequence[Tuple[Text, datatypes.Array]]
features = []
outputs = graph.outputs
op_types = graph.blob_from_op_type
for output_ in outputs:
shape = output_[2]
if len(shape) == 0:
shape = [1, 1, 1]
elif len(shape) == 1:
pass
elif len(shape) == 3:
if output_[0] in op_types and \
str(op_types[output_[0]]) in _SEQUENCE_LAYERS_REGISTRY:
# onnx shape: (Seq,B,C)
shape = [shape[2]]
elif len(shape) == 4: # (B,C,H,W) --> (C,H,W)
shape = shape[1:]
else:
shape = None #output shape need not be specified for CoreML.
if shape is None:
features.append((str(output_[0]), shape))
else:
features.append((str(output_[0]), datatypes.Array(*shape)))
return features
def _check_unsupported_ops(nodes): # type: (...) -> None
unsupported_op_types = [] # type: List[Text]
for node in nodes:
if node.op_type not in _ONNX_NODE_REGISTRY and \
node.op_type not in unsupported_op_types:
unsupported_op_types.append(node.op_type)
if len(unsupported_op_types) > 0:
raise NotImplementedError("Unsupported ONNX ops of type: %s" % (
','.join(unsupported_op_types)))
def _update_multiarray_to_float32(feature, #type: Any
): # type : (...) -> None
if feature.type.HasField('multiArrayType'):
feature.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32
def _update_multiarray_to_int32(feature, #type: Any
): # type : (...) -> None
if feature.type.HasField('multiArrayType'):
feature.type.multiArrayType.dataType = ft.ArrayFeatureType.INT32
def _transform_coreml_dtypes(builder, # type : NeuralNetworkBuilder
inputs, # type: List[EdgeInfo]
outputs # type: List[EdgeInfo]
):
# type: (...) -> None
''' Make sure ONNX input/output data types are mapped to the equivalent CoreML types
'''
for i, input_ in enumerate(inputs):
onnx_type = input_[1]
if onnx_type == TensorProto.FLOAT:
_update_multiarray_to_float32(builder.spec.description.input[i])
elif onnx_type == TensorProto.DOUBLE:
continue
elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
_update_multiarray_to_int32(builder.spec.description.input[i])
else:
raise TypeError("Input must be of of type FLOAT, DOUBLE, INT32 or INT64")
for i, output_ in enumerate(outputs):
onnx_type = output_[1]
if onnx_type == TensorProto.FLOAT:
_update_multiarray_to_float32(builder.spec.description.output[i])
elif onnx_type == TensorProto.DOUBLE:
continue
elif onnx_type == TensorProto.INT32 or onnx_type == TensorProto.INT64:
_update_multiarray_to_int32(builder.spec.description.output[i])
else:
raise TypeError("Output must be of of type FLOAT, DOUBLE, INT32 or INT64")
def _convert_multiarray_output_to_image(spec, # type: Any
feature_name, # type: Text
is_bgr=False, # type: bool
):
# type: (...) -> None
for output in spec.description.output:
if output.name != feature_name:
continue
if output.type.WhichOneof('Type') != 'multiArrayType':
raise ValueError(
"{} is not a multiarray type".format(output.name,)
)
array_shape = tuple(output.type.multiArrayType.shape)
if len(array_shape) == 2:
height, width = array_shape
output.type.imageType.colorSpace = \
ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
else:
if len(array_shape) == 4:
if array_shape[0] != 1:
raise ValueError(
"Shape {} is not supported for image output"
.format(array_shape,)
)
array_shape = array_shape[1:]
channels, height, width = array_shape
if channels == 1:
output.type.imageType.colorSpace = \
ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
elif channels == 3:
if is_bgr:
output.type.imageType.colorSpace = \
ft.ImageFeatureType.ColorSpace.Value('BGR')
else:
output.type.imageType.colorSpace = \
ft.ImageFeatureType.ColorSpace.Value('RGB')
else:
raise ValueError(
"Channel Value {} is not supported for image output"
.format(channels,)
)
output.type.imageType.width = width
output.type.imageType.height = height
def _set_deprocessing(is_grayscale, # type: bool
builder, # type: NeuralNetworkBuilder
deprocessing_args, # type: Dict[Text, Any]
input_name, # type: Text
output_name, # type: Text
):
# type: (...) -> None
is_bgr = deprocessing_args.get('is_bgr', False)
image_scale = deprocessing_args.get('image_scale', 1.0)
if is_grayscale:
gray_bias = deprocessing_args.get('gray_bias', 0.0)
W = np.array([image_scale])
b = np.array([gray_bias])
else:
W = np.array([image_scale, image_scale, image_scale])
red_bias = deprocessing_args.get('red_bias', 0.0)
green_bias = deprocessing_args.get('green_bias', 0.0)
blue_bias = deprocessing_args.get('blue_bias', 0.0)
if not is_bgr:
b = np.array([
red_bias,
green_bias,
blue_bias,
])
else:
b = np.array([
blue_bias,
green_bias,
red_bias,
])
builder.add_scale(
name=input_name,
W=W,
b=b,
has_bias=True,
shape_scale=W.shape,
shape_bias=b.shape,
input_name=input_name,
output_name=output_name
)
def _prepare_onnx_graph(graph, transformers): # type: (Graph, Iterable[Transformer]) -> Graph
graph = infer_shapes_and_types(graph)
graph_ = Graph.from_onnx(graph)
#from .graph_viz import plot_graph
#plot_graph(graph_, '/tmp/graph_raw.png')
graph_ = graph_.transformed(transformers)
#plot_graph(graph_, '/tmp/graph_opt.png')
return graph_
def convert(model, # type: Union[onnx.ModelProto, Text]
mode=None, # type: Optional[Text]
image_input_names=[], # type: Sequence[Text]
preprocessing_args={}, # type: Dict[Text, Any]
image_output_names=[], # type: Sequence[Text]
deprocessing_args={}, # type: Dict[Text, Any]
class_labels=None, # type: Union[Text, Iterable[Text], None]
predicted_feature_name='classLabel', # type: Text
add_custom_layers = False, # type: bool
custom_conversion_functions = {}, #type: Dict[Text, Any]
):
# type: (...) -> MLModel
"""
Convert ONNX model to CoreML.
Parameters
----------
model:
An ONNX model with parameters loaded in onnx package or path to file
with models.
mode: 'classifier', 'regressor' or None
Mode of the converted coreml model:
'classifier', a NeuralNetworkClassifier spec will be constructed.
'regressor', a NeuralNetworkRegressor spec will be constructed.
preprocessing_args:
'is_bgr', 'red_bias', 'green_bias', 'blue_bias', 'gray_bias',
'image_scale' keys with the same meaning as
https://apple.github.io/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
deprocessing_args:
Same as 'preprocessing_args' but for deprocessing.
class_labels:
As a string it represents the name of the file which contains
the classification labels (one per line).
As a list of strings it represents a list of categories that map
the index of the output of a neural network to labels in a classifier.
predicted_feature_name:
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
add_custom_layers: bool
Flag to turn on addition of custom CoreML layers for unsupported ONNX ops or attributes within
a supported op.
custom_conversion_functions: dict()
A dictionary with keys corresponding to the names of onnx ops and values as functions taking
an object of class 'Node' (see onnx-coreml/_graph.Node) and returning CoreML custom layer parameters.
Returns
-------
model: A coreml model.
"""
if isinstance(model, Text):
onnx_model = onnx.load(model)
elif isinstance(model, onnx.ModelProto):
onnx_model = model
else:
raise TypeError(
"Model must be file path to .onnx file or onnx loaded model"
)
transformers = [
ConstantsToInitializers(),
ShapeOpRemover(),
ReshapeInitTensorFuser(),
DropoutRemover(),
UnsqueezeConstantRemover(),
TransposeConstantRemover(),
SliceConstantRemover(),
ConcatConstantRemover(),
ConvAddFuser(),
BNBroadcastedMulFuser(),
BNBroadcastedAddFuser(),
PixelShuffleFuser(),
AddModelInputsOutputs(),
] # type: Iterable[Transformer]
graph = _prepare_onnx_graph(onnx_model.graph, transformers)
# are there ImageScaler nodes in the Graph?
# If yes then add the info from it to the preprocessing dictionary, if the dictionary is not
# already provided by the user
if not bool(preprocessing_args):
for node in graph.nodes:
if node.op_type == 'ImageScaler':
inp_name = node.inputs[0]
scale = node.attrs.get('scale', 1.0)
bias = node.attrs.get('bias', [0,0,0])
if not (len(bias) == 1 or len(bias) == 3):
continue
if 'image_scale' in preprocessing_args:
preprocessing_args['image_scale'][inp_name] = scale
else:
preprocessing_args['image_scale'] = {inp_name: scale}
if len(bias) == 3:
for i, color in enumerate(['red', 'green', 'blue']):
if color + '_bias' in preprocessing_args:
preprocessing_args[color + '_bias'][inp_name] = bias[i]
else:
preprocessing_args[color + '_bias'] = {inp_name: bias[i]}
else:
if 'gray_bias' in preprocessing_args:
preprocessing_args['gray_bias'][inp_name] = bias[0]
else:
preprocessing_args['gray_bias'] = {inp_name: bias[0]}
if inp_name not in image_input_names:
image_input_names.append(inp_name) # type: ignore
# remove all ImageScaler ops
graph = graph.transformed([ImageScalerRemover()])
#Make CoreML input and output features by gathering shape info and
#interpreting it for CoreML
input_features = _make_coreml_input_features(graph)
output_features = _make_coreml_output_features(graph)
builder = NeuralNetworkBuilder(input_features, output_features, mode = mode)
_transform_coreml_dtypes(builder, graph.inputs, graph.outputs)
is_deprocess_bgr_only = (len(deprocessing_args) == 1) and \
("is_bgr" in deprocessing_args)
add_deprocess = (len(image_output_names) > 0) and \
(len(deprocessing_args) > 0) and \
(not is_deprocess_bgr_only)
if add_deprocess:
mapping = {}
for f in output_features:
output_name = f[0]
mapping[output_name] = graph.get_unique_edge_name(output_name)
graph = OutputRenamer(mapping)(graph)
if len(image_input_names) > 0:
builder.set_pre_processing_parameters(
image_input_names=image_input_names,
is_bgr=preprocessing_args.get('is_bgr', False),
red_bias=preprocessing_args.get('red_bias', 0.0),
green_bias=preprocessing_args.get('green_bias', 0.0),
blue_bias=preprocessing_args.get('blue_bias', 0.0),
gray_bias=preprocessing_args.get('gray_bias', 0.0),
image_scale=preprocessing_args.get('image_scale', 1.0)
)
preprocessing_args.clear()
if len(image_output_names) > 0:
for f in output_features:
f_name = f[0]
if f_name in image_output_names:
is_bgr = deprocessing_args.get('is_bgr', False)
_convert_multiarray_output_to_image(
builder.spec, f_name, is_bgr=is_bgr
)
'''Iterate through all the ops and translate them to CoreML layers.
'''
if not add_custom_layers:
_check_unsupported_ops(graph.nodes)
err = ErrorHandling(add_custom_layers,
custom_conversion_functions)
for i, node in enumerate(graph.nodes):
print("%d/%d: Converting Node Type %s" %(i+1, len(graph.nodes), node.op_type))
_add_const_inputs_if_required(builder, node, graph, err)
_convert_node(builder, node, graph, err)
if add_deprocess:
for f in output_features:
output_name = f[0]
if output_name not in image_output_names:
continue
output_shape = f[1].dimensions
if len(output_shape) == 2 or output_shape[0] == 1:
is_grayscale = True
elif output_shape[0] == 3:
is_grayscale = False
else:
raise ValueError('Output must be RGB image or Grayscale')
_set_deprocessing(
is_grayscale,
builder,
deprocessing_args,
mapping[output_name],
output_name
)
if class_labels is not None:
if isinstance(class_labels, Text):
labels = [l.strip() for l in open(class_labels).readlines()] # type: Sequence[Text]
elif isinstance(class_labels, list):
labels = class_labels
else:
raise TypeError(
"synset variable of unknown type. Type found: {}. \
Expected either string or list of strings."
.format(type(class_labels),))
builder.set_class_labels(
class_labels=labels,
predicted_feature_name=predicted_feature_name
)
# add description to inputs/outputs that feed in/out of recurrent layers
for node_ in graph.nodes:
if str(node_.op_type) in _SEQUENCE_LAYERS_REGISTRY:
input_ = node_.inputs[0]
output_ = node_.outputs[0]
for i, inputs in enumerate(builder.spec.description.input):
if inputs.name == input_:
builder.spec.description.input[i].shortDescription = 'This input is a sequence'
for i, outputs in enumerate(builder.spec.description.output):
if outputs.name == output_:
builder.spec.description.output[i].shortDescription = 'This output is a sequence'
print("Translation to CoreML spec completed. Now compiling the CoreML model.")
try:
mlmodel = MLModel(builder.spec)
except:
raise ValueError('Compilation failed. Translation to CoreML spec was incorrect.')
# print information about all ops for which custom layers have been added
if len(err.custom_layer_nodes) > 0:
print('\n')
print("Custom layers have been added to the CoreML model "
"corresponding to the following ops in the onnx model: ")
for i, node in enumerate(err.custom_layer_nodes):
input_info = []
for input_ in node.inputs:
input_info.append((str(input_), graph.shape_dict.get(input_, str("Shape not available"))))
output_info = []
for output_ in node.outputs:
output_info.append((str(output_), graph.shape_dict.get(output_, str("Shape not available"))))
print("{}/{}: op type: {}, op input names and shapes: {}, op output names and shapes: {}".
format(i+1, len(err.custom_layer_nodes), node.op_type, str(input_info), str(output_info)))
return mlmodel
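# Hedged usage sketch -- the file names and the input name below are illustrative:
#
#   from onnx_coreml.converter import convert
#   mlmodel = convert('model.onnx',
#                     mode='classifier',
#                     image_input_names=['image'],
#                     preprocessing_args={'image_scale': 1 / 255.0},
#                     class_labels='labels.txt')
#   mlmodel.save('model.mlmodel')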
| 40.232033
| 177
| 0.600163
|
5ee43517ff055edae0a64407edc3c96addab22f3
| 328
|
py
|
Python
|
session-7/file_read.py
|
ssxrgio/2018-19-PNE-practices
|
f73ed67cb676a2ffffdf7a5a3ddc4c9fa7902ee0
|
[
"Apache-2.0"
] | null | null | null |
session-7/file_read.py
|
ssxrgio/2018-19-PNE-practices
|
f73ed67cb676a2ffffdf7a5a3ddc4c9fa7902ee0
|
[
"Apache-2.0"
] | null | null | null |
session-7/file_read.py
|
ssxrgio/2018-19-PNE-practices
|
f73ed67cb676a2ffffdf7a5a3ddc4c9fa7902ee0
|
[
"Apache-2.0"
] | null | null | null |
# Example of reading a file located in our local file system.
name = 'mynotes.txt'
# Opening the file
file = open(name, 'r')
print('File opened: {}'.format(file.name)) # Name is an attribute that stores the name of the file opened.
contents = file.read()
print('The file contents are: {}'.format(contents))
file.close()
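# A hedged alternative sketch: a 'with' block closes the file automatically, even if
# an exception is raised while reading.
#
#   with open(name, 'r') as handle:
#       for line in handle:
#           print(line.rstrip())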
| 19.294118
| 106
| 0.698171
|
ac5b0b9319084310c8c7d8e9f68b54f2705aa1b9
| 1,668
|
py
|
Python
|
setup.py
|
jtlai0921/template-remover
|
de963f221612f57d4982fbc779acd21302c7b817
|
[
"Apache-2.0"
] | 4
|
2015-07-30T12:23:11.000Z
|
2019-02-08T14:59:07.000Z
|
setup.py
|
jtlai0921/template-remover
|
de963f221612f57d4982fbc779acd21302c7b817
|
[
"Apache-2.0"
] | 2
|
2015-07-30T12:34:21.000Z
|
2019-02-13T09:53:24.000Z
|
setup.py
|
jtlai0921/template-remover
|
de963f221612f57d4982fbc779acd21302c7b817
|
[
"Apache-2.0"
] | 2
|
2019-03-10T12:25:16.000Z
|
2020-05-29T08:02:47.000Z
|
# Copyright 2014 Deezer (http://www.deezer.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
setup(
name='template-remover',
version='0.1.7',
description='Remove the template markup from html files',
long_description=open('README.rst').read(),
author='Sebastian Kreft - Deezer',
author_email='skreft@deezer.com',
url='http://github.com/deezer/template-remover',
py_modules=['template_remover'],
install_requires=['docopt==0.6.1'],
tests_require=['nose>=1.3'],
scripts=['scripts/remove_template.py'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Unix',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development',
],
)
| 37.066667
| 74
| 0.667866
|
fb086015e077a605675a63dda41e6c8de85a1129
| 1,487
|
py
|
Python
|
coursera_algorithms/dynamic1.py
|
violetcodes/act-dumb
|
ced310fa3f9e784c425ae9b013abc1ea601eba24
|
[
"Apache-2.0"
] | null | null | null |
coursera_algorithms/dynamic1.py
|
violetcodes/act-dumb
|
ced310fa3f9e784c425ae9b013abc1ea601eba24
|
[
"Apache-2.0"
] | null | null | null |
coursera_algorithms/dynamic1.py
|
violetcodes/act-dumb
|
ced310fa3f9e784c425ae9b013abc1ea601eba24
|
[
"Apache-2.0"
] | null | null | null |
def tester_decorator(name_of_function, params=None):
def dec(f):
def fn(*args):
            print(f'testing {name_of_function} with following parameters')
for i, arg in (zip(params, args) if params is not None else enumerate(args)):
print(f'arg -> {i}: {arg}')
result = f(*args)
print(f'result obtained: {result}')
            print()
            return result  # pass the wrapped function's result through
return fn
return dec
'''Change problem
Input: an integer money and positive integers coin_1, ..., coin_d.
Output: the minimum number of coins with denominations coin_1, ..., coin_d
that changes money.'''
@tester_decorator('coin change', ['money', 'changes'])
def coin_change(m, changes):
# changes.sort()
min_changes = [0] * (m+1)
for i in range(1, m+1):
m1 = 100
for c in changes:
if c > i: break
m1 = min(m1, min_changes[i-c] + 1)
min_changes[i] = m1
return min_changes[m]
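# Worked example (illustrative): with changes = [1, 5, 6] and money = 10 the table
# min_changes fills as
#   i:            0 1 2 3 4 5 6 7 8 9 10
#   min_changes:  0 1 2 3 4 1 1 2 3 4  2
# so the answer is 2 coins (5 + 5), beating the greedy pick 6 + 1 + 1 + 1 + 1.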
def coin_change2(m, changes, known=None):
known = known or {}
if m in known: return known[m]
r = min([coin_change2(m-c, changes, known) + 1 for c in changes if c <=m], default=0)
known[m] = r
return r
if __name__ == '__main__':
changes = [25, 20, 10, 5, 1]
money = 40
# changes = [1, 5, 6]
# money = 10
changes.sort()
coin_change(money, changes)
tr = {}
print(coin_change2(money, changes, tr))
print(tr)
| 28.056604
| 91
| 0.551446
|
8b2e5d1127dd36d235aed3a117ac8f3e03fc6681
| 1,642
|
py
|
Python
|
demo_cmsplugin_zinnia/urls.py
|
HiddenClever/cmsplugin-zinnia
|
cf3e066b2923eb3bcec91527cea7bef7580c97d6
|
[
"BSD-3-Clause"
] | null | null | null |
demo_cmsplugin_zinnia/urls.py
|
HiddenClever/cmsplugin-zinnia
|
cf3e066b2923eb3bcec91527cea7bef7580c97d6
|
[
"BSD-3-Clause"
] | null | null | null |
demo_cmsplugin_zinnia/urls.py
|
HiddenClever/cmsplugin-zinnia
|
cf3e066b2923eb3bcec91527cea7bef7580c97d6
|
[
"BSD-3-Clause"
] | null | null | null |
"""Urls for the cmsplugin_zinnia demo"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import url
from django.conf.urls import include
from django.conf.urls import patterns
from zinnia.sitemaps import TagSitemap
from zinnia.sitemaps import EntrySitemap
from zinnia.sitemaps import CategorySitemap
from zinnia.sitemaps import AuthorSitemap
admin.autodiscover()
handler500 = 'demo_cmsplugin_zinnia.views.server_error'
handler404 = 'django.views.defaults.page_not_found'
handler403 = 'django.views.defaults.permission_denied'
urlpatterns = patterns(
'',
url(r'^blog/', include('zinnia.urls')),
url(r'^comments/', include('django.contrib.comments.urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^admin/', include(admin.site.urls)),
)
sitemaps = {
'tags': TagSitemap,
'blog': EntrySitemap,
'authors': AuthorSitemap,
'categories': CategorySitemap
}
urlpatterns += patterns(
'django.contrib.sitemaps.views',
url(r'^sitemap.xml$', 'index',
{'sitemaps': sitemaps}),
url(r'^sitemap-(?P<section>.+)\.xml$', 'sitemap',
{'sitemaps': sitemaps}),
)
urlpatterns += patterns(
'',
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'demo_cmsplugin_zinnia.views.server_error'),
)
if settings.DEBUG:
urlpatterns += patterns(
'',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT})
)
urlpatterns += patterns(
'',
url(r'^', include('cms.urls')),
)
| 27.830508
| 65
| 0.681486
|
a3fecfcd35d21ee1012ffe01a5f15f144d2b469b
| 11,614
|
py
|
Python
|
common-mk/platform2_unittest.py
|
strassek/chromiumos-platform2
|
12c953f41f48b8a6b0bd1c181d09bdb1de38325c
|
[
"BSD-3-Clause"
] | 4
|
2020-07-24T06:54:16.000Z
|
2021-06-16T17:13:53.000Z
|
common-mk/platform2_unittest.py
|
strassek/chromiumos-platform2
|
12c953f41f48b8a6b0bd1c181d09bdb1de38325c
|
[
"BSD-3-Clause"
] | 1
|
2021-04-02T17:35:07.000Z
|
2021-04-02T17:35:07.000Z
|
common-mk/platform2_unittest.py
|
strassek/chromiumos-platform2
|
12c953f41f48b8a6b0bd1c181d09bdb1de38325c
|
[
"BSD-3-Clause"
] | 1
|
2020-11-04T22:31:45.000Z
|
2020-11-04T22:31:45.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for platform2.py"""
from __future__ import print_function
import os
import sys
import mock
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)),
'..', '..', '..'))
# pylint: disable=wrong-import-position
import platform2
from chromite.lib import cros_test_lib
# pylint: enable=wrong-import-position
PLATFORM_SUBDIR = 'platform'
SYSROOT = '/'
TARGET_PREFIX = '//%s:' % PLATFORM_SUBDIR
class Platform2Configure(cros_test_lib.TestCase):
"""A base class of Platform2 unittest."""
@staticmethod
def _CreateTestPlatform2():
p2 = platform2.Platform2()
p2.platform_subdir = PLATFORM_SUBDIR
p2.sysroot = SYSROOT
return p2
def _RunWithDesc(self, func, gn_description):
"""Runs Platform2.|func| with fake |gn_description|."""
p2 = self._CreateTestPlatform2()
with mock.patch('platform2.Platform2.gn_desc', return_value=gn_description):
return getattr(p2, func)()
class Platform2ConfigureTest(Platform2Configure):
"""Tests Platform2.configure_test()."""
@staticmethod
def _CreateTestData(run_test=True, test_config=None):
"""Generates a template of test data."""
# It emulates a data that is generated by the templates in
# //common-mk/BUILDCONFIG.gn for this BUILD.gn rule example
#
# group("all") {
# deps = ["//platform:test"]
# }
#
# executable("test") {
# output_name = "output"
#
# run_test = $run_test
# test_config = $test_config
#
# # some required variables
# }
if test_config is None:
test_config = {}
return {
TARGET_PREFIX + 'all': {
'deps': [
TARGET_PREFIX + 'test',
],
},
TARGET_PREFIX + 'test': {
'metadata': {
'_run_test': [run_test],
'_test_config': [test_config],
},
'outputs': ['output'],
},
}
def _CheckConfigureTest(self, gn_description, expected):
"""Checks configure_test output |expected| outputs with |gn_description|."""
ret = self._RunWithDesc('configure_test', gn_description)
self.assertEqual(ret, expected)
@staticmethod
def _OutputTemplate(options):
"""Create Output Template.
Add platform2_test.py and some required options to the beginning.
"""
platform_tooldir = os.path.dirname(os.path.abspath(__file__))
p2_test_py = os.path.join(platform_tooldir, 'platform2_test.py')
prefix = [p2_test_py, '--action=run', '--sysroot=%s' % SYSROOT]
return prefix + options
def testMultipleTest(self):
"""Verify it can execute multiple tests."""
targets = [TARGET_PREFIX + 'test%s' % i for i in range(10)]
desc_data = {
TARGET_PREFIX + 'all': {
'deps': targets,
},
}
for target in targets:
desc_data[target] = {
'metadata': {
'_run_test': [True],
},
'outputs': [
'test-%s' % target,
],
}
self._CheckConfigureTest(
desc_data,
[self._OutputTemplate(['--', 'test-%s' % target])
for target in targets])
def testRunTest(self):
"""Verify it executes test only when run_test is true."""
self._CheckConfigureTest(
self._CreateTestData(run_test=True),
[self._OutputTemplate(['--', 'output'])])
self._CheckConfigureTest(
self._CreateTestData(run_test=False),
[])
def testBooleanConfigs(self):
"""Verify it converts boolean configs to flag options."""
self._CheckConfigureTest(
self._CreateTestData(test_config={
'run_as_root': True,
}),
[self._OutputTemplate(['--run_as_root', '--', 'output'])])
self._CheckConfigureTest(
self._CreateTestData(test_config={
'run_as_root': False,
}),
[self._OutputTemplate(['--', 'output'])])
def testStringConfigs(self):
"""Verify it converts string configs to not-flag options."""
self._CheckConfigureTest(
self._CreateTestData(test_config={
'gtest_filter': '-*.RunAsRoot',
}),
[self._OutputTemplate(['--gtest_filter=-*.RunAsRoot', '--', 'output'])])
class Platform2ConfigureInstall(Platform2Configure):
"""Tests Platform2.configure_install()."""
@staticmethod
def _CreateTestData(sources=None, install_path=None, outputs=None,
symlinks=None, recursive=False, options=None,
command_type=None):
"""Generates a template of test data."""
# It emulates a data that is generated by the templates in
# //common-mk/BUILDCONFIG.gn for this BUILD.gn rule example
#
# group("all") {
# deps = ["//platform:install"]
# }
#
# install_config("install") {
# sources = $sources
# install_path = $install_path
# outputs = $outputs
# symlinks = $symlinks
# recursive = $recursive
# options = $options
# type = $target_type
#
# # some required variables
# }
install_config = {
'sources': sources,
'install_path': install_path,
'outputs': outputs,
'symlinks': symlinks,
'recursive': recursive,
'options': options,
'type': command_type,
}
metadata = {
'_install_config': [install_config]
}
return {
TARGET_PREFIX + 'all': {
'deps': [
TARGET_PREFIX + 'install',
],
},
TARGET_PREFIX + 'install': {
'metadata': metadata,
},
}
@mock.patch('ebuild_function.generate', return_value=[['test', 'command']])
def testEbuildParameter(self, generate_mock):
"""Makes sure the parameter passed to ebuild_function correctly."""
gn_desc = self._CreateTestData(sources=['source'], install_path='/path',
outputs=['output'], symlinks=['symlink'],
recursive=True, options='-m0644',
command_type='executable')
self._RunWithDesc('configure_install', gn_desc)
generate_mock.assert_called_with(sources=['source'], install_path='/path',
outputs=['output'], symlinks=['symlink'],
recursive=True, options='-m0644',
command_type='executable')
@mock.patch('ebuild_function.generate', return_value=[['test', 'command']])
def testEbuildParameterWithoutNewNames(self, generate_mock):
"""Makes sure the parameter passed to ebuild_function correctly."""
gn_desc = self._CreateTestData(sources=['source'], install_path='/path',
recursive=True, options='-m0644',
command_type='executable')
self._RunWithDesc('configure_install', gn_desc)
generate_mock.assert_called_with(sources=['source'], install_path='/path',
outputs=None, symlinks=None,
recursive=True, options='-m0644',
command_type='executable')
@mock.patch('ebuild_function.generate', return_value=[['test', 'command']])
def testWithoutSources(self, generate_mock):
"""Makes sure it returns empty list when sources aren't specified."""
self._RunWithDesc('configure_install', self._CreateTestData())
self.assertEqual(generate_mock.call_count, 0)
@mock.patch('ebuild_function.generate', return_value=[['test', 'command']])
def testWithSources(self, generate_mock):
"""Makes sure it returns an install command when sources are specified."""
gn_desc = self._CreateTestData(sources=['source'])
self._RunWithDesc('configure_install', gn_desc)
self.assertEqual(generate_mock.call_count, 1)
@mock.patch('ebuild_function.generate', return_value=[['test', 'command']])
def testMultipleDifferentPathCommands(self, generate_mock):
"""Checks outputs are separated when having the different install_paths."""
num_install = 10
targets = [TARGET_PREFIX + 'test%s' % str(i) for i in range(num_install)]
gn_desc = {
TARGET_PREFIX + 'all': {
'deps': targets,
},
}
for target in targets:
gn_desc[target] = {
'metadata': {
'_install_config': [{
'install_path': target,
'sources': ['source'],
}],
},
}
self._RunWithDesc('configure_install', gn_desc)
self.assertEqual(generate_mock.call_count, 10)
@mock.patch('ebuild_function.generate', return_value=[['test', 'command']])
def testMultipleSamePathCommands(self, generate_mock):
"""Checks an output is combined when having the same install_paths."""
num_install = 10
targets = [TARGET_PREFIX + 'test%s' % str(i) for i in range(num_install)]
gn_desc = {
TARGET_PREFIX + 'all': {
'deps': targets,
},
}
for target in targets:
gn_desc[target] = {
'metadata': {
'_install_config': [{
'install_path': '/path',
'sources': ['source'],
}],
},
}
self._RunWithDesc('configure_install', gn_desc)
self.assertEqual(generate_mock.call_count, 1)
@mock.patch('ebuild_function.generate', return_value=[['test', 'command']])
def testMixedCommands(self, generate_mock):
"""Checks it returns two commands when having both new-cmd and do-cmd."""
# group("all") {
# deps = [
# "//platform:doins",
# "//platform:newins",
# ]
# }
#
# install_config("doins") {
# sources = "source-doins"
# install_path = "/path"
# }
# install_config("newins") {
# sources = "source-oldins"
# install_path = "/path"
# outputs = "source-newins"
# }
doins_target = TARGET_PREFIX + 'doins1'
newins_target = TARGET_PREFIX + 'newins'
gn_desc = {
TARGET_PREFIX + 'all': {
'deps': [
doins_target,
newins_target,
],
}, doins_target: {
'metadata': {
'_install_config': [{
'install_path': '/path',
'sources': ['source-doins'],
}],
}
}, newins_target: {
'metadata': {
'_install_config': [{
'install_path': '/path',
'sources': ['source-oldins'],
'outputs': ['source-newins'],
}],
},
}
}
self._RunWithDesc('configure_install', gn_desc)
generate_mock.assert_any_call(sources=['source-doins'],
install_path='/path',
outputs=None, symlinks=None,
recursive=None, options=None,
command_type=None)
generate_mock.assert_any_call(sources=['source-oldins'],
install_path='/path',
outputs=['source-newins'], symlinks=None,
recursive=None, options=None,
command_type=None)
if __name__ == '__main__':
cros_test_lib.main(module=__name__)
| 33.566474
| 80
| 0.573101
|
26b54deadf5cacfec185878bc3432567b4009224
| 4,313
|
py
|
Python
|
bsp/air640w/rtt/rtconfig.py
|
pengxxxxx/LuatOS
|
dc163e7dc2d5230901b7ce57566d662d1954546e
|
[
"MIT"
] | 217
|
2019-12-29T15:52:46.000Z
|
2022-03-29T05:44:29.000Z
|
bsp/air640w/rtt/rtconfig.py
|
zhangyinpeng/LuatOS
|
7543d59cbfe0fbb9c9ba1c3a0b506660c783367a
|
[
"MIT"
] | 64
|
2019-12-30T05:50:04.000Z
|
2022-03-06T03:48:56.000Z
|
bsp/air640w/rtt/rtconfig.py
|
zhangyinpeng/LuatOS
|
7543d59cbfe0fbb9c9ba1c3a0b506660c783367a
|
[
"MIT"
] | 59
|
2020-01-09T03:46:01.000Z
|
2022-03-27T03:17:36.000Z
|
import os
import sys
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = 'E:/tool/env/tools/gnu_gcc/arm_gcc/mingw/bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = 'C:/Program Files/IAR Systems/Embedded Workbench 6.0 Evaluation'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'release'
if os.path.exists(os.path.abspath('./') + '/drivers'):
gcc_linkscripts_path = 'drivers/linker_scripts/link.lds'
armcc_linkscripts_path = 'drivers/linker_scripts/link.sct'
iar_linkscripts_path = 'drivers/linker_scripts/link.icf'
else:
gcc_linkscripts_path = '../../drivers/linker_scripts/link.lds'
armcc_linkscripts_path = '../../drivers/linker_scripts/link.sct'
iar_linkscripts_path = '../../drivers/linker_scripts/link.icf'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
NM = PREFIX + 'nm'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=gnu99 -w -fno-common -fomit-frame-pointer -fno-short-enums -fsigned-char'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -g --specs=nano.specs -nostartfiles -Wl,-Map=rtthread-w60x.map -Os -Wl,--gc-sections -Wl,--cref -Wl,--entry=Reset_Handler -Wl,--no-enum-size-warning -Wl,--no-wchar-size-warning -T ' + gcc_linkscripts_path
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g -Wall'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2 -Wall'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
POST_ACTION += 'python ./makeimg.py'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu=Cortex-M3'
CFLAGS = DEVICE + ' --apcs=interwork --c99 --gnu'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter ' + armcc_linkscripts_path + ' --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET \n'
POST_ACTION += 'python ./makeimg.py'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config ' + iar_linkscripts_path
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin \n'
POST_ACTION += 'python ./makeimg.py'
| 30.160839
| 247
| 0.595641
|
06a5a46a22876d2e1b8758216dcfeab297fed7ae
| 442
|
py
|
Python
|
joe_username/generate.py
|
shreyas44/joe-username
|
add4ef5d44948401e2dd096ff9a73df456548c44
|
[
"MIT"
] | null | null | null |
joe_username/generate.py
|
shreyas44/joe-username
|
add4ef5d44948401e2dd096ff9a73df456548c44
|
[
"MIT"
] | null | null | null |
joe_username/generate.py
|
shreyas44/joe-username
|
add4ef5d44948401e2dd096ff9a73df456548c44
|
[
"MIT"
] | null | null | null |
import random
JOES_DICTIONARY = ["right", "awesome", "interesting", "cool"]
def generate(max_limit=4):
try:
max_limit = int(max_limit)
if max_limit <= 0:
raise ValueError
except ValueError:
raise ValueError("Make sure you enter an integer greater than 0")
word = ""
for i in range(max_limit):
rand_word = random.choice(JOES_DICTIONARY)
word += rand_word[0].upper() + rand_word[1:]
return word
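# Illustrative usage (not part of the original module; output words are random,
# the exact strings shown are assumptions):
if __name__ == "__main__":
    print(generate())   # e.g. "RightCoolAwesomeInteresting"
    print(generate(2))  # e.g. "CoolRight"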
| 18.416667
| 69
| 0.662896
|
df994e7169033b102835ead4076633bc68549336
| 4,705
|
py
|
Python
|
mll/recv_models/rnn_hierarchical_model.py
|
asappresearch/compositional-inductive-bias
|
2c67713306ec6591f397ca252f915c3edc5a794f
|
[
"MIT"
] | 2
|
2021-07-09T16:32:00.000Z
|
2022-03-21T17:32:39.000Z
|
mll/recv_models/rnn_hierarchical_model.py
|
asappresearch/compositional-inductive-bias
|
2c67713306ec6591f397ca252f915c3edc5a794f
|
[
"MIT"
] | null | null | null |
mll/recv_models/rnn_hierarchical_model.py
|
asappresearch/compositional-inductive-bias
|
2c67713306ec6591f397ca252f915c3edc5a794f
|
[
"MIT"
] | 1
|
2021-07-09T16:32:02.000Z
|
2021-07-09T16:32:02.000Z
|
import torch
from torch import nn
from ulfs import nn_modules
from mll.darts_cell import DGReceiverCell, DGSenderCell
class RNNHierEncoder(nn.Module):
def __init__(
self, embedding_size, rnn_type
):
self.rnn_type = rnn_type
self.embedding_size = embedding_size
super().__init__()
# assert rnn_type in ['GRU', 'LSTM', 'RNN', 'SRU']
# assert rnn_type not in ['LSTM']
if rnn_type in ['SRU']:
from sru import SRUCell
RNNCell = SRUCell
elif rnn_type == 'dgrecv':
RNNCell = DGReceiverCell
elif rnn_type == 'dgsend':
RNNCell = DGSenderCell
else:
RNNCell = getattr(nn, f'{rnn_type}Cell')
self.rnn_upper = RNNCell(
input_size=embedding_size,
hidden_size=embedding_size
)
self.rnn_lower = RNNCell(
input_size=embedding_size,
hidden_size=embedding_size
)
self.linear_lower_stop = nn.Linear(embedding_size, 1)
def forward(self, inputs, return_stopness=False):
seq_len, batch_size, embedding_size = inputs.size()
device = inputs.device
N = batch_size
if self.rnn_type in ['LSTM']:
lower_state = [
torch.zeros(N, self.embedding_size, dtype=torch.float32, device=device),
torch.zeros(N, self.embedding_size, dtype=torch.float32, device=device)
]
upper_state = [
torch.zeros(N, self.embedding_size, dtype=torch.float32, device=device),
torch.zeros(N, self.embedding_size, dtype=torch.float32, device=device)
]
else:
lower_state = torch.zeros(N, self.embedding_size, dtype=torch.float32, device=device)
upper_state = torch.zeros(N, self.embedding_size, dtype=torch.float32, device=device)
lower_stopness = torch.ones(N, 1, dtype=torch.float32, device=device)
all_lower_stopness = torch.zeros(seq_len, N, dtype=torch.float32, device=device)
for m in range(seq_len):
if self.rnn_type in ['LSTM']:
lower_state = list(lower_state)
lower_state[0] = lower_state[0] * (1 - lower_stopness)
lower_state[1] = lower_state[1] * (1 - lower_stopness)
else:
lower_state = lower_state * (1 - lower_stopness)
in_token = inputs[m]
lower_state = self.rnn_lower(in_token, lower_state)
lower_stopness = torch.sigmoid(self.linear_lower_stop(lower_state[0]))
all_lower_stopness[m] = lower_stopness.squeeze(-1)
if self.rnn_type in ['LSTM']:
input_to_upper = lower_state[0]
else:
input_to_upper = lower_state
new_upper_state = self.rnn_upper(input_to_upper, upper_state)
if self.rnn_type in ['LSTM']:
new_upper_state = list(new_upper_state)
upper_state[0] = lower_stopness * new_upper_state[0] + (1 - lower_stopness) * upper_state[0]
upper_state[1] = lower_stopness * new_upper_state[1] + (1 - lower_stopness) * upper_state[1]
else:
upper_state = lower_stopness * new_upper_state + (1 - lower_stopness) * upper_state
if self.rnn_type in ['LSTM']:
state = upper_state[0]
else:
state = upper_state
if return_stopness:
return state, all_lower_stopness
else:
return state
class HierModel(nn.Module):
supports_dropout = True
def __init__(
self, embedding_size, vocab_size, utt_len, num_meaning_types, meanings_per_type,
rnn_type, dropout
):
self.rnn_type = rnn_type
self.embedding_size = embedding_size
self.num_meaning_types = num_meaning_types
self.meanings_per_type = meanings_per_type
super().__init__()
self.embedding = nn_modules.EmbeddingAdapter(vocab_size + 1, embedding_size)
self.rnn_hierarchical = RNNHierEncoder(
embedding_size=embedding_size,
rnn_type=rnn_type
)
self.linear_out = nn.Linear(embedding_size, num_meaning_types * meanings_per_type)
self.drop = nn.Dropout(dropout)
def forward(self, utts):
embs = self.embedding(utts)
M, N, E = embs.size()
embs = self.drop(embs)
state, self.all_lower_stopness = self.rnn_hierarchical(inputs=embs, return_stopness=True)
x = self.linear_out(self.drop(state))
view_list = [self.num_meaning_types, self.meanings_per_type]
x = x.view(N, *view_list)
return x
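# Illustrative shapes (a sketch only; assumes the `ulfs`/`mll` packages imported above are available):
#   model = HierModel(embedding_size=32, vocab_size=10, utt_len=6,
#                     num_meaning_types=3, meanings_per_type=4,
#                     rnn_type='GRU', dropout=0.1)
#   utts = torch.randint(0, 10, (6, 5))   # (utt_len, batch)
#   logits = model(utts)                  # -> (batch, num_meaning_types, meanings_per_type) == (5, 3, 4)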
| 38.565574
| 108
| 0.61084
|
8492af4dcee6ff2c4611b3c18acdfe1e8fe4dab6
| 954
|
py
|
Python
|
Detection/scripts/rename_mover_new_data.py
|
Final-Six-SIH2020/Detection-and-Classification
|
d02074fa4e5ba455e011ffd5081cc3428245c4d8
|
[
"MIT"
] | 1
|
2021-04-30T05:33:39.000Z
|
2021-04-30T05:33:39.000Z
|
Detection/scripts/rename_mover_new_data.py
|
Final-Six-SIH2020/Detection-and-Classification
|
d02074fa4e5ba455e011ffd5081cc3428245c4d8
|
[
"MIT"
] | 1
|
2020-07-27T16:25:00.000Z
|
2020-07-27T16:28:08.000Z
|
Detection/scripts/rename_mover_new_data.py
|
Final-Six-SIH2020/Detection-and-Classification
|
d02074fa4e5ba455e011ffd5081cc3428245c4d8
|
[
"MIT"
] | null | null | null |
import os
import shutil
import os.path as P
BASE_FOLDER = 'kaggle_dataset'
DST_FOLDER = 'kaggle_renamed'
if P.isdir(DST_FOLDER):
shutil.rmtree(DST_FOLDER)
os.makedirs(DST_FOLDER)
xml_files = [P.join(BASE_FOLDER, file)
for file in os.listdir(BASE_FOLDER) if file.endswith('.xml')]
missing = []
img_ext = '.jpg'
for i, file in enumerate(xml_files, 1):
    path, filename = P.split(file)  # portable split instead of hard-coded '\\'
xml_file_name, xml_ext = P.splitext(filename)
src_xml_name = f'{xml_file_name}{xml_ext}'
src_img_name = f'{xml_file_name}{img_ext}'
if not os.path.isfile(P.join(path, src_img_name)):
missing.append(path)
continue
dst_xml_name = f'kaggle_{i:04}{xml_ext}'
dst_img_name = f'kaggle_{i:04}{img_ext}'
shutil.copy(file, P.join(DST_FOLDER, dst_xml_name)) # XML
shutil.copy(P.join(path, src_img_name),
P.join(DST_FOLDER, dst_img_name))
print(f"\rProcessed: {i} files", end='')
| 24.461538
| 74
| 0.669811
|
4139952f6e41e157cb48005765c678971a5da01e
| 4,383
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
gothcoin/coin
|
d8812caf6d04884eade5f8d52b556a0282875fa9
|
[
"MIT"
] | 2
|
2021-05-16T07:29:34.000Z
|
2021-05-17T12:41:48.000Z
|
contrib/seeds/generate-seeds.py
|
gothcoin/coin
|
d8812caf6d04884eade5f8d52b556a0282875fa9
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
gothcoin/coin
|
d8812caf6d04884eade5f8d52b556a0282875fa9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys
import os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
raise ValueError('Invalid onion %s' % vchAddr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
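# Illustrative results of parse_spec (comments only, not part of the original script):
#   parse_spec('1.2.3.4', 35414)         -> (IPv4-mapped 16-byte IPv6 address, 35414)
#   parse_spec('[2001:db8::1]:8333', 0)  -> (16-byte IPv6 address, 8333)
#   parse_spec('0x0100007f', 35414)      -> little-endian hex form of 127.0.0.1, default port kept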
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the gothcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_main', 35414)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'pnSeed6_test', 45414)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.307143
| 99
| 0.583619
|
193455097ec6538e72e1e47238ce4794adb6aea0
| 935
|
py
|
Python
|
src/genie/libs/parser/junos/tests/ShowServicesAccountingAggregationTemplate/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 204
|
2018-06-27T00:55:27.000Z
|
2022-03-06T21:12:18.000Z
|
src/genie/libs/parser/junos/tests/ShowServicesAccountingAggregationTemplate/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 468
|
2018-06-19T00:33:18.000Z
|
2022-03-31T23:23:35.000Z
|
src/genie/libs/parser/junos/tests/ShowServicesAccountingAggregationTemplate/cli/equal/golden_output_1_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | 309
|
2019-01-16T20:21:07.000Z
|
2022-03-30T12:56:41.000Z
|
expected_output = {
"services-accounting-information": {
"flow-aggregate-template-detail": {
"flow-aggregate-template-detail-ipv4": {
"detail-entry": [{
"source-address": "10.120.202.64",
"destination-address": "10.169.14.158",
"source-port": "8",
"destination-port": "0",
"protocol": {"#text": "1"},
"tos": "0",
"tcp-flags": "0",
"source-mask": "32",
"destination-mask": "30",
"input-snmp-interface-index": "618",
"output-snmp-interface-index": "620",
"start-time": "79167425",
"end-time": "79167425",
"packet-count": "1",
"byte-count": "84",
}]
}
}
}
}
| 35.961538
| 59
| 0.37861
|
4c5f3b30b3cc634772659ed40127d394c4f38ab5
| 1,909
|
py
|
Python
|
test/units/plugins/terminal/test_slxos.py
|
Container-Projects/ansible-provider-docs
|
100b695b0b0c4d8d08af362069557ffc735d0d7e
|
[
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 37
|
2017-08-15T15:02:43.000Z
|
2021-07-23T03:44:31.000Z
|
test/units/plugins/terminal/test_slxos.py
|
Container-Projects/ansible-provider-docs
|
100b695b0b0c4d8d08af362069557ffc735d0d7e
|
[
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 12
|
2018-01-10T05:25:25.000Z
|
2021-11-28T06:55:48.000Z
|
test/units/plugins/terminal/test_slxos.py
|
Container-Projects/ansible-provider-docs
|
100b695b0b0c4d8d08af362069557ffc735d0d7e
|
[
"PSF-2.0",
"BSD-2-Clause",
"MIT"
] | 49
|
2017-08-15T09:52:13.000Z
|
2022-03-21T17:11:54.000Z
|
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from os import path
import json
from mock import MagicMock
from ansible.compat.tests import unittest
from ansible.plugins.terminal import slxos
from ansible.errors import AnsibleConnectionFailure
class TestPluginTerminalSLXOS(unittest.TestCase):
""" Test class for SLX-OS Terminal Module
"""
def setUp(self):
self._mock_connection = MagicMock()
self._terminal = slxos.TerminalModule(self._mock_connection)
def tearDown(self):
pass
def test_on_open_shell(self):
""" Test on_open_shell
"""
self._mock_connection.exec_command.side_effect = [
b'Looking out my window I see a brick building, and people. Cool.',
]
self._terminal.on_open_shell()
self._mock_connection.exec_command.assert_called_with(u'terminal length 0')
def test_on_open_shell_error(self):
""" Test on_open_shell with error
"""
self._mock_connection.exec_command.side_effect = [
AnsibleConnectionFailure
]
with self.assertRaises(AnsibleConnectionFailure):
self._terminal.on_open_shell()
| 31.816667
| 83
| 0.718177
|
6cbb5b951a26e1e7bf3ed5976a518c42db080fc1
| 3,122
|
py
|
Python
|
data/p2DJ/New/program/qiskit/simulator/startQiskit238.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/simulator/startQiskit238.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/program/qiskit/simulator/startQiskit238.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=2
# total number=11
import cirq
import qiskit
from qiskit import IBMQ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2, floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename='circuit/deutsch-oracle.png')
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n, "qc")
target = QuantumRegister(1, "qt")
prog = QuantumCircuit(input_qubit, target)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(target)
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[1]) # number=1
prog.h(target)
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [target])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
#for i in range(n):
# prog.measure(input_qubit[i], classicals[i])
prog.swap(input_qubit[1],input_qubit[0]) # number=2
prog.swap(input_qubit[1],input_qubit[0]) # number=3
prog.cx(input_qubit[0],input_qubit[1]) # number=8
prog.x(input_qubit[1]) # number=9
prog.cx(input_qubit[0],input_qubit[1]) # number=10
prog.cx(input_qubit[0],input_qubit[1]) # number=7
prog.rx(-2.73004401596953,input_qubit[1]) # number=6
prog.z(input_qubit[1]) # number=4
# circuit end
return prog
if __name__ == '__main__':
n = 2
f = lambda rep: rep[-1]
# f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
# f = lambda rep: "0"
prog = make_circuit(n, f)
sample_shot =2800
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog,FakeVigo())
circuit1.x(qubit=3)
circuit1.x(qubit=3)
circuit1.measure_all()
prog = circuit1
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
writefile = open("../data/startQiskit238.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 28.642202
| 82
| 0.626842
|
6f02d403e4a364c3f41cf60321de96b0f9424f0f
| 537
|
py
|
Python
|
src/handlers/static_files.py
|
nhardy/py-js-web-scaffold
|
adf3e3ada0b21cdb9620676de795579107442dd7
|
[
"MIT"
] | null | null | null |
src/handlers/static_files.py
|
nhardy/py-js-web-scaffold
|
adf3e3ada0b21cdb9620676de795579107442dd7
|
[
"MIT"
] | null | null | null |
src/handlers/static_files.py
|
nhardy/py-js-web-scaffold
|
adf3e3ada0b21cdb9620676de795579107442dd7
|
[
"MIT"
] | null | null | null |
import mimetypes
import tornado.web
class StaticFileHandler(tornado.web.StaticFileHandler):
def get_content_type(self):
mime, encoding = mimetypes.guess_type(self.absolute_path)
if mime is not None:
return mime
elif self.absolute_path.endswith('.ttf'):
return 'application/x-font-ttf'
elif self.absolute_path.endswith('.woff'):
return 'application/font-woff'
elif self.absolute_path.endswith('.7z'):
return 'application/x-7z-compressed'
else:
return 'application/octet-stream'
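# Illustrative wiring (a sketch; the route pattern and static directory are assumptions):
#   app = tornado.web.Application([
#       (r'/static/(.*)', StaticFileHandler, {'path': 'static'}),
#   ])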
| 26.85
| 61
| 0.715084
|
a6d3a1958272279fb0b4957c733d4d1eb07d81a3
| 32,085
|
py
|
Python
|
python/ccxt/coinex.py
|
wadaxofficial/ccxt
|
765aaa4cd83c1e2dc75717d49b50a55ac4dc0487
|
[
"MIT"
] | null | null | null |
python/ccxt/coinex.py
|
wadaxofficial/ccxt
|
765aaa4cd83c1e2dc75717d49b50a55ac4dc0487
|
[
"MIT"
] | 5
|
2020-07-17T06:15:25.000Z
|
2021-05-09T03:07:57.000Z
|
python/ccxt/coinex.py
|
wadaxofficial/ccxt
|
765aaa4cd83c1e2dc75717d49b50a55ac4dc0487
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class coinex (Exchange):
def describe(self):
return self.deep_extend(super(coinex, self).describe(), {
'id': 'coinex',
'name': 'CoinEx',
'version': 'v1',
'countries': ['CN'],
'rateLimit': 1000,
'has': {
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'withdraw': True,
'fetchDeposits': True,
'fetchWithdrawals': True,
},
'timeframes': {
'1m': '1min',
'3m': '3min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'2h': '2hour',
'4h': '4hour',
'6h': '6hour',
'12h': '12hour',
'1d': '1day',
'3d': '3day',
'1w': '1week',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/38046312-0b450aac-32c8-11e8-99ab-bc6b136b6cc7.jpg',
'api': {
'public': 'https://api.coinex.com',
'private': 'https://api.coinex.com',
'web': 'https://www.coinex.com',
},
'www': 'https://www.coinex.com',
'doc': 'https://github.com/coinexcom/coinex_exchange_api/wiki',
'fees': 'https://www.coinex.com/fees',
'referral': 'https://www.coinex.com/account/signup?refer_code=yw5fz',
},
'api': {
'web': {
'get': [
'res/market',
],
},
'public': {
'get': [
'market/list',
'market/ticker',
'market/ticker/all',
'market/depth',
'market/deals',
'market/kline',
],
},
'private': {
'get': [
'balance/coin/withdraw',
'balance/coin/deposit',
'balance/info',
'order',
'order/pending',
'order/finished',
'order/finished/{id}',
'order/user/deals',
],
'post': [
'balance/coin/withdraw',
'order/limit',
'order/market',
],
'delete': [
'balance/coin/withdraw',
'order/pending',
],
},
},
'fees': {
'trading': {
'maker': 0.001,
'taker': 0.001,
},
'funding': {
'withdraw': {
'BCH': 0.0,
'BTC': 0.001,
'LTC': 0.001,
'ETH': 0.001,
'ZEC': 0.0001,
'DASH': 0.0001,
},
},
},
'limits': {
'amount': {
'min': 0.001,
'max': None,
},
},
'precision': {
'amount': 8,
'price': 8,
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
},
})
def fetch_markets(self, params={}):
response = self.webGetResMarket()
markets = response['data']['market_info']
result = []
keys = list(markets.keys())
for i in range(0, len(keys)):
key = keys[i]
market = markets[key]
id = market['market']
quoteId = market['buy_asset_type']
baseId = market['sell_asset_type']
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': market['sell_asset_type_places'],
'price': market['buy_asset_type_places'],
}
numMergeLevels = len(market['merge'])
active = (market['status'] == 'pass')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'taker': self.safe_float(market, 'taker_fee_rate'),
'maker': self.safe_float(market, 'maker_fee_rate'),
'info': market,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'least_amount'),
'max': None,
},
'price': {
'min': float(market['merge'][numMergeLevels - 1]),
'max': None,
},
},
})
return result
def parse_ticker(self, ticker, market=None):
timestamp = ticker['date']
symbol = market['symbol']
ticker = ticker['ticker']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float_2(ticker, 'vol', 'volume'),
'quoteVolume': None,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMarketTicker(self.extend({
'market': market['id'],
}, params))
return self.parse_ticker(response['data'], market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetMarketTickerAll(params)
data = response['data']
timestamp = data['date']
tickers = data['ticker']
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = {
'date': timestamp,
'ticker': tickers[id],
}
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_order_book(self, symbol, limit=20, params={}):
self.load_markets()
if limit is None:
limit = 20 # default
request = {
'market': self.market_id(symbol),
'merge': '0.0000000001',
'limit': str(limit),
}
response = self.publicGetMarketDepth(self.extend(request, params))
return self.parse_order_book(response['data'])
def parse_trade(self, trade, market=None):
# self method parses both public and private trades
timestamp = self.safe_integer(trade, 'create_time')
if timestamp is None:
timestamp = self.safe_integer(trade, 'date_ms')
else:
timestamp = timestamp * 1000
tradeId = self.safe_string(trade, 'id')
orderId = self.safe_string(trade, 'order_id')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
marketId = self.safe_string(trade, 'market')
market = self.safe_value(self.markets_by_id, marketId, market)
symbol = None
if market is not None:
symbol = market['symbol']
cost = self.safe_float(trade, 'deal_money')
if not cost:
cost = float(self.cost_to_precision(symbol, price * amount))
fee = None
feeCost = self.safe_float(trade, 'fee')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'fee_asset')
feeCurrency = self.safe_value(self.currencies_by_id, feeCurrencyId)
feeCurrencyCode = None
if feeCurrency is not None:
feeCurrencyCode = feeCurrency['code']
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
takerOrMaker = self.safe_string(trade, 'role')
side = self.safe_string(trade, 'type')
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': tradeId,
'order': orderId,
'type': None,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMarketDeals(self.extend({
'market': market['id'],
}, params))
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='5m', since=None, limit=None):
return [
ohlcv[0] * 1000,
float(ohlcv[1]),
float(ohlcv[3]),
float(ohlcv[4]),
float(ohlcv[2]),
float(ohlcv[5]),
]
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetMarketKline(self.extend({
'market': market['id'],
'type': self.timeframes[timeframe],
}, params))
return self.parse_ohlcvs(response['data'], market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetBalanceInfo(params)
#
# {
# "code": 0,
# "data": {
# "BCH": { # BCH account
# "available": "13.60109", # Available BCH
# "frozen": "0.00000" # Frozen BCH
# },
# "BTC": { # BTC account
# "available": "32590.16", # Available BTC
# "frozen": "7000.00" # Frozen BTC
# },
# "ETH": { # ETH account
# "available": "5.06000", # Available ETH
# "frozen": "0.00000" # Frozen ETH
# }
# },
# "message": "Ok"
# }
#
result = {'info': response}
balances = response['data']
currencies = list(balances.keys())
for i in range(0, len(currencies)):
id = currencies[i]
balance = balances[id]
currency = self.common_currency_code(id)
account = {
'free': float(balance['available']),
'used': float(balance['frozen']),
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def parse_order_status(self, status):
statuses = {
'not_deal': 'open',
'part_deal': 'open',
'done': 'closed',
'cancel': 'canceled',
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
#
# fetchOrder
#
# {
# "amount": "0.1",
# "asset_fee": "0.22736197736197736197",
# "avg_price": "196.85000000000000000000",
# "create_time": 1537270135,
# "deal_amount": "0.1",
# "deal_fee": "0",
# "deal_money": "19.685",
# "fee_asset": "CET",
# "fee_discount": "0.5",
# "id": 1788259447,
# "left": "0",
# "maker_fee_rate": "0",
# "market": "ETHUSDT",
# "order_type": "limit",
# "price": "170.00000000",
# "status": "done",
# "taker_fee_rate": "0.0005",
# "type": "sell",
# }
#
timestamp = self.safe_integer(order, 'create_time') * 1000
price = self.safe_float(order, 'price')
cost = self.safe_float(order, 'deal_money')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'deal_amount')
average = self.safe_float(order, 'avg_price')
symbol = None
marketId = self.safe_string(order, 'market')
market = self.safe_value(self.markets_by_id, marketId)
feeCurrency = None
feeCurrencyId = self.safe_string(order, 'fee_asset')
currency = self.safe_value(self.currencies_by_id, feeCurrencyId)
if currency is not None:
feeCurrency = currency['code']
if market is not None:
symbol = market['symbol']
if feeCurrency is None:
feeCurrency = market['quote']
remaining = self.safe_float(order, 'left')
status = self.parse_order_status(self.safe_string(order, 'status'))
type = self.safe_string(order, 'order_type')
side = self.safe_string(order, 'type')
return {
'id': self.safe_string(order, 'id'),
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'average': average,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': {
'currency': feeCurrency,
'cost': self.safe_float(order, 'deal_fee'),
},
'info': order,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
amount = float(amount) # self line is deprecated
if type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
price = float(price) # self line is deprecated
amount = amount * price
self.load_markets()
method = 'privatePostOrder' + self.capitalize(type)
market = self.market(symbol)
request = {
'market': market['id'],
'amount': self.amount_to_precision(symbol, amount),
'type': side,
}
if type == 'limit':
price = float(price) # self line is deprecated
request['price'] = self.price_to_precision(symbol, price)
response = getattr(self, method)(self.extend(request, params))
order = self.parse_order(response['data'], market)
id = order['id']
self.orders[id] = order
return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privateDeleteOrderPending(self.extend({
'id': id,
'market': market['id'],
}, params))
return self.parse_order(response['data'], market)
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
response = self.privateGetOrder(self.extend({
'id': id,
'market': market['id'],
}, params))
#
# {
# "code": 0,
# "data": {
# "amount": "0.1",
# "asset_fee": "0.22736197736197736197",
# "avg_price": "196.85000000000000000000",
# "create_time": 1537270135,
# "deal_amount": "0.1",
# "deal_fee": "0",
# "deal_money": "19.685",
# "fee_asset": "CET",
# "fee_discount": "0.5",
# "id": 1788259447,
# "left": "0",
# "maker_fee_rate": "0",
# "market": "ETHUSDT",
# "order_type": "limit",
# "price": "170.00000000",
# "status": "done",
# "taker_fee_rate": "0.0005",
# "type": "sell",
# },
# "message": "Ok"
# }
#
return self.parse_order(response['data'], market)
def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if limit is None:
limit = 100
request = {
'page': 1,
'limit': limit,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
method = 'privateGetOrder' + self.capitalize(status)
response = getattr(self, method)(self.extend(request, params))
return self.parse_orders(response['data']['data'], market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_status('pending', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_status('finished', symbol, since, limit, params)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if limit is None:
limit = 100
request = {
'page': 1,
'limit': limit,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = self.privateGetOrderUserDeals(self.extend(request, params))
return self.parse_trades(response['data']['data'], market, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
request = {
'coin_type': currency['id'],
'coin_address': address, # must be authorized, inter-user transfer by a registered mobile phone number or an email address is supported
'actual_amount': float(amount), # the actual amount without fees, https://www.coinex.com/fees
'transfer_method': '1', # '1' = normal onchain transfer, '2' = internal local transfer from one user to another
}
response = self.privatePostBalanceCoinWithdraw(self.extend(request, params))
#
# {
# "code": 0,
# "data": {
# "actual_amount": "1.00000000",
# "amount": "1.00000000",
# "coin_address": "1KAv3pazbTk2JnQ5xTo6fpKK7p1it2RzD4",
# "coin_type": "BCH",
# "coin_withdraw_id": 206,
# "confirmations": 0,
# "create_time": 1524228297,
# "status": "audit",
# "tx_fee": "0",
# "tx_id": ""
# },
# "message": "Ok"
# }
#
transaction = self.safe_value(response, 'data', {})
return self.parse_transaction(transaction, currency)
def parse_transaction_status(self, status):
statuses = {
'audit': 'pending',
'pass': 'pending',
'processing': 'pending',
'confirming': 'pending',
'not_pass': 'failed',
'cancel': 'canceled',
'finish': 'ok',
'fail': 'failed',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# "actual_amount": "120.00000000",
# "actual_amount_display": "120",
# "add_explorer": "XXX",
# "amount": "120.00000000",
# "amount_display": "120",
# "coin_address": "XXXXXXXX",
# "coin_address_display": "XXXXXXXX",
# "coin_deposit_id": 1866,
# "coin_type": "USDT",
# "confirmations": 0,
# "create_time": 1539595701,
# "explorer": "",
# "remark": "",
# "status": "finish",
# "status_display": "finish",
# "transfer_method": "local",
# "tx_id": "",
# "tx_id_display": "XXXXXXXXXX"
# }
#
# fetchWithdrawals
#
# {
# "actual_amount": "0.10000000",
# "amount": "0.10000000",
# "coin_address": "15sr1VdyXQ6sVLqeJUJ1uPzLpmQtgUeBSB",
# "coin_type": "BCH",
# "coin_withdraw_id": 203,
# "confirmations": 11,
# "create_time": 1515806440,
# "status": "finish",
# "tx_fee": "0",
# "tx_id": "896371d0e23d64d1cac65a0b7c9e9093d835affb572fec89dd4547277fbdd2f6"
# }
#
id = self.safe_string_2(transaction, 'coin_withdraw_id', 'coin_deposit_id')
address = self.safe_string(transaction, 'coin_address')
tag = self.safe_string(transaction, 'remark') # set but unused
if tag is not None:
if len(tag) < 1:
tag = None
txid = self.safe_value(transaction, 'tx_id')
if txid is not None:
if len(txid) < 1:
txid = None
code = None
currencyId = self.safe_string(transaction, 'coin_type')
if currencyId in self.currencies_by_id:
currency = self.currencies_by_id[currencyId]
else:
code = self.common_currency_code(currencyId)
if currency is not None:
code = currency['code']
timestamp = self.safe_integer(transaction, 'create_time')
if timestamp is not None:
timestamp = timestamp * 1000
type = 'withdraw' if ('coin_withdraw_id' in list(transaction.keys())) else 'deposit'
        status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
amount = self.safe_float(transaction, 'amount')
feeCost = self.safe_float(transaction, 'tx_fee')
if type == 'deposit':
feeCost = 0
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument')
currency = self.currency(code)
request = {
'coin_type': currency['id'],
}
if limit is not None:
request['Limit'] = limit
response = self.privateGetBalanceCoinWithdraw(self.extend(request, params))
#
# {
# "code": 0,
# "data": [
# {
# "actual_amount": "1.00000000",
# "amount": "1.00000000",
# "coin_address": "1KAv3pazbTk2JnQ5xTo6fpKK7p1it2RzD4",
# "coin_type": "BCH",
# "coin_withdraw_id": 206,
# "confirmations": 0,
# "create_time": 1524228297,
# "status": "audit",
# "tx_fee": "0",
# "tx_id": ""
# },
# {
# "actual_amount": "0.10000000",
# "amount": "0.10000000",
# "coin_address": "15sr1VdyXQ6sVLqeJUJ1uPzLpmQtgUeBSB",
# "coin_type": "BCH",
# "coin_withdraw_id": 203,
# "confirmations": 11,
# "create_time": 1515806440,
# "status": "finish",
# "tx_fee": "0",
# "tx_id": "896371d0e23d64d1cac65a0b7c9e9093d835affb572fec89dd4547277fbdd2f6"
# },
# {
# "actual_amount": "0.00100000",
# "amount": "0.00100000",
# "coin_address": "1GVVx5UBddLKrckTprNi4VhHSymeQ8tsLF",
# "coin_type": "BCH",
# "coin_withdraw_id": 27,
# "confirmations": 0,
# "create_time": 1513933541,
# "status": "cancel",
# "tx_fee": "0",
# "tx_id": ""
# }
# ],
# "message": "Ok"
# }
#
return self.parseTransactions(response['data'], currency, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits requires a currency code argument')
currency = self.currency(code)
request = {
'coin_type': currency['id'],
}
if limit is not None:
request['Limit'] = limit
response = self.privateGetBalanceCoinDeposit(self.extend(request, params))
# {
# "code": 0,
# "data": [
# {
# "actual_amount": "4.65397682",
# "actual_amount_display": "4.65397682",
# "add_explorer": "https://etherscan.io/address/0x361XXXXXX",
# "amount": "4.65397682",
# "amount_display": "4.65397682",
# "coin_address": "0x36dabcdXXXXXX",
# "coin_address_display": "0x361X*****XXXXX",
# "coin_deposit_id": 966191,
# "coin_type": "ETH",
# "confirmations": 30,
# "create_time": 1531661445,
# "explorer": "https://etherscan.io/tx/0x361XXXXXX",
# "remark": "",
# "status": "finish",
# "status_display": "finish",
# "transfer_method": "onchain",
# "tx_id": "0x361XXXXXX",
# "tx_id_display": "0x361XXXXXX"
# }
# ],
# "message": "Ok"
# }
#
return self.parseTransactions(response['data'], currency, since, limit)
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
path = self.implode_params(path, params)
url = self.urls['api'][api] + '/' + self.version + '/' + path
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
elif api == 'web':
url = self.urls['api'][api] + '/' + path
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
query = self.extend({
'access_id': self.apiKey,
'tonce': str(nonce),
}, query)
query = self.keysort(query)
urlencoded = self.urlencode(query)
signature = self.hash(self.encode(urlencoded + '&secret_key=' + self.secret))
headers = {
'Authorization': signature.upper(),
'Content-Type': 'application/json',
}
if (method == 'GET') or (method == 'DELETE'):
url += '?' + urlencoded
else:
body = self.json(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
code = self.safe_string(response, 'code')
data = self.safe_value(response, 'data')
if code != '0' or not data:
responseCodes = {
'24': AuthenticationError,
'25': AuthenticationError,
'107': InsufficientFunds,
'600': OrderNotFound,
'601': InvalidOrder,
'602': InvalidOrder,
'606': InvalidOrder,
}
ErrorClass = self.safe_value(responseCodes, code, ExchangeError)
raise ErrorClass(response['message'])
return response
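# Illustrative usage of this generated wrapper (a sketch; the API keys are placeholders):
#   exchange = coinex({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#   exchange.load_markets()
#   ticker = exchange.fetch_ticker('BTC/USDT')
#   order = exchange.create_order('BTC/USDT', 'limit', 'buy', 0.001, 10000)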
| 38.656627
| 465
| 0.467602
|
d77cd8ce948d69db216239d323730b616e09c769
| 5,868
|
py
|
Python
|
g_lyrics_funcs.py
|
donniebishop/genius_lyrics
|
41124a0512f65a0074a4784421c44ab88dba3aee
|
[
"MIT"
] | 5
|
2016-05-04T18:53:13.000Z
|
2017-09-05T14:53:27.000Z
|
g_lyrics_funcs.py
|
donniebishop/genius_lyrics
|
41124a0512f65a0074a4784421c44ab88dba3aee
|
[
"MIT"
] | 2
|
2016-05-04T16:11:05.000Z
|
2018-09-02T17:32:31.000Z
|
g_lyrics_funcs.py
|
donniebishop/genius_lyrics
|
41124a0512f65a0074a4784421c44ab88dba3aee
|
[
"MIT"
] | 4
|
2016-06-11T17:49:40.000Z
|
2020-04-28T18:24:26.000Z
|
#!/usr/bin/env python
import six
import lxml
import requests
import subprocess
import sys
from bs4 import BeautifulSoup
# Header uses client Authorization Token from api.genius.com. As such,
# it is only allowed to request read-only endpoints from the API,
# With that explained, I'm including it here. Mostly because I don't know
# a better way of doing it.
API = 'https://api.genius.com'
HEADERS = {'Authorization': 'Bearer rDyWJrXXwACCg-otwQKmomcYSYFv2oQAN3vlTCV_507CW_pEQTQfQ98HtUYXq3W8'}
class HitResult():
''' Class for representing metadata of search results. '''
def __init__(self, artist, title, song_id, url, api_call):
self.artist = artist
self.title = title
self.song_id = song_id
self.url = url
self.api_call = api_call
# for use at a later date
self.referents = []
self.annotations = []
def form_output(self):
'''Forms lyric sheet output for either paging or printing directly to a terminal.'''
header = '{} - {}'.format(self.artist, self.title)
divider = '-'*(len(header) + 3)
lyrics = get_lyrics_from_url(self.url)
output = header + '\n' + divider + '\n' + lyrics + '\n'
return output
def get_referents_annotations(self, force=False):
''' Use song_id to pull referents for any annotations for the song. '''
if self.referents == [] or force is True:
referents_endpoint = API + '/referents'
payload = {'song_id': self.song_id, 'text_format': 'plain'}
referents_request_object = requests.get(referents_endpoint, params=payload, headers=HEADERS)
if referents_request_object.status_code == 200:
r_json_response = referents_request_object.json()
for r in r_json_response['response']['referents']:
r_class = r['classification']
r_frag = r['fragment']
r_id = r['id']
r_url = r['url']
r_api_call = referents_request_object.url
self.referents.append(Referent(r_class, r_frag, r_id, r_url, r_api_call))
for a in r['annotations']:
a_id = a['id']
a_text = a['body']['plain']
a_share = a['share_url']
a_url = a['url']
a_votes = a['votes_total']
a_api_call = API + '/annotations/' + str(a_id)
self.annotations.append(Annotation(a_id, a_text, a_share, a_url, a_votes, a_api_call))
elif referents_request_object.status_code >= 500:
pass
elif self.referents != []:
return self.referents
class Referent():
""" Class for representing referents and their respective annotation. """
def __init__(self, classification, fragment, annotation_id, url, api_call):
self.classification = classification
self.fragment = fragment
self.annotation_id = annotation_id
self.url = url
self.api_call = api_call
class Annotation():
""" Class for reprsentation of annotation metadata. """
def __init__(self, annotation_id, text, share_url, url, votes, api_call):
self.annotation_id = annotation_id
self.text = text
self.share_url = share_url
self.url = url
self.votes = votes
self.api_call = api_call
def genius_search(query):
''' Uses the genius.com search API to return a list of HitResult instances,
formed by JSON responses from the search. '''
results = []
search_endpoint = API + '/search?'
payload = {'q': query}
search_request_object = requests.get(search_endpoint, params=payload, headers=HEADERS)
if search_request_object.status_code == 200:
s_json_response = search_request_object.json()
api_call = search_request_object.url
# Get search entry information from JSON response
for hit in s_json_response['response']['hits']:
artist = hit['result']['primary_artist']['name']
title = hit['result']['title']
song_id = hit['result']['id']
url = hit['result']['url']
results.append(HitResult(artist, title, song_id, url, api_call))
elif 400 <= search_request_object.status_code < 500:
six.print_('[!] Uh-oh, something seems wrong...')
six.print_('[!] Please submit an issue at https://github.com/donniebishop/genius_lyrics/issues')
sys.exit(1)
elif search_request_object.status_code >= 500:
six.print_('[*] Hmm... Genius.com seems to be having some issues right now.')
six.print_('[*] Please try your search again in a little bit!')
sys.exit(1)
return results
def get_lyrics_from_url(song_url):
'''Looks up song_url, parses page for lyrics and returns the lyrics.'''
get_url = requests.get(song_url)
song_soup = BeautifulSoup(get_url.text, 'lxml')
soup_lyrics = song_soup.lyrics.text
return soup_lyrics
def pick_from_search(results_array):
''' If -s/--search is called, return a list of top results, and prompt
choice from list. Will continue to prompt until it receives a valid choice.
Returns HitResult instance of the appropriate JSON response. '''
for n in range(len(results_array)):
Current = results_array[n]
result_line = '[{}] {} - {}'.format(n+1, Current.artist, Current.title)
six.print_(result_line)
choice = -1
while choice <= 0 or choice > len(results_array):
try:
choice = int(input('\nPlease select a song number: '))
except ValueError:
six.print_('[!] Please enter a number.')
choice = -1
actual_choice = choice - 1
return results_array[actual_choice]
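# Illustrative end-to-end flow (a sketch; the query string is an assumption):
if __name__ == '__main__':
    hits = genius_search('kendrick lamar alright')
    if hits:
        chosen = pick_from_search(hits)
        six.print_(chosen.form_output())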
| 36.447205
| 110
| 0.619973
|
605d1bdb44e8b15bda8e74841c61a2093e024a4b
| 9,120
|
py
|
Python
|
docs/source/conf.py
|
hjelmn/Lmod
|
563407ec0e6fdd03904949c1715e073a7ae00929
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
hjelmn/Lmod
|
563407ec0e6fdd03904949c1715e073a7ae00929
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
hjelmn/Lmod
|
563407ec0e6fdd03904949c1715e073a7ae00929
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Lmod documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 25 22:52:04 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Lmod'
copyright = u'2015, Robert McLay'
author = u'Robert McLay'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '6.0'
# The full version, including alpha/beta/rc tags.
release = '6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Lmoddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Lmod.tex', u'Lmod Documentation',
u'Robert McLay', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lmod', u'Lmod Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Lmod', u'Lmod Documentation',
author, 'Lmod', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32
| 79
| 0.716776
|
bbaf0e228b1639eccdb936680f41c5a53e5e30af
| 852
|
py
|
Python
|
swayblur/paths.py
|
ForgottenUmbrella/swayblur
|
aab0c0bd4a5b49ae84a8540a4e3c6a7c1beeea25
|
[
"MIT"
] | 7
|
2021-11-25T15:29:51.000Z
|
2022-01-09T17:19:28.000Z
|
swayblur/paths.py
|
ForgottenUmbrella/swayblur
|
aab0c0bd4a5b49ae84a8540a4e3c6a7c1beeea25
|
[
"MIT"
] | 14
|
2021-11-10T06:24:09.000Z
|
2022-03-07T16:11:49.000Z
|
swayblur/paths.py
|
ForgottenUmbrella/swayblur
|
aab0c0bd4a5b49ae84a8540a4e3c6a7c1beeea25
|
[
"MIT"
] | 2
|
2021-11-25T19:00:01.000Z
|
2022-02-07T09:49:12.000Z
|
import pathlib
import shutil
DEFAULT_OGURI_DIR = pathlib.Path.home() / '.config/oguri/config'
CACHE_DIR = pathlib.Path.home() / '.cache/swayblur'
CACHE_VALIDATION_FILE = CACHE_DIR / 'settings.json'
# create the cache dir if it doesn't exist
def createCache() -> None:
CACHE_DIR.mkdir(parents=True, exist_ok=True)
# delete the cache dir
def deleteCache() -> None:
shutil.rmtree(CACHE_DIR, ignore_errors=True)
# check if a path exists
def exists(path: str) -> bool:
return pathlib.Path(path).is_file()
# returns the path to a given frame for a given output
def framePath(hash: str, frame: int) -> str:
return '%s/%s-%d.png' % (CACHE_DIR, hash, frame)
# returns the path to a cached image based on its path + hash
def cachedImagePath(path: str, hash: str) -> pathlib.Path:
return CACHE_DIR / ('%s%s' % (hash, pathlib.Path(path).suffix))
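# Illustrative usage (a sketch; the hash string is an assumption):
#   createCache()
#   framePath('d41d8cd9', 5)                            # '<CACHE_DIR>/d41d8cd9-5.png'
#   cachedImagePath('~/Pictures/wall.png', 'd41d8cd9')  # CACHE_DIR / 'd41d8cd9.png'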
| 30.428571
| 67
| 0.70892
|
e59e988675e797298f863a8cf9ce37066b99c739
| 955
|
py
|
Python
|
Course3/Lab4/validations.py
|
zulfauzio/it-cert-automation-practice
|
223f36612336ac14815d54d2734e81db5c8dc26a
|
[
"Apache-2.0"
] | null | null | null |
Course3/Lab4/validations.py
|
zulfauzio/it-cert-automation-practice
|
223f36612336ac14815d54d2734e81db5c8dc26a
|
[
"Apache-2.0"
] | null | null | null |
Course3/Lab4/validations.py
|
zulfauzio/it-cert-automation-practice
|
223f36612336ac14815d54d2734e81db5c8dc26a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import re
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
if type(username) != str:
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
# Usernames can't be shorter than minlen
if len(username) < minlen:
return False
# Usernames can only use letters, numbers, dots and underscores
if not re.match('^[a-z0-9._]*$', username):
return False
# Usernames can't begin with a number
if username[0].isnumeric():
return False
if not re.match('^[a-z]*$', username[0]):
return False
return True
print(validate_user("blue.kale", 3)) # True
print(validate_user(".blue.kale", 3)) # Currently True, should be False
print(validate_user("red_quinoa", 4)) # True
print(validate_user("_red_quinoa", 4)) # Currently True, should be False
| 32.931034
| 74
| 0.660733
|
9ca5b255c3e12db5b3426c3508a510d34e086c5a
| 6,371
|
py
|
Python
|
kubernetes/client/models/v1_azure_file_persistent_volume_source.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_azure_file_persistent_volume_source.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/client/models/v1_azure_file_persistent_volume_source.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1AzureFilePersistentVolumeSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'read_only': 'bool',
'secret_name': 'str',
'secret_namespace': 'str',
'share_name': 'str'
}
attribute_map = {
'read_only': 'readOnly',
'secret_name': 'secretName',
'secret_namespace': 'secretNamespace',
'share_name': 'shareName'
}
def __init__(self, read_only=None, secret_name=None, secret_namespace=None, share_name=None):
"""
V1AzureFilePersistentVolumeSource - a model defined in Swagger
"""
self._read_only = None
self._secret_name = None
self._secret_namespace = None
self._share_name = None
self.discriminator = None
if read_only is not None:
self.read_only = read_only
self.secret_name = secret_name
if secret_namespace is not None:
self.secret_namespace = secret_namespace
self.share_name = share_name
@property
def read_only(self):
"""
Gets the read_only of this V1AzureFilePersistentVolumeSource.
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:return: The read_only of this V1AzureFilePersistentVolumeSource.
:rtype: bool
"""
return self._read_only
@read_only.setter
def read_only(self, read_only):
"""
Sets the read_only of this V1AzureFilePersistentVolumeSource.
Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
:param read_only: The read_only of this V1AzureFilePersistentVolumeSource.
:type: bool
"""
self._read_only = read_only
@property
def secret_name(self):
"""
Gets the secret_name of this V1AzureFilePersistentVolumeSource.
the name of secret that contains Azure Storage Account Name and Key
:return: The secret_name of this V1AzureFilePersistentVolumeSource.
:rtype: str
"""
return self._secret_name
@secret_name.setter
def secret_name(self, secret_name):
"""
Sets the secret_name of this V1AzureFilePersistentVolumeSource.
the name of secret that contains Azure Storage Account Name and Key
:param secret_name: The secret_name of this V1AzureFilePersistentVolumeSource.
:type: str
"""
if secret_name is None:
raise ValueError("Invalid value for `secret_name`, must not be `None`")
self._secret_name = secret_name
@property
def secret_namespace(self):
"""
Gets the secret_namespace of this V1AzureFilePersistentVolumeSource.
the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod
:return: The secret_namespace of this V1AzureFilePersistentVolumeSource.
:rtype: str
"""
return self._secret_namespace
@secret_namespace.setter
def secret_namespace(self, secret_namespace):
"""
Sets the secret_namespace of this V1AzureFilePersistentVolumeSource.
the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod
:param secret_namespace: The secret_namespace of this V1AzureFilePersistentVolumeSource.
:type: str
"""
self._secret_namespace = secret_namespace
@property
def share_name(self):
"""
Gets the share_name of this V1AzureFilePersistentVolumeSource.
Share Name
:return: The share_name of this V1AzureFilePersistentVolumeSource.
:rtype: str
"""
return self._share_name
@share_name.setter
def share_name(self, share_name):
"""
Sets the share_name of this V1AzureFilePersistentVolumeSource.
Share Name
:param share_name: The share_name of this V1AzureFilePersistentVolumeSource.
:type: str
"""
if share_name is None:
raise ValueError("Invalid value for `share_name`, must not be `None`")
self._share_name = share_name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1AzureFilePersistentVolumeSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
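# Illustrative sketch (not part of the generated client): building the model
# and serialising it. The secret and share names below are made-up values.
def _example_build_azure_file_source():
    source = V1AzureFilePersistentVolumeSource(
        secret_name="azure-secret",   # hypothetical secret with the storage account name and key
        share_name="shared-volume",   # hypothetical Azure Files share
        read_only=True,
    )
    return source.to_dict()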
| 29.910798
| 115
| 0.615288
|
af6a62d24262d77aeaf3b40a374b65bd86be194e
| 11,351
|
py
|
Python
|
venv/Lib/site-packages/neo4j/work/result.py
|
KwanYu/Airbnb-Backend
|
61b4c89f891378181447fc251fa0d1c2c5f435de
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/neo4j/work/result.py
|
KwanYu/Airbnb-Backend
|
61b4c89f891378181447fc251fa0d1c2c5f435de
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/neo4j/work/result.py
|
KwanYu/Airbnb-Backend
|
61b4c89f891378181447fc251fa0d1c2c5f435de
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2020 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from warnings import warn
from neo4j.data import DataDehydrator
from neo4j.work.summary import ResultSummary
class Result:
"""A handler for the result of Cypher query execution. Instances
of this class are typically constructed and returned by
:meth:`.Session.run` and :meth:`.Transaction.run`.
"""
def __init__(self, connection, hydrant, fetch_size, on_closed):
self._connection = connection
self._hydrant = hydrant
self._on_closed = on_closed
self._metadata = None
self._record_buffer = deque()
self._summary = None
self._bookmark = None
self._qid = -1
self._fetch_size = fetch_size
# states
self._discarding = False # discard the remainder of records
self._attached = False # attached to a connection
        self._streaming = False  # there are still more records to buffer up on the wire
        self._has_more = False  # there are more records available to pull from the server
        self._closed = False  # the result has been fully iterated or consumed
def _tx_ready_run(self, query, parameters, **kwparameters):
# BEGIN+RUN does not carry any extra on the RUN message.
# BEGIN {extra}
# RUN "query" {parameters} {extra}
self._run(query, parameters, None, None, None, **kwparameters)
def _run(self, query, parameters, db, access_mode, bookmarks, **kwparameters):
query_text = str(query) # Query or string object
query_metadata = getattr(query, "metadata", None)
query_timeout = getattr(query, "timeout", None)
parameters = DataDehydrator.fix_parameters(dict(parameters or {}, **kwparameters))
self._metadata = {
"query": query_text,
"parameters": parameters,
"server": self._connection.server_info,
}
run_metadata = {
"metadata": query_metadata,
"timeout": query_timeout,
}
def on_attached(metadata):
self._metadata.update(metadata)
self._qid = metadata.get("qid", -1) # For auto-commit there is no qid and Bolt 3 do not support qid
self._keys = metadata.get("fields")
self._attached = True
def on_failed_attach(metadata):
self._metadata.update(metadata)
self._attached = False
self._on_closed()
self._connection.run(
query_text,
parameters=parameters,
mode=access_mode,
bookmarks=bookmarks,
metadata=query_metadata,
timeout=query_timeout,
db=db,
on_success=on_attached,
on_failure=on_failed_attach,
)
self._pull()
self._connection.send_all()
self._attach()
def _pull(self):
def on_records(records):
self._streaming = True
if not self._discarding:
self._record_buffer.extend(self._hydrant.hydrate_records(self._keys, records))
def on_summary():
self._attached = False
self._on_closed()
def on_failure(metadata):
self._attached = False
self._on_closed()
def on_success(summary_metadata):
has_more = summary_metadata.get("has_more")
if has_more:
self._has_more = True
self._streaming = False
return
else:
self._has_more = False
self._metadata.update(summary_metadata)
self._bookmark = summary_metadata.get("bookmark")
self._connection.pull(
n=self._fetch_size,
qid=self._qid,
on_records=on_records,
on_success=on_success,
on_failure=on_failure,
on_summary=on_summary,
)
def _discard(self):
def on_records(records):
pass
def on_summary():
self._attached = False
self._on_closed()
def on_failure(metadata):
self._metadata.update(metadata)
self._attached = False
self._on_closed()
def on_success(summary_metadata):
has_more = summary_metadata.get("has_more")
if has_more:
self._has_more = True
self._streaming = False
else:
self._has_more = False
self._discarding = False
self._metadata.update(summary_metadata)
self._bookmark = summary_metadata.get("bookmark")
# This was the last page received, discard the rest
self._connection.discard(
n=-1,
qid=self._qid,
on_records=on_records,
on_success=on_success,
on_failure=on_failure,
on_summary=on_summary,
)
def __iter__(self):
"""Iterator returning Records.
:returns: Record, it is an immutable ordered collection of key-value pairs.
:rtype: :class:`neo4j.Record`
"""
while self._record_buffer or self._attached:
while self._record_buffer:
yield self._record_buffer.popleft()
while self._attached is True: # _attached is set to False for _pull on_summary and _discard on_summary
self._connection.fetch_message() # Receive at least one message from the server, if available.
if self._attached:
if self._record_buffer:
yield self._record_buffer.popleft()
elif self._discarding and self._streaming is False:
self._discard()
self._connection.send_all()
elif self._has_more and self._streaming is False:
self._pull()
self._connection.send_all()
self._closed = True
def _attach(self):
"""Sets the Result object in an attached state by fetching messages from the connection to the buffer.
"""
if self._closed is False:
while self._attached is False:
self._connection.fetch_message()
def _buffer_all(self):
"""Sets the Result object in an detached state by fetching all records from the connection to the buffer.
"""
record_buffer = deque()
for record in self:
record_buffer.append(record)
self._closed = False
self._record_buffer = record_buffer
def _obtain_summary(self):
"""Obtain the summary of this result, buffering any remaining records.
:returns: The :class:`neo4j.ResultSummary` for this result
"""
if self._summary is None:
if self._metadata:
self._summary = ResultSummary(**self._metadata)
elif self._connection:
self._summary = ResultSummary(server=self._connection.server_info)
return self._summary
def keys(self):
"""The keys for the records in this result.
:returns: tuple of key names
:rtype: tuple
"""
return self._keys
def consume(self):
"""Consume the remainder of this result and return a ResultSummary.
:returns: The :class:`neo4j.ResultSummary` for this result
"""
if self._closed is False:
self._discarding = True
for _ in self:
pass
return self._obtain_summary()
def single(self):
"""Obtain the next and only remaining record from this result if available else return None.
Calling this method always exhausts the result.
A warning is generated if more than one record is available but
the first of these is still returned.
:returns: the next :class:`neo4j.Record` or :const:`None` if none remain
:warns: if more than one record is available
"""
records = list(self) # TODO: exhausts the result with self.consume if there are more records.
size = len(records)
if size == 0:
return None
if size != 1:
warn("Expected a result with a single record, but this result contains %d" % size)
return records[0]
def peek(self):
"""Obtain the next record from this result without consuming it.
This leaves the record in the buffer for further processing.
:returns: the next :class:`.Record` or :const:`None` if none remain
"""
if self._record_buffer:
return self._record_buffer[0]
if not self._attached:
return None
while self._attached:
self._connection.fetch_message()
if self._record_buffer:
return self._record_buffer[0]
return None
def graph(self):
"""Return a :class:`neo4j.graph.Graph` instance containing all the graph objects
in the result. After calling this method, the result becomes
detached, buffering all remaining records.
:returns: a result graph
:rtype: :class:`neo4j.graph.Graph`
"""
self._buffer_all()
return self._hydrant.graph
def value(self, key=0, default=None):
"""Helper function that return the remainder of the result as a list of values.
See :class:`neo4j.Record.value`
:param key: field to return for each remaining record. Obtain a single value from the record by index or key.
:param default: default value, used if the index of key is unavailable
:returns: list of individual values
:rtype: list
"""
return [record.value(key, default) for record in self]
def values(self, *keys):
"""Helper function that return the remainder of the result as a list of values lists.
See :class:`neo4j.Record.values`
:param keys: fields to return for each remaining record. Optionally filtering to include only certain values by index or key.
:returns: list of values lists
:rtype: list
"""
return [record.values(*keys) for record in self]
def data(self, *keys):
"""Helper function that return the remainder of the result as a list of dictionaries.
See :class:`neo4j.Record.data`
:param keys: fields to return for each remaining record. Optionally filtering to include only certain values by index or key.
:returns: list of dictionaries
:rtype: list
"""
return [record.data(*keys) for record in self]
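# Hedged usage sketch (not part of the driver module): how a Result is usually
# consumed through a session. The URI, credentials and query are placeholder
# values; the function is defined but never invoked here.
def _example_result_usage():
    from neo4j import GraphDatabase
    driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))
    with driver.session() as session:
        result = session.run("MATCH (p:Person) RETURN p.name AS name")
        names = [record["name"] for record in result]  # iteration drains the stream
        summary = result.consume()  # any remainder is discarded; the summary is returned
    driver.close()
    return names, summary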
| 34.926154
| 133
| 0.609109
|
efa091874534951823848189c8e26401c3dbf4cd
| 8,072
|
py
|
Python
|
homeassistant/components/isy994/climate.py
|
jonasjeeliasson/core
|
0301706fc631ad1f2cd2532667ba9dfe2f856198
|
[
"Apache-2.0"
] | 1
|
2021-04-29T06:51:10.000Z
|
2021-04-29T06:51:10.000Z
|
homeassistant/components/isy994/climate.py
|
jonasjeeliasson/core
|
0301706fc631ad1f2cd2532667ba9dfe2f856198
|
[
"Apache-2.0"
] | 46
|
2021-01-06T07:05:22.000Z
|
2022-03-31T06:05:25.000Z
|
homeassistant/components/isy994/climate.py
|
krzkowalczyk/home-assistant
|
513685bbeacca2c758d3ca33b337da3b7e72dd1d
|
[
"Apache-2.0"
] | 2
|
2020-06-03T20:24:39.000Z
|
2020-06-06T19:52:09.000Z
|
"""Support for Insteon Thermostats via ISY994 Platform."""
from __future__ import annotations
from typing import Callable
from pyisy.constants import (
CMD_CLIMATE_FAN_SETTING,
CMD_CLIMATE_MODE,
PROP_HEAT_COOL_STATE,
PROP_HUMIDITY,
PROP_SETPOINT_COOL,
PROP_SETPOINT_HEAT,
PROP_UOM,
PROTO_INSTEON,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN as CLIMATE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant
from .const import (
_LOGGER,
DOMAIN as ISY994_DOMAIN,
HA_FAN_TO_ISY,
HA_HVAC_TO_ISY,
ISY994_NODES,
ISY_HVAC_MODES,
UOM_FAN_MODES,
UOM_HVAC_ACTIONS,
UOM_HVAC_MODE_GENERIC,
UOM_HVAC_MODE_INSTEON,
UOM_ISY_CELSIUS,
UOM_ISY_FAHRENHEIT,
UOM_ISYV4_NONE,
UOM_TO_STATES,
)
from .entity import ISYNodeEntity
from .helpers import convert_isy_value_to_hass, migrate_old_unique_ids
ISY_SUPPORTED_FEATURES = (
SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 thermostat platform."""
entities = []
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
for node in hass_isy_data[ISY994_NODES][CLIMATE]:
entities.append(ISYThermostatEntity(node))
await migrate_old_unique_ids(hass, CLIMATE, entities)
async_add_entities(entities)
class ISYThermostatEntity(ISYNodeEntity, ClimateEntity):
"""Representation of an ISY994 thermostat entity."""
def __init__(self, node) -> None:
"""Initialize the ISY Thermostat entity."""
super().__init__(node)
self._node = node
self._uom = self._node.uom
if isinstance(self._uom, list):
self._uom = self._node.uom[0]
self._hvac_action = None
self._hvac_mode = None
self._fan_mode = None
self._temp_unit = None
self._current_humidity = 0
self._target_temp_low = 0
self._target_temp_high = 0
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return ISY_SUPPORTED_FEATURES
@property
def precision(self) -> str:
"""Return the precision of the system."""
return PRECISION_TENTHS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
uom = self._node.aux_properties.get(PROP_UOM)
if not uom:
return self.hass.config.units.temperature_unit
if uom.value == UOM_ISY_CELSIUS:
return TEMP_CELSIUS
if uom.value == UOM_ISY_FAHRENHEIT:
return TEMP_FAHRENHEIT
@property
def current_humidity(self) -> int | None:
"""Return the current humidity."""
humidity = self._node.aux_properties.get(PROP_HUMIDITY)
if not humidity:
return None
return int(humidity.value)
@property
def hvac_mode(self) -> str | None:
"""Return hvac operation ie. heat, cool mode."""
hvac_mode = self._node.aux_properties.get(CMD_CLIMATE_MODE)
if not hvac_mode:
return None
# Which state values used depends on the mode property's UOM:
uom = hvac_mode.uom
# Handle special case for ISYv4 Firmware:
if uom in (UOM_ISYV4_NONE, ""):
uom = (
UOM_HVAC_MODE_INSTEON
if self._node.protocol == PROTO_INSTEON
else UOM_HVAC_MODE_GENERIC
)
return UOM_TO_STATES[uom].get(hvac_mode.value)
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
return ISY_HVAC_MODES
@property
def hvac_action(self) -> str | None:
"""Return the current running hvac operation if supported."""
hvac_action = self._node.aux_properties.get(PROP_HEAT_COOL_STATE)
if not hvac_action:
return None
return UOM_TO_STATES[UOM_HVAC_ACTIONS].get(hvac_action.value)
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
return convert_isy_value_to_hass(
self._node.status, self._uom, self._node.prec, 1
)
@property
def target_temperature_step(self) -> float | None:
"""Return the supported step of target temperature."""
return 1.0
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_COOL:
return self.target_temperature_high
if self.hvac_mode == HVAC_MODE_HEAT:
return self.target_temperature_low
return None
@property
def target_temperature_high(self) -> float | None:
"""Return the highbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_COOL)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def target_temperature_low(self) -> float | None:
"""Return the lowbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_HEAT)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return [FAN_AUTO, FAN_ON]
@property
def fan_mode(self) -> str:
"""Return the current fan mode ie. auto, on."""
fan_mode = self._node.aux_properties.get(CMD_CLIMATE_FAN_SETTING)
if not fan_mode:
return None
return UOM_TO_STATES[UOM_FAN_MODES].get(fan_mode.value)
def set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.hvac_mode == HVAC_MODE_COOL:
target_temp_high = target_temp
if self.hvac_mode == HVAC_MODE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
self._node.set_climate_setpoint_heat(int(target_temp_low))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_low = target_temp_low
if target_temp_high is not None:
self._node.set_climate_setpoint_cool(int(target_temp_high))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_high = target_temp_high
self.schedule_update_ha_state()
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
_LOGGER.debug("Requested fan mode %s", fan_mode)
self._node.set_fan_mode(HA_FAN_TO_ISY.get(fan_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._fan_mode = fan_mode
self.schedule_update_ha_state()
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
_LOGGER.debug("Requested operation mode %s", hvac_mode)
self._node.set_climate_mode(HA_HVAC_TO_ISY.get(hvac_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._hvac_mode = hvac_mode
self.schedule_update_ha_state()
| 33.355372
| 84
| 0.66774
|
0efdbc4375b945d219f46b71493c01f6446f4945
| 7,381
|
py
|
Python
|
geofound/damping.py
|
eng-tools/geofound
|
b59e720cc0ca62c2999adca27cd66c45ae000828
|
[
"MIT"
] | 12
|
2017-12-16T10:15:57.000Z
|
2022-01-23T03:55:59.000Z
|
geofound/damping.py
|
eng-tools/geofound
|
b59e720cc0ca62c2999adca27cd66c45ae000828
|
[
"MIT"
] | 2
|
2018-01-29T16:28:38.000Z
|
2021-04-20T22:50:46.000Z
|
geofound/damping.py
|
eng-tools/geofound
|
b59e720cc0ca62c2999adca27cd66c45ae000828
|
[
"MIT"
] | 9
|
2018-02-09T17:20:55.000Z
|
2021-11-09T02:15:16.000Z
|
from geofound import tables_of_dyn_coefficients as tdc
import numpy as np
_pi = 3.14159265359
def calc_vert_via_gazetas_1991(sl, fd, a0, f_contact=1.0, saturated=False):
v_s = sl.get_shear_vel(saturated=saturated)
v_la = 3.4 / (_pi * (1 - sl.poissons_ratio)) * v_s
if saturated:
rho = sl.unit_sat_mass
else:
rho = sl.unit_dry_mass
l = max([fd.length, fd.width]) / 2
    b = min([fd.length, fd.width]) / 2  # half-breadth from the shorter plan dimension, so l / b >= 1
f_dyn = tdc.get_cz_gazetas_v_lte_0p4(a0, l / b)
if sl.poissons_ratio > 0.4:
czf = tdc.get_czf_gazetas_v_gt_0p4(a0, l / b)
f_dyn *= czf
if fd.depth is not None and fd.depth != 0.0:
if fd.depth < 0.0:
raise ValueError(f'foundation depth must be zero or greater, not {fd.depth}')
h = min([fd.height, fd.depth])
a_w = 2 * h * (fd.width + fd.length) * f_contact
c_emb = rho * v_s * a_w
else:
c_emb = 0.0
return rho * v_la * fd.area * f_dyn + c_emb
def calc_vert_strip_via_gazetas_1991(sl, fd, a0, ip_axis='width', f_contact=1.0, saturated=False):
v_s = sl.get_shear_vel(saturated=saturated)
v_la = 3.4 / (_pi * (1 - sl.poissons_ratio)) * v_s
if saturated:
rho = sl.unit_sat_mass
else:
rho = sl.unit_dry_mass
l_oop = 1.0
l_ip = getattr(fd, ip_axis)
if a0:
f_dyn = tdc.get_cz_gazetas_v_lte_0p4(a0, lob=1000)
if sl.poissons_ratio > 0.4:
czf = tdc.get_czf_gazetas_v_gt_0p4(a0, lob=1000)
f_dyn *= czf
else:
f_dyn = 1.0
if fd.depth is not None and fd.depth != 0.0:
if fd.depth < 0.0:
raise ValueError(f'foundation depth must be zero or greater, not {fd.depth}')
h = min([fd.height, fd.depth])
a_w = 2 * h * (l_ip + l_oop) * f_contact
c_emb = rho * v_s * a_w
else:
c_emb = 0.0
return rho * v_la * (l_ip * l_oop) * f_dyn + c_emb
def calc_horz_via_gazetas_1991(sl, fd, a0, ip_axis='width', f_contact=1.0, saturated=False):
if saturated:
rho = sl.unit_sat_mass
else:
rho = sl.unit_dry_mass
if fd.length >= fd.width:
len_dominant = True
l = fd.length * 0.5
b = fd.width * 0.5
else:
len_dominant = False
l = fd.width * 0.5
b = fd.length * 0.5
if (ip_axis == 'length' and len_dominant) or (ip_axis == 'width' and not len_dominant):
y_axis = True # Direction of l
else:
y_axis = False # Direction of b
if y_axis:
f_dyn_v3 = tdc.get_cy_gazetas_v_e_0p3(a0, l / b)
f_dyn_v5 = tdc.get_cy_gazetas_v_e_0p3(a0, l / b)
f_dyn = np.interp(sl.poissons_ratio, [0.3, 0.5], [f_dyn_v3, f_dyn_v5])
else:
f_dyn = 1.0 # no dynamic effect
if fd.depth is not None and fd.depth != 0.0:
if fd.depth < 0.0:
raise ValueError(f'foundation depth must be zero or greater, not {fd.depth}')
v_s = sl.get_shear_vel(saturated=saturated)
v_la = 3.4 / (_pi * (1 - sl.poissons_ratio)) * v_s
l_ip = getattr(fd, ip_axis)
if ip_axis == 'width':
l_oop = fd.length
else:
l_oop = fd.width
h = min([fd.height, fd.depth])
a_wc = 2 * h * l_oop * f_contact
a_ws = 2 * h * l_ip * f_contact
c_emb = rho * v_s * a_ws + rho * v_la * a_wc
else:
c_emb = 0.0
return rho * sl.get_shear_vel(saturated=saturated) * fd.area * f_dyn + c_emb
def calc_horz_strip_via_gazetas_1991(sl, fd, a0, ip_axis='width', f_contact=1.0, saturated=False):
if saturated:
rho = sl.unit_sat_mass
else:
rho = sl.unit_dry_mass
f_dyn_v3 = tdc.get_cy_gazetas_v_e_0p3(a0, lob=1000)
f_dyn_v5 = tdc.get_cy_gazetas_v_e_0p3(a0, lob=1000)
f_dyn = np.interp(sl.poissons_ratio, [0.3, 0.5], [f_dyn_v3, f_dyn_v5])
l_oop = 1.0
l_ip = getattr(fd, ip_axis)
if fd.depth is not None and fd.depth != 0.0:
if fd.depth < 0.0:
raise ValueError(f'foundation depth must be zero or greater, not {fd.depth}')
v_s = sl.get_shear_vel(saturated=saturated)
v_la = 3.4 / (_pi * (1 - sl.poissons_ratio)) * v_s
h = min([fd.height, fd.depth])
a_wc = 2 * h * l_oop * f_contact
a_ws = 2 * h * l_ip * f_contact
c_emb = rho * v_s * a_ws + rho * v_la * a_wc
else:
c_emb = 0.0
return rho * sl.get_shear_vel(saturated=saturated) * (l_ip * l_oop) * f_dyn + c_emb
def calc_rot_via_gazetas_1991(sl, fd, a0, ip_axis='width', saturated=False, f_contact=1.0):
v_la = 3.4 / (_pi * (1 - sl.poissons_ratio)) * sl.get_shear_vel(saturated=saturated)
if saturated:
rho = sl.unit_sat_mass
else:
rho = sl.unit_dry_mass
if fd.length >= fd.width:
len_dominant = True
l = fd.length * 0.5
b = fd.width * 0.5
i_bx = fd.i_ll
i_by = fd.i_ww
else:
len_dominant = False
l = fd.width * 0.5
b = fd.length * 0.5
i_by = fd.i_ll
i_bx = fd.i_ww
if (ip_axis == 'width' and len_dominant) or (ip_axis == 'length' and not len_dominant):
xx_axis = True # weaker rotation
else:
xx_axis = False
if xx_axis:
c_static = rho * v_la * i_bx
f_dyn = tdc.get_crx_gazetas(a0, l / b)
c_emb = 0.0
else:
c_static = rho * v_la * i_by
f_dyn = tdc.get_cry_gazetas(a0, l / b)
c_emb = 0.0 # TODO: this is wrong - but formula is very hard to interpret
return c_static * f_dyn + c_emb
def calc_rot_strip_via_gazetas_1991(sl, fd, a0, ip_axis='width', saturated=False, f_contact=1.0):
v_la = 3.4 / (_pi * (1 - sl.poissons_ratio)) * sl.get_shear_vel(saturated=saturated)
if saturated:
rho = sl.unit_sat_mass
else:
rho = sl.unit_dry_mass
l_oop = 1.0
l_ip = getattr(fd, ip_axis)
i_bx = l_oop * l_ip ** 3 / 12
c_static = rho * v_la * i_bx
f_dyn = tdc.get_crx_gazetas(a0, lob=1000)
c_emb = 0.0
return c_static * f_dyn + c_emb
def calc_tors_via_gazetas_1991(sl, fd, a0, saturated=False):
j_t = fd.i_ll + fd.i_ww
if saturated:
rho = sl.unit_sat_mass
else:
rho = sl.unit_dry_mass
l = max([fd.length, fd.width]) / 2
    b = min([fd.length, fd.width]) / 2  # half-breadth from the shorter plan dimension, so l / b >= 1
if a0:
f_dyn = tdc.get_ct_gazetas(a0, l / b)
else:
f_dyn = 1.0
return rho * sl.get_shear_vel(saturated=saturated) * j_t * f_dyn
def show_example():
import geofound as gf
sl = gf.create_soil()
sl.g_mod = 30e6
sl.poissons_ratio = 0.3
sl.unit_dry_weight = 16.0e3
lens = [0.5, 1.5, 10]
length = 1.5
width = 1.5
depth = 0.0
fd = gf.create_foundation(length, width, depth)
a0 = 1
c_rot = calc_rot_via_gazetas_1991(sl, fd, ip_axis='width', a0=a0)
c_strip = calc_rot_strip_via_gazetas_1991(sl, fd, ip_axis='width', a0=a0)
print(c_strip / c_rot * fd.length)
c_vert = calc_vert_via_gazetas_1991(sl, fd, a0=a0)
c_strip = calc_vert_strip_via_gazetas_1991(sl, fd, ip_axis='width', a0=a0)
print(c_strip / c_vert * fd.length)
c_horz = calc_horz_via_gazetas_1991(sl, fd, ip_axis='width', a0=a0)
c_strip = calc_horz_strip_via_gazetas_1991(sl, fd, ip_axis='width', a0=a0)
print(c_strip / c_horz * fd.length)
calc_rot_via_gazetas_1991(sl, fd, ip_axis='width', a0=0.5)
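# Hedged note (not part of geofound): in Gazetas-style charts the dimensionless
# frequency is commonly a0 = omega * B / Vs, with B the foundation half-width and
# Vs the soil shear wave velocity. A small sketch under that assumption:
def calc_a0(frequency_hz, half_width, shear_vel):
    omega = 2 * _pi * frequency_hz
    return omega * half_width / shear_vel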
if __name__ == '__main__':
show_example()
| 33.55
| 98
| 0.599919
|
91c08ad014e28071d4dd5db1b27d3d3d3241c366
| 4,638
|
py
|
Python
|
scripts/study_case/ID_15/train.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 8
|
2021-06-30T06:55:14.000Z
|
2022-03-18T01:57:14.000Z
|
scripts/study_case/ID_15/train.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 1
|
2021-06-30T03:08:15.000Z
|
2021-06-30T03:08:15.000Z
|
scripts/study_case/ID_15/train.py
|
kzbnb/numerical_bugs
|
bc22e72bcc06df6ce7889a25e0aeed027bde910b
|
[
"Apache-2.0"
] | 2
|
2021-11-17T11:19:48.000Z
|
2021-11-18T03:05:58.000Z
|
import time
import numpy as np
import sys
sys.path.append("/data")
import tensorflow as tf
from scripts.study_case.ID_15.data import *
from scripts.study_case.ID_15.model import *
from scripts.study_case.ID_15.utils import *
np.random.seed(0)
# Data
tf.app.flags.DEFINE_string('input', '/data/scripts/study_case/ID_15/data/gridworld_8.mat',
'Path to data')
tf.app.flags.DEFINE_integer('imsize', 8, 'Size of input image')
# Parameters
tf.app.flags.DEFINE_float('lr', 0.001, 'Learning rate for RMSProp')
tf.app.flags.DEFINE_integer('epochs', 100, 'Maximum epochs to train for')
tf.app.flags.DEFINE_integer('k', 10, 'Number of value iterations')
tf.app.flags.DEFINE_integer('ch_i', 2, 'Channels in input layer')
tf.app.flags.DEFINE_integer('ch_h', 150, 'Channels in initial hidden layer')
tf.app.flags.DEFINE_integer('ch_q', 10, 'Channels in q layer (~actions)')
tf.app.flags.DEFINE_integer('batchsize', 12, 'Batch size')
tf.app.flags.DEFINE_integer('statebatchsize', 10,
'Number of state inputs for each sample (real number, technically is k+1)')
tf.app.flags.DEFINE_boolean('untied_weights', False, 'Untie weights of VI network')
# Misc.
tf.app.flags.DEFINE_integer('display_step', 1, 'Print summary output every n epochs')
tf.app.flags.DEFINE_boolean('log', False, 'Enable for tensorboard summary')
tf.app.flags.DEFINE_string('logdir', 'log/', 'Directory to store tensorboard summary')
config = tf.app.flags.FLAGS
# symbolic input image tensor where typically first channel is image, second is the reward prior
X = tf.placeholder(tf.float32, name="X", shape=[None, config.imsize, config.imsize, config.ch_i])
# symbolic input batches of vertical positions
S1 = tf.placeholder(tf.int32, name="S1", shape=[None, config.statebatchsize])
# symbolic input batches of horizontal positions
S2 = tf.placeholder(tf.int32, name="S2", shape=[None, config.statebatchsize])
y = tf.placeholder(tf.int32, name="y", shape=[None])
# Construct model (Value Iteration Network)
if (config.untied_weights):
nn = VI_Untied_Block(X, S1, S2, config)
else:
nn = VI_Block(X, S1, S2, config)
# Define loss and optimizer
dim = tf.shape(y)[0]
cost_idx = tf.concat([tf.reshape(tf.range(dim), [dim, 1]), tf.reshape(y, [dim, 1])], 1)
#MUTATION#
cost = -tf.reduce_mean(tf.gather_nd(tf.log(nn), [cost_idx]))
# cost = -tf.reduce_mean(tf.log(nn))
optimizer = tf.train.RMSPropOptimizer(learning_rate=config.lr, epsilon=1e-6, centered=True).minimize(cost)
obj_function = tf.reduce_min(tf.abs(nn))
# Test model & calculate accuracy
cp = tf.cast(tf.argmax(nn, 1), tf.int32)
err = tf.reduce_mean(tf.cast(tf.not_equal(cp, y), dtype=tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
saver = tf.train.Saver()
Xtrain, S1train, S2train, ytrain, Xtest, S1test, S2test, ytest = process_gridworld_data(input=config.input,
imsize=config.imsize)
# Launch the graph
with tf.Session() as sess:
tf.train.write_graph(sess.graph_def, '/data/scripts/study_case/pbtxt_files', 'tensorflow_value_iteration_networks.pbtxt')
if config.log:
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(config.logdir, sess.graph)
sess.run(init)
batch_size = config.batchsize
print(fmt_row(10, ["Epoch", "Train Cost", "Train Err", "Epoch Time"]))
'''inserted code'''
from scripts.utils.tf_utils import TensorFlowScheduler
scheduler = TensorFlowScheduler(name="tensorflow_value_iteration_networks_v1")
'''inserted code'''
epoch = 1
while True:
for i in range(0, Xtrain.shape[0], batch_size):
j = i + batch_size
if j <= Xtrain.shape[0]:
fd = {X: Xtrain[i:j], S1: S1train[i:j], S2: S2train[i:j],
y: ytrain[i * config.statebatchsize:j * config.statebatchsize]}
_, e_, loss, obj_function_val = sess.run([optimizer, err, cost, obj_function], feed_dict=fd)
x_val, S1_val, S2_val, y_val = sess.run([X, S1, S2, y], feed_dict=fd)
print(f"X ({np.max(x_val)}, {np.min(x_val)}), S1 ({np.max(S1_val)}, {np.min(S1_val)}), S2 ({np.max(S2_val)}, {np.min(S2_val)}), y ({np.max(y_val)}, {np.min(y_val)})")
'''inserted code'''
scheduler.loss_checker(loss)
'''inserted code'''
'''inserted code'''
scheduler.check_time()
'''inserted code'''
| 44.171429
| 182
| 0.668823
|
7d1317473531e4738d543cf0a45c0f079ef206c7
| 5,534
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_cuda_graph.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 17,085
|
2016-11-18T06:40:52.000Z
|
2022-03-31T22:52:32.000Z
|
python/paddle/fluid/tests/unittests/test_cuda_graph.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 29,769
|
2016-11-18T06:35:22.000Z
|
2022-03-31T16:46:15.000Z
|
python/paddle/fluid/tests/unittests/test_cuda_graph.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 4,641
|
2016-11-18T07:43:33.000Z
|
2022-03-31T15:15:02.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
from paddle.device.cuda.graphs import CUDAGraph
import unittest
import numpy as np
from paddle.fluid.dygraph.base import switch_to_static_graph
from simple_nets import simple_fc_net_with_inputs
class TestCUDAGraph(unittest.TestCase):
def setUp(self):
if paddle.is_compiled_with_cuda() and not paddle.is_compiled_with_rocm(
):
fluid.set_flags({
'FLAGS_allocator_strategy': 'auto_growth',
'FLAGS_sync_nccl_allreduce': False,
'FLAGS_cudnn_deterministic': True
})
def random_tensor(self, shape):
return paddle.to_tensor(
np.random.randint(
low=0, high=10, size=shape).astype("float32"))
@switch_to_static_graph
def test_cuda_graph_static_graph(self):
if not paddle.is_compiled_with_cuda() or paddle.is_compiled_with_rocm():
return
seed = 100
loss_cuda_graph = self.cuda_graph_static_graph_main(
seed, use_cuda_graph=True)
loss_no_cuda_graph = self.cuda_graph_static_graph_main(
seed, use_cuda_graph=False)
self.assertEqual(loss_cuda_graph, loss_no_cuda_graph)
def cuda_graph_static_graph_main(self, seed, use_cuda_graph):
batch_size = 1
class_num = 10
image_shape = [batch_size, 784]
label_shape = [batch_size, 1]
paddle.seed(seed)
np.random.seed(seed)
startup = paddle.static.Program()
main = paddle.static.Program()
with paddle.static.program_guard(main, startup):
image = paddle.static.data(
name="image", shape=image_shape, dtype='float32')
label = paddle.static.data(
name="label", shape=label_shape, dtype='int64')
image.persistable = True
label.persistable = True
loss = simple_fc_net_with_inputs(image, label, class_num)
loss.persistable = True
lr = paddle.optimizer.lr.PiecewiseDecay(
boundaries=[2, 3, 4], values=[0.01, 0.02, 0.03, 0.04])
optimizer = paddle.optimizer.SGD(learning_rate=lr)
optimizer.minimize(loss)
place = paddle.CUDAPlace(0)
exe = paddle.static.Executor(place)
scope = paddle.static.Scope()
with paddle.static.scope_guard(scope):
exe.run(startup)
build_strategy = paddle.static.BuildStrategy()
build_strategy.allow_cuda_graph_capture = True
build_strategy.fix_op_run_order = True
build_strategy.fuse_all_optimizer_ops = True
compiled_program = paddle.static.CompiledProgram(
main).with_data_parallel(
loss_name=loss.name,
build_strategy=build_strategy,
places=place)
image_t = scope.var(image.name).get_tensor()
label_t = scope.var(label.name).get_tensor()
loss_t = scope.var(loss.name).get_tensor()
lr_var = main.global_block().var(lr._var_name)
self.assertTrue(lr_var.persistable)
lr_t = scope.var(lr_var.name).get_tensor()
cuda_graph = None
for batch_id in range(20):
image_t.set(
np.random.rand(*image_shape).astype('float32'), place)
label_t.set(np.random.randint(
low=0, high=class_num, size=label_shape, dtype='int64'),
place)
if batch_id == 1 and use_cuda_graph:
cuda_graph = CUDAGraph(place, mode="global")
cuda_graph.capture_begin()
exe.run(compiled_program)
cuda_graph.capture_end()
if cuda_graph:
lr_t.set(np.array([lr()], dtype='float32'), place)
cuda_graph.replay()
else:
exe.run(compiled_program)
lr.step()
if cuda_graph:
cuda_graph.reset()
return np.array(loss_t)
def test_cuda_graph_dynamic_graph(self):
if not paddle.is_compiled_with_cuda() or paddle.is_compiled_with_rocm():
return
shape = [2, 3]
x = self.random_tensor(shape)
z = self.random_tensor(shape)
g = CUDAGraph()
g.capture_begin()
y = x + 10
z.add_(x)
g.capture_end()
for _ in range(10):
z_np_init = z.numpy()
x_new = self.random_tensor(shape)
x.copy_(x_new, False)
g.replay()
x_np = x_new.numpy()
y_np = y.numpy()
z_np = z.numpy()
self.assertTrue((y_np - x_np == 10).all())
self.assertTrue((z_np - z_np_init == x_np).all())
g.reset()
if __name__ == "__main__":
unittest.main()
| 37.391892
| 80
| 0.599205
|
4b6d7e77251522ebf72c347c8c77897c5a45606d
| 1,644
|
py
|
Python
|
test/test_sparql/sparql/ConstructTests/Test10_22.py
|
renefritze/rdfextras
|
44ccf3ba1ccfc0e0c434146b98a5e236021da56d
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_sparql/sparql/ConstructTests/Test10_22.py
|
renefritze/rdfextras
|
44ccf3ba1ccfc0e0c434146b98a5e236021da56d
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_sparql/sparql/ConstructTests/Test10_22.py
|
renefritze/rdfextras
|
44ccf3ba1ccfc0e0c434146b98a5e236021da56d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/d/Bin/Python/python.exe
# -*- coding: utf-8 -*-
#
#
# $Date: 2005/04/02 07:30:02 $, by $Author: ivan $, $Revision: 1.1 $
#
from testSPARQL import ns_rdf
from testSPARQL import ns_rdfs
from testSPARQL import ns_dc0
from testSPARQL import ns_foaf
from testSPARQL import ns_ns
from testSPARQL import ns_book
from testSPARQL import ns_vcard
from testSPARQL import ns_person
from rdflib.Literal import Literal
from rdflib import BNode
from rdfextras.sparql.sparql import PatternBNode
from rdfextras.sparql.sparqlOperators import lt, ge
import datetime
from rdfextras.sparql.graphPattern import GraphPattern
thresholdDate = datetime.date(2005,0o1,0o1)
rdfData = """<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:ns = "http://example.org/ns#"
>
<rdf:Description>
<foaf:givenname>Alice</foaf:givenname>
<foaf:family_name>Hacker</foaf:family_name>
</rdf:Description>
<rdf:Description>
<foaf:givenname>Bob</foaf:givenname>
<foaf:family_name>Hacker</foaf:family_name>
</rdf:Description>
</rdf:RDF>
"""
select = []
pattern = GraphPattern([("?x",ns_foaf["givenname"],"?name"),("?x",ns_foaf["family_name"],"?fname")])
optional = []
bnode = BNode("v") #PatternBNode("")
construct = GraphPattern([("?x", ns_vcard["N"],bnode),(bnode,ns_vcard["givenName"],"?name"),(bnode,ns_vcard["familyName"],"?fname")])
tripleStore = None
| 31.615385
| 135
| 0.676399
|
2edec0ee5f05beef50eee7d784690e0d55e4ed19
| 1,796
|
py
|
Python
|
vitrage/storage/__init__.py
|
soda-research/vitrage
|
8f912a97e8e8350429b37b11f8fbd2341e5997d8
|
[
"Apache-2.0"
] | null | null | null |
vitrage/storage/__init__.py
|
soda-research/vitrage
|
8f912a97e8e8350429b37b11f8fbd2341e5997d8
|
[
"Apache-2.0"
] | null | null | null |
vitrage/storage/__init__.py
|
soda-research/vitrage
|
8f912a97e8e8350429b37b11f8fbd2341e5997d8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import six.moves.urllib.parse as urlparse
from stevedore import driver
import tenacity
from vitrage.utils.datetime import utcnow
_NAMESPACE = 'vitrage.storage'
LOG = log.getLogger(__name__)
OPTS = []
def get_connection_from_config(conf):
retries = conf.database.max_retries
url = conf.database.connection
try:
        # TODO(iafek): check why this call randomly fails
connection_scheme = urlparse.urlparse(url).scheme
LOG.debug('looking for %(name)r driver in %(namespace)r',
{'name': connection_scheme, 'namespace': _NAMESPACE})
mgr = driver.DriverManager(_NAMESPACE, connection_scheme)
except Exception as e:
LOG.exception('Failed to get scheme %s. Exception: %s ', str(url), e)
return None
@tenacity.retry(
wait=tenacity.wait_fixed(conf.database.retry_interval),
stop=tenacity.stop_after_attempt(retries if retries >= 0 else 5),
reraise=True)
def _get_connection():
"""Return an open connection to the database."""
return mgr.driver(conf, url)
return _get_connection()
def db_time():
ret = utcnow(with_timezone=False)
return ret.replace(microsecond=0)
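# Illustrative sketch (not part of vitrage): obtaining a storage connection from
# a loaded configuration. `conf` is assumed to be an oslo.config ConfigOpts
# instance with the [database] options already registered.
def _example_get_connection(conf):
    conn = get_connection_from_config(conf)
    if conn is None:
        LOG.warning('No storage connection could be created')
    return conn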
| 29.933333
| 77
| 0.711024
|
9ac6ea180ffbcd237b1fc9c9efd48c570042199c
| 1,093
|
py
|
Python
|
tests/test_force_series.py
|
lahwaacz/tvnamer
|
3d67701e39f4368cad96004c25d96e1b10638806
|
[
"Unlicense"
] | null | null | null |
tests/test_force_series.py
|
lahwaacz/tvnamer
|
3d67701e39f4368cad96004c25d96e1b10638806
|
[
"Unlicense"
] | null | null | null |
tests/test_force_series.py
|
lahwaacz/tvnamer
|
3d67701e39f4368cad96004c25d96e1b10638806
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""Test ability to set the series name by series id
"""
from functional_runner import run_tvnamer, verify_out_data
from nose.plugins.attrib import attr
@attr("functional")
def test_series_id():
"""Test --series-id argument
"""
conf = """
{"batch": true}
"""
out_data = run_tvnamer(
with_files = ['whatever.s01e01.avi'],
with_config = conf,
with_flags = ["--series-id", '76156'],
with_input = "")
expected_files = ['Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
@attr("functional")
def test_series_id_with_nameless_series():
"""Test --series-id argument with '6x17.etc.avi' type filename
"""
conf = """
{"always_rename": true,
"select_first": true}
"""
out_data = run_tvnamer(
with_files = ['s01e01.avi'],
with_config = conf,
with_flags = ["--series-id", '76156', "--batch"],
with_input = "")
expected_files = ['Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
| 22.306122
| 66
| 0.614822
|
2e17649054fe847d4d5efb2c1481ecf619c4e912
| 8,140
|
py
|
Python
|
examples/demonstration/tensorflow_asr/models/keras/contextnet.py
|
o74589055/tf_asr
|
1801a035d15253fd25df4f9541457dd635f6d10d
|
[
"Apache-2.0"
] | 1
|
2021-03-20T09:21:49.000Z
|
2021-03-20T09:21:49.000Z
|
examples/demonstration/tensorflow_asr/models/keras/contextnet.py
|
o74589055/tf_asr
|
1801a035d15253fd25df4f9541457dd635f6d10d
|
[
"Apache-2.0"
] | null | null | null |
examples/demonstration/tensorflow_asr/models/keras/contextnet.py
|
o74589055/tf_asr
|
1801a035d15253fd25df4f9541457dd635f6d10d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huy Le Nguyen (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import tensorflow as tf
from .transducer import Transducer
from ..contextnet import ContextNetEncoder, L2
from ...utils.utils import get_reduced_length
class ContextNet(Transducer):
def __init__(self,
vocabulary_size: int,
encoder_blocks: List[dict],
encoder_alpha: float = 0.5,
prediction_embed_dim: int = 512,
prediction_embed_dropout: int = 0,
prediction_num_rnns: int = 1,
prediction_rnn_units: int = 320,
prediction_rnn_type: str = "lstm",
prediction_rnn_implementation: int = 2,
prediction_layer_norm: bool = True,
prediction_projection_units: int = 0,
joint_dim: int = 1024,
joint_activation: str = "tanh",
prejoint_linear: bool = True,
kernel_regularizer=L2,
bias_regularizer=L2,
name: str = "contextnet",
**kwargs):
super(ContextNet, self).__init__(
encoder=ContextNetEncoder(
blocks=encoder_blocks,
alpha=encoder_alpha,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=f"{name}_encoder"
),
vocabulary_size=vocabulary_size,
embed_dim=prediction_embed_dim,
embed_dropout=prediction_embed_dropout,
num_rnns=prediction_num_rnns,
rnn_units=prediction_rnn_units,
rnn_type=prediction_rnn_type,
rnn_implementation=prediction_rnn_implementation,
layer_norm=prediction_layer_norm,
projection_units=prediction_projection_units,
joint_dim=joint_dim,
joint_activation=joint_activation,
prejoint_linear=prejoint_linear,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
name=name, **kwargs
)
self.dmodel = self.encoder.blocks[-1].dmodel
self.time_reduction_factor = 1
for block in self.encoder.blocks: self.time_reduction_factor *= block.time_reduction_factor
def call(self, inputs, training=False, **kwargs):
enc = self.encoder([inputs["input"], inputs["input_length"]], training=training, **kwargs)
pred = self.predict_net([inputs["prediction"], inputs["prediction_length"]], training=training, **kwargs)
outputs = self.joint_net([enc, pred], training=training, **kwargs)
return {
"logit": outputs,
"logit_length": get_reduced_length(inputs["input_length"], self.time_reduction_factor)
}
def encoder_inference(self, features: tf.Tensor, input_length: tf.Tensor):
with tf.name_scope(f"{self.name}_encoder"):
input_length = tf.expand_dims(tf.shape(features)[0], axis=0)
outputs = tf.expand_dims(features, axis=0)
outputs = self.encoder([outputs, input_length], training=False)
return tf.squeeze(outputs, axis=0)
# -------------------------------- GREEDY -------------------------------------
@tf.function
def recognize(self,
features: tf.Tensor,
input_length: tf.Tensor,
parallel_iterations: int = 10,
swap_memory: bool = True):
"""
RNN Transducer Greedy decoding
Args:
features (tf.Tensor): a batch of padded extracted features
Returns:
tf.Tensor: a batch of decoded transcripts
"""
encoded = self.encoder([features, input_length], training=False)
return self._perform_greedy_batch(encoded, input_length,
parallel_iterations=parallel_iterations, swap_memory=swap_memory)
def recognize_tflite(self, signal, predicted, prediction_states):
"""
Function to convert to tflite using greedy decoding (default streaming mode)
Args:
signal: tf.Tensor with shape [None] indicating a single audio signal
predicted: last predicted character with shape []
            prediction_states: latest prediction states with shape [num_rnns, 1 or 2, 1, P]
Return:
transcript: tf.Tensor of Unicode Code Points with shape [None] and dtype tf.int32
predicted: last predicted character with shape []
            encoder_states: latest encoder states with shape [num_rnns, 1 or 2, 1, P]
            prediction_states: latest prediction states with shape [num_rnns, 1 or 2, 1, P]
"""
features = self.speech_featurizer.tf_extract(signal)
encoded = self.encoder_inference(features, tf.shape(features)[0])
hypothesis = self._perform_greedy(encoded, tf.shape(encoded)[0], predicted, prediction_states)
transcript = self.text_featurizer.indices2upoints(hypothesis.prediction)
return transcript, hypothesis.index, hypothesis.states
def recognize_tflite_with_timestamp(self, signal, predicted, states):
features = self.speech_featurizer.tf_extract(signal)
encoded = self.encoder_inference(features, tf.shape(features)[0])
hypothesis = self._perform_greedy(encoded, tf.shape(encoded)[0], predicted, states)
indices = self.text_featurizer.normalize_indices(hypothesis.prediction)
upoints = tf.gather_nd(self.text_featurizer.upoints, tf.expand_dims(indices, axis=-1)) # [None, max_subword_length]
num_samples = tf.cast(tf.shape(signal)[0], dtype=tf.float32)
total_time_reduction_factor = self.time_reduction_factor * self.speech_featurizer.frame_step
stime = tf.range(0, num_samples, delta=total_time_reduction_factor, dtype=tf.float32)
stime /= tf.cast(self.speech_featurizer.sample_rate, dtype=tf.float32)
etime = tf.range(total_time_reduction_factor, num_samples, delta=total_time_reduction_factor, dtype=tf.float32)
etime /= tf.cast(self.speech_featurizer.sample_rate, dtype=tf.float32)
non_blank = tf.where(tf.not_equal(upoints, 0))
non_blank_transcript = tf.gather_nd(upoints, non_blank)
non_blank_stime = tf.gather_nd(tf.repeat(tf.expand_dims(stime, axis=-1), tf.shape(upoints)[-1], axis=-1), non_blank)
non_blank_etime = tf.gather_nd(tf.repeat(tf.expand_dims(etime, axis=-1), tf.shape(upoints)[-1], axis=-1), non_blank)
return non_blank_transcript, non_blank_stime, non_blank_etime, hypothesis.index, hypothesis.states
# -------------------------------- BEAM SEARCH -------------------------------------
@tf.function
def recognize_beam(self,
features: tf.Tensor,
input_length: tf.Tensor,
lm: bool = False,
parallel_iterations: int = 10,
swap_memory: bool = True):
"""
RNN Transducer Beam Search
Args:
features (tf.Tensor): a batch of padded extracted features
lm (bool, optional): whether to use language model. Defaults to False.
Returns:
tf.Tensor: a batch of decoded transcripts
"""
encoded = self.encoder([features, input_length], training=False)
return self._perform_beam_search_batch(encoded, input_length, lm,
parallel_iterations=parallel_iterations, swap_memory=swap_memory)
| 47.602339
| 124
| 0.634521
|
5f923f69d777a8b80a1338d94b774f7a839aa5a3
| 12,421
|
py
|
Python
|
azure-mgmt-sql/azure/mgmt/sql/operations/database_vulnerability_assessments_operations.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-sql/azure/mgmt/sql/operations/database_vulnerability_assessments_operations.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | null | null | null |
azure-mgmt-sql/azure/mgmt/sql/operations/database_vulnerability_assessments_operations.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class DatabaseVulnerabilityAssessmentsOperations(object):
"""DatabaseVulnerabilityAssessmentsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar vulnerability_assessment_name: The name of the vulnerability assessment. Constant value: "default".
:ivar api_version: The API version to use for the request. Constant value: "2017-03-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.vulnerability_assessment_name = "default"
self.api_version = "2017-03-01-preview"
self.config = config
def get(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Gets the database's vulnerability assessment.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database for which the
vulnerability assessment is defined.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DatabaseVulnerabilityAssessment or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.sql.models.DatabaseVulnerabilityAssessment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("self.vulnerability_assessment_name", self.vulnerability_assessment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseVulnerabilityAssessment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}'}
def create_or_update(
self, resource_group_name, server_name, database_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Creates or updates the database's vulnerability assessment.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database for which the
vulnerability assessment is defined.
:type database_name: str
:param parameters: The requested resource.
:type parameters:
~azure.mgmt.sql.models.DatabaseVulnerabilityAssessment
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: DatabaseVulnerabilityAssessment or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.sql.models.DatabaseVulnerabilityAssessment or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("self.vulnerability_assessment_name", self.vulnerability_assessment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'DatabaseVulnerabilityAssessment')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('DatabaseVulnerabilityAssessment', response)
if response.status_code == 201:
deserialized = self._deserialize('DatabaseVulnerabilityAssessment', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}'}
def delete(
self, resource_group_name, server_name, database_name, custom_headers=None, raw=False, **operation_config):
"""Removes the database's vulnerability assessment.
:param resource_group_name: The name of the resource group that
contains the resource. You can obtain this value from the Azure
Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database for which the
vulnerability assessment is defined.
:type database_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'databaseName': self._serialize.url("database_name", database_name, 'str'),
'vulnerabilityAssessmentName': self._serialize.url("self.vulnerability_assessment_name", self.vulnerability_assessment_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/vulnerabilityAssessments/{vulnerabilityAssessmentName}'}
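# A minimal usage sketch for the operations defined above. It assumes the parent
# SqlManagementClient exposes this group as `database_vulnerability_assessments`
# (the attribute name is an assumption) and that valid credentials and a
# subscription id are available; the resource names below are hypothetical.
#
#   from azure.mgmt.sql import SqlManagementClient
#   client = SqlManagementClient(credentials, subscription_id)
#   assessment = client.database_vulnerability_assessments.get(
#       resource_group_name='example-rg',
#       server_name='example-server',
#       database_name='example-db')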
| 49.486056
| 234
| 0.686257
|
2883312d494a895571df61aae1ec93c7ecbfbcfe
| 583
|
py
|
Python
|
data/test/python/2883312d494a895571df61aae1ec93c7ecbfbcfeAddOnceEventDispatch.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/test/python/2883312d494a895571df61aae1ec93c7ecbfbcfeAddOnceEventDispatch.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/test/python/2883312d494a895571df61aae1ec93c7ecbfbcfeAddOnceEventDispatch.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
# -*- coding: utf-8 -*-
from Libs.EventHandle.EventDispatch import EventDispatch;
'''
@brief An event callback function can only be added once
'''
class AddOnceEventDispatch(EventDispatch):
def __init__(self, eventId_ = 0):
super(AddOnceEventDispatch, self).__init__(eventId_);
self.mTypeId = "AddOnceEventDispatch";
def addEventHandle(self, pThis, handle):
        # This check ensures the same callback function is only added once. If different resources share the same callback it can cause problems, but guaranteeing single registration is worth it, so different resources should use different callback functions.
if (not self.existEventHandle(pThis, handle)):
super(AddOnceEventDispatch, self).addEventHandle(pThis, handle);
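# A minimal usage sketch, assuming the base EventDispatch class provides the
# existEventHandle/addEventHandle pair with the (pThis, handle) signature used
# above; `owner` and `onEvent` below are hypothetical names.
#
#   dispatcher = AddOnceEventDispatch(eventId_=1)
#   dispatcher.addEventHandle(owner, onEvent)
#   dispatcher.addEventHandle(owner, onEvent)  # ignored: the handle already exists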
| 26.5
| 85
| 0.701544
|
8fc6353a08f313eba2d0efd1f82d0ef829097c9a
| 4,555
|
py
|
Python
|
cohesity_management_sdk/models/snapshot_copy_archival_policy.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/snapshot_copy_archival_policy.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
cohesity_management_sdk/models/snapshot_copy_archival_policy.py
|
sachinthakare-cohesity/management-sdk-python
|
c95f67b7d387d5bab8392be43190e598280ae7b5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.archival_target
class SnapshotCopyArchivalPolicy(object):
"""Implementation of the 'Snapshot Copy Archival Policy.' model.
Specifies settings for copying Snapshots External Targets (such as AWS or
Tape). This also specifies the retention policy that should be applied to
Snapshots after they have been copied to the specified target.
Attributes:
copy_partial (bool): Specifies if Snapshots are copied from the first
completely successful Job Run or the first partially successful
Job Run occurring at the start of the replication schedule. If
true, Snapshots are copied from the first Job Run occurring at the
start of the replication schedule, even if first Job Run was not
completely successful i.e. Snapshots were not captured for all
Objects in the Job. If false, Snapshots are copied from the first
Job Run occurring at the start of the replication schedule that
was completely successful i.e. Snapshots for all the Objects in
the Job were successfully captured.
days_to_keep (long|int): Specifies the number of days to retain copied
Snapshots on the target.
multiplier (int): Specifies a factor to multiply the periodicity by,
to determine the copy schedule. For example if set to 2 and the
periodicity is hourly, then Snapshots from the first eligible Job
Run for every 2 hour period is copied.
periodicity (PeriodicityEnum): Specifies the frequency that Snapshots
should be copied to the specified target. Used in combination with
            multiplier. 'kEvery' means that the Snapshot copy occurs after the
number of Job Runs equals the number specified in the multiplier.
'kHour' means that the Snapshot copy occurs hourly at the
frequency set in the multiplier, for example if multiplier is 2,
the copy occurs every 2 hours. 'kDay' means that the Snapshot copy
occurs daily at the frequency set in the multiplier. 'kWeek' means
that the Snapshot copy occurs weekly at the frequency set in the
multiplier. 'kMonth' means that the Snapshot copy occurs monthly
at the frequency set in the multiplier. 'kYear' means that the
Snapshot copy occurs yearly at the frequency set in the
multiplier.
target (ArchivalTarget): Specifies the archival target to copy the
Snapshots to.
"""
# Create a mapping from Model property names to API property names
_names = {
"copy_partial":'copyPartial',
"days_to_keep":'daysToKeep',
"multiplier":'multiplier',
"periodicity":'periodicity',
"target":'target'
}
def __init__(self,
copy_partial=None,
days_to_keep=None,
multiplier=None,
periodicity=None,
target=None):
"""Constructor for the SnapshotCopyArchivalPolicy class"""
# Initialize members of the class
self.copy_partial = copy_partial
self.days_to_keep = days_to_keep
self.multiplier = multiplier
self.periodicity = periodicity
self.target = target
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
copy_partial = dictionary.get('copyPartial')
days_to_keep = dictionary.get('daysToKeep')
multiplier = dictionary.get('multiplier')
periodicity = dictionary.get('periodicity')
target = cohesity_management_sdk.models.archival_target.ArchivalTarget.from_dictionary(dictionary.get('target')) if dictionary.get('target') else None
# Return an object of this model
return cls(copy_partial,
days_to_keep,
multiplier,
periodicity,
target)
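# A minimal sketch of building the model from an API-style dictionary; the field
# values below are illustrative, not taken from Cohesity documentation.
def _example_snapshot_copy_archival_policy():
    policy = SnapshotCopyArchivalPolicy.from_dictionary({
        'copyPartial': True,
        'daysToKeep': 90,
        'multiplier': 1,
        'periodicity': 'kDay',
    })
    return policy.days_to_keep  # -> 90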
| 42.971698
| 158
| 0.653128
|
74b559a84c5740e4b06851ede90510e390c688cd
| 883
|
py
|
Python
|
app/__init__.py
|
bgigous/flask-labs-project3
|
7764c52207e80121486c35aef406d780404accce
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
bgigous/flask-labs-project3
|
7764c52207e80121486c35aef406d780404accce
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
bgigous/flask-labs-project3
|
7764c52207e80121486c35aef406d780404accce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Flask
from flask_login import LoginManager
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from config import config
db = SQLAlchemy()
migrate = Migrate()
login_manager = LoginManager()
login_manager.session_protection = "strong"
login_manager.login_view = "auth.login"
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
db.init_app(app)
migrate.init_app(app, db=db)
login_manager.init_app(app)
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .auth import auth as auth_blueprint
app.register_blueprint(auth_blueprint, url_prefix="/auth")
from .utils import utils as utils_blueprint
app.register_blueprint(utils_blueprint)
return app
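# A minimal sketch of bootstrapping the app, assuming the project's config module
# defines a 'development' entry (the key name is an assumption).
#
#   app = create_app('development')
#   app.run(debug=True)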
| 21.536585
| 62
| 0.757644
|
6a6fa6222cfc2128be6d5120f07de8a257fe276b
| 939
|
py
|
Python
|
cancat/vstruct/constants/__init__.py
|
TomSomerville/CanCatFork
|
3a3f684c794fd93daa722992b727fd9522b89ae6
|
[
"BSD-2-Clause"
] | 146
|
2015-04-28T16:13:39.000Z
|
2022-03-20T23:39:42.000Z
|
cancat/vstruct/constants/__init__.py
|
TomSomerville/CanCatFork
|
3a3f684c794fd93daa722992b727fd9522b89ae6
|
[
"BSD-2-Clause"
] | 18
|
2016-10-21T14:40:10.000Z
|
2022-01-08T01:44:19.000Z
|
cancat/vstruct/constants/__init__.py
|
TomSomerville/CanCatFork
|
3a3f684c794fd93daa722992b727fd9522b89ae6
|
[
"BSD-2-Clause"
] | 37
|
2015-06-22T11:45:00.000Z
|
2022-02-09T05:30:24.000Z
|
class VSConstResolver:
def __init__(self):
self.rev_lookup = {}
self.const_lookup = {}
def clearAll(self):
self.rev_lookup = {}
self.const_lookup = {}
def addModule(self, mod):
for name in dir(mod):
val = getattr(mod, name)
            if type(val) is not int:
continue
# First lets add the "reverse" lookup
revs = self.rev_lookup.get(val)
            if revs is None:
revs = []
self.rev_lookup[val] = revs
revs.append(name)
# Now the forward....
self.const_lookup[name] = val
def constLookup(self, name):
return self.const_lookup.get(name)
def revLookup(self, const):
'''
        Look up the possible names of a constant based on
        modules added with addModule().
'''
return self.rev_lookup.get(const)
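# A minimal usage sketch; the errno module is used here only as an example source
# of integer constants.
def _example_const_resolver():
    import errno
    resolver = VSConstResolver()
    resolver.addModule(errno)
    return resolver.constLookup('ENOENT'), resolver.revLookup(errno.ENOENT)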
| 25.378378
| 56
| 0.529286
|
348c8c1e4d3e990fd92abab7292a0e154457d83b
| 410
|
py
|
Python
|
template/mod_corh-suite2.py
|
Bafomet666/Bigbro
|
ae41c1f48b38e28e3e688cfe02ad5152147521f4
|
[
"BSD-2-Clause"
] | 48
|
2020-10-10T17:09:23.000Z
|
2022-03-22T10:42:16.000Z
|
template/mod_corh-suite2.py
|
Bafomet666/Bigbro
|
ae41c1f48b38e28e3e688cfe02ad5152147521f4
|
[
"BSD-2-Clause"
] | 3
|
2021-01-15T06:07:41.000Z
|
2022-02-11T09:50:53.000Z
|
template/mod_corh-suite2.py
|
Bafomet666/Bigbro
|
ae41c1f48b38e28e3e688cfe02ad5152147521f4
|
[
"BSD-2-Clause"
] | 12
|
2020-10-16T22:48:16.000Z
|
2021-11-16T01:06:10.000Z
|
#!/usr/bin/env python3
R = '\033[31m' # red
G = '\033[32m' # green
C = '\033[36m' # cyan
W = '\033[0m' # white
redirect = input(G + '[+]' + C + ' Run? yes / no : ' + W)
with open('template/corh-suite2/js/location_temp.js', 'r') as js:
reader = js.read()
update = reader.replace('REDIRECT_URL', redirect)
with open('template/corh-suite2/js/location.js', 'w') as js_update:
js_update.write(update)
| 27.333333
| 67
| 0.631707
|
4108379a794f2449cb1f2a20d1aa34d0cad7cf13
| 5,157
|
py
|
Python
|
templates/cbs-tools/scripts/fas_perms_to_koji.py
|
CentOS/ansible-role-kojihub
|
6421d5ac81f0f86223e57eaecd93231ff798ebbd
|
[
"MIT"
] | null | null | null |
templates/cbs-tools/scripts/fas_perms_to_koji.py
|
CentOS/ansible-role-kojihub
|
6421d5ac81f0f86223e57eaecd93231ff798ebbd
|
[
"MIT"
] | 7
|
2020-03-20T14:29:14.000Z
|
2021-06-01T16:24:38.000Z
|
templates/cbs-tools/scripts/fas_perms_to_koji.py
|
CentOS/ansible-role-kojihub
|
6421d5ac81f0f86223e57eaecd93231ff798ebbd
|
[
"MIT"
] | 5
|
2020-03-20T14:16:31.000Z
|
2021-06-01T16:07:45.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015, Thomas Oulevey <thomas.oulevey@cern.ch>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This script reads from a file, group information generated by FAS and sync
# it with koji
# No command line argument, options are hardcoded at this time.
import koji
import os.path
import sys
from collections import defaultdict
KOJI_URL = '{{ koji_hub_url }}'
CLIENT_CERT = os.path.expanduser('/etc/pki/koji/{{ koji_admin_pem }}')
CLIENTCA_CERT = os.path.expanduser('/etc/pki/koji/{{ koji_hub_cacert }}')
SERVERCA_CERT = os.path.expanduser('/etc/pki/ca-trust/extracted/openssl/ca-bundle.trust.crt')
FASDUMP = '/etc/bsadmin/groups'
SYSTEM_USERS = [{% for user in koji_system_users -%} '{{ user }}', {% endfor %}]
IMAGE_PERM = ['virt', 'cloud', 'atomic', 'cloudinstance', 'hyperscale']
def get_user_list():
users = [(x['name'], x['id']) for x in kojiclient.listUsers()]
return users if len(users) else None
def get_user(user):
user = kojiclient.getUser(user)
return user
def get_user_perms(user):
perms = kojiclient.getUserPerms(user[1])
return perms
def get_users_perms():
userlist = defaultdict(list)
for user in get_user_list():
userlist[user[0]] = get_user_perms(user)
return userlist if len(userlist) else None
def get_user_perms_from_file(user):
perms = get_users_perms_from_file()
return perms[user]
def get_all_defined_perms():
perms = []
for perm in kojiclient.getAllPerms():
perms.append(perm['name'])
return perms
def get_users_perms_from_file():
userlist = defaultdict(list)
try:
groups = open(FASDUMP, 'r')
    except Exception:
return None
for line in groups.readlines():
sig, users = line.strip('\n').split(':')
for user in users.replace(" ", "").split(','):
perm = "build-"+sig
userlist[user].append(perm)
userlist[user].append('build')
if sig in IMAGE_PERM:
userlist[user].append('image')
userlist[user].append('livecd')
userlist[user].append('appliance')
userlist[user].append('livemedia')
return userlist if len(userlist) else None
def fix_permissions(new, old):
usernames = list(set(new)|set(old))
# Do not touch system users
usernames = [u for u in usernames if u not in SYSTEM_USERS]
for username in usernames:
togrant = list(set(new[username]) - set(old[username]))
torevoke = list(set(old[username]) - set(new[username]))
user = get_user(username)
if togrant or torevoke:
print("\n# user:%s\n# NEW perms:%s\n# OLD perms:%s \
\n# To grant:%s\n# To revoke:%s" \
% (user, new[username], old[username], togrant, torevoke))
if not user:
# Create user if it doesn't exist yet
user = kojiclient.createUser(username)
# Always grant "build" permission for building from srpm
kojiclient.grantPermission(username, 'build')
for perm in togrant:
if perm in get_all_defined_perms():
kojiclient.grantPermission(username, perm)
for perm in torevoke:
if perm in get_all_defined_perms():
kojiclient.revokePermission(username, perm)
if __name__ == '__main__':
try:
kojiclient = koji.ClientSession(KOJI_URL)
kojiclient.ssl_login(CLIENT_CERT, CLIENTCA_CERT, SERVERCA_CERT)
    except Exception:
print("Could not connect to koji API")
sys.exit(2)
fas_perms = get_users_perms_from_file()
koji_perms = get_users_perms()
if not fas_perms:
print("Could not read %s file." % FASDUMP)
sys.exit(1)
if not koji_perms:
print("Could not read koji's user database")
sys.exit(2)
fix_permissions(fas_perms, koji_perms)
sys.exit(0)
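# A short illustration of the FASDUMP format consumed by get_users_perms_from_file():
# one "<sig>:<user>, <user>, ..." entry per line, for example
#
#   virt:alice, bob
#   storage:carol
#
# Here alice and bob would receive 'build-virt' and 'build', plus the image-related
# permissions, because 'virt' appears in IMAGE_PERM; carol would receive
# 'build-storage' and 'build'. The group and user names are hypothetical.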
| 37.100719
| 93
| 0.675199
|
761477cd2c8d4711f0f61a2caee826f927df49d1
| 1,420
|
py
|
Python
|
py/sqlclient.py
|
phil155/IDS_IoT
|
e79172a33db36ca08c7807fe2b439b78543c8126
|
[
"MIT"
] | null | null | null |
py/sqlclient.py
|
phil155/IDS_IoT
|
e79172a33db36ca08c7807fe2b439b78543c8126
|
[
"MIT"
] | null | null | null |
py/sqlclient.py
|
phil155/IDS_IoT
|
e79172a33db36ca08c7807fe2b439b78543c8126
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import time
import json
from constants import *
from crate.client import connect
def count_rows(cursor, devaddr):
cursor.execute(f"SELECT count(*) FROM {TABLE_SENSORS} WHERE devaddr = {devaddr}")
return cursor.fetchall()
def main(a):
connection = connect(NODEURL)
cursor = connection.cursor()
devaddr = str(a[1])
tmst_actual = int(a[2])
tmst_last = int(a[11])
tmst_dif = int(a[12])
count = int(a[13])
print("____SQL_client")
print(devaddr)
#print(tmst_actual)
time_dif = 0
if count == 1:
tmst_dif = tmst_last
elif count == 2:
time.sleep(2)
cursor.execute(f"DELETE FROM {TABLE_SENSORS} WHERE tmst = {tmst_last}")
message = json.dumps({"latitude": float(a[3]),
"longitude": float(a[4]),
"sf": int(a[5]),
"bw": int(a[6]),
"lsnr": float(a[7]),
"rssi": float(a[8]),
"lenpayload": int(a[9]),
"payload": str(a[10]),
"tmst": tmst_actual,
"tmst_dif":tmst_dif,
"flag": 0
})
#print(message)
cursor.execute("INSERT INTO sensors (devaddr, tmst, message) VALUES (?,?,?)", (devaddr, tmst_actual, message))
connection.close()
if __name__ == "__main__":
if len(sys.argv) == 1:
pass
else:
main(sys.argv)
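# A usage note rather than runnable code: main() expects 14 positional argv values
# in the order read above (script name, devaddr, tmst_actual, latitude, longitude,
# sf, bw, lsnr, rssi, lenpayload, payload, tmst_last, tmst_dif, count). Concrete
# values depend on the caller, so none are invented here.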
| 25.818182
| 114
| 0.547183
|
2e00b768f8b79bbaf7662ab151b803a3c1bafea6
| 3,006
|
py
|
Python
|
Train/train.py
|
Mythologyli/Take-out-Customer-Service-Robot
|
fbbabfcc4d32826148cd9681eed6ae1fa635607a
|
[
"MIT"
] | null | null | null |
Train/train.py
|
Mythologyli/Take-out-Customer-Service-Robot
|
fbbabfcc4d32826148cd9681eed6ae1fa635607a
|
[
"MIT"
] | null | null | null |
Train/train.py
|
Mythologyli/Take-out-Customer-Service-Robot
|
fbbabfcc4d32826148cd9681eed6ae1fa635607a
|
[
"MIT"
] | null | null | null |
import jieba
import numpy as np
import pandas as pd
import jieba.analyse
import tensorflow.keras as keras
from keras.callbacks import History
from keras.models import Sequential
from keras.layers import Embedding
from keras.layers import Conv1D
from keras.layers import MaxPooling1D
from keras.layers import LSTM
from keras.layers import Dense
from matplotlib import pyplot as plt
def padding(word_sequences_list: list, max_length: int) -> np.ndarray:
res = []
for text in word_sequences_list:
if len(text) > max_length:
text = text[:max_length]
else:
text = text + [0 for i in range(max_length - len(text))]
res.append(text)
return np.array(res)
if __name__ == '__main__':
    # Load the balanced take-out review dataset
data_all_df = pd.read_csv('./Data/data_balance.csv', index_col=None)
sentence_list = []
label_list = []
    # Convert the dataframe rows into sentence and label lists
for sentence, label in zip(data_all_df['review'], data_all_df['label']):
sentence_list.append(sentence)
label_list.append(label)
    # Segment the sentences into words with jieba
sentence_list = [".".join(jieba.cut(sentence, cut_all=False))
for sentence in sentence_list]
    # Serialize the text using the saved vocabulary (tokenizer)
json_string = open('../Result/word.json', 'r', encoding='utf-8').read()
word_processor = keras.preprocessing.text.tokenizer_from_json(json_string)
word_sequences_list = word_processor.texts_to_sequences(sentence_list)
    # Truncate or pad to a fixed length
word_sequences_processed_list = padding(word_sequences_list, 40)
    # Validation split ratio and size
val_split = 0.2
val_counts = int(val_split * len(label_list))
    # Split off the validation set
val_x = word_sequences_processed_list[-val_counts:]
val_y = np.array(label_list[-val_counts:])
train_x = word_sequences_processed_list[:-val_counts]
train_y = np.array(label_list[:-val_counts])
    # Choose the model and build the network
    model = Sequential()
model.add(Embedding(20000, 32))
model.add(Conv1D(filters=32,
kernel_size=3,
padding='same',
activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
    # Train the model
history: History = model.fit(train_x,
train_y,
batch_size=64,
epochs=5,
validation_data=(val_x, val_y))
    # Save the model
model.save('../Result/model')
    # Plot the training history
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend(loc='upper right')
plt.show()
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.legend()
plt.show()
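# A minimal sketch of the padding helper defined above; the sequences are illustrative.
def _example_padding():
    padded = padding([[5, 9, 2], [7]], max_length=4)
    return padded.shape  # -> (2, 4); rows become [5, 9, 2, 0] and [7, 0, 0, 0]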
| 29.470588
| 78
| 0.634398
|
7ab02c6e4913053b9c0ee42d1b80491840cd7712
| 611
|
py
|
Python
|
nnfs/optimizers.py
|
tblut/NNFS
|
75320c546043bc74f368a7a6edcd8bb70aa90dc4
|
[
"MIT"
] | null | null | null |
nnfs/optimizers.py
|
tblut/NNFS
|
75320c546043bc74f368a7a6edcd8bb70aa90dc4
|
[
"MIT"
] | null | null | null |
nnfs/optimizers.py
|
tblut/NNFS
|
75320c546043bc74f368a7a6edcd8bb70aa90dc4
|
[
"MIT"
] | null | null | null |
import numpy as np
class SGD:
def __init__(self, lr=0.01, momentum=0.0):
self.lr = lr
self.momentum = momentum
self.v = None
def apply_gradients(self, parameters):
if self.momentum > 0.0:
            if self.v is None:
self.v = [np.zeros(p.shape) for p in parameters]
for index, param in enumerate(parameters):
self.v[index] = self.momentum * self.v[index] - self.lr * param.grad
param.value += self.v[index]
else:
for param in parameters:
param.value -= self.lr * param.grad
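# A minimal sketch of applying the optimizer; the Param class below is a stand-in
# for the library's parameter objects and only mimics the .value/.grad/.shape
# attributes that apply_gradients relies on.
def _example_sgd_step():
    class Param:
        def __init__(self, value, grad):
            self.value = value
            self.grad = grad
            self.shape = value.shape
    p = Param(np.array([1.0, 2.0]), np.array([0.1, 0.1]))
    SGD(lr=0.5, momentum=0.9).apply_gradients([p])
    return p.value  # -> array([0.95, 1.95])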
| 30.55
| 84
| 0.543372
|
840f0a94ca3277b8e4ad00335c99911be217b828
| 8,060
|
py
|
Python
|
InvenTree/part/test_views.py
|
mtrazakhan/invent
|
dfcb8209855f566b8bd5a23e8bd3d5d1b726beaf
|
[
"MIT"
] | null | null | null |
InvenTree/part/test_views.py
|
mtrazakhan/invent
|
dfcb8209855f566b8bd5a23e8bd3d5d1b726beaf
|
[
"MIT"
] | 8
|
2020-06-06T01:14:46.000Z
|
2022-03-12T00:14:35.000Z
|
InvenTree/part/test_views.py
|
mtrazakhan/invent
|
dfcb8209855f566b8bd5a23e8bd3d5d1b726beaf
|
[
"MIT"
] | null | null | null |
""" Unit tests for Part Views (see views.py) """
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import Part
class PartViewTestCase(TestCase):
fixtures = [
'category',
'part',
'bom',
'location',
'company',
'supplier_part',
]
def setUp(self):
super().setUp()
# Create a user
User = get_user_model()
User.objects.create_user('username', 'user@email.com', 'password')
self.client.login(username='username', password='password')
class PartListTest(PartViewTestCase):
def test_part_index(self):
response = self.client.get(reverse('part-index'))
self.assertEqual(response.status_code, 200)
keys = response.context.keys()
self.assertIn('csrf_token', keys)
self.assertIn('parts', keys)
self.assertIn('user', keys)
def test_export(self):
""" Export part data to CSV """
response = self.client.get(reverse('part-export'), {'parts': '1,2,3,4,5,6,7,8,9,10'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertIn('streaming_content', dir(response))
class PartDetailTest(PartViewTestCase):
def test_part_detail(self):
""" Test that we can retrieve a part detail page """
pk = 1
response = self.client.get(reverse('part-detail', args=(pk,)))
self.assertEqual(response.status_code, 200)
part = Part.objects.get(pk=pk)
keys = response.context.keys()
self.assertIn('part', keys)
self.assertIn('category', keys)
self.assertEqual(response.context['part'].pk, pk)
self.assertEqual(response.context['category'], part.category)
self.assertFalse(response.context['editing_enabled'])
def test_editable(self):
pk = 1
response = self.client.get(reverse('part-detail', args=(pk,)), {'edit': True})
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context['editing_enabled'])
def test_bom_download(self):
""" Test downloading a BOM for a valid part """
response = self.client.get(reverse('bom-export', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
self.assertIn('streaming_content', dir(response))
class PartTests(PartViewTestCase):
""" Tests for Part forms """
def test_part_edit(self):
response = self.client.get(reverse('part-edit', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
keys = response.context.keys()
data = str(response.content)
self.assertIn('part', keys)
self.assertIn('csrf_token', keys)
self.assertIn('html_form', data)
self.assertIn('"title":', data)
def test_part_create(self):
""" Launch form to create a new part """
response = self.client.get(reverse('part-create'), {'category': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
# And again, with an invalid category
response = self.client.get(reverse('part-create'), {'category': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
# And again, with no category
response = self.client.get(reverse('part-create'), {'name': 'Test part'}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
def test_part_duplicate(self):
""" Launch form to duplicate part """
# First try with an invalid part
response = self.client.get(reverse('part-duplicate', args=(9999,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('part-duplicate', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
def test_make_variant(self):
response = self.client.get(reverse('make-part-variant', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
class PartAttachmentTests(PartViewTestCase):
def test_valid_create(self):
""" test creation of an attachment for a valid part """
response = self.client.get(reverse('part-attachment-create'), {'part': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
def test_invalid_create(self):
""" test creation of an attachment for an invalid part """
# TODO
pass
def test_edit(self):
""" test editing an attachment """
# TODO
pass
class PartQRTest(PartViewTestCase):
""" Tests for the Part QR Code AJAX view """
def test_html_redirect(self):
        # An HTML request for a QR code should be redirected (use an AJAX request instead)
response = self.client.get(reverse('part-qr', args=(1,)))
self.assertEqual(response.status_code, 302)
def test_valid_part(self):
response = self.client.get(reverse('part-qr', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
data = str(response.content)
self.assertIn('Part QR Code', data)
self.assertIn('<img class=', data)
def test_invalid_part(self):
response = self.client.get(reverse('part-qr', args=(9999,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
data = str(response.content)
self.assertIn('Error:', data)
class CategoryTest(PartViewTestCase):
""" Tests for PartCategory related views """
def test_create(self):
""" Test view for creating a new category """
response = self.client.get(reverse('category-create'), {'category': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
def test_create_invalid_parent(self):
""" test creation of a new category with an invalid parent """
response = self.client.get(reverse('category-create'), {'category': 9999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
# Form should still return OK
self.assertEqual(response.status_code, 200)
def test_edit(self):
""" Retrieve the part category editing form """
response = self.client.get(reverse('category-edit', args=(1,)), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
def test_set_category(self):
""" Test that the "SetCategory" view works """
url = reverse('part-set-category')
response = self.client.get(url, {'parts[]': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
data = {
'part_id_10': True,
'part_id_1': True,
'part_category': 5
}
response = self.client.post(url, data, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
class BomItemTests(PartViewTestCase):
""" Tests for BomItem related views """
def test_create_valid_parent(self):
""" Create a BomItem for a valid part """
response = self.client.get(reverse('bom-item-create'), {'parent': 1}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
def test_create_no_parent(self):
""" Create a BomItem without a parent """
response = self.client.get(reverse('bom-item-create'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
def test_create_invalid_parent(self):
""" Create a BomItem with an invalid parent """
response = self.client.get(reverse('bom-item-create'), {'parent': 99999}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response.status_code, 200)
| 34.152542
| 133
| 0.655459
|
7976b4f7c5209ee6c8a8839f3e86a77958333d08
| 29,107
|
py
|
Python
|
sphinx/builders/_epub_base.py
|
shimizukawa/sphinx
|
359fc3c7998e057bdb7884f20f5745efd53da49a
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/builders/_epub_base.py
|
shimizukawa/sphinx
|
359fc3c7998e057bdb7884f20f5745efd53da49a
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/builders/_epub_base.py
|
shimizukawa/sphinx
|
359fc3c7998e057bdb7884f20f5745efd53da49a
|
[
"BSD-2-Clause"
] | null | null | null |
"""
sphinx.builders._epub_base
~~~~~~~~~~~~~~~~~~~~~~~~~~
Base class of epub2/epub3 builders.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import html
import os
import re
from os import path
from typing import Any, Dict, List, NamedTuple, Set, Tuple
from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.utils import smartquotes
from sphinx import addnodes
from sphinx.builders.html import BuildInfo, StandaloneHTMLBuilder
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util import status_iterator
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.i18n import format_date
from sphinx.util.osutil import ensuredir, copyfile
try:
from PIL import Image
except ImportError:
Image = None
logger = logging.getLogger(__name__)
# (Fragment) templates from which the metainfo files content.opf and
# toc.ncx are created.
# This template section also defines strings that are embedded in the html
# output but that may be customized by (re-)setting module attributes,
# e.g. from conf.py.
COVERPAGE_NAME = 'epub-cover.xhtml'
TOCTREE_TEMPLATE = 'toctree-l%d'
LINK_TARGET_TEMPLATE = ' [%(uri)s]'
FOOTNOTE_LABEL_TEMPLATE = '#%d'
FOOTNOTES_RUBRIC_NAME = 'Footnotes'
CSS_LINK_TARGET_CLASS = 'link-target'
# XXX These strings should be localized according to epub_language
GUIDE_TITLES = {
'toc': 'Table of Contents',
'cover': 'Cover'
}
MEDIA_TYPES = {
'.xhtml': 'application/xhtml+xml',
'.css': 'text/css',
'.png': 'image/png',
'.gif': 'image/gif',
'.svg': 'image/svg+xml',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
'.woff': 'application/font-woff',
}
VECTOR_GRAPHICS_EXTENSIONS = ('.svg',)
# Regular expression to match colons only in local fragment identifiers.
# If the URI contains a colon before the #,
# it is an external link that should not change.
REFURI_RE = re.compile("([^#:]*#)(.*)")
class ManifestItem(NamedTuple):
href: str
id: str
media_type: str
class Spine(NamedTuple):
idref: str
linear: bool
class Guide(NamedTuple):
type: str
title: str
uri: str
class NavPoint(NamedTuple):
navpoint: str
playorder: int
text: str
refuri: str
children: List[Any] # mypy does not support recursive types
# https://github.com/python/mypy/issues/7069
def sphinx_smarty_pants(t: str, language: str = 'en') -> str:
t = t.replace('"', '"')
t = smartquotes.educateDashesOldSchool(t)
t = smartquotes.educateQuotes(t, language)
t = t.replace('"', '"')
return t
ssp = sphinx_smarty_pants
# The epub publisher
class EpubBuilder(StandaloneHTMLBuilder):
"""
Builder that outputs epub files.
It creates the metainfo files container.opf, toc.ncx, mimetype, and
META-INF/container.xml. Afterwards, all necessary files are zipped to an
epub file.
"""
# don't copy the reST source
copysource = False
supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
'image/jpeg']
supported_remote_images = False
# don't add links
add_permalinks = False
# don't use # as current path. ePub check reject it.
allow_sharp_as_current_path = False
# don't add sidebar etc.
embedded = True
# disable download role
download_support = False
    # don't create links to original images from images
html_scaled_image_link = False
# don't generate search index or include search page
search = False
coverpage_name = COVERPAGE_NAME
toctree_template = TOCTREE_TEMPLATE
link_target_template = LINK_TARGET_TEMPLATE
css_link_target_class = CSS_LINK_TARGET_CLASS
guide_titles = GUIDE_TITLES
media_types = MEDIA_TYPES
refuri_re = REFURI_RE
template_dir = ""
doctype = ""
def init(self) -> None:
super().init()
# the output files for epub must be .html only
self.out_suffix = '.xhtml'
self.link_suffix = '.xhtml'
self.playorder = 0
self.tocid = 0
self.id_cache = {} # type: Dict[str, str]
self.use_index = self.get_builder_config('use_index', 'epub')
self.refnodes = [] # type: List[Dict[str, Any]]
def create_build_info(self) -> BuildInfo:
return BuildInfo(self.config, self.tags, ['html', 'epub'])
def get_theme_config(self) -> Tuple[str, Dict]:
return self.config.epub_theme, self.config.epub_theme_options
# generic support functions
def make_id(self, name: str) -> str:
# id_cache is intentionally mutable
"""Return a unique id for name."""
id = self.id_cache.get(name)
if not id:
id = 'epub-%d' % self.env.new_serialno('epub')
self.id_cache[name] = id
return id
def get_refnodes(self, doctree: Node, result: List[Dict[str, Any]]) -> List[Dict[str, Any]]: # NOQA
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
if isinstance(doctree, nodes.reference) and doctree.get('refuri'):
refuri = doctree['refuri']
if refuri.startswith('http://') or refuri.startswith('https://') \
or refuri.startswith('irc:') or refuri.startswith('mailto:'):
return result
classes = doctree.parent.attributes['classes']
for level in range(8, 0, -1): # or range(1, 8)?
if (self.toctree_template % level) in classes:
result.append({
'level': level,
'refuri': html.escape(refuri),
'text': ssp(html.escape(doctree.astext()))
})
break
elif isinstance(doctree, nodes.Element):
for elem in doctree:
result = self.get_refnodes(elem, result)
return result
def check_refnodes(self, nodes: List[Dict[str, Any]]) -> None:
appeared = set() # type: Set[str]
for node in nodes:
if node['refuri'] in appeared:
logger.warning(__('duplicated ToC entry found: %s'), node['refuri'])
else:
appeared.add(node['refuri'])
def get_toc(self) -> None:
"""Get the total table of contents, containing the master_doc
and pre and post files not managed by sphinx.
"""
doctree = self.env.get_and_resolve_doctree(self.config.master_doc,
self, prune_toctrees=False,
includehidden=True)
self.refnodes = self.get_refnodes(doctree, [])
master_dir = path.dirname(self.config.master_doc)
if master_dir:
master_dir += '/' # XXX or os.sep?
for item in self.refnodes:
item['refuri'] = master_dir + item['refuri']
self.toc_add_files(self.refnodes)
def toc_add_files(self, refnodes: List[Dict[str, Any]]) -> None:
"""Add the master_doc, pre and post files to a list of refnodes.
"""
refnodes.insert(0, {
'level': 1,
'refuri': html.escape(self.config.master_doc + self.out_suffix),
'text': ssp(html.escape(
self.env.titles[self.config.master_doc].astext()))
})
for file, text in reversed(self.config.epub_pre_files):
refnodes.insert(0, {
'level': 1,
'refuri': html.escape(file),
'text': ssp(html.escape(text))
})
for file, text in self.config.epub_post_files:
refnodes.append({
'level': 1,
'refuri': html.escape(file),
'text': ssp(html.escape(text))
})
def fix_fragment(self, prefix: str, fragment: str) -> str:
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
def fix_ids(self, tree: nodes.document) -> None:
"""Replace colons with hyphens in href and id attributes.
Some readers crash because they interpret the part as a
transport protocol specification.
"""
def update_node_id(node: Element) -> None:
"""Update IDs of given *node*."""
new_ids = []
for node_id in node['ids']:
new_id = self.fix_fragment('', node_id)
if new_id not in new_ids:
new_ids.append(new_id)
node['ids'] = new_ids
for reference in tree.traverse(nodes.reference):
if 'refuri' in reference:
m = self.refuri_re.match(reference['refuri'])
if m:
reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))
if 'refid' in reference:
reference['refid'] = self.fix_fragment('', reference['refid'])
for target in tree.traverse(nodes.target):
update_node_id(target)
next_node = target.next_node(ascend=True) # type: Node
if isinstance(next_node, nodes.Element):
update_node_id(next_node)
for desc_signature in tree.traverse(addnodes.desc_signature):
update_node_id(desc_signature)
def add_visible_links(self, tree: nodes.document, show_urls: str = 'inline') -> None:
"""Add visible link targets for external links"""
def make_footnote_ref(doc: nodes.document, label: str) -> nodes.footnote_reference:
"""Create a footnote_reference node with children"""
footnote_ref = nodes.footnote_reference('[#]_')
footnote_ref.append(nodes.Text(label))
doc.note_autofootnote_ref(footnote_ref)
return footnote_ref
def make_footnote(doc: nodes.document, label: str, uri: str) -> nodes.footnote:
"""Create a footnote node with children"""
footnote = nodes.footnote(uri)
para = nodes.paragraph()
para.append(nodes.Text(uri))
footnote.append(para)
footnote.insert(0, nodes.label('', label))
doc.note_autofootnote(footnote)
return footnote
def footnote_spot(tree: nodes.document) -> Tuple[Element, int]:
"""Find or create a spot to place footnotes.
The function returns the tuple (parent, index)."""
# The code uses the following heuristic:
# a) place them after the last existing footnote
# b) place them after an (empty) Footnotes rubric
# c) create an empty Footnotes rubric at the end of the document
fns = tree.traverse(nodes.footnote)
if fns:
fn = fns[-1]
return fn.parent, fn.parent.index(fn) + 1
for node in tree.traverse(nodes.rubric):
if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:
return node.parent, node.parent.index(node) + 1
doc = tree.traverse(nodes.document)[0]
rub = nodes.rubric()
rub.append(nodes.Text(FOOTNOTES_RUBRIC_NAME))
doc.append(rub)
return doc, doc.index(rub) + 1
if show_urls == 'no':
return
if show_urls == 'footnote':
doc = tree.traverse(nodes.document)[0]
fn_spot, fn_idx = footnote_spot(tree)
nr = 1
for node in tree.traverse(nodes.reference):
uri = node.get('refuri', '')
if (uri.startswith('http:') or uri.startswith('https:') or
uri.startswith('ftp:')) and uri not in node.astext():
idx = node.parent.index(node) + 1
if show_urls == 'inline':
uri = self.link_target_template % {'uri': uri}
link = nodes.inline(uri, uri)
link['classes'].append(self.css_link_target_class)
node.parent.insert(idx, link)
elif show_urls == 'footnote':
label = FOOTNOTE_LABEL_TEMPLATE % nr
nr += 1
footnote_ref = make_footnote_ref(doc, label)
node.parent.insert(idx, footnote_ref)
footnote = make_footnote(doc, label, uri)
fn_spot.insert(fn_idx, footnote)
footnote_ref['refid'] = footnote['ids'][0]
footnote.add_backref(footnote_ref['ids'][0])
fn_idx += 1
def write_doc(self, docname: str, doctree: nodes.document) -> None:
"""Write one document file.
This method is overwritten in order to fix fragment identifiers
and to add visible external links.
"""
self.fix_ids(doctree)
self.add_visible_links(doctree, self.config.epub_show_urls)
super().write_doc(docname, doctree)
def fix_genindex(self, tree: List[Tuple[str, List[Tuple[str, Any]]]]) -> None:
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
for key, columns in tree:
for entryname, (links, subitems, key_) in columns:
for (i, (ismain, link)) in enumerate(links):
m = self.refuri_re.match(link)
if m:
links[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
for subentryname, subentrylinks in subitems:
for (i, (ismain, link)) in enumerate(subentrylinks):
m = self.refuri_re.match(link)
if m:
subentrylinks[i] = (ismain,
self.fix_fragment(m.group(1), m.group(2)))
def is_vector_graphics(self, filename: str) -> bool:
"""Does the filename extension indicate a vector graphic format?"""
ext = path.splitext(filename)[-1]
return ext in VECTOR_GRAPHICS_EXTENSIONS
def copy_image_files_pil(self) -> None:
"""Copy images using Pillow, the Python Imaging Libary.
The method tries to read and write the files with Pillow, converting
the format and resizing the image if necessary/possible.
"""
ensuredir(path.join(self.outdir, self.imagedir))
for src in status_iterator(self.images, __('copying images... '), "brown",
len(self.images), self.app.verbosity):
dest = self.images[src]
try:
img = Image.open(path.join(self.srcdir, src))
except OSError:
if not self.is_vector_graphics(src):
logger.warning(__('cannot read image file %r: copying it instead'),
path.join(self.srcdir, src))
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
except OSError as err:
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
continue
if self.config.epub_fix_images:
if img.mode in ('P',):
# See the Pillow documentation for Image.convert()
# https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert
img = img.convert()
if self.config.epub_max_image_width > 0:
(width, height) = img.size
nw = self.config.epub_max_image_width
if width > nw:
nh = (height * nw) / width
img = img.resize((nw, nh), Image.BICUBIC)
try:
img.save(path.join(self.outdir, self.imagedir, dest))
except OSError as err:
logger.warning(__('cannot write image file %r: %s'),
path.join(self.srcdir, src), err)
def copy_image_files(self) -> None:
"""Copy image files to destination directory.
This overwritten method can use Pillow to convert image files.
"""
if self.images:
if self.config.epub_fix_images or self.config.epub_max_image_width:
if not Image:
logger.warning(__('Pillow not found - copying image files'))
super().copy_image_files()
else:
self.copy_image_files_pil()
else:
super().copy_image_files()
def copy_download_files(self) -> None:
pass
def handle_page(self, pagename: str, addctx: Dict, templatename: str = 'page.html',
outfilename: str = None, event_arg: Any = None) -> None:
"""Create a rendered page.
This method is overwritten for genindex pages in order to fix href link
attributes.
"""
if pagename.startswith('genindex') and 'genindexentries' in addctx:
if not self.use_index:
return
self.fix_genindex(addctx['genindexentries'])
addctx['doctype'] = self.doctype
super().handle_page(pagename, addctx, templatename, outfilename, event_arg)
def build_mimetype(self) -> None:
"""Write the metainfo file mimetype."""
logger.info(__('writing mimetype file...'))
copy_asset_file(path.join(self.template_dir, 'mimetype'), self.outdir)
def build_container(self, outname: str = 'META-INF/container.xml') -> None: # NOQA
"""Write the metainfo file META-INF/container.xml."""
logger.info(__('writing META-INF/container.xml file...'))
outdir = path.join(self.outdir, 'META-INF')
ensuredir(outdir)
copy_asset_file(path.join(self.template_dir, 'container.xml'), outdir)
def content_metadata(self) -> Dict[str, Any]:
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
metadata = {} # type: Dict[str, Any]
metadata['title'] = html.escape(self.config.epub_title)
metadata['author'] = html.escape(self.config.epub_author)
metadata['uid'] = html.escape(self.config.epub_uid)
metadata['lang'] = html.escape(self.config.epub_language)
metadata['publisher'] = html.escape(self.config.epub_publisher)
metadata['copyright'] = html.escape(self.config.epub_copyright)
metadata['scheme'] = html.escape(self.config.epub_scheme)
metadata['id'] = html.escape(self.config.epub_identifier)
metadata['date'] = html.escape(format_date("%Y-%m-%d"))
metadata['manifest_items'] = []
metadata['spines'] = []
metadata['guides'] = []
return metadata
def build_content(self) -> None:
"""Write the metainfo file content.opf It contains bibliographic data,
a file list and the spine (the reading order).
"""
logger.info(__('writing content.opf file...'))
metadata = self.content_metadata()
# files
if not self.outdir.endswith(os.sep):
self.outdir += os.sep
olen = len(self.outdir)
self.files = [] # type: List[str]
self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
'toc.ncx', 'META-INF/container.xml',
'Thumbs.db', 'ehthumbs.db', '.DS_Store',
'nav.xhtml', self.config.epub_basename + '.epub'] + \
self.config.epub_exclude_files
if not self.use_index:
self.ignored_files.append('genindex' + self.out_suffix)
for root, dirs, files in os.walk(self.outdir):
dirs.sort()
for fn in sorted(files):
filename = path.join(root, fn)[olen:]
if filename in self.ignored_files:
continue
ext = path.splitext(filename)[-1]
if ext not in self.media_types:
# we always have JS and potentially OpenSearch files, don't
# always warn about them
if ext not in ('.js', '.xml'):
logger.warning(__('unknown mimetype for %s, ignoring'), filename,
type='epub', subtype='unknown_project_files')
continue
filename = filename.replace(os.sep, '/')
item = ManifestItem(html.escape(filename),
html.escape(self.make_id(filename)),
html.escape(self.media_types[ext]))
metadata['manifest_items'].append(item)
self.files.append(filename)
# spine
spinefiles = set()
for refnode in self.refnodes:
if '#' in refnode['refuri']:
continue
if refnode['refuri'] in self.ignored_files:
continue
spine = Spine(html.escape(self.make_id(refnode['refuri'])), True)
metadata['spines'].append(spine)
spinefiles.add(refnode['refuri'])
for info in self.domain_indices:
spine = Spine(html.escape(self.make_id(info[0] + self.out_suffix)), True)
metadata['spines'].append(spine)
spinefiles.add(info[0] + self.out_suffix)
if self.use_index:
spine = Spine(html.escape(self.make_id('genindex' + self.out_suffix)), True)
metadata['spines'].append(spine)
spinefiles.add('genindex' + self.out_suffix)
# add auto generated files
for name in self.files:
if name not in spinefiles and name.endswith(self.out_suffix):
spine = Spine(html.escape(self.make_id(name)), False)
metadata['spines'].append(spine)
# add the optional cover
html_tmpl = None
if self.config.epub_cover:
image, html_tmpl = self.config.epub_cover
image = image.replace(os.sep, '/')
metadata['cover'] = html.escape(self.make_id(image))
if html_tmpl:
spine = Spine(html.escape(self.make_id(self.coverpage_name)), True)
metadata['spines'].insert(0, spine)
if self.coverpage_name not in self.files:
ext = path.splitext(self.coverpage_name)[-1]
self.files.append(self.coverpage_name)
item = ManifestItem(html.escape(self.coverpage_name),
html.escape(self.make_id(self.coverpage_name)),
html.escape(self.media_types[ext]))
metadata['manifest_items'].append(item)
ctx = {'image': html.escape(image), 'title': self.config.project}
self.handle_page(
path.splitext(self.coverpage_name)[0], ctx, html_tmpl)
spinefiles.add(self.coverpage_name)
auto_add_cover = True
auto_add_toc = True
if self.config.epub_guide:
for type, uri, title in self.config.epub_guide:
file = uri.split('#')[0]
if file not in self.files:
self.files.append(file)
if type == 'cover':
auto_add_cover = False
if type == 'toc':
auto_add_toc = False
metadata['guides'].append(Guide(html.escape(type),
html.escape(title),
html.escape(uri)))
if auto_add_cover and html_tmpl:
metadata['guides'].append(Guide('cover',
self.guide_titles['cover'],
html.escape(self.coverpage_name)))
if auto_add_toc and self.refnodes:
metadata['guides'].append(Guide('toc',
self.guide_titles['toc'],
html.escape(self.refnodes[0]['refuri'])))
# write the project file
copy_asset_file(path.join(self.template_dir, 'content.opf_t'), self.outdir, metadata)
def new_navpoint(self, node: Dict[str, Any], level: int, incr: bool = True) -> NavPoint:
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
self.playorder += 1
self.tocid += 1
return NavPoint('navPoint%d' % self.tocid, self.playorder,
node['text'], node['refuri'], [])
def build_navpoints(self, nodes: List[Dict[str, Any]]) -> List[NavPoint]:
"""Create the toc navigation structure.
Subelements of a node are nested inside the navpoint. For nested nodes
the parent node is reinserted in the subnav.
"""
navstack = [] # type: List[NavPoint]
navstack.append(NavPoint('dummy', 0, '', '', []))
level = 0
lastnode = None
for node in nodes:
if not node['text']:
continue
file = node['refuri'].split('#')[0]
if file in self.ignored_files:
continue
if node['level'] > self.config.epub_tocdepth:
continue
if node['level'] == level:
navpoint = self.new_navpoint(node, level)
navstack.pop()
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] == level + 1:
level += 1
if lastnode and self.config.epub_tocdup:
# Insert starting point in subtoc with same playOrder
navstack[-1].children.append(self.new_navpoint(lastnode, level, False))
navpoint = self.new_navpoint(node, level)
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
elif node['level'] < level:
while node['level'] < len(navstack):
navstack.pop()
level = node['level']
navpoint = self.new_navpoint(node, level)
navstack[-1].children.append(navpoint)
navstack.append(navpoint)
else:
raise
lastnode = node
return navstack[0].children
def toc_metadata(self, level: int, navpoints: List[NavPoint]) -> Dict[str, Any]:
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
metadata = {} # type: Dict[str, Any]
metadata['uid'] = self.config.epub_uid
metadata['title'] = html.escape(self.config.epub_title)
metadata['level'] = level
metadata['navpoints'] = navpoints
return metadata
def build_toc(self) -> None:
"""Write the metainfo file toc.ncx."""
logger.info(__('writing toc.ncx file...'))
if self.config.epub_tocscope == 'default':
doctree = self.env.get_and_resolve_doctree(self.config.master_doc,
self, prune_toctrees=False,
includehidden=False)
refnodes = self.get_refnodes(doctree, [])
self.toc_add_files(refnodes)
else:
# 'includehidden'
refnodes = self.refnodes
self.check_refnodes(refnodes)
navpoints = self.build_navpoints(refnodes)
level = max(item['level'] for item in self.refnodes)
level = min(level, self.config.epub_tocdepth)
copy_asset_file(path.join(self.template_dir, 'toc.ncx_t'), self.outdir,
self.toc_metadata(level, navpoints))
def build_epub(self) -> None:
"""Write the epub file.
It is a zip file with the mimetype file stored uncompressed as the first
entry.
"""
outname = self.config.epub_basename + '.epub'
logger.info(__('writing %s file...'), outname)
epub_filename = path.join(self.outdir, outname)
with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub:
epub.write(path.join(self.outdir, 'mimetype'), 'mimetype', ZIP_STORED)
for filename in ['META-INF/container.xml', 'content.opf', 'toc.ncx']:
epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)
for filename in self.files:
epub.write(path.join(self.outdir, filename), filename, ZIP_DEFLATED)
| 41.228045
| 106
| 0.567114
|
884f536139f04a72743d0e389f5e4a32de8b8aa9
| 7,293
|
py
|
Python
|
sumo/tools/build/checkAuthors.py
|
iltempe/osmosi
|
c0f54ecdbb7c7b5602d587768617d0dc50f1d75d
|
[
"MIT"
] | null | null | null |
sumo/tools/build/checkAuthors.py
|
iltempe/osmosi
|
c0f54ecdbb7c7b5602d587768617d0dc50f1d75d
|
[
"MIT"
] | null | null | null |
sumo/tools/build/checkAuthors.py
|
iltempe/osmosi
|
c0f54ecdbb7c7b5602d587768617d0dc50f1d75d
|
[
"MIT"
] | 2
|
2017-12-14T16:41:59.000Z
|
2020-10-16T17:51:27.000Z
|
#!/usr/bin/env python
"""
@file checkAuthors.py
@author Michael Behrisch
@author Daniel Krajzewicz
@date 2011-11-07
@version $Id$
Checks authors for all files.
SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/
Copyright (C) 2011-2017 DLR (http://www.dlr.de/) and contributors
This file is part of SUMO.
SUMO is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import subprocess
import xml.sax
from optparse import OptionParser
_SOURCE_EXT = [".h", ".cpp", ".py", ".pl", ".java"]
class PropertyReader(xml.sax.handler.ContentHandler):
"""Reads the svn properties of files as written by svn log --xml"""
def __init__(self, outfile):
self._out = outfile
self._authors = set()
self._currAuthor = None
self._value = ""
self._revision = None
def startElement(self, name, attrs):
self._value = ""
if name == 'logentry':
self._revision = attrs['revision']
def characters(self, content):
self._value += content
def endElement(self, name):
if name == 'author':
self._currAuthor = realNames.get(self._value, self._value)
if name == "msg":
msg = self._value.lower()
if self._revision in ignoreRevisions:
return
keep = True
ticket = msg.find("#")
while ticket >= 0:
keep = False
e = ticket + 1
while e < len(msg) and msg[e].isdigit():
e += 1
if msg[ticket + 1:e] not in ignoreTickets:
keep = True
break
ticket = msg.find("#", e)
if not keep:
return
if self._currAuthor not in self._authors:
self._authors.add(self._currAuthor)
print("@author", self._currAuthor, file=self._out)
try:
print(msg, file=self._out)
except UnicodeEncodeError:
pass
if self._currAuthor not in authorFiles:
authorFiles[self._currAuthor] = set()
authorFiles[self._currAuthor].add(self._out.name)
if "thank" in msg:
try:
print("THANKS", " ".join(msg.splitlines()), file=self._out)
print("thank %s %s" % (msg, self._out.name), file=log)
except UnicodeEncodeError:
pass
authorFiles["thank"].add(self._out.name)
def checkAuthors(fullName, pattern):
authors = set()
found = False
for line in open(fullName):
if line.startswith(pattern):
for item in line[len(pattern):].split(","):
a = item.strip()
found = True
if a in realNames.values():
authors.add(a)
else:
print("unknown author", a, fullName, file=log)
if not found:
print("no author", fullName, file=log)
return authors
def setAuthors(fullName, removal, add, pattern):
if options.fix:
out = open(fullName + ".tmp", "w")
authors = []
for line in open(fullName):
if line.startswith(pattern):
for item in line[len(pattern):].split(","):
a = item.strip()
if a in removal:
print("author %s not in svn log for %s" % (
a, fullName), file=log)
authors.append(a)
elif authors:
if options.fix:
for a in authors:
print("%s %s" % (pattern, a), file=out)
for a in add:
print("%s %s" % (pattern, a), file=out)
out.write(line)
elif add:
print("need to add author %s to %s" %
(add, fullName), file=log)
authors = []
elif options.fix:
out.write(line)
if options.fix:
out.close()
os.rename(out.name, fullName)
ignoreRevisions = set(["12129", "12128", "11445", "10974", "9705", "9477", "9429", "9348", "8566",
"8439", "8000", "7728", "7533", "6958", "6589", "6537",
"6399", "6069", "5922", "5048", "4669", "4389", "4257", "4166",
"4165", "4084", "4076", "4015", "3966", "3486"])
ignoreTickets = set(["2", "22", "409"])
sumoRoot = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true",
default=False, help="tell me what you are doing")
optParser.add_option("-f", "--fix", action="store_true",
default=False, help="fix invalid svn properties")
optParser.add_option("-a", "--authors", action="store_true",
default=False, help="print author files")
optParser.add_option(
"-r", "--root", default=sumoRoot, help="root to start parsing at")
(options, args) = optParser.parse_args()
authorFiles = {"thank": set()}
realNames = {}
for line in open(os.path.join(sumoRoot, 'AUTHORS')):
entries = line.split()
author = ""
authorDone = False
getAccounts = False
for e in line.split():
if e[0] == "<":
author = author[:-1]
authorDone = True
if not authorDone:
author += e + " "
if e[-1] == ">":
getAccounts = True
if getAccounts:
realNames[e] = author
if author and author not in realNames.values():
realNames[author] = author
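# --- Illustrative note (not part of the original script) ---
# The loop above expects AUTHORS lines shaped roughly like
#   "Jane Doe <jane@example.org> janedoe jdoe"
# (name and accounts are made up); every token after the e-mail is treated as
# an account name, so both "janedoe" and "jdoe" would map to "Jane Doe".
# A line consisting only of a name maps that name to itself.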
log = open(os.path.join(sumoRoot, 'author.log'), "w")
for root, dirs, files in os.walk(options.root):
for name in files:
ext = os.path.splitext(name)[1]
if ext in _SOURCE_EXT:
fullName = os.path.join(root, name)
print("checking authors for", fullName)
if ext in _SOURCE_EXT[:2]:
pattern = "/// @author"
elif ext == ".py":
pattern = "@author"
else:
print("cannot parse for authors", fullName, file=log)
continue
authors = checkAuthors(fullName, pattern)
p = subprocess.Popen(
["svn", "log", "--xml", fullName], stdout=subprocess.PIPE)
output = p.communicate()[0]
if p.returncode == 0:
if options.authors:
out = open(fullName + ".authors", "w")
else:
out = open(os.devnull, "w")
pr = PropertyReader(out)
xml.sax.parseString(output, pr)
out.close()
setAuthors(
fullName, authors - pr._authors, pr._authors - authors, pattern)
for ignoreDir in ['.svn', 'foreign', 'contributed', 'foxtools']:
if ignoreDir in dirs:
dirs.remove(ignoreDir)
print(authorFiles, file=log)
log.close()
| 35.75
| 98
| 0.529686
|
6b44095ceb94a4c308c4dcb98f9d027afeec8bd7
| 4,166
|
py
|
Python
|
bead_cli/web/graphviz.py
|
krisztianfekete/lib
|
180527203b8dadd0e1acd9c6f73be1887b61bc44
|
[
"Unlicense"
] | 1
|
2017-01-26T07:42:13.000Z
|
2017-01-26T07:42:13.000Z
|
bead_cli/web/graphviz.py
|
e3krisztian/bead
|
180527203b8dadd0e1acd9c6f73be1887b61bc44
|
[
"Unlicense"
] | 21
|
2017-02-16T13:49:06.000Z
|
2021-11-20T21:38:45.000Z
|
bead_cli/web/graphviz.py
|
ceumicrodata/bead
|
180527203b8dadd0e1acd9c6f73be1887b61bc44
|
[
"Unlicense"
] | 1
|
2016-10-25T22:09:53.000Z
|
2016-10-25T22:09:53.000Z
|
import html
from .freshness import Freshness
DOT_GRAPH_TEMPLATE = """\
digraph {{
layout=dot
rankdir="LR"
pad="1"
// pack/packmode removes edge labels, see https://gitlab.com/graphviz/graphviz/issues/1616
// re-enable for possibly prettier output if the above issue is solved
// pack="true"
// packmode="node"
// clustered node definitions
{bead_clusters}
// edges: input links
edge [headport="w" tailport="e"]
// edge [weight="100"]
// edge [labelfloat="true"]
edge [decorate="true"]
{bead_inputs}
}}
"""
def node_cluster(bead):
id = bead.name.replace('"', '\\"')
return f'"cluster_{id}"'
BEAD_COLOR = {
Freshness.PHANTOM: "red",
Freshness.SUPERSEDED: "grey",
Freshness.UP_TO_DATE: "green",
Freshness.OUT_OF_DATE: "orange",
}
def bead_color(bead):
return BEAD_COLOR[bead.freshness]
class Port:
def __init__(self, bead):
content_id = bead.content_id
self.input = f"in_{content_id}"
self.output = f"out_{content_id}"
def dot_cluster_as_fragments(cluster_name, beads, indent=' '):
assert beads
# beads are sorted in descending order by freeze_time
freeze_times = [b.freeze_time for b in beads]
assert freeze_times == sorted(freeze_times, reverse=True)
# they have the same name
assert {bead.name for bead in beads} == {cluster_name}
label = html.escape(cluster_name)
yield indent
yield node_cluster(beads[0])
yield '[shape="plaintext" color="grey" '
yield 'label=<<TABLE CELLBORDER="1">\n'
yield indent
yield ' <TR>'
yield '<TD BORDER="0"></TD>'
yield '<TD BORDER="0">'
yield f'<B><I>{label}</I></B>'
yield '</TD>'
yield '</TR>\n'
for bead in beads:
color = f'BGCOLOR="{bead_color(bead)}:none" style="radial"'
yield indent
yield ' <TR>'
yield f'<TD PORT="{Port(bead).input}" {color}></TD>'
yield f'<TD PORT="{Port(bead).output}" {color}>'
yield f'{bead.freeze_time}'
yield '</TD>'
yield '</TR>\n'
yield indent
yield '</TABLE>>'
yield ']'
class Context:
def __init__(self):
self.__unique_node_counter = 0
def _get_unique_node_id(self):
"""
Generate unique graphviz dot node ids.
"""
self.__unique_node_counter += 1
return f"unique_{self.__unique_node_counter}"
def dot_edge(self, bead_src, bead_dest, name, is_auxiliary_edge, indent=' '):
"""
Create an edge with a label in the DOT language between two beads.
        This is more complicated than one might think, because GraphViz's
        output is unreadable for DAGs with several parallel paths: edges
        overlap, producing a messy graph.  To amend this, a conceptual edge is
        implemented as a series of extra nodes and edges between them.
"""
src = f'{node_cluster(bead_src)}:{Port(bead_src).output}:e'
dest = f'{node_cluster(bead_dest)}:{Port(bead_dest).input}:w'
before_label = [src]
after_label = [dest]
silent_helper_nodes = []
color = bead_color(bead_src) if not is_auxiliary_edge else 'grey90'
label = html.escape(name)
# add auxiliary nodes before label
for _ in range(4):
unique_node = self._get_unique_node_id()
before_label.append(unique_node)
silent_helper_nodes.append(unique_node)
def long_path(nodes):
if len(nodes) > 1:
return ' -> '.join(nodes) + f'[color={color}];'
return ''
return ''.join(
[indent]
+ [f'{node}[shape=plain label=""];' for node in silent_helper_nodes]
+ [indent, '\n']
+ [indent, long_path(before_label)]
+ [indent, '\n']
+ [
indent,
f'{before_label[-1]} -> {after_label[0]} ',
'[',
f'fontcolor="{color}" color="{color}" fontsize="10" label="{label}" weight="100"',
']',
';'
]
+ [indent, '\n']
+ [indent, long_path(after_label)])
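# --- Illustrative sketch (not part of the original module) ---
# Context.dot_edge() above hides one conceptual edge behind a chain of
# invisible helper nodes.  The demo below feeds it two stand-in bead objects
# carrying only the attributes this module reads; the names and content ids
# are made up.
def _dot_edge_demo():
    from collections import namedtuple
    FakeBead = namedtuple('FakeBead', 'name content_id freshness')
    src = FakeBead('inputs', 'aaa111', Freshness.UP_TO_DATE)
    dest = FakeBead('report', 'bbb222', Freshness.OUT_OF_DATE)
    return Context().dot_edge(src, dest, name='inputs', is_auxiliary_edge=False)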
| 28.731034
| 98
| 0.587854
|
1e480325cd1a27a64aa6fdfdba753b9541297eeb
| 2,503
|
py
|
Python
|
bird2board/bird2board.py
|
ihuston/bird2board
|
2bffadf5e5fe95c5d67d38f084efc714fd92c8fe
|
[
"MIT"
] | 2
|
2022-03-25T18:24:04.000Z
|
2022-03-25T18:36:24.000Z
|
bird2board/bird2board.py
|
ihuston/bird2board
|
2bffadf5e5fe95c5d67d38f084efc714fd92c8fe
|
[
"MIT"
] | 21
|
2021-05-31T19:30:50.000Z
|
2021-06-29T20:30:16.000Z
|
bird2board/bird2board.py
|
ihuston/bird2board
|
2bffadf5e5fe95c5d67d38f084efc714fd92c8fe
|
[
"MIT"
] | null | null | null |
import logging
import pathlib
import requests
from bird2board import Pinboard, Twitter
class Bird2Board:
def __init__(self, pinboard_token, replace=False, shared=False, toread=False):
self.pinboard = Pinboard(auth_token=pinboard_token, replace=replace, shared=shared, toread=toread)
self.twitter = Twitter()
def convert_single_file(self, file_path: pathlib.Path):
try:
json_text = file_path.read_text()
except IOError:
logging.error(f"Error reading file {file_path}")
raise
else:
logging.info(f"Loaded JSON from file {file_path}.")
try:
tweets = self.twitter.parse_json(json_text)
except Exception:
logging.exception(f"Error parsing bookmark data from file {file_path}.")
raise
else:
logging.info(f"Parsed {len(tweets)} tweets from file.")
for tweet in tweets:
bookmark = self.pinboard.tweet_to_bookmark(tweet)
try:
self.pinboard.add_bookmark(bookmark)
except Exception:
logging.error(f"Error saving bookmark to Pinboard {bookmark['url']}")
raise
else:
logging.info(f"Saved bookmark to Pinboard: {bookmark['url']}")
logging.info(f"Converted tweets from {file_path}.")
return
def convert_directory(self, tweet_directory: pathlib.Path):
if tweet_directory.is_file():
self.convert_single_file(tweet_directory)
else:
files_with_errors = []
dir_size = len(list(tweet_directory.iterdir()))
logging.info(f"Converting directory with {dir_size} possible files.")
for i, p in enumerate(tweet_directory.iterdir()):
if p.suffix == ".json":
try:
self.convert_single_file(p)
except (ValueError, KeyError, IOError, requests.exceptions.HTTPError):
logging.info(f"Error with file {p}, moving to next file.")
files_with_errors.append(p)
logging.info(f"Converted file {i+1} of {dir_size}.")
else:
logging.info(f"Skipped file {p}.")
logging.debug(f"Files with errors so far: {files_with_errors}")
if len(files_with_errors) > 0:
logging.info(f"Files with errors during processing: {files_with_errors}")
| 39.109375
| 106
| 0.590491
|
159b1e6060bebde6da8d7f0d61a6ee3d1c27e093
| 11,913
|
py
|
Python
|
object_detection.py
|
mochaccino-latte/ur5-ros-control
|
ad4e107d726a8f8d7822aadc3e7a04b0b79bf17f
|
[
"Apache-2.0"
] | null | null | null |
object_detection.py
|
mochaccino-latte/ur5-ros-control
|
ad4e107d726a8f8d7822aadc3e7a04b0b79bf17f
|
[
"Apache-2.0"
] | null | null | null |
object_detection.py
|
mochaccino-latte/ur5-ros-control
|
ad4e107d726a8f8d7822aadc3e7a04b0b79bf17f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
import sys
# sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import time
import math
import numpy as np
import matplotlib.pyplot as plt
import pyrealsense2 as rs
from openpyxl import Workbook
# from universal_robot_kinematics import invKine
# from forward_kinematics import fwd_kin
from scipy.spatial.transform import Rotation as R
from pyquaternion import Quaternion
print("Environment Ready")
class IntelRealsense:
def __init__(self):
# initiate the pipeline
self.shift_x = 300
self.shift_y = 0
self.pp_k = 0
self.name = 1
self.colorizer = rs.colorizer()
self.pipe = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 90)
config.enable_stream(rs.stream.color, 848, 480, rs.format.bgr8, 30)
profile = self.pipe.start(config)
i = profile.get_stream(rs.stream.depth)
self.intr = i.as_video_stream_profile().get_intrinsics()
# extr = i.as_video_stream_profile().get_extrinsics_to()
# print('intr %s' %self.intr)
s = profile.get_device().query_sensors()[1]
s.set_option(rs.option.enable_auto_exposure, False)
self.depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()
def reverse_perspective_projection(self, pp_x, pp_y, pp_depth):
intr = self.intr
image_plane = np.array([ [pp_x * pp_depth],
[pp_y * pp_depth],
[1 * pp_depth] ])
# print('Image Plane \n %s \n' %image_plane)
T_perspective_projection = np.array([ [intr.fx, 0, intr.ppx, 0],
[0, intr.fy, intr.ppy, 0],
[0, 0, 1, 0] ])
# print('T_perspective_projection: \n%s\n' %T_perspective_projection)
# T_perspective_projection * vec(world_coordinates) = image_plane
answer = np.linalg.lstsq(T_perspective_projection, image_plane, rcond=None)
return np.array(answer[0])
def perspective_projection(self, real_x, real_y, real_depth):
intr = self.intr
real_world = np.array([ [real_x],
[real_y],
[real_depth],
[1] ])
# print('Image Plane \n %s \n' %image_plane)
T_perspective_projection = np.array([ [intr.fx, 0, intr.ppx, 0],
[0, intr.fy, intr.ppy, 0],
[0, 0, 1, 0] ])
# T_perspective_projection * vec(world_coordinates) = image_plane
answer =( T_perspective_projection.dot(real_world) )/real_depth
return np.array(answer)
# def transformation_image2camera(self, pp_x, pp_y, pp_depth, pp_k, shift_x=0, shift_y=0):
# pp_x -= 424; pp_y -= 240;
# print('pp camera frame (%s, %s)' %(pp_x, pp_y))
# pingpong_camera = np.array([ [pp_x * pp_k *100],
# [pp_y * pp_k *100],
# [pp_depth * 100],
# [1] ])
# return pingpong_camera
def pingpong_detection(self, shift_x=0, shift_y=100, scope_side=60, scope_depth=50, display=True):
# pp_area = 0.001256 # meters
capture = time.time()
frameset = self.pipe.wait_for_frames()
depth_frame = frameset.get_depth_frame()
depth = np.asanyarray(depth_frame.get_data())
depth_shape = depth.shape
depth_crop = depth[0:depth_shape[0]-shift_y, 0+shift_x:depth_shape[1]-200]
min = depth_crop[depth_crop > 10].min() # Depth Values
# print('depth : %f' %(min))
if min > 700 and min < 2100:
min_pt = np.where(depth_crop == min)
depth_scope = depth_crop[int(min_pt[0][0]-scope_side/2):int(min_pt[0][0]+scope_side/2), int(min_pt[1][0]-scope_side/2): int(min_pt[1][0]+scope_side/2)]
numpix = 0; sumx = 0; sumy = 0
for row in range(0,depth_scope.shape[0]):
for col in range(0,depth_scope.shape[1]):
if depth_scope[row,col] < min+scope_depth and depth_scope[row,col] > min-scope_depth:numpix+=1; sumx += col; sumy += row;
if numpix != 0:ppscope_x = sumx/numpix; ppscope_y = sumy/numpix
else:ppscope_x = 0; ppscope_y = 0
pp_x = ppscope_x+shift_x+min_pt[1][0]-scope_side/2
pp_y = ppscope_y+min_pt[0][0]-scope_side/2
# min_depth_color = np.asanyarray(self.colorizer.colorize(depth_frame).get_data())
# min_x = shift_x+min_pt[1][0]
# min_y = min_pt[0][0]
# cv2.circle(min_depth_color, (int(min_x),int(min_y)), (int)(1),(0,255,0),-1)
# cv2.imwrite('/home/s/catkin_ws/src/ur_modern_driver/images/'+'Argmin.png',min_depth_color)
pp_depth = depth[int(pp_y),int(pp_x)] * self.depth_scale
else:
pp_x=0; pp_y=0; pp_depth=0
if display == True:
# cv2.imwrite('/home/s/catkin_ws/src/ur_modern_driver/images/'+'Depth_Original.png',depth)
depth_color = np.asanyarray(self.colorizer.colorize(depth_frame).get_data())
# cv2.imwrite('/home/s/catkin_ws/src/ur_modern_driver/images/'+'Depth_Colormap.png',depth_color)
# depth_scope = depth_color[0:depth_shape[0]-shift_y, 0+shift_x:depth_shape[1]-0]
# depth_scope = depth_scope[int(min_pt[0][0]-scope_side/2):int(min_pt[0][0]+scope_side/2), int(min_pt[1][0]-scope_side/2): int(min_pt[1][0]+scope_side/2)] #**
# cv2.imwrite('/home/s/catkin_ws/src/ur_modern_driver/images/'+'Ping-Pong_Scope1.png',depth_scope)
# cv2.circle(depth_scope, (int(ppscope_x),int(ppscope_y)), (int)(1),(0,255,0),-1)
# cv2.imwrite('/home/s/catkin_ws/src/ur_modern_driver/images/'+'Ping-Pong_Scope2.png',depth_scope)
cv2.line(depth_color, (shift_x,0), (shift_x, depth_shape[0]), (0,0,255), 1)
cv2.line(depth_color, (0,depth_shape[0]-shift_y), (depth_shape[1], depth_shape[0]-shift_y), (0,0,255), 1)
cv2.circle(depth_color, (int(pp_x),int(pp_y)), (int)(1),(0,255,0),-1)
# cv2.circle(depth_color, (int(np.size(depth,1)/2),int(np.size(depth,0)/2)), (int)(0),(0,0,255),5)
            cv2.namedWindow('Object Detection using Depth Image', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Object Detection using Depth Image', depth_color)
# cv2.imwrite('/home/s/catkin_ws/src/ur_modern_driver/images/'+'Ping-Pong.png',depth_color)
# if pp_x != 0 and pp_y != 0:
# filename = 'pingpong'+str(self.name)+'.png'
# cv2.imwrite('/home/s/catkin_ws/src/ur_modern_driver/images/'+filename,depth_color)
# self.name+=1
cv2.waitKey(0)
# key = cv2.waitKey(1)
# if key & 0xFF == ord('q') or key == 27:
# cv2.destroyAllWindows()
return pp_x, pp_y, pp_depth, capture
def pingpong_velocity(self, STATE, pp_x, pp_y, pp_depth, capture, display=True):
v_x = 999; v_y = 999; v_depth = 999;a_x =999; a_y = 999; a_depth = 999
if pp_x == -1.1 and pp_y == 1.5:
STATE = 'NONE'
else:
if STATE == 'NONE':STATE='INITIAL'
if STATE == 'INITIAL':
self.lastpp_x = pp_x; self.lastpp_y = pp_y; self.lastcapture = capture
self.lastpp_depth = pp_depth; STATE = 'VELOCITY'
elif STATE == 'VELOCITY':
delt = capture-self.lastcapture; self.lastcapture = capture
v_x = (pp_x - self.lastpp_x)/(delt); v_y = (pp_y - self.lastpp_y)/(delt)
v_depth = (pp_depth - self.lastpp_depth)/(delt)
self.lastv_x = v_x; self.lastv_y = v_y
self.lastv_depth = v_depth
self.lastpp_x = pp_x; self.lastpp_y = pp_y
self.lastpp_depth = pp_depth
STATE = 'KALMAN'
elif STATE == 'KALMAN':
delt = capture-self.lastcapture; self.lastcapture = capture
# print('Delta t %s' %delt)
v_x = (pp_x - self.lastpp_x)/(delt); v_y = (pp_y - self.lastpp_y)/(delt)
v_depth = (pp_depth - self.lastpp_depth)/(delt)
a_x = (v_x - self.lastv_x)/delt; a_y = (v_y - self.lastv_y)/delt
a_depth = (v_depth - self.lastv_depth)/(delt)
if display != True:
self.lastv_x = v_x; self.lastv_y = v_y
self.lastv_depth = v_depth
self.lastpp_x = pp_x; self.lastpp_y = pp_y
self.lastpp_depth = pp_depth
self.lasta_x = a_x;self.lasta_y = a_y
self.lasta_depth = a_depth
if display == True and STATE == 'KALMAN' :
frameset = self.pipe.wait_for_frames()
depth_frame = frameset.get_depth_frame()
depth_color = np.asanyarray(self.colorizer.colorize(depth_frame).get_data())
predpp_x = self.lastpp_x + self.lastv_x*delt
# predpp_y = self.lastpp_y + self.lastv_y*delt + 0.5*9.8*(delt**2)
self.lastv_x = v_x; self.lastv_y = v_y
self.lastpp_x = pp_x; self.lastpp_y = pp_y
cv2.line(depth_color, (int(predpp_x), 0), (int(predpp_x), depth_color.shape[0]), (0,255,0), 1)
# cv2.line(depth_color, (0, int(predpp_y)), (depth_color.shape[1],int(predpp_y)), (0,255,0), 1)
cv2.namedWindow('State Prediction', cv2.WINDOW_AUTOSIZE)
cv2.imshow('State Prediction', depth_color)
filename = 'pingpong'+str(self.name)+'.png'
cv2.imwrite('/home/idealab/catkin_ws/src/thesis/src/predict/'+filename,depth_color)
self.name+=1
key = cv2.waitKey(1)
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
return v_x,v_y,v_depth,a_x,a_y,a_depth,STATE
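    # --- Illustrative note (not part of the original class) ---
    # pingpong_velocity() above is a small state machine: NONE (no ball seen)
    # -> INITIAL (store first position) -> VELOCITY (first finite differences)
    # -> KALMAN (velocity plus acceleration estimates, optionally drawing the
    # predicted x position).  Losing the ball (the sentinel coordinates
    # (-1.1, 1.5)) drops the state back to NONE.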
def transformation_camera2base(self, x_camera=-1.10, y_camera=1.5, z_camera=0.31):
T_camera2base = np.array([ [-1, 0, 0, x_camera],
[0, 0, -1, y_camera],
[0, -1, 0, z_camera],
[0, 0, 0, 1]])
return T_camera2base
def transformation_end2base(self, x_end=0, y_end=0, z_end=0):
T_end2base = np.array([ [0, 0, -1, x_end],
[1, 0, 0, y_end],
[0, -1, 0, z_end],
[0, 0, 0, 1] ])
return T_end2base
# def rot2qua(self,MAT):
# # quaternion conversion
# w = math.sqrt(1 + MAT[0,0]**2 + MAT[1,1]**2 + MAT[2,2]**2)/2.0
# x = (MAT[2,1] - MAT[1,2])/(4.0*w)
# y = (MAT[0,2] - MAT[2,0])/(4.0*w)
# z = (MAT[1,0] - MAT[0,1])/(4.0*w)
# QUA = np.array([[x, 0, 0, 0],
# [0, y, 0, 0],
# [0, 0, z, 0],
# [0, 0, 0, w]])
# return QUA
# __main__
# IntelRealsense = IntelRealsense()
# if __name__ == '__main__':
# # Prepare for excel
# print("Initiate Object Detection")
# STATE = 'INITIAL'
# _,_,_,_, last_capture = IntelRealsense.pingpong_detection(display = False)
# TRAN = IntelRealsense.transformation_matrix(0, 0, 0)
# while True:
# # processing time
# pp_x, pp_y, pp_depth, pp_k, capture = IntelRealsense.pingpong_detection(display = True)
# processing = capture - last_capture
# v_x, v_y, v_depth, a_x, a_y, STATE = IntelRealsense.pingpong_velocity(STATE, pp_x, pp_y, pp_depth, capture, display = False)
# last_capture = capture
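# --- Illustrative sketch (not part of the original script) ---
# perspective_projection() and reverse_perspective_projection() above are
# (approximate) inverses of each other.  The check below uses made-up camera
# intrinsics instead of a live RealSense stream, so it runs without hardware.
def _projection_roundtrip_demo(fx=600.0, fy=600.0, ppx=424.0, ppy=240.0):
    T = np.array([[fx, 0.0, ppx, 0.0],
                  [0.0, fy, ppy, 0.0],
                  [0.0, 0.0, 1.0, 0.0]])
    world = np.array([[0.10], [0.05], [1.20], [1.0]])   # metres, homogeneous
    image = T.dot(world) / world[2, 0]                  # pixel coordinates
    # recover the x, y, z world coordinates (the homogeneous component is lost)
    recovered = np.linalg.lstsq(T, image * world[2, 0], rcond=None)[0]
    return image, recovered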
| 53.421525
| 170
| 0.558717
|
de7b4e9eeb3928be867fe66ca0ed9a3851930d95
| 2,359
|
py
|
Python
|
tests/test_cfg_paths.py
|
FRIARGREG/PROJ-WORKSHOPS
|
0568c1307b15aca7e428ef455ed2b7c798aad906
|
[
"MIT"
] | null | null | null |
tests/test_cfg_paths.py
|
FRIARGREG/PROJ-WORKSHOPS
|
0568c1307b15aca7e428ef455ed2b7c798aad906
|
[
"MIT"
] | null | null | null |
tests/test_cfg_paths.py
|
FRIARGREG/PROJ-WORKSHOPS
|
0568c1307b15aca7e428ef455ed2b7c798aad906
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test the cfg_load.paths module."""
# core modules
import unittest
from copy import deepcopy
try:
from unittest.mock import patch
except ImportError: # Python 2.7
from mock import patch
# internal modules
import cfg_load.paths
class PathsTest(unittest.TestCase):
"""Tests for the cfg_load.paths module."""
def test_make_paths_absolute_empty(self):
cfg_load.paths.make_paths_absolute('/', {})
def test_make_paths_absolute_trivial(self):
cfg = {'foo': 'bar'}
loaded_cfg = cfg_load.paths.make_paths_absolute('/', deepcopy(cfg))
self.assertDictEqual(cfg, loaded_cfg)
def test_make_paths_absolute_begin_underscore(self):
cfg = {'_': 'don\'t touch me'}
loaded_cfg = cfg_load.paths.make_paths_absolute('/', deepcopy(cfg))
self.assertDictEqual(cfg, loaded_cfg)
cfg = {'_path': 'don\'t touch me'}
loaded_cfg = cfg_load.paths.make_paths_absolute('/', deepcopy(cfg))
self.assertDictEqual(cfg, loaded_cfg)
cfg = {'_path': {'a_path': 'don\'t touch me'}}
loaded_cfg = cfg_load.paths.make_paths_absolute('/', deepcopy(cfg))
self.assertDictEqual(cfg, loaded_cfg)
def test_make_paths_absolute_begin_underscore_path(self):
cfg = {'a_path': 'change.me'}
loaded_cfg = cfg_load.paths.make_paths_absolute('/home/user',
deepcopy(cfg))
exp = {'a_path': '/home/user/change.me'}
self.assertDictEqual(exp, loaded_cfg)
cfg = {'inner': {'a_path': 'change.me'}}
loaded_cfg = cfg_load.paths.make_paths_absolute('/home/user',
deepcopy(cfg))
exp = {'inner': {'a_path': '/home/user/change.me'}}
self.assertDictEqual(exp, loaded_cfg)
def simple_expanduser(input_):
return input_.replace('~', '/home/user')
@patch('os.path.expanduser', side_effect=simple_expanduser)
def test_make_paths_absolute_homedir(self, mock_expanduser):
cfg = {'a_path': '~/change.me'}
loaded_cfg = cfg_load.paths.make_paths_absolute('/home/user',
deepcopy(cfg))
exp = {'a_path': '/home/user/change.me'}
self.assertDictEqual(exp, loaded_cfg)
| 36.292308
| 75
| 0.616363
|
81f3e6c61b8b103c0baef752cd04ca810e4a8e0b
| 4,904
|
py
|
Python
|
ietf/liaisons/migrations/0002_schema_changes.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2021-11-20T03:40:40.000Z
|
2021-11-20T03:40:42.000Z
|
ietf/liaisons/migrations/0002_schema_changes.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
ietf/liaisons/migrations/0002_schema_changes.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('group', '0003_auto_20150304_0743'),
('person', '0001_initial'),
('doc', '0002_auto_20141222_1749'),
('name', '0007_populate_liaison_names'),
('liaisons', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='LiaisonStatementEvent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.DateTimeField(auto_now_add=True)),
('desc', models.TextField()),
('by', models.ForeignKey(to='person.Person')),
('statement', models.ForeignKey(to='liaisons.LiaisonStatement')),
('type', models.ForeignKey(to='name.LiaisonStatementEventTypeName')),
],
options={'ordering': ['-time', '-id']},
bases=(models.Model,),
),
migrations.CreateModel(
name='LiaisonStatementGroupContacts',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('contacts', models.CharField(max_length=255,blank=True)),
('cc_contacts', models.CharField(max_length=255,blank=True)),
('group', models.ForeignKey(to='group.Group', unique=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RelatedLiaisonStatement',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('relationship', models.ForeignKey(to='name.DocRelationshipName')),
('source', models.ForeignKey(related_name='source_of_set', to='liaisons.LiaisonStatement')),
('target', models.ForeignKey(related_name='target_of_set', to='liaisons.LiaisonStatement')),
],
options={
},
bases=(models.Model,),
),
migrations.RenameField(
model_name='liaisonstatement',
old_name='cc',
new_name='cc_contacts',
),
migrations.RenameField(
model_name='liaisonstatement',
old_name='to_contact',
new_name='to_contacts',
),
migrations.RenameField(
model_name='liaisonstatement',
old_name='technical_contact',
new_name='technical_contacts',
),
migrations.RenameField(
model_name='liaisonstatement',
old_name='response_contact',
new_name='response_contacts',
),
migrations.AddField(
model_name='liaisonstatement',
name='action_holder_contacts',
field=models.CharField(help_text=b'Who makes sure action is completed', max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='liaisonstatement',
name='from_groups',
field=models.ManyToManyField(related_name='liaisonsatement_from_set', to='group.Group', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='liaisonstatement',
name='other_identifiers',
field=models.TextField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='liaisonstatement',
name='state',
field=models.ForeignKey(default=b'pending', to='name.LiaisonStatementState'),
preserve_default=True,
),
migrations.AddField(
model_name='liaisonstatement',
name='tags',
field=models.ManyToManyField(to='name.LiaisonStatementTagName', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='liaisonstatement',
name='to_groups',
field=models.ManyToManyField(related_name='liaisonsatement_to_set', to='group.Group', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='liaisonstatement',
name='response_contacts',
field=models.CharField(help_text=b'Where to send a response', max_length=255, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='liaisonstatement',
name='technical_contacts',
field=models.CharField(help_text=b'Who to contact for clarification', max_length=255, blank=True),
preserve_default=True,
),
]
| 39.548387
| 114
| 0.579119
|
20c1cc4296501e7b9b9f5f5ed694230c41a91e38
| 9,909
|
py
|
Python
|
src/main/resources/org/broadinstitute/hellbender/tools/copynumber/case_denoising_calling.py
|
sunboy0523/gatk
|
f06971ac0824ad7da38b878334df377c30457a23
|
[
"BSD-3-Clause"
] | 1,273
|
2015-10-13T18:11:50.000Z
|
2022-03-28T09:25:13.000Z
|
src/main/resources/org/broadinstitute/hellbender/tools/copynumber/case_denoising_calling.py
|
sunboy0523/gatk
|
f06971ac0824ad7da38b878334df377c30457a23
|
[
"BSD-3-Clause"
] | 6,471
|
2015-10-08T02:31:06.000Z
|
2022-03-31T17:55:25.000Z
|
src/main/resources/org/broadinstitute/hellbender/tools/copynumber/case_denoising_calling.py
|
sunboy0523/gatk
|
f06971ac0824ad7da38b878334df377c30457a23
|
[
"BSD-3-Clause"
] | 598
|
2015-10-14T19:16:14.000Z
|
2022-03-29T10:03:03.000Z
|
import os
import sys
# set theano flags
user_theano_flags = os.environ.get("THEANO_FLAGS")
default_theano_flags = "device=cpu,floatX=float64,optimizer=fast_run,compute_test_value=ignore," + \
"openmp=true,blas.ldflags=-lmkl_rt,openmp_elemwise_minsize=10"
theano_flags = default_theano_flags + ("" if user_theano_flags is None else "," + user_theano_flags)
os.environ["THEANO_FLAGS"] = theano_flags
import logging
import argparse
import gcnvkernel
import shutil
import json
from typing import Dict, Any
logger = logging.getLogger("case_denoising_calling")
parser = argparse.ArgumentParser(description="gCNV case calling tool based on a previously trained model",
formatter_class=gcnvkernel.cli_commons.GCNVHelpFormatter)
# logging args
gcnvkernel.cli_commons.add_logging_args_to_argparse(parser)
hidden_denoising_args = {
"max_bias_factors",
"psi_t_scale",
"log_mean_bias_std",
"init_ard_rel_unexplained_variance",
"enable_bias_factors",
"enable_explicit_gc_bias_modeling",
"disable_bias_factors_in_active_class",
"num_gc_bins",
"gc_curve_sd"
}
hidden_calling_args = {
"p_active",
"class_coherence_length"
}
# add tool-specific args
group = parser.add_argument_group(title="Required arguments")
group.add_argument("--input_model_path",
type=str,
required=True,
default=argparse.SUPPRESS,
help="Path to denoising model parameters")
group.add_argument("--read_count_tsv_files",
type=str,
required=True,
nargs='+', # one or more
default=argparse.SUPPRESS,
help="List of read count files in the cohort (in .tsv format; must include sample name header)")
group.add_argument("--ploidy_calls_path",
type=str,
required=True,
default=argparse.SUPPRESS,
help="The path to the results of ploidy determination tool")
group.add_argument("--output_calls_path",
type=str,
required=True,
default=argparse.SUPPRESS,
help="Output path to write CNV calls")
group.add_argument("--output_opt_path",
type=str,
required=False,
default=argparse.SUPPRESS,
help="(advanced) Output path to write the latest optimizer state")
group.add_argument("--output_tracking_path",
type=str,
required=True,
default=argparse.SUPPRESS,
help="Output path to write tracked parameters, ELBO, etc.")
group.add_argument("--input_calls_path",
type=str,
required=False,
default=argparse.SUPPRESS,
help="Path to previously obtained calls to take as starting point")
group.add_argument("--input_opt_path",
type=str,
required=False,
default=argparse.SUPPRESS,
help="(advanced) Path to saved optimizer state to take as the starting point")
# add denoising config args
# Note: we are hiding parameters that are either set by the model or are irrelevant to the case calling task
gcnvkernel.DenoisingModelConfig.expose_args(
parser,
hide={"--" + arg for arg in hidden_denoising_args})
# add calling config args
# Note: we are hiding parameters that are either set by the model or are irrelevant to the case calling task
gcnvkernel.CopyNumberCallingConfig.expose_args(
parser,
hide={"--" + arg for arg in hidden_calling_args})
# override some inference parameters
gcnvkernel.HybridInferenceParameters.expose_args(parser)
def update_args_dict_from_saved_model(input_model_path: str,
_args_dict: Dict[str, Any]):
logging.info("Loading denoising model configuration from the provided model...")
with open(os.path.join(input_model_path, "denoising_config.json"), 'r') as fp:
loaded_denoising_config_dict = json.load(fp)
with open(os.path.join(input_model_path, "calling_config.json"), 'r') as fp:
loaded_calling_config_dict = json.load(fp)
# load arguments from the model denoising config that are hidden by the tool
for arg in hidden_denoising_args:
_args_dict[arg] = \
loaded_denoising_config_dict[arg]
for arg in hidden_calling_args:
_args_dict[arg] = \
loaded_calling_config_dict[arg]
logging.info("- bias factors enabled: "
+ repr(_args_dict['enable_bias_factors']))
logging.info("- explicit GC bias modeling enabled: "
+ repr(_args_dict['enable_explicit_gc_bias_modeling']))
logging.info("- bias factors in active classes disabled: "
+ repr(_args_dict['disable_bias_factors_in_active_class']))
if _args_dict['enable_bias_factors']:
logging.info("- maximum number of bias factors: "
+ repr(_args_dict['max_bias_factors']))
if _args_dict['enable_explicit_gc_bias_modeling']:
logging.info("- number of GC curve knobs: "
+ repr(_args_dict['num_gc_bins']))
logging.info("- GC curve prior standard deviation: "
+ repr(_args_dict['gc_curve_sd']))
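# --- Illustrative sketch (not part of the original tool) ---
# update_args_dict_from_saved_model() above expects the model directory to
# contain flat JSON files named denoising_config.json and calling_config.json
# whose keys include the hidden arguments listed earlier.  The values below
# are placeholders that only show the expected shape, not real model output.
_EXAMPLE_DENOISING_CONFIG_JSON = {
    "enable_bias_factors": True,
    "enable_explicit_gc_bias_modeling": True,
    "disable_bias_factors_in_active_class": False,
    "max_bias_factors": 5,
    "num_gc_bins": 20,
    "gc_curve_sd": 1.0,
    "psi_t_scale": 0.001,
    "log_mean_bias_std": 0.1,
    "init_ard_rel_unexplained_variance": 0.1,
}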
if __name__ == "__main__":
# parse arguments
args = parser.parse_args()
gcnvkernel.cli_commons.set_logging_config_from_args(args)
logger.info("THEANO_FLAGS environment variable has been set to: {theano_flags}".format(theano_flags=theano_flags))
# check gcnvkernel version in the input model path
gcnvkernel.io_commons.check_gcnvkernel_version_from_path(args.input_model_path)
# copy the intervals to the calls path
# (we do this early to avoid inadvertent cleanup of temporary files)
gcnvkernel.io_commons.assert_output_path_writable(args.output_calls_path)
shutil.copy(os.path.join(args.input_model_path, gcnvkernel.io_consts.default_interval_list_filename),
os.path.join(args.output_calls_path, gcnvkernel.io_consts.default_interval_list_filename))
# load modeling interval list from the model
logging.info("Loading modeling interval list from the provided model...")
modeling_interval_list = gcnvkernel.io_intervals_and_counts.load_interval_list_tsv_file(
os.path.join(args.input_model_path, gcnvkernel.io_consts.default_interval_list_filename))
contigs_set = {target.contig for target in modeling_interval_list}
logging.info("The model contains {0} intervals and {1} contig(s)".format(
len(modeling_interval_list), len(contigs_set)))
# load sample names, truncated counts, and interval list from the sample read counts table
logging.info("Loading {0} read counts file(s)...".format(len(args.read_count_tsv_files)))
sample_names, n_st = gcnvkernel.io_intervals_and_counts.load_counts_in_the_modeling_zone(
args.read_count_tsv_files, modeling_interval_list)
# load read depth and ploidy metadata
sample_metadata_collection: gcnvkernel.SampleMetadataCollection = gcnvkernel.SampleMetadataCollection()
gcnvkernel.io_metadata.update_sample_metadata_collection_from_ploidy_determination_calls(
sample_metadata_collection, args.ploidy_calls_path)
# setup the inference task
args_dict = args.__dict__
# read model configuration and update args dict
update_args_dict_from_saved_model(args.input_model_path, args_dict)
# instantiate config classes
denoising_config = gcnvkernel.DenoisingModelConfig.from_args_dict(args_dict)
calling_config = gcnvkernel.CopyNumberCallingConfig.from_args_dict(args_dict)
inference_params = gcnvkernel.HybridInferenceParameters.from_args_dict(args_dict)
# instantiate and initialize the workspace
shared_workspace = gcnvkernel.DenoisingCallingWorkspace(
denoising_config, calling_config, modeling_interval_list,
n_st, sample_names, sample_metadata_collection)
initial_params_supplier = gcnvkernel.DefaultDenoisingModelInitializer(
denoising_config, calling_config, shared_workspace)
task = gcnvkernel.CaseDenoisingCallingTask(
denoising_config, calling_config, inference_params,
shared_workspace, initial_params_supplier, args.input_model_path)
if hasattr(args, 'input_calls_path'):
logger.info("A call path was provided to use as starting point...")
gcnvkernel.io_denoising_calling.SampleDenoisingAndCallingPosteriorsReader(
shared_workspace, task.continuous_model, task.continuous_model_approx,
args.input_calls_path)()
if hasattr(args, 'input_opt_path'):
logger.info("A saved optimizer state was provided to use as starting point...")
task.fancy_opt.load(args.input_opt_path)
try:
# go!
task.engage()
task.disengage()
except gcnvkernel.ConvergenceError as err:
logger.info(err.message)
# if inference diverged, pass an exit code to the Java side indicating that restart is needed
sys.exit(gcnvkernel.io_consts.diverged_inference_exit_code)
# save calls
gcnvkernel.io_denoising_calling.SampleDenoisingAndCallingPosteriorsWriter(
denoising_config, calling_config, shared_workspace, task.continuous_model, task.continuous_model_approx,
args.output_calls_path)()
# save optimizer state
if hasattr(args, 'output_opt_path'):
task.fancy_opt.save(args.output_opt_path)
# save ELBO history
if hasattr(args, 'output_tracking_path'):
gcnvkernel.io_commons.assert_output_path_writable(args.output_tracking_path)
elbo_hist_file = os.path.join(args.output_tracking_path, "elbo_history.tsv")
task.save_elbo_history(elbo_hist_file)
| 41.634454
| 118
| 0.701383
|
384aaf1a9fd0e95755f410308f3967c8724d43c1
| 13,760
|
py
|
Python
|
remoting/webapp/build-webapp.py
|
MIPS/external-chromium_org
|
e31b3128a419654fd14003d6117caa8da32697e7
|
[
"BSD-3-Clause"
] | 2
|
2018-11-24T07:58:44.000Z
|
2019-02-22T21:02:46.000Z
|
remoting/webapp/build-webapp.py
|
carlosavignano/android_external_chromium_org
|
2b5652f7889ccad0fbdb1d52b04bad4c23769547
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
remoting/webapp/build-webapp.py
|
carlosavignano/android_external_chromium_org
|
2b5652f7889ccad0fbdb1d52b04bad4c23769547
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3
|
2017-07-31T19:09:52.000Z
|
2019-01-04T18:48:50.000Z
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a directory with with the unpacked contents of the remoting webapp.
The directory will contain a copy-of or a link-to to all remoting webapp
resources. This includes HTML/JS and any plugin binaries. The script also
massages resulting files appropriately with host plugin data. Finally,
a zip archive for all of the above is produced.
"""
# Python 2.5 compatibility
from __future__ import with_statement
import os
import platform
import re
import shutil
import subprocess
import sys
import time
import zipfile
# Update the module path, assuming that this script is in src/remoting/webapp,
# and that the google_api_keys module is in src/google_apis. Note that
# sys.path[0] refers to the directory containing this script.
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(sys.path[0], '../../google_apis')))
import google_api_keys
def findAndReplace(filepath, findString, replaceString):
"""Does a search and replace on the contents of a file."""
oldFilename = os.path.basename(filepath) + '.old'
oldFilepath = os.path.join(os.path.dirname(filepath), oldFilename)
os.rename(filepath, oldFilepath)
with open(oldFilepath) as input:
with open(filepath, 'w') as output:
for s in input:
output.write(s.replace(findString, replaceString))
os.remove(oldFilepath)
def createZip(zip_path, directory):
"""Creates a zipfile at zip_path for the given directory."""
zipfile_base = os.path.splitext(os.path.basename(zip_path))[0]
zip = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for (root, dirs, files) in os.walk(directory):
for f in files:
full_path = os.path.join(root, f)
rel_path = os.path.relpath(full_path, directory)
zip.write(full_path, os.path.join(zipfile_base, rel_path))
zip.close()
def replaceUrl(destination, url_name, url_value):
"""Updates a URL in both plugin_settings.js and manifest.json."""
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'" + url_name + "'", "'" + url_value + "'")
findAndReplace(os.path.join(destination, 'manifest.json'),
url_name, url_value)
def buildWebApp(buildtype, version, mimetype, destination, zip_path, plugin,
files, locales, patches):
"""Does the main work of building the webapp directory and zipfile.
Args:
    buildtype: the type of build ("Official" or "Dev")
    version: the version string written into the manifest.
    mimetype: A string with mimetype of plugin.
    destination: A string with path to directory where the webapp will be
        written.
    zip_path: A string with path to the zipfile to create containing the
        contents of |destination|.
plugin: A string with path to the binary plugin for this webapp.
files: An array of strings listing the paths for resources to include
in this webapp.
locales: An array of strings listing locales, which are copied, along
with their directory structure from the _locales directory down.
patches: An array of strings listing patch files to be applied to the
webapp directory. Paths in the patch file should be relative to
the remoting/webapp directory, for example a/main.html. Since
'git diff -p' works relative to the src/ directory, patches
obtained this way will need to be edited.
"""
# Ensure a fresh directory.
try:
shutil.rmtree(destination)
except OSError:
if os.path.exists(destination):
raise
else:
pass
os.mkdir(destination, 0775)
# Use symlinks on linux and mac for faster compile/edit cycle.
#
# On Windows Vista platform.system() can return 'Microsoft' with some
# versions of Python, see http://bugs.python.org/issue1082
# should_symlink = platform.system() not in ['Windows', 'Microsoft']
#
# TODO(ajwong): Pending decision on http://crbug.com/27185 we may not be
# able to load symlinked resources.
should_symlink = False
# Copy all the files.
for current_file in files:
destination_file = os.path.join(destination, os.path.basename(current_file))
destination_dir = os.path.dirname(destination_file)
if not os.path.exists(destination_dir):
os.makedirs(destination_dir, 0775)
if should_symlink:
# TODO(ajwong): Detect if we're vista or higher. Then use win32file
# to create a symlink in that case.
targetname = os.path.relpath(os.path.realpath(current_file),
os.path.realpath(destination_file))
os.symlink(targetname, destination_file)
else:
shutil.copy2(current_file, destination_file)
# Copy all the locales, preserving directory structure
destination_locales = os.path.join(destination, "_locales")
os.mkdir(destination_locales , 0775)
remoting_locales = os.path.join(destination, "remoting_locales")
os.mkdir(remoting_locales , 0775)
for current_locale in locales:
extension = os.path.splitext(current_locale)[1]
if extension == '.json':
locale_id = os.path.split(os.path.split(current_locale)[0])[1]
destination_dir = os.path.join(destination_locales, locale_id)
destination_file = os.path.join(destination_dir,
os.path.split(current_locale)[1])
os.mkdir(destination_dir, 0775)
shutil.copy2(current_locale, destination_file)
elif extension == '.pak':
destination_file = os.path.join(remoting_locales,
os.path.split(current_locale)[1])
shutil.copy2(current_locale, destination_file)
else:
raise Exception("Unknown extension: " + current_locale);
# Create fake plugin files to appease the manifest checker.
# It requires that if there is a plugin listed in the manifest that
# there be a file in the plugin with that name.
names = [
'remoting_host_plugin.dll', # Windows
'remoting_host_plugin.plugin', # Mac
'libremoting_host_plugin.ia32.so', # Linux 32
'libremoting_host_plugin.x64.so' # Linux 64
]
pluginName = os.path.basename(plugin)
for name in names:
if name != pluginName:
path = os.path.join(destination, name)
f = open(path, 'w')
f.write("placeholder for %s" % (name))
f.close()
# Copy the plugin. On some platforms (e.g. ChromeOS) plugin compilation may be
# disabled, in which case we don't need to copy anything.
if plugin:
newPluginPath = os.path.join(destination, pluginName)
if os.path.isdir(plugin):
# On Mac we have a directory.
shutil.copytree(plugin, newPluginPath)
else:
shutil.copy2(plugin, newPluginPath)
# Strip the linux build.
if ((platform.system() == 'Linux') and (buildtype == 'Official')):
subprocess.call(["strip", newPluginPath])
# Patch the files, if necessary. Do this before updating any placeholders
# in case any of the diff contexts refer to the placeholders.
for patch in patches:
patchfile = os.path.join(os.getcwd(), patch)
if subprocess.call(['patch', '-d', destination, '-i', patchfile,
'-p1', '-F0', '-s']) != 0:
print 'Patch ' + patch + ' failed to apply.'
return 1
# Set the version number in the manifest version.
findAndReplace(os.path.join(destination, 'manifest.json'),
'FULL_APP_VERSION',
version)
# Set the correct mimetype.
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
'HOST_PLUGIN_MIMETYPE',
mimetype)
# Allow host names for google services/apis to be overriden via env vars.
oauth2AccountsHost = os.environ.get(
'OAUTH2_ACCOUNTS_HOST', 'https://accounts.google.com')
oauth2ApiHost = os.environ.get(
'OAUTH2_API_HOST', 'https://www.googleapis.com')
directoryApiHost = os.environ.get(
'DIRECTORY_API_HOST', 'https://www.googleapis.com')
oauth2BaseUrl = oauth2AccountsHost + '/o/oauth2'
oauth2ApiBaseUrl = oauth2ApiHost + '/oauth2'
directoryApiBaseUrl = directoryApiHost + '/chromoting/v1'
replaceUrl(destination, 'OAUTH2_BASE_URL', oauth2BaseUrl)
replaceUrl(destination, 'OAUTH2_API_BASE_URL', oauth2ApiBaseUrl)
replaceUrl(destination, 'DIRECTORY_API_BASE_URL', directoryApiBaseUrl)
# Substitute hosts in the manifest's CSP list.
findAndReplace(os.path.join(destination, 'manifest.json'),
'OAUTH2_ACCOUNTS_HOST', oauth2AccountsHost)
# Ensure we list the API host only once if it's the same for multiple APIs.
googleApiHosts = ' '.join(set([oauth2ApiHost, directoryApiHost]))
findAndReplace(os.path.join(destination, 'manifest.json'),
'GOOGLE_API_HOSTS', googleApiHosts)
# WCS and the OAuth trampoline are both hosted on talkgadget. Split them into
# separate suffix/prefix variables to allow for wildcards in manifest.json.
talkGadgetHostSuffix = os.environ.get(
'TALK_GADGET_HOST_SUFFIX', 'talkgadget.google.com')
talkGadgetHostPrefix = os.environ.get(
'TALK_GADGET_HOST_PREFIX', 'https://chromoting-client.')
oauth2RedirectHostPrefix = os.environ.get(
'OAUTH2_REDIRECT_HOST_PREFIX', 'https://chromoting-oauth.')
# Use a wildcard in the manifest.json host specs if the prefixes differ.
talkGadgetHostJs = talkGadgetHostPrefix + talkGadgetHostSuffix
talkGadgetBaseUrl = talkGadgetHostJs + '/talkgadget/'
if talkGadgetHostPrefix == oauth2RedirectHostPrefix:
talkGadgetHostJson = talkGadgetHostJs
else:
talkGadgetHostJson = 'https://*.' + talkGadgetHostSuffix
# Set the correct OAuth2 redirect URL.
oauth2RedirectHostJs = oauth2RedirectHostPrefix + talkGadgetHostSuffix
oauth2RedirectHostJson = talkGadgetHostJson
oauth2RedirectPath = '/talkgadget/oauth/chrome-remote-desktop'
oauth2RedirectBaseUrlJs = oauth2RedirectHostJs + oauth2RedirectPath
oauth2RedirectBaseUrlJson = oauth2RedirectHostJson + oauth2RedirectPath
if buildtype == 'Official':
oauth2RedirectUrlJs = ("'" + oauth2RedirectBaseUrlJs +
"/rel/' + chrome.i18n.getMessage('@@extension_id')")
oauth2RedirectUrlJson = oauth2RedirectBaseUrlJson + '/rel/*'
else:
oauth2RedirectUrlJs = "'" + oauth2RedirectBaseUrlJs + "/dev'"
oauth2RedirectUrlJson = oauth2RedirectBaseUrlJson + '/dev*'
thirdPartyAuthUrlJs = "'" + oauth2RedirectBaseUrlJs + "/thirdpartyauth'"
thirdPartyAuthUrlJson = oauth2RedirectBaseUrlJson + '/thirdpartyauth*'
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'TALK_GADGET_URL'", "'" + talkGadgetBaseUrl + "'")
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'OAUTH2_REDIRECT_URL'", oauth2RedirectUrlJs)
findAndReplace(os.path.join(destination, 'manifest.json'),
'TALK_GADGET_HOST', talkGadgetHostJson)
findAndReplace(os.path.join(destination, 'manifest.json'),
'OAUTH2_REDIRECT_URL', oauth2RedirectUrlJson)
# Configure xmpp server and directory bot settings in the plugin.
xmppServerAddress = os.environ.get(
'XMPP_SERVER_ADDRESS', 'talk.google.com:5222')
xmppServerUseTls = os.environ.get('XMPP_SERVER_USE_TLS', 'true')
directoryBotJid = os.environ.get(
'DIRECTORY_BOT_JID', 'remoting@bot.talk.google.com')
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'XMPP_SERVER_ADDRESS'", "'" + xmppServerAddress + "'")
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"Boolean('XMPP_SERVER_USE_TLS')", xmppServerUseTls)
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'DIRECTORY_BOT_JID'", "'" + directoryBotJid + "'")
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'THIRD_PARTY_AUTH_REDIRECT_URL'",
thirdPartyAuthUrlJs)
findAndReplace(os.path.join(destination, 'manifest.json'),
"THIRD_PARTY_AUTH_REDIRECT_URL",
thirdPartyAuthUrlJson)
# Set the correct API keys.
# For overriding the client ID/secret via env vars, see google_api_keys.py.
apiClientId = google_api_keys.GetClientID('REMOTING')
apiClientSecret = google_api_keys.GetClientSecret('REMOTING')
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'API_CLIENT_ID'",
"'" + apiClientId + "'")
findAndReplace(os.path.join(destination, 'plugin_settings.js'),
"'API_CLIENT_SECRET'",
"'" + apiClientSecret + "'")
# Use a consistent extension id for unofficial builds.
if buildtype != 'Official':
manifestKey = '"key": "remotingdevbuild",'
else:
manifestKey = ''
findAndReplace(os.path.join(destination, 'manifest.json'),
'MANIFEST_KEY_FOR_UNOFFICIAL_BUILD', manifestKey)
# Make the zipfile.
createZip(zip_path, destination)
return 0
def main():
if len(sys.argv) < 7:
print ('Usage: build-webapp.py '
'<build-type> <version> <mime-type> <dst> <zip-path> <plugin> '
'<other files...> [--patches <patches...>] '
'[--locales <locales...>]')
return 1
arg_type = ''
files = []
locales = []
patches = []
for arg in sys.argv[7:]:
if arg == '--locales' or arg == '--patches':
arg_type = arg
elif arg_type == '--locales':
locales.append(arg)
elif arg_type == '--patches':
patches.append(arg)
else:
files.append(arg)
return buildWebApp(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
sys.argv[5], sys.argv[6], files, locales, patches)
if __name__ == '__main__':
sys.exit(main())
| 41.445783
| 80
| 0.689317
|
2a52a2db5a7368533be352d027b234798307aaee
| 14,433
|
py
|
Python
|
src/sage/plot/polygon.py
|
robertwb/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | 2
|
2018-06-30T01:37:35.000Z
|
2018-06-30T01:37:39.000Z
|
src/sage/plot/polygon.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
src/sage/plot/polygon.py
|
boothby/sage
|
1b1e6f608d1ef8ee664bb19e991efbbc68cbd51f
|
[
"BSL-1.0"
] | null | null | null |
"""
Polygons
"""
#*****************************************************************************
# Copyright (C) 2006 Alex Clemesha <clemesha@gmail.com>,
# William Stein <wstein@gmail.com>,
# 2008 Mike Hansen <mhansen@gmail.com>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from six.moves import range
from sage.plot.primitive import GraphicPrimitive_xydata
from sage.misc.decorators import options, rename_keyword
from sage.plot.colors import to_mpl_color
class Polygon(GraphicPrimitive_xydata):
"""
Primitive class for the Polygon graphics type. For information
on actual plotting, please see :func:`polygon`, :func:`polygon2d`,
or :func:`~sage.plot.plot3d.shapes2.polygon3d`.
INPUT:
- xdata - list of `x`-coordinates of points defining Polygon
- ydata - list of `y`-coordinates of points defining Polygon
- options - dict of valid plot options to pass to constructor
EXAMPLES:
Note this should normally be used indirectly via :func:`polygon`::
sage: from sage.plot.polygon import Polygon
sage: P = Polygon([1,2,3],[2,3,2],{'alpha':.5})
sage: P
Polygon defined by 3 points
sage: P.options()['alpha']
0.500000000000000
sage: P.ydata
[2, 3, 2]
TESTS:
We test creating polygons::
sage: polygon([(0,0), (1,1), (0,1)])
Graphics object consisting of 1 graphics primitive
::
sage: polygon([(0,0,1), (1,1,1), (2,0,1)])
Graphics3d Object
"""
def __init__(self, xdata, ydata, options):
"""
Initializes base class Polygon.
EXAMPLES::
sage: P = polygon([(0,0), (1,1), (-1,3)], thickness=2)
sage: P[0].xdata
[0.0, 1.0, -1.0]
sage: P[0].options()['thickness']
2
"""
self.xdata = xdata
self.ydata = ydata
GraphicPrimitive_xydata.__init__(self, options)
def _repr_(self):
"""
String representation of Polygon primitive.
EXAMPLES::
sage: P = polygon([(0,0), (1,1), (-1,3)])
sage: p=P[0]; p
Polygon defined by 3 points
"""
return "Polygon defined by %s points"%len(self)
def __getitem__(self, i):
"""
Returns `i`th vertex of Polygon primitive, starting count
from 0th vertex.
EXAMPLES::
sage: P = polygon([(0,0), (1,1), (-1,3)])
sage: p=P[0]
sage: p[0]
(0.0, 0.0)
"""
return self.xdata[i], self.ydata[i]
def __setitem__(self, i, point):
"""
Changes `i`th vertex of Polygon primitive, starting count
from 0th vertex. Note that this only changes a vertex,
but does not create new vertices.
EXAMPLES::
sage: P = polygon([(0,0), (1,2), (0,1), (-1,2)])
sage: p=P[0]
sage: [p[i] for i in range(4)]
[(0.0, 0.0), (1.0, 2.0), (0.0, 1.0), (-1.0, 2.0)]
sage: p[2]=(0,.5)
sage: p[2]
(0.0, 0.5)
"""
i = int(i)
self.xdata[i] = float(point[0])
self.ydata[i] = float(point[1])
def __len__(self):
"""
Returns number of vertices of Polygon primitive.
EXAMPLES::
sage: P = polygon([(0,0), (1,2), (0,1), (-1,2)])
sage: p=P[0]
sage: len(p)
4
"""
return len(self.xdata)
def _allowed_options(self):
"""
Return the allowed options for the Polygon class.
EXAMPLES::
sage: P = polygon([(1,1), (1,2), (2,2), (2,1)], alpha=.5)
sage: P[0]._allowed_options()['alpha']
'How transparent the figure is.'
"""
return {'alpha':'How transparent the figure is.',
'thickness': 'How thick the border line is.',
'edgecolor':'The color for the border of filled polygons.',
'fill':'Whether or not to fill the polygon.',
'legend_label':'The label for this item in the legend.',
'legend_color':'The color of the legend text.',
'rgbcolor':'The color as an RGB tuple.',
'hue':'The color given as a hue.',
'zorder':'The layer level in which to draw'}
def _plot3d_options(self, options=None):
"""
Translate 2d plot options into 3d plot options.
EXAMPLES::
sage: P = polygon([(1,1), (1,2), (2,2), (2,1)], alpha=.5)
sage: p=P[0]; p
Polygon defined by 4 points
sage: q=p.plot3d()
sage: q.texture.opacity
0.5
"""
if options is None:
options = dict(self.options())
for o in ['thickness', 'zorder', 'legend_label', 'fill', 'edgecolor']:
options.pop(o, None)
return GraphicPrimitive_xydata._plot3d_options(self, options)
def plot3d(self, z=0, **kwds):
"""
Plots a 2D polygon in 3D, with default height zero.
INPUT:
- ``z`` - optional 3D height above `xy`-plane, or a list of
heights corresponding to the list of 2D polygon points.
EXAMPLES:
A pentagon::
sage: polygon([(cos(t), sin(t)) for t in srange(0, 2*pi, 2*pi/5)]).plot3d()
Graphics3d Object
Showing behavior of the optional parameter z::
sage: P = polygon([(0,0), (1,2), (0,1), (-1,2)])
sage: p = P[0]; p
Polygon defined by 4 points
sage: q = p.plot3d()
sage: q.obj_repr(q.testing_render_params())[2]
['v 0 0 0', 'v 1 2 0', 'v 0 1 0', 'v -1 2 0']
sage: r = p.plot3d(z=3)
sage: r.obj_repr(r.testing_render_params())[2]
['v 0 0 3', 'v 1 2 3', 'v 0 1 3', 'v -1 2 3']
sage: s = p.plot3d(z=[0,1,2,3])
sage: s.obj_repr(s.testing_render_params())[2]
['v 0 0 0', 'v 1 2 1', 'v 0 1 2', 'v -1 2 3']
TESTS:
Heights passed as a list should have same length as
number of points::
sage: P = polygon([(0,0), (1,2), (0,1), (-1,2)])
sage: p = P[0]
sage: q = p.plot3d(z=[2,-2])
Traceback (most recent call last):
...
ValueError: Incorrect number of heights given
"""
from sage.plot.plot3d.index_face_set import IndexFaceSet
options = self._plot3d_options()
options.update(kwds)
        if isinstance(z, list):
            zdata = z
        else:
            zdata = [z] * len(self.xdata)
if len(zdata)==len(self.xdata):
return IndexFaceSet([[(x, y, z) for x, y, z in zip(self.xdata, self.ydata, zdata)]], **options)
else:
raise ValueError('Incorrect number of heights given')
def _render_on_subplot(self, subplot):
"""
TESTS::
sage: P = polygon([(0,0), (1,2), (0,1), (-1,2)])
"""
import matplotlib.patches as patches
options = self.options()
p = patches.Polygon([(self.xdata[i],self.ydata[i])
for i in range(len(self.xdata))])
p.set_linewidth(float(options['thickness']))
a = float(options['alpha'])
z = int(options.pop('zorder', 1))
p.set_alpha(a)
f = options.pop('fill')
p.set_fill(f)
c = to_mpl_color(options['rgbcolor'])
if f:
ec = options['edgecolor']
if ec is None:
p.set_color(c)
else:
p.set_facecolor(c)
p.set_edgecolor(to_mpl_color(ec))
else:
p.set_color(c)
p.set_label(options['legend_label'])
p.set_zorder(z)
subplot.add_patch(p)
def polygon(points, **options):
"""
Returns either a 2-dimensional or 3-dimensional polygon depending
on value of points.
For information regarding additional arguments, see either
:func:`polygon2d` or :func:`~sage.plot.plot3d.shapes2.polygon3d`.
Options may be found and set using the dictionaries ``polygon2d.options``
and ``polygon3d.options``.
EXAMPLES::
sage: polygon([(0,0), (1,1), (0,1)])
Graphics object consisting of 1 graphics primitive
sage: polygon([(0,0,1), (1,1,1), (2,0,1)])
Graphics3d Object
Extra options will get passed on to show(), as long as they are valid::
sage: polygon([(0,0), (1,1), (0,1)], axes=False)
Graphics object consisting of 1 graphics primitive
sage: polygon([(0,0), (1,1), (0,1)]).show(axes=False) # These are equivalent
"""
try:
return polygon2d(points, **options)
except ValueError:
from sage.plot.plot3d.shapes2 import polygon3d
return polygon3d(points, **options)
@rename_keyword(color='rgbcolor')
@options(alpha=1, rgbcolor=(0,0,1), edgecolor=None, thickness=None,
legend_label=None, legend_color=None,
aspect_ratio=1.0, fill=True)
def polygon2d(points, **options):
r"""
Returns a 2-dimensional polygon defined by ``points``.
Type ``polygon2d.options`` for a dictionary of the default
options for polygons. You can change this to change the
defaults for all future polygons. Use ``polygon2d.reset()``
to reset to the default options.
EXAMPLES:
We create a purple-ish polygon::
sage: polygon2d([[1,2], [5,6], [5,0]], rgbcolor=(1,0,1))
Graphics object consisting of 1 graphics primitive
By default, polygons are filled in, but we can make them
without a fill as well::
sage: polygon2d([[1,2], [5,6], [5,0]], fill=False)
Graphics object consisting of 1 graphics primitive
In either case, the thickness of the border can be controlled::
sage: polygon2d([[1,2], [5,6], [5,0]], fill=False, thickness=4, color='orange')
Graphics object consisting of 1 graphics primitive
For filled polygons, one can use different colors for the border
and the interior as follows::
sage: L = [[0,0]]+[[i/100, 1.1+cos(i/20)] for i in range(100)]+[[1,0]]
sage: polygon2d(L, color="limegreen", edgecolor="black", axes=False)
Graphics object consisting of 1 graphics primitive
Some modern art -- a random polygon, with legend::
sage: v = [(randrange(-5,5), randrange(-5,5)) for _ in range(10)]
sage: polygon2d(v, legend_label='some form')
Graphics object consisting of 1 graphics primitive
A purple hexagon::
sage: L = [[cos(pi*i/3),sin(pi*i/3)] for i in range(6)]
sage: polygon2d(L, rgbcolor=(1,0,1))
Graphics object consisting of 1 graphics primitive
A green deltoid::
sage: L = [[-1+cos(pi*i/100)*(1+cos(pi*i/100)),2*sin(pi*i/100)*(1-cos(pi*i/100))] for i in range(200)]
sage: polygon2d(L, rgbcolor=(1/8,3/4,1/2))
Graphics object consisting of 1 graphics primitive
A blue hypotrochoid::
sage: L = [[6*cos(pi*i/100)+5*cos((6/2)*pi*i/100),6*sin(pi*i/100)-5*sin((6/2)*pi*i/100)] for i in range(200)]
sage: polygon2d(L, rgbcolor=(1/8,1/4,1/2))
Graphics object consisting of 1 graphics primitive
Another one::
sage: n = 4; h = 5; b = 2
sage: L = [[n*cos(pi*i/100)+h*cos((n/b)*pi*i/100),n*sin(pi*i/100)-h*sin((n/b)*pi*i/100)] for i in range(200)]
sage: polygon2d(L, rgbcolor=(1/8,1/4,3/4))
Graphics object consisting of 1 graphics primitive
A purple epicycloid::
sage: m = 9; b = 1
sage: L = [[m*cos(pi*i/100)+b*cos((m/b)*pi*i/100),m*sin(pi*i/100)-b*sin((m/b)*pi*i/100)] for i in range(200)]
sage: polygon2d(L, rgbcolor=(7/8,1/4,3/4))
Graphics object consisting of 1 graphics primitive
A brown astroid::
sage: L = [[cos(pi*i/100)^3,sin(pi*i/100)^3] for i in range(200)]
sage: polygon2d(L, rgbcolor=(3/4,1/4,1/4))
Graphics object consisting of 1 graphics primitive
And, my favorite, a greenish blob::
sage: L = [[cos(pi*i/100)*(1+cos(pi*i/50)), sin(pi*i/100)*(1+sin(pi*i/50))] for i in range(200)]
sage: polygon2d(L, rgbcolor=(1/8, 3/4, 1/2))
Graphics object consisting of 1 graphics primitive
This one is for my wife::
sage: L = [[sin(pi*i/100)+sin(pi*i/50),-(1+cos(pi*i/100)+cos(pi*i/50))] for i in range(-100,100)]
sage: polygon2d(L, rgbcolor=(1,1/4,1/2))
Graphics object consisting of 1 graphics primitive
One can do the same one with a colored legend label::
sage: polygon2d(L, color='red', legend_label='For you!', legend_color='red')
Graphics object consisting of 1 graphics primitive
Polygons have a default aspect ratio of 1.0::
sage: polygon2d([[1,2], [5,6], [5,0]]).aspect_ratio()
1.0
AUTHORS:
- David Joyner (2006-04-14): the long list of examples above.
"""
from sage.plot.plot import xydata_from_point_list
from sage.plot.all import Graphics
if options["thickness"] is None: # If the user did not specify thickness
if options["fill"] and options["edgecolor"] is None:
# If the user chose fill
options["thickness"] = 0
else:
options["thickness"] = 1
xdata, ydata = xydata_from_point_list(points)
g = Graphics()
# Reset aspect_ratio to 'automatic' in case scale is 'semilog[xy]'.
# Otherwise matplotlib complains.
scale = options.get('scale', None)
if isinstance(scale, (list, tuple)):
scale = scale[0]
if scale == 'semilogy' or scale == 'semilogx':
options['aspect_ratio'] = 'automatic'
g._set_extra_kwds(Graphics._extract_kwds_for_show(options))
g.add_primitive(Polygon(xdata, ydata, options))
if options['legend_label']:
g.legend(True)
g._legend_colors = [options['legend_color']]
return g
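# A hedged illustration (not part of the original source) of the thickness
# default chosen above: a filled polygon with no explicit edgecolor gets a
# zero-width border, while an unfilled one gets a 1pt border::
#
#     sage: polygon2d([[0,0], [1,0], [0,1]])[0].options()['thickness']
#     0
#     sage: polygon2d([[0,0], [1,0], [0,1]], fill=False)[0].options()['thickness']
#     1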
| avg_line_length: 33.487239 | max_line_length: 117 | alphanum_fraction: 0.56059 |

| hexsha: 56edd11a768ec784e8c4fd36a6b1d2fdf298487d | size: 33,546 | ext: py | lang: Python |
| repo: pec27/lizard | path: lizard/lizard_c.py | head_hexsha: 5bfd0dae3b02c0c12eb72b71b6ef2b47ae0c83dd | licenses: ["MIT"] |
| stars: 1 (2019-07-09T13:21:47.000Z to 2019-07-09T13:21:47.000Z) | issues: null | forks: 2 (2018-11-06T12:52:45.000Z to 2021-02-04T16:10:11.000Z) |
"""
Module for interfacing to the C-library (liblizard.so)
"""
from __future__ import print_function, division, absolute_import
# Note *dont* import unicode_literals since we require the ndpointer flags to
# be the default string types in both python 2 and 3 respectively
from numpy.ctypeslib import ndpointer
from numpy.linalg import eigvalsh
import ctypes
from numpy import float64, empty, array, int32, zeros, float32, require, int64, uint32, complex128
from numpy import roll, diff, flatnonzero, uint64, cumsum, square
from os import path
from .log import MarkUp as MU, null_log
_liblizard = None
_hash_kernel_set = False
c_contig = 'C_CONTIGUOUS'
def _initlib(log):
""" Init the library (if not already loaded) """
global _liblizard
if _liblizard is not None:
return _liblizard
name = path.join(path.dirname(path.abspath(__file__)), '../build/liblizard.so')
if not path.exists(name):
raise Exception('Library '+str(name)+' does not exist. Maybe you forgot to make it?')
print(MU.OKBLUE+'Loading liblizard - C functions for lizard calculations', name+MU.ENDC, file=log)
_liblizard = ctypes.cdll.LoadLibrary(name)
# Coordinate interpolation
# C declaration is below
# int interpolate_periodic(const int grid_width, const unsigned long long npts, const double *gridvals, const double *coordinates, double *out_interp)
func = _liblizard.interpolate_periodic
func.restype = ctypes.c_int
func.argtypes = [ctypes.c_int, ctypes.c_ulonglong, ndpointer(float64, flags=c_contig),
ndpointer(float64, flags=c_contig), ndpointer(float64, flags=c_contig)]
# Interpolation of grid of vectors
# void interp_vec3(const int grid_n, const unsigned long long npts,
# const double *grid, const double *pts, double *out)
func = _liblizard.interp_vec3
func.restype = None
func.argtypes = [ctypes.c_int, ctypes.c_ulonglong, ndpointer(float64, flags=c_contig),
ndpointer(float64, flags=c_contig), ndpointer(float64, flags=c_contig)]
# Make the Barnes-Hut tree
# int make_BH_refinement(int *tree, const int MAX_NODES,
# const int max_refine, const double rmin, const float bh_crit, const double *pos)
func = _liblizard.make_BH_refinement
func.restype = ctypes.c_int
func.argtypes = [ndpointer(int32), ctypes.c_int,
ctypes.c_int, ctypes.c_double, ctypes.c_float, ndpointer(float64)]
# Make the Barnes-Hut tree about an ellipsoidal refinement region
# int make_BH_ellipsoid_refinement(int *tree, const int MAX_NODES,
# const int max_refine, const double *A, const double k,
# const float bh_crit, const double *pos)
func = _liblizard.make_BH_ellipsoid_refinement
func.restype = ctypes.c_int
func.argtypes = [ndpointer(int32), ctypes.c_int,
ctypes.c_int, ndpointer(float64), ctypes.c_double, ctypes.c_float, ndpointer(float64)]
# Force a given set of refinement levels
# int force_refinement_levels(int *tree, const int total_nodes, const int MAX_NODES, const int *levels)
func = _liblizard.force_refinement_levels
func.restype = ctypes.c_int
func.argtypes = [ndpointer(int32), ctypes.c_int, ctypes.c_int,ndpointer(int32)]
# Find the depths and centres of each leaf
# leaf_depths_centres(const int *tree, int *out_depths, double *out_centres)
func = _liblizard.leaf_depths_centres
func.restype = None
func.argtypes = [ndpointer(int32), ndpointer(int32), ndpointer(float64)]
# count number of leaves in each node
# int count_leaves(const int *tree, const int node, const int n_leaves, int *out)
func = _liblizard.count_leaves
func.restype = ctypes.c_int
func.argtypes = [ndpointer(int32), ctypes.c_int, ctypes.c_int, ndpointer(int32)]
# Find the particles around a given node
# int leaves_in_box_ex_node(const int *tree, const int idx, const int excl_node, const int n_leaves,
# const double x0, const double y0, const double z0,
# const float chw, const float box_w, const int max_ngbs, int *out)
func = _liblizard.leaves_in_box_ex_node
func.restype = ctypes.c_int
func.argtypes = [ndpointer(int32), ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_float, ctypes.c_float,
ctypes.c_int, ndpointer(int32)]
# Build octree
# int octree_from_pos(int *tree,const int max_nodes, const double *pos, const int n_pos)
func = _liblizard.octree_from_pos
func.restype = ctypes.c_int
func.argtypes = [ndpointer(int32), ctypes.c_int, ndpointer(float64), ctypes.c_int]
# Build gravity oct-tree
# int build_gravity_octtree(int *tree, const int max_nodes, double *mass_com, const int n_parts)
# func = _liblizard.build_gravity_octtree
#func.restype = ctypes.c_int
#func.argtypes = [ndpointer(int32), ctypes.c_int, ndpointer(float64), ctypes.c_int]
# Build Mass, CoM for each node in octree
# void octree_mcom(const int *tree, const int n_pos, const double *p_mcom, double *out)
func = _liblizard.octree_mcom
func.restype = None
func.argtypes = [ndpointer(int32), ctypes.c_int, ndpointer(float64), ndpointer(float64)]
# Initialise kernel
# int init_kernel(const int num_pts, const float *pts, const float rad_min, const float rad_max)
func = _liblizard.init_kernel
func.restype = ctypes.c_int
func.argtypes = [ctypes.c_int, ndpointer(ctypes.c_float, flags=c_contig), ctypes.c_float, ctypes.c_float]
# Evaluate a kernel for a series of points
# int kernel_evaluate(const double *pos, const int num_pos, const int *tree, const int n_leaves, const double *tree_mcom, double *out, float bh_crit)
func = _liblizard.kernel_evaluate
func.restype = ctypes.c_int
func.argtypes = [ndpointer(float64), ctypes.c_int, ndpointer(int32), ctypes.c_int, ndpointer(float64), ndpointer(float64), ctypes.c_float]
# Find all the leaves of a given node, in order
# int write_leaves(int *tree,const int n_leaves, const int node, int *out)
func = _liblizard.write_leaves
func.restype = ctypes.c_int
func.argtypes = [ndpointer(int32), ctypes.c_int, ctypes.c_int,ndpointer(int32)]
# Find the cell for each point
# void find_lattice(const double *pos, const int num_pos, const int nx, int *out)
func = _liblizard.find_lattice
func.restype = None
func.argtypes = [ndpointer(float64, flags=c_contig), ctypes.c_int, ctypes.c_int, ndpointer(int32)]
# Build octrees in a single sort+sweep
# int build_octree_iterator(const double *pos, const int num_pos, const int nx, const int bucket_size,
# int *restrict sort_idx, int *restrict out, const int buf_size)
func = _liblizard.build_octree_iterator
func.restype = ctypes.c_int
func.argtypes = [ndpointer(float64), ctypes.c_int, ctypes.c_int, ctypes.c_int,
ndpointer(int32), ndpointer(float64), ctypes.c_int]
# void fill_treewalk_xyzw(const int num_trees, double *restrict twn_ptr, const int32_t *restrict tree_sizes,
# const double *restrict xyzw)
func = _liblizard.fill_treewalk_xyzw
func.restype = ctypes.c_int
func.argtypes = [ctypes.c_int, ndpointer(float64), ndpointer(int32), ndpointer(float64)]
func =_liblizard.get_tree_iterator_size
func.restype = ctypes.c_int
func.argtypes = []
# Walk the tree-walk-nodes for BH gravity
# long BHTreeWalk(const int *restrict root_sizes, const int num_roots, const int max_depth,
# const int *restrict cells,
# const int ngrid, const double *restrict tree_walk_nodes,
# const double theta, const double *restrict xyzw, double *restrict acc)
func = _liblizard.BHTreeWalk
func.restype = ctypes.c_long
func.argtypes = [ndpointer(int32), ctypes.c_int, ctypes.c_int, ndpointer(int32),
ctypes.c_int, ndpointer(float64),
ctypes.c_double, ndpointer(float64), ndpointer(float64)]
# Set-up kernel for neighbour summation
# int setup_hash_kernel(const double rcut, const int num_wts, const double *kernel_wts)
func = _liblizard.setup_hash_kernel
func.restype = ctypes.c_int
func.argtypes = [ctypes.c_double, ctypes.c_int, ndpointer(float64)]
# Find neighbours
# long radial_kernel_evaluate(const double *xyzw, const int num_cells,
# const int* cells, const int ngrid, double *accel)
func = _liblizard.radial_kernel_evaluate
func.restype = ctypes.c_long
func.argtypes = [ndpointer(float64), ctypes.c_int, ndpointer(int32),
ctypes.c_int, ndpointer(float64)]
# long radial_kernel_cellmean(const double *xyzw, const int num_cells, const int* cells, const int ngrid,
# const int stencil, double *accel)
func = _liblizard.radial_kernel_cellmean
func.restype = ctypes.c_long
func.argtypes = [ndpointer(float64), ctypes.c_int, ndpointer(int32),
ctypes.c_int, ctypes.c_int, ndpointer(float64)]
# Find neighbours
# long find_ngbs5x5(const double *mpos, const int num_cells, const int* cells,
# const double rcrit, const int ngrid, double *accel)
func = _liblizard.find_ngbs5x5
func.restype = ctypes.c_long
func.argtypes = [ndpointer(float64), ctypes.c_int, ndpointer(int32),
ctypes.c_double, ctypes.c_int, ndpointer(float64)]
# Cloud-in-cell interpolation of points
# int cloud_in_cell_3d(const int num_pts, const int ngrid, const double *mcom, double *out)
func = _liblizard.cloud_in_cell_3d
func.restype = ctypes.c_int
func.argtypes = [ctypes.c_int,ctypes.c_int,ndpointer(float64, ndim=2),ndpointer(float64)]
# int cloud_in_cell_3d_vel(const int num_pts, const int ngrid, const double * mcom, const double * mom, double complex *out)
func = _liblizard.cloud_in_cell_3d_vel
func.restype = ctypes.c_int
func.argtypes = [ctypes.c_int,ctypes.c_int,ndpointer(float64),ndpointer(float64),ndpointer(complex128)]
# Five-point gradient for derivatives
# void gradient_5pt_3d(const int ngrid, const double *vals, double *restrict out)
func = _liblizard.gradient_5pt_3d
func.restype = None
func.argtypes = [ctypes.c_int,ndpointer(float64),ndpointer(float64)]
# unpack vals in 0,1,... n/2 into 3d grid
# void unpack_kgrid(const int n, const double *packed_vals, double *unpacked_vals)
func = _liblizard.unpack_kgrid
func.restype = None
func.argtypes = [ctypes.c_int,ndpointer(float64),ndpointer(float64)]
# Find masks for cell and its neighbours
# void ngb_cell_masks_3x3x3(const int num_pts, const double rcrit, const int *cells,
# const uint64_t *ngb_masks, const double *pts, uint64_t *out)
func = _liblizard.ngb_cell_masks_3x3x3
func.restype = None
func.argtypes = [ctypes.c_int,ctypes.c_double,ndpointer(int32),
ndpointer(uint64),ndpointer(float64),
ndpointer(uint64)]
# Greedy region-growing
# int region_grow_domains3d(const int ngrid, const long max_sum, int *restrict grid,
# int *restrict bdrybuf, const int bufsize)
func = _liblizard.region_grow_domains3d
func.restype = ctypes.c_int
func.argtypes = [ctypes.c_int,ctypes.c_long, ndpointer(int32),ndpointer(int32), ctypes.c_int]
return _liblizard
def gradient_5pt_3d(grid, log=null_log):
"""
Find the derivative of a periodic 3d lattice using the 5-point stencil
Returns an (n,n,n,3) array of the derivatives (in x,y,z).
"""
g = require(grid, dtype=float64, requirements=['C'])
n = g.shape[0]
assert(g.shape==(n,n,n))
lib = _initlib(log)
out = empty((n*n*n*3),dtype=float64)
lib.gradient_5pt_3d(n,g,out)
out.shape = (n,n,n,3)
return out
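# Hedged usage sketch (not in the original module): differentiate a smooth
# periodic test field. The grid size and field are made up for illustration
# and liblizard.so is assumed to have been built.
def _example_gradient_5pt_3d(n=32):
    from numpy import arange, pi, sin, meshgrid
    x = arange(n) * 2 * pi / n
    xx, yy, zz = meshgrid(x, x, x, indexing='ij')
    grad = gradient_5pt_3d(sin(xx))   # (n, n, n, 3) derivatives along x, y, z
    # d(sin x)/dx = cos(x), so grad[..., 0] should be proportional to cos(xx)
    # (up to the lattice spacing 2*pi/n), while grad[..., 1] and grad[..., 2]
    # should vanish because the field does not vary along y or z.
    return grad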
def get_cic(pos, ngrid, mass=None, mom=None,log=null_log):
"""
pos - centre-of-mass for the particles (centres should be in [0, ngrid) )
ngrid - number of cells (width) of the box
[mass]- masses of the particles (optional, default 1)
[mom] - momenta of the particles - if this is set we return a complex value
for each cell containing the CIC in the real part, and the time
derivative in the imaginary.
"""
npts = pos.shape[0]
assert(pos.shape==(npts, 3))
mcom = empty((npts, 4), dtype=float64)
mcom[:,1:] = pos
if mcom[:,1:].min()<0:
raise Exception("All positions should be >=0 (try making periodic first?)")
if mcom[:,1:].max()>=ngrid:
raise Exception("All positions should be <ngrid (try making periodic first?)")
if mass is None:
mcom[:,0] = 1
else:
mcom[:,0] = mass
lib = _initlib(log)
if mom is None:
# No momenta so just the standard cic
# Initialise to zero since we just accumulate
res = zeros(ngrid*ngrid*ngrid, dtype=float64)
lib.cloud_in_cell_3d(npts, ngrid, mcom, res)
res.shape = (ngrid,ngrid,ngrid)
return res
# Have momentum too, so call with vel
assert(mom.shape==(npts,3))
r_mom = require(mom, dtype=float64, requirements=['C'])
# Initialise to zero since we just accumulate
res = zeros(ngrid*ngrid*ngrid, dtype=complex128)
lib.cloud_in_cell_3d_vel(npts, ngrid, mcom, r_mom,res)
res.shape = (ngrid,ngrid,ngrid)
return res
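# Hedged usage sketch (not in the original module): deposit random particles
# on to a 16^3 grid and check that cloud-in-cell conserves the total mass.
# All sizes and values are illustrative; liblizard.so is assumed to be built.
def _example_get_cic(npts=1000, ngrid=16, seed=123):
    from numpy.random import RandomState
    rs = RandomState(seed)
    pos = rs.rand(npts, 3) * ngrid          # positions must lie in [0, ngrid)
    mass = rs.rand(npts)
    grid = get_cic(pos, ngrid, mass=mass)   # (ngrid, ngrid, ngrid) mass grid
    return grid.sum(), mass.sum()           # should agree to rounding error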
def get_cells(pts, ncell, log=null_log):
"""
For an (N,3) array of points in [0,1), find lattice index for
(ncell**3,) array
"""
lib = _initlib(log)
p = require(pts, dtype=float64, requirements=['C'])
npts = p.shape[0]
assert(p.shape ==(npts,3))
out = empty(npts, dtype=int32)
res = lib.find_lattice(p, npts, ncell, out)
return out
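# Hedged usage sketch (not in the original module): bin unit-box points into
# an 8^3 lattice and count the occupancy of each cell. Values are illustrative.
def _example_get_cells(npts=500, ncell=8, seed=1):
    from numpy import bincount
    from numpy.random import RandomState
    pts = RandomState(seed).rand(npts, 3)          # points in [0, 1)
    cells = get_cells(pts, ncell)                  # flattened cell index per point
    return bincount(cells, minlength=ncell**3)     # occupancy of each cell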
def build_octrees(pos, bucket_size, ngrid, wts, log=null_log, buf_size=None):
""" Build the grid of octrees in a single sweep with fancy position decorating """
lib = _initlib(log)
sizeof_twn = lib.get_tree_iterator_size()
guess_nodes = buf_size is None
npos = len(pos)
pts = require(pos, dtype=float64, requirements=['C'])
if guess_nodes:
max_nodes = int(npos*2.1)+1
buf_size = max_nodes * (sizeof_twn//8)
print('Guessed number of nodes {:,}'.format(max_nodes),file=log)
buf = empty(buf_size, dtype=float64)
sort_idx = empty(npos, dtype=int32)
num_roots = lib.build_octree_iterator(pts, npos, ngrid, bucket_size,
sort_idx, buf, buf_size)
if num_roots==-1:
raise Exception('Out of memory')
if num_roots==-2:
raise Exception('>bucket_size points have indistinguishable double representations')
class tree:
root_counts = buf.view(dtype=int32)[:num_roots]
num_nodes = root_counts.sum()
root_indices = empty(num_roots, dtype=int32)
root_cells = buf.view(dtype=int32)[num_roots:num_roots*2]
it = buf[num_roots:num_roots + (sizeof_twn*num_nodes)//8]
sof32 = sizeof_twn//4 # size of TWN in 32 bit ints
# use intimate knowledge of structure layout
n = it.view(dtype=int32)[sof32-2::sof32]
depth_next = it.view(dtype=int32)[sof32-3::sof32]
breadth_next = it.view(dtype=int32)[sof32-4::sof32]
depths = it.view(dtype=int32)[sof32-1::sof32]
tree.root_indices[1:] = cumsum(tree.root_counts[:-1])
tree.root_indices[0] = 0
tree.fill = tree.n[tree.root_indices]
print('{:,} filled cells(trees),'.format(num_roots),
'{:,}-{:,} nodes per tree,'.format(tree.root_counts.min(), tree.root_counts.max()),
'av. %.2f'%tree.root_counts.mean(dtype=float64), file=log)
print('{:,}-{:,} points per tree'.format(tree.fill.min(),tree.fill.max()),
'(av. %.2f),'%tree.fill.mean(dtype=float64),
'av. point in a tree of {:,} points'.format(square(tree.fill.astype(int64)).sum()//npos),
file=log)
leaf_counts = tree.n[flatnonzero(tree.n<=bucket_size)]
av_leaf_size_per_pt = square(leaf_counts).sum()/float(npos)
print('%d-%d points per leaf (leaf size %d), average %.2f, average point is in a leaf of %.2f pts'%(leaf_counts.min(), leaf_counts.max(), bucket_size, leaf_counts.mean(dtype=float64), av_leaf_size_per_pt), file=log)
print('Actual number of nodes used {:,}, total memory {:,} bytes'.format(tree.num_nodes, tree.num_nodes*sizeof_twn),file=log)
print('Indexing {:,} points for octree-ordered xyzw'.format(npos),file=log)
xyzw = empty((npos+tree.num_nodes, 4), dtype=float64)
xyzw[:npos,:3] = pts[sort_idx]
if sum(array(wts).shape)<=1:
xyzw[:npos,3] = wts
else:
xyzw[:npos,3] = wts[sort_idx]
print('Building xyzw for {:,} nodes'.format(tree.num_nodes), file=log)
tree.max_depth = lib.fill_treewalk_xyzw(num_roots, tree.it, tree.root_counts, xyzw)
tree.xyzw = xyzw
print('Max leaf depth %d'%tree.max_depth, file=log)
return tree, sort_idx
def bh_tree_walk(tree, ngrid, theta, xyzw,log=null_log):
""" Kernel summation over BH tree """
lib = _initlib(log)
if not _hash_kernel_set:
        raise Exception('Please set up the kernel before trying a neighbour summation')
num_trees = len(tree.root_cells)
rt_sizes = require(tree.root_counts, dtype=int32, requirements=['C'])
cells = require(tree.root_cells, dtype=int32, requirements=['C'])
twn = require(tree.it, dtype=float64, requirements=['C'])
xyzw = require(tree.xyzw, dtype=float64, requirements=['C'])
npts = len(xyzw) - tree.num_nodes
out = zeros(npts*3, dtype=float64)
num_kernels = lib.BHTreeWalk(rt_sizes, num_trees, tree.max_depth, cells, ngrid, twn, theta, xyzw, out)
if num_kernels<0:
raise Exception('Hash table too big for kernel summation')
acc = out
acc.shape = (npts, 3)
return num_kernels, acc
def lattice_setup_kernel(rad_max, weights, log=null_log):
"""
setup the kernel interpolation weights with
0 -> 0, dx -> wts[0], 2dx -> wts[1],..., rad_max -> wts[-1]
where dx := rad_max / len(wts)
"""
global _hash_kernel_set
lib = _initlib(log)
# C-ordering of double array
wts = require(weights, dtype=float64, requirements=['C'])
res = lib.setup_hash_kernel(rad_max, len(wts), wts)
if res<0:
raise Exception('You can only set up to MAX_KERNEL_WTS-1=%d weights in the interpolation table, use less or recompile'%(-(res+1)))
_hash_kernel_set = True
return
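# Hedged end-to-end sketch (not in the original module) of the Barnes-Hut
# neighbour summation: set up an interpolated radial kernel, build the grid of
# octrees and walk them. The kernel table (a crudely softened 1/r^2) and all
# sizes are illustrative assumptions, and positions are assumed to lie in the
# unit box [0, 1) like the other lattice helpers in this module.
def _example_bh_tree_walk(npts=2000, ngrid=8, seed=7):
    from numpy import arange
    from numpy.random import RandomState
    rs = RandomState(seed)
    pos = rs.rand(npts, 3)
    wts = rs.rand(npts)                                  # per-particle weights
    rad_max = 1.0 / ngrid
    r = arange(1, 101) * (rad_max / 100.0)               # dx, 2dx, ..., rad_max
    kernel_wts = 1.0 / (r * r + (0.1 * rad_max) ** 2)    # illustrative kernel table
    lattice_setup_kernel(rad_max, kernel_wts)
    tree, sort_idx = build_octrees(pos, bucket_size=16, ngrid=ngrid, wts=wts)
    num_kernels, acc = bh_tree_walk(tree, ngrid, theta=0.5, xyzw=tree.xyzw)
    # acc follows the octree (sorted) ordering; sort_idx relates it back to pos
    return num_kernels, acc, sort_idx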
def lattice_kernel(pts, lattice_data, ngrid, log=null_log, masses=None, stencil=None):
if not _hash_kernel_set:
        raise Exception('Please set up the kernel before trying a neighbour summation')
lib = _initlib(log)
ncells = lattice_data.shape[0]
npts = len(pts)
xyzw = empty((npts, 4), dtype=float64)
xyzw[:,:3] = pts
if masses is None:
xyzw[:,3] = 1
else:
xyzw[:,3] = masses
# lattice pos, start, end for each cell
cells = require(lattice_data, dtype=int32, requirements=['C'])
accel = zeros(xyzw[:,1:].shape, dtype=float64)
if stencil is None:
res = lib.radial_kernel_evaluate(xyzw, ncells, cells, ngrid, accel)
else:
res = lib.radial_kernel_cellmean(xyzw, ncells, cells, ngrid, stencil, accel)
if res==-2:
raise Exception('Monopole stencil must be 5 or 7')
if res==-1:
raise Exception('Hash table not big enough to store data')
pairs = res
return pairs, accel
def lattice_kernel5x5(pts, lattice_data, rcrit, ngrid, log=null_log):
    lib = _initlib(log)
ncells = lattice_data.shape[0]
npts = len(pts)
mpos = empty((npts, 4), dtype=float64)
mpos[:,1:] = pts
mpos[:,0] = 1
# lattice pos, start, end for each cell
cells = require(lattice_data, dtype=int32, requirements=['C'])
accel = zeros(mpos[:,1:].shape, dtype=float64)
res = lib.find_ngbs5x5(mpos, ncells, cells, rcrit, ngrid, accel)
return res
def build_hash3d(pts, nlevel, hash_prime, bucket_bits, log=null_log):
    lib = _initlib(log)
    npts = pts.shape[0]
    if nlevel*3+bucket_bits>27:
        raise Exception('Hash grid too large: 2^%d entries requested (need nlevel*3 + bucket_bits <= 27)'%(nlevel*3+bucket_bits))
ngrid = 2**nlevel
bucket_depth = 2**bucket_bits
tot_buckets = 8**nlevel
out = zeros((ngrid,ngrid,ngrid,bucket_depth), dtype=int64)
res = lib.build_hash3d(pts, npts, nlevel, hash_prime, bucket_bits, out)
print('Maximum shift {:,} or {:,} buckets'.format(res, res//bucket_depth), file=log)
return out
def kernel_hash3d(pts, hgrid, hash_prime, rcrit, log=null_log):
    lib = _initlib(log)
ngrid = hgrid.shape[0]
bucket_depth = hgrid.shape[3]
npts = len(pts)
out = zeros(pts.shape, dtype=float64)
evals = lib.ngb_sum_hash3d(pts, npts, ngrid, hash_prime, bucket_depth, hgrid, rcrit, out)
return out, evals
def leaves_for_node(tree, n_leaves, node, log=null_log):
""" return all leaves of the given node """
lib = _initlib(log)
out = empty(n_leaves, dtype=int32)
leaves_found=lib.write_leaves(tree, n_leaves, node, out)
print('leaves found', leaves_found, file=log)
out = out[:leaves_found]
return out
def kernel_evaluate(pos, tree, tree_mcom, rmin, rmax, kvals):
"""
kvals - logarithmically spaced values between rmin and rmax
"""
lib = _initlib(null_log)
n_kernel = len(kvals)
res = lib.init_kernel(n_kernel, array(kvals, dtype=float32), rmin, rmax)
if res!=0:
raise Exception('Failed to initialise. Too many points for kernel?')
n_leaves = tree_mcom.shape[0] - tree.shape[0]
out = empty(pos.shape, dtype=float64)
num_evals = lib.kernel_evaluate(pos, pos.shape[0], tree, n_leaves, tree_mcom, out, 0.1)
if num_evals<0:
        raise Exception('Neighbour search for kernel ran out of stack space')
return num_evals, out
def octree_mcom(tree, mass, pos):
"""
Find mass and centre-of-mass for each node in the tree
"""
lib = _initlib(null_log)
num_pos, dim = pos.shape
assert(dim==3)
n_nodes = tree.shape[0]
part_mcom = empty((num_pos+n_nodes,4), dtype=float64) # expected dtype
part_mcom[:num_pos,0] = mass # particle mass
part_mcom[:num_pos,1:] = pos
lib.octree_mcom(tree, num_pos, part_mcom, part_mcom[num_pos:])
inv_mass = 1.0/part_mcom[num_pos:,0] # TODO could be zero if we have massless particles
for i in range(3):
part_mcom[num_pos:,i+1] *= inv_mass
return part_mcom
def neighbours_for_node(tree, num_leaves, node, xyz0, box_w, max_leaves=1000000):
assert(len(xyz0)==3)
assert(node!=0)
lib = _initlib(null_log)
out = empty(max_leaves, dtype=int32)
found = lib.leaves_in_box_ex_node(tree, 0, node, num_leaves, xyz0[0], xyz0[1], xyz0[2], 0.5, box_w, max_leaves, out)
if found==-1:
raise Exception('Out of space, try to raise max_leaves=%d?'%max_leaves)
return out[:found]
def count_leaves(tree, num_leaves):
"""
For the given octree find the number for each leaf
"""
num_nodes, octants = tree.shape
assert(octants==8) # should be 8 octants for each node!
assert(tree.dtype==int32)
    _initlib(null_log)
out = empty(num_nodes, dtype=int32)
check = _liblizard.count_leaves(tree, 0, num_leaves, out)
assert(check==num_leaves) # Total number of leaves should equal number in the tree!
return out
def make_BH_tree(pos, rmin, max_refinement=None,max_nodes=None, bh_crit=0.1, allowed_levels=None, log=null_log):
"""
Make the Barnes-Hut refinement tree
pos - position of the central refinement region
rmin - distance out to which we want maximum refinement
bh_crit - Barnes-Hut opening angle criteria for refinement
[allowed_levels] - Allowed levels for the tree
"""
if allowed_levels is not None:
if max_refinement is not None:
print('WARNING: max_refinement ignored as allowed_levels has been set',file=log)
max_refinement = max(allowed_levels)
else:
if max_refinement is None:
raise Exception('One of max_refinement or allowed_levels must be set!')
if max_refinement==0:
# Nothing to do, we will never refine!
return array([], dtype=int32)
_initlib(log)
if max_nodes is None:
# Make a guess for how many nodes we need
max_nodes = 1000000
tree = empty(max_nodes*8, dtype=int32)
ppos = array(pos, dtype=float64)
nodes_used = _liblizard.make_BH_refinement(tree, max_nodes,
max_refinement, rmin, bh_crit, ppos)
if nodes_used==-1:
raise Exception('max_nodes=%d was not enough. Try to increase?'%max_nodes)
if allowed_levels is None:
tree.shape = (max_nodes,8)
return tree[:nodes_used]
# Otherwise make us have good levels
levels = array(sorted(allowed_levels), dtype=int32)
nodes_used = _liblizard.force_refinement_levels(tree, nodes_used, max_nodes, levels)
if nodes_used==-1:
raise Exception('max_nodes=%d was not enough. Try to increase?'%max_nodes)
tree.shape = (max_nodes,8)
return tree[:nodes_used]
def make_BH_ellipsoid_tree(pos, A, max_refinement=None,max_nodes=None, bh_crit=0.1, allowed_levels=None,log=null_log):
"""
Like make_BH_tree, but for an ellipsoidal region.
pos - position of the central refinement region
A - Matrix for ellipse, such that the surface within which we want
maximum refinement is x.A.x = 1
bh_crit - Barnes-Hut opening angle criteria for refinement
[allowed_levels] - Allowed levels for the tree
"""
if allowed_levels is not None:
if max_refinement is not None:
print('WARNING: max_refinement ignored as allowed_levels has been set',file=log)
max_refinement = max(allowed_levels)
else:
if max_refinement is None:
raise Exception('One of max_refinement or allowed_levels must be set!')
if max_refinement==0:
# Nothing to do, we will never refine!
return array([], dtype=int32)
lib = _initlib(log)
if max_nodes is None:
# Make a guess for how many nodes we need
max_nodes = 1000000
# Matrix for the ellipsoid
Aconv = empty((3,3), dtype=float64)
k = 1.0/eigvalsh(A).max()
Aconv[:] = A * k
tree = empty(max_nodes*8, dtype=int32)
ppos = array(pos, dtype=float64)
nodes_used = lib.make_BH_ellipsoid_refinement(tree, max_nodes,
max_refinement, Aconv,k, bh_crit, ppos)
if nodes_used==-1:
raise Exception('max_nodes=%d was not enough. Try to increase?'%max_nodes)
if allowed_levels is None:
tree.shape = (max_nodes,8)
return tree[:nodes_used]
# Otherwise make us have good levels
levels = array(sorted(allowed_levels), dtype=int32)
nodes_used = _liblizard.force_refinement_levels(tree, nodes_used, max_nodes, levels)
if nodes_used==-1:
raise Exception('max_nodes={:,} was not enough. Try to increase?'.format(max_nodes))
tree.shape = (max_nodes,8)
return tree[:nodes_used]
def build_octree(pos, max_nodes=None):
npos, dim = pos.shape
assert(dim==3) # should be 3 dimensional!
if max_nodes is None:
max_nodes = npos//2 + 1
lib = _initlib(null_log)
tree = empty((max_nodes,8), dtype=int32)
nodes_used = lib.octree_from_pos(tree, max_nodes, pos, npos)
if nodes_used==-1:
raise Exception('max_nodes=%d was not enough. Try to increase?'%max_nodes)
return tree[:nodes_used]
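# Hedged usage sketch (not in the original module): build a plain octree from
# random points and compute the mass and centre-of-mass of every node with
# octree_mcom. Positions are assumed to lie in the unit box (matching the leaf
# centres reported by leaf_depths_centres); all values are illustrative.
def _example_build_octree(npts=200, seed=3):
    from numpy import ones
    from numpy.random import RandomState
    pos = require(RandomState(seed).rand(npts, 3), dtype=float64, requirements=['C'])
    tree = build_octree(pos)                    # (nodes_used, 8) int32 octree
    mcom = octree_mcom(tree, ones(npts), pos)   # (mass, x, y, z) per particle and node
    return tree.shape[0], mcom[npts:]           # node rows follow the particle rows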
def build_gravity_octtree(pos,masses, max_nodes=None):
"""
Make the gravity octtree for the particles
"""
nparts = len(masses)
if max_nodes is None:
max_nodes = nparts // 2
    _initlib(null_log)
# Store mass,com data of the particles
mcom = empty((nparts+max_nodes, 4), dtype=float64)
mcom[:nparts,0] = masses
mcom[:nparts,1:] = pos
tree = empty((max_nodes,8), dtype=int32)
nodes_used = _liblizard.build_gravity_octtree(tree, max_nodes, mcom, nparts)
if nodes_used==-1:
raise Exception('max_nodes=%d was not enough. Try to increase?'%max_nodes)
return tree[:nodes_used], mcom[:(nparts+nodes_used)]
def leaf_depths_centres(tree):
"""
Get every leaf depth and centre for the given tree.
Centres are in 0-1
"""
if len(tree)==0:
# Whole tree is one leaf!
return array([0]), array([[0.5,0.5,0.5]], dtype=float64)
assert(tree.dtype==int32)
num_leaves = tree.max()+1
out_depths = empty(num_leaves, dtype=int32)
out_centres = empty((num_leaves,3), dtype=float64)
lib = _initlib(null_log)
lib.leaf_depths_centres(tree, out_depths, out_centres)
return out_depths, out_centres
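# Hedged usage sketch (not in the original module): build a Barnes-Hut
# refinement tree around a point and inspect the depth and centre of every
# leaf. The refinement point, radius and level count are illustrative only.
def _example_bh_refinement():
    tree = make_BH_tree(pos=(0.5, 0.5, 0.5), rmin=0.05, max_refinement=4)
    depths, centres = leaf_depths_centres(tree)   # leaf centres are in [0, 1)
    return depths.max(), centres[depths == depths.max()]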
def map_coords(vals,coords,log=null_log,order=1):
"""
do the interpolation from the 3d grid (periodically wrapped)
"""
if order!=1:
raise Exception('Can only do linear interpolation at this point!')
_initlib(log)
assert(len(vals.shape)==3)
grid_width = vals.shape[0]
assert(vals.shape[1]==grid_width)
assert(vals.shape[2]==grid_width)
num_pts = coords.shape[1]
assert(coords.shape==(3,num_pts))
# C-contiguous double arrays (makes a copy if necc)
pcoords = require(coords, dtype=float64, requirements=['C'])
gvals = require(vals, dtype=float64, requirements=['C'])
out = empty(num_pts, dtype=float64)
res = _liblizard.interpolate_periodic(grid_width, num_pts, gvals, pcoords, out)
if res != 0:
print('An error occurred', res,file=log)
return out
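# Hedged usage sketch (not in the original module): trilinearly interpolate a
# periodic scalar grid at a handful of points. Note that the coordinates are
# passed as a (3, M) array, not (M, 3); sizes and values are illustrative.
def _example_map_coords(n=16, npts=10, seed=5):
    from numpy.random import RandomState
    rs = RandomState(seed)
    vals = rs.rand(n, n, n)              # periodic scalar field on the lattice
    coords = rs.rand(3, npts) * n        # query points in [0, n)
    return map_coords(vals, coords)      # (npts,) interpolated values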
def interp_vec3(grid,coords,log=null_log):
"""
Interpolation from the 3d grid of 3d vectors (periodically wrapped)
Like map_coords but speed-up for vectors
grid - (N,N,N,3) array
coords - (M,3) array in right-open interval [0,N)
[log]
returns (M,3) array of 1d interpolated values
"""
lib = _initlib(log)
# C-contiguous double arrays (makes a copy if necc)
gvals = require(grid, dtype=float64, requirements=['C'])
grid_width = gvals.shape[0]
assert(gvals.shape==(grid_width,grid_width,grid_width,3))
pcoords = require(coords, dtype=float64, requirements=['C'])
num_pts = pcoords.shape[0]
assert(pcoords.shape==(num_pts,3))
if pcoords.max()>=grid_width or pcoords.min()<0:
print('Max pos', pcoords.max(axis=0), 'min', pcoords.min(axis=0), file=log)
raise Exception('All coordinates must be in the right-open interval [0,grid_width)')
out = empty(num_pts*3, dtype=float64) # make sure this is big enough...
res = lib.interp_vec3(grid_width, num_pts, gvals, pcoords, out)
out.shape = (num_pts, 3)
return out
def unpack_kgrid(n, vals, log=null_log):
"""
Unpack the 'pyramid' of values u>=v>=w into the (n,n,n) k-grid.
n - the size of the grid
    vals - m(m+1)(m+2)/6 values in the pyramid, where m = 1 + n//2
returns out - (n,n,n) float64 array.
"""
lib = _initlib(log)
v = require(vals, dtype=float64, requirements=['C'])
m = 1+n//2
assert(len(vals)==(m*(m+1)*(m+2))//6)
out = empty(n*n*n, dtype=float64)
lib.unpack_kgrid(n, v, out)
out.shape = (n,n,n)
return out
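# Hedged usage sketch (not in the original module): for n=8 the packed
# 'pyramid' holds m(m+1)(m+2)/6 values with m = 1 + n//2 = 5, i.e. 35 numbers,
# which unpack to a full (8, 8, 8) grid. The packed values are illustrative.
def _example_unpack_kgrid(n=8):
    from numpy import arange
    m = 1 + n // 2
    packed = arange((m * (m + 1) * (m + 2)) // 6, dtype=float64)
    return unpack_kgrid(n, packed)       # (n, n, n) array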
def adjacent_cell_masks(pts, cell_masks, rcrit, log=null_log):
"""
    For each point, create a bitmask that is the logical OR of the masks of
    all the domains that the point lies within rcrit of.
"""
cell_masks = array(cell_masks)
ngrid = cell_masks.shape[0]
r = float(rcrit) * ngrid
if r>1.0:
raise Exception('rcrit>1.0, need to look outside 3x3x3 block of neighbours')
assert(cell_masks.shape==(ngrid,ngrid,ngrid))
num_pts = len(pts)
pos = array(pts)*ngrid
pos = require(pos, dtype=float64, requirements=['C'])
assert(pos.shape==(num_pts, 3))
# initialise space for masks
ngb_masks = empty((ngrid, ngrid, ngrid, 27), dtype=uint64)
# Fill in the masks of all the neighbouring cells
ngb = 0
inc = [1,0,-1] # roll increments for left, middle, right
for i in inc:
ri = roll(cell_masks, i, axis=0)
for j in inc:
rj = roll(ri, j, axis=1)
for k in inc:
ngb_masks[:,:,:,ngb] = roll(rj, k, axis=2)
ngb += 1
cells = get_cells(pts, ngrid, log)
res = empty(num_pts, dtype=uint64)
lib = _initlib(log)
lib.ngb_cell_masks_3x3x3(num_pts, r*1.001, cells, ngb_masks, pos, res)
return res
def domain_regiongrow(weights, min_sum, log=null_log, buf=None):
"""
weights - (n,n,n) grid of >=0 integer weights
min_sum - minimum sum in each domain
returns - (n,n,n) grid of nearly contiguous domains
"""
grid = -array(weights, dtype=int32, order='C')
n = grid.shape[0]
assert(grid.shape==(n,n,n))
if buf is None:
bdry_buffer = empty(min(1000000, 4*grid.size), dtype=int32) # 4 bytes
else:
bdry_buffer = require(buf, dtype=int32, order='C')
lib = _initlib(log)
res = lib.region_grow_domains3d(n, -min_sum, grid, bdry_buffer, 4*bdry_buffer.size)
if res==-1:
raise Exception('Ran out of memory on floodfill')
return grid
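# Hedged usage sketch (not in the original module): grow domains of roughly
# equal total weight over a 16^3 grid, and separately show how
# adjacent_cell_masks combines per-cell domain bitmasks for a set of points.
# The grid size, rcrit and the hand-built two-domain masks are illustrative
# assumptions; at most 64 domains fit in the uint64 bitmasks.
def _example_domains(ngrid=16, seed=11):
    from numpy import ones
    from numpy.random import RandomState
    weights = ones((ngrid, ngrid, ngrid), dtype=int32)
    # ask for domains holding at least an eighth of the total weight each
    domains = domain_regiongrow(weights, min_sum=weights.sum() // 8)
    # hand-built masks: bit 0 for the lower half of the box, bit 1 for the upper
    cell_masks = zeros((ngrid, ngrid, ngrid), dtype=uint64)
    cell_masks[:ngrid // 2] = 1
    cell_masks[ngrid // 2:] = 2
    pts = RandomState(seed).rand(1000, 3)
    masks = adjacent_cell_masks(pts, cell_masks, rcrit=1.0 / 32)
    # points within rcrit of either boundary between the halves (including the
    # periodic wrap) carry both bits, i.e. mask 3
    return domains, masks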
| avg_line_length: 36.50272 | max_line_length: 219 | alphanum_fraction: 0.663626 |

| hexsha: f776fc7eaf9422868ac563d329e1fdaed77a1395 | size: 2,427 | ext: py | lang: Python |
| repo: htcr/deeplab-pytorch | path: api.py | head_hexsha: 8cea35415112fefb6a886d0d98ab64350ed09601 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
import numpy as np
import cv2
import os
import os.path as osp
import torch
import yaml
from addict import Dict
import matplotlib.pyplot as plt
from .libs.models import *
from .libs.utils import DenseCRF
from demo import preprocessing, inference
class DeepLabV2Masker(object):
def __init__(self, crf=True):
cur_dir = osp.dirname(osp.realpath(__file__))
config_path = osp.join(
cur_dir,
'configs/human.yaml'
)
model_path = osp.join(
cur_dir,
'data/models/human/deeplabv2_resnet101_msc/all_human/checkpoint_final.pth'
)
device = torch.device('cuda')
        with open(config_path, 'r') as f:
            CONFIG = Dict(yaml.safe_load(f))
torch.set_grad_enabled(False)
# CRF post-processor
self.crf = crf
if crf:
self.postprocessor = DenseCRF(
iter_max=CONFIG.CRF.ITER_MAX,
pos_xy_std=CONFIG.CRF.POS_XY_STD,
pos_w=CONFIG.CRF.POS_W,
bi_xy_std=CONFIG.CRF.BI_XY_STD,
bi_rgb_std=CONFIG.CRF.BI_RGB_STD,
bi_w=CONFIG.CRF.BI_W,
)
else:
self.postprocessor = None
self.model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
self.model.load_state_dict(state_dict)
self.model.eval()
self.model.to(device)
print("Model:", CONFIG.MODEL.NAME)
self.CONFIG = CONFIG
self.device = device
def get_mask(self, image, bk):
ori_h, ori_w = image.shape[:2]
image, raw_image = preprocessing(image, self.device, self.CONFIG)
bk = cv2.resize(bk, raw_image.shape[:2][::-1])
diff = np.maximum(raw_image, bk).astype(np.float32) / (np.minimum(raw_image, bk).astype(np.float32) + 0.1)
diff = (diff - np.min(diff)) / (np.max(diff) - np.min(diff)) * 255
diff = diff.astype(np.uint8)
raw_image = diff
#plt.imshow(raw_image)
#plt.show()
labelmap = inference(self.model, image, raw_image, self.postprocessor)
mask = labelmap == 1
mask = mask.astype(np.uint8) * 255
mask = cv2.resize(mask, (ori_w, ori_h))
mask = np.where(mask > 128, 255, 0).astype(np.uint8)
return mask
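# Hedged usage sketch (not part of the original file): the masker takes an
# image of the scene plus a background-only frame of the same scene and
# returns a 0/255 uint8 mask of the pixels assigned label 1. The file paths
# below are hypothetical, and a CUDA device plus the bundled human checkpoint
# are assumed to be available.
if __name__ == '__main__':
    masker = DeepLabV2Masker(crf=True)
    image = cv2.imread('frame.jpg')        # hypothetical input frame
    background = cv2.imread('empty.jpg')   # hypothetical background-only frame
    mask = masker.get_mask(image, background)
    cv2.imwrite('mask.png', mask)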
| avg_line_length: 29.962963 | max_line_length: 114 | alphanum_fraction: 0.589205 |

| hexsha: 141075135cad1b2e253211658c0a0b228ac00137 | size: 1,028 | ext: py | lang: Python |
| repo: David-Le-Nir/sphinxcontrib-needs | path: tests/test_extra_links.py | head_hexsha: fe809445505fa1e9bf5963eab1d6283dad405e92 | licenses: ["MIT"] |
| stars: 90 (2016-11-30T21:23:10.000Z to 2022-01-11T16:33:56.000Z) | issues: 359 (2016-12-02T14:53:44.000Z to 2022-03-31T11:59:03.000Z) | forks: 25 (2018-06-20T18:56:13.000Z to 2022-03-25T06:11:40.000Z) |
from pathlib import Path
from sphinx_testing import with_app
@with_app(buildername="html", srcdir="doc_test/doc_extra_links")
def test_extra_links_html(app, status, warning):
app.build()
html = Path(app.outdir, "index.html").read_text()
assert "TEST_001" in html
assert "tested by" in html
assert "tests" in html
assert "blocked by" in html
assert "blocks" in html
# Check for correct dead_links handling
assert '<span class="needs_dead_link">DEAD_LINK_ALLOWED</span>' in html
assert '<span class="needs_dead_link forbidden">DEAD_LINK_NOT_ALLOWED</span>' in html
assert '<span class="needs_dead_link forbidden">REQ_005.invalid</span>' in html
@with_app(buildername="latex", srcdir="doc_test/doc_extra_links")
def test_extra_links_latex(app, status, warning):
app.build()
tex = Path(app.outdir, "needstestdocs.tex").read_text()
assert "TEST_001" in tex
assert "tested by" in tex
assert "tests" in tex
assert "blocked by" in tex
assert "blocks" in tex
| avg_line_length: 33.16129 | max_line_length: 89 | alphanum_fraction: 0.720817 |

| hexsha: fd3d8c4675e54e4597418b10fd10de5d8fd3dd0e | size: 1,463 | ext: py | lang: Python |
| repo: 5u3it/observability-boilerplate1 | path: django/server/views.py | head_hexsha: b6f41c7395491d6f9b3bd5b301e70431539e98a4 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
# import opentracing
import requests
from py_zipkin.zipkin import zipkin_span
# Create your views here.
def http_transport(encoded_span):
# The collector expects a thrift-encoded list of spans. Instead of
# decoding and re-encoding the already thrift-encoded message, we can just
# add header bytes that specify that what follows is a list of length 1.
body = encoded_span
# body = '\x0c\x00\x00\x00\x01' + encoded_span
print(body)
requests.post(
'http://zipkin:9411/api/v1/spans',
data=body,
headers={'Content-Type': 'application/x-thrift'},
)
def server_index(request):
return HttpResponse("Hello, world. You're at the server index.")
def server_simple(request):
try:
return HttpResponse("This is a simple traced request.")
except Exception as e:
print(e)
def server_log(request):
# span = settings.OPENTRACING_TRACER.get_span(request)
# if span is not None:
# span.log_event("Hello, world!")
return HttpResponse("Something was logged")
def server_child_span(request):
# span = settings.OPENTRACING_TRACER.get_span(request)
# if span is not None:
# child_span = settings.OPENTRACING_TRACER._tracer.start_span("child span", child_of=span.context)
# child_span.finish()
return HttpResponse("A child span was created")
| avg_line_length: 32.511111 | max_line_length: 106 | alphanum_fraction: 0.708817 |

| hexsha: f8d949201f96cb37f7dcd21e366522d4667e3fb8 | size: 6,227 | ext: py | lang: Python |
| repo: erick-alv/Hierarchical-Actor-Critc-HAC- | path: ant_environments/ant_reacher_3_levels/agent.py | head_hexsha: a01ea24665d1442872c7f829701ed390d2cc560e | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
import numpy as np
from layer import Layer
from environment import Environment
import pickle as cpickle
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
# Below class instantiates an agent
class Agent():
def __init__(self,FLAGS, env, agent_params):
self.FLAGS = FLAGS
self.sess = tf.Session()
# Set subgoal testing ratio each layer will use
self.subgoal_test_perc = agent_params["subgoal_test_perc"]
# Create agent with number of levels specified by user
self.layers = [Layer(i,FLAGS,env,self.sess,agent_params) for i in range(FLAGS.agents)]
# Below attributes will be used help save network parameters
self.saver = None
self.model_dir = None
self.model_loc = None
# Initialize actor/critic networks. Load saved parameters if not retraining
self.initialize_networks()
# goal_array will store goal for each layer of agent.
self.goal_array = [None for i in range(FLAGS.agents)]
self.current_state = None
# Track number of low-level actions executed
self.steps_taken = 0
# Below hyperparameter specifies number of Q-value updates made after each episode
self.num_updates = 40
# Below parameters will be used to store performance results
self.performance_log = []
self.other_params = agent_params
# Determine whether or not each layer's goal was achieved. Also, if applicable, return the highest level whose goal was achieved.
def check_goals(self,env):
# goal_status is vector showing status of whether a layer's goal has been achieved
goal_status = [False for i in range(self.FLAGS.agents)]
max_lay_achieved = None
# Project current state onto the subgoal and end goal spaces
proj_subgoal = env.project_state_to_subgoal(env.sim, self.current_state)
proj_end_goal = env.project_state_to_end_goal(env.sim, self.current_state)
for i in range(self.FLAGS.agents):
goal_achieved = True
# If at highest layer, compare to end goal thresholds
if i == self.FLAGS.agents - 1:
# Check dimensions are appropriate
assert len(proj_end_goal) == len(self.goal_array[i]) == len(env.end_goal_thresholds), "Projected end goal, actual end goal, and end goal thresholds should have same dimensions"
# Check whether layer i's goal was achieved by checking whether projected state is within the goal achievement threshold
for j in range(len(proj_end_goal)):
if np.absolute(self.goal_array[i][j] - proj_end_goal[j]) > env.end_goal_thresholds[j]:
goal_achieved = False
break
# If not highest layer, compare to subgoal thresholds
else:
# Check that dimensions are appropriate
assert len(proj_subgoal) == len(self.goal_array[i]) == len(env.subgoal_thresholds), "Projected subgoal, actual subgoal, and subgoal thresholds should have same dimensions"
# Check whether layer i's goal was achieved by checking whether projected state is within the goal achievement threshold
for j in range(len(proj_subgoal)):
if np.absolute(self.goal_array[i][j] - proj_subgoal[j]) > env.subgoal_thresholds[j]:
goal_achieved = False
break
# If projected state within threshold of goal, mark as achieved
if goal_achieved:
goal_status[i] = True
max_lay_achieved = i
else:
goal_status[i] = False
return goal_status, max_lay_achieved
def initialize_networks(self):
model_vars = tf.trainable_variables()
self.saver = tf.train.Saver(model_vars)
# Set up directory for saving models
self.model_dir = os.getcwd() + '/models'
self.model_loc = self.model_dir + '/HAC.ckpt'
if not os.path.exists(self.model_dir):
os.makedirs(self.model_dir)
# Initialize actor/critic networks
self.sess.run(tf.global_variables_initializer())
        # If we are not retraining from scratch, restore the saved weights
        if not self.FLAGS.retrain:
self.saver.restore(self.sess, tf.train.latest_checkpoint(self.model_dir))
# Save neural network parameters
def save_model(self, episode):
self.saver.save(self.sess, self.model_loc, global_step=episode)
# Update actor and critic networks for each layer
def learn(self):
for i in range(len(self.layers)):
self.layers[i].learn(self.num_updates)
# Train agent for an episode
def train(self,env, episode_num,total_episodes):
# Select final goal from final goal space, defined in "design_agent_and_env2.py"
self.goal_array[self.FLAGS.agents - 1] = env.get_next_goal(self.FLAGS.test)
print("Next End Goal: ", self.goal_array[self.FLAGS.agents - 1])
# Select initial state from in initial state space, defined in environment.py
self.current_state = env.reset_sim(self.goal_array[self.FLAGS.agents - 1])
if env.name == "ant_reacher.xml":
print("Initial Ant Position: ", self.current_state[:3])
# print("Initial State: ", self.current_state)
# Reset step counter
self.steps_taken = 0
# Train for an episode
goal_status, max_lay_achieved = self.layers[self.FLAGS.agents - 1].train(self, env, episode_num = episode_num)
# Update actor/critic networks if not testing
if not self.FLAGS.test and total_episodes > 25:
self.learn()
# Return whether end goal was achieved
return goal_status[self.FLAGS.agents - 1]
# Save performance evaluations
def log_performance(self, success_rate):
# Add latest success_rate to list
self.performance_log.append(success_rate)
# Save log
cpickle.dump(self.performance_log,open("performance_log.p","wb"))
| avg_line_length: 37.287425 | max_line_length: 192 | alphanum_fraction: 0.654408 |

| hexsha: 1c0600daed121325593c2b840c862bd773fc3f7a | size: 2,321 | ext: py | lang: Python |
| repo: espressif/pytest-embedded | path: pytest-embedded-arduino/pytest_embedded_arduino/app.py | head_hexsha: 14077e64d3dfa74c03dcbcd1775fc994ba3e4928 | licenses: ["MIT"] |
| stars: 12 (2021-06-07T11:39:30.000Z to 2022-03-07T18:42:04.000Z) | issues: 30 (2021-08-10T10:27:41.000Z to 2022-03-25T08:08:29.000Z) | forks: 6 (2021-11-16T13:10:45.000Z to 2022-03-31T10:51:29.000Z) |
import json
import os
from typing import List, Optional, Tuple
from pytest_embedded.app import App
class ArduinoApp(App):
"""
Arduino App class
Attributes:
app_path (str): Application path.
build_dir (str): Build directory.
sketch (str): Sketch name.
fqbn (str): Fully Qualified Board Name.
target (str) : ESPxx chip.
        flash_files (List[Tuple[int, str, bool]]): list of (offset, file path, encrypted) entries for the files that need to be flashed.
        flash_settings (dict[str, Any]): dict of flash settings.
        binary_offsets (dict[str, List[int]]): dict mapping each target chip to its bootloader, partition-table and app binary offsets.
"""
flash_settings = {'flash_mode': 'dio', 'flash_size': 'detect', 'flash_freq': '80m'}
binary_offsets = {
'esp32': [0x1000, 0x8000, 0x10000],
'esp32s2': [0x1000, 0x8000, 0x10000],
'esp32c3': [0x0, 0x8000, 0x10000],
}
def __init__(
self,
app_path: Optional[str] = None,
build_dir: Optional[str] = None,
**kwargs,
):
"""
Args:
app_path (str): Application path.
build_dir: Build directory.
"""
super().__init__(app_path, build_dir, **kwargs)
self.sketch = os.path.basename(app_path)
self.build_path = os.path.realpath(os.path.join(self.app_path, self.build_dir))
self.fqbn = self._get_fqbn(self.build_path)
self.target = self.fqbn.split(':')[2]
self.flash_files = self._get_bin_files(self.build_path, self.sketch, self.target)
def _get_fqbn(self, build_path) -> str:
options_file = os.path.realpath(os.path.join(build_path, 'build.options.json'))
with open(options_file) as f:
options = json.load(f)
fqbn = options['fqbn']
return fqbn
def _get_bin_files(self, build_path, sketch, target) -> List[Tuple[int, str, bool]]:
bootloader = os.path.realpath(os.path.join(build_path, sketch + '.ino.bootloader.bin'))
partitions = os.path.realpath(os.path.join(build_path, sketch + '.ino.partitions.bin'))
app = os.path.realpath(os.path.join(build_path, sketch + '.ino.bin'))
files = [bootloader, partitions, app]
offsets = self.binary_offsets[target]
return [(offsets[i], files[i], False) for i in range(3)]
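# Hedged usage sketch (not part of the original file): point the app at a
# compiled Arduino sketch and read back the flashing information. The path and
# build directory below are hypothetical; a build.options.json plus the
# bootloader, partition and app .bin files are assumed to exist there.
if __name__ == '__main__':
    app = ArduinoApp(app_path='/path/to/hello_world', build_dir='build')
    print(app.fqbn, app.target)
    for offset, binary, encrypted in app.flash_files:
        print(hex(offset), binary, encrypted)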
| avg_line_length: 36.265625 | max_line_length: 120 | alphanum_fraction: 0.619561 |