# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'use_system_sqlite%': 0,
},
'target_defaults': {
'defines': [
'SQLITE_CORE',
'SQLITE_ENABLE_BROKEN_FTS3',
'SQLITE_ENABLE_FTS3_PARENTHESIS',
'SQLITE_ENABLE_FTS3',
'SQLITE_ENABLE_FTS4',
'SQLITE_ENABLE_MEMORY_MANAGEMENT',
'SQLITE_SECURE_DELETE',
'SQLITE_SEPARATE_CACHE_POOLS',
'THREADSAFE',
'_HAS_EXCEPTIONS=0',
],
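# Note: each entry in 'defines' above is emitted as a preprocessor define on the
# compiler command line, e.g. 'SQLITE_ENABLE_FTS3' becomes -DSQLITE_ENABLE_FTS3
# (or /DSQLITE_ENABLE_FTS3 with MSVC), so entries must not carry a leading 'D'.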
},
'targets': [
{
'target_name': 'sqlite',
'conditions': [
[ 'v8_is_3_28==1', {
'defines': [
'V8_IS_3_28=1'
],
}],
[ 'node_win_onecore==1', {
'defines': ['SQLITE_OS_WINRT']
}],
[ 'v8_is_3_14==1', {
'defines': [
'V8_IS_3_14=1'
],
}],
['use_system_sqlite', {
'type': 'none',
'direct_dependent_settings': {
'defines': [
'USE_SYSTEM_SQLITE',
],
},
'conditions': [
['OS == "ios"', {
'dependencies': [
'sqlite_regexp',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/usr/lib/libsqlite3.dylib',
],
},
}],
],
}, { # !use_system_sqlite
'product_name': 'sqlite3',
'type': 'static_library',
'sources': [
'sqlite3.h',
'sqlite3ext.h',
'sqlite3.c',
],
'include_dirs': [
# 'amalgamation',
# Needed for fts2 to build.
# 'src/src',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
'../..',
],
},
'msvs_disabled_warnings': [
4018, 4244, 4267,
],
'conditions': [
['OS=="linux"', {
'link_settings': {
'libraries': [
'-ldl',
],
},
}],
['OS == "mac" or OS == "ios"', {
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
],
},
}],
['OS=="ios"', {
'xcode_settings': {
'ALWAYS_SEARCH_USER_PATHS': 'NO',
'GCC_CW_ASM_SYNTAX': 'NO', # No -fasm-blocks
'GCC_DYNAMIC_NO_PIC': 'NO', # No -mdynamic-no-pic
# (Equivalent to -fPIC)
'GCC_ENABLE_CPP_EXCEPTIONS': 'NO', # -fno-exceptions
'GCC_ENABLE_CPP_RTTI': 'NO', # -fno-rtti
'GCC_ENABLE_PASCAL_STRINGS': 'NO', # No -mpascal-strings
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'PREBINDING': 'NO', # No -Wl,-prebind
'EMBED_BITCODE': 'YES',
'IPHONEOS_DEPLOYMENT_TARGET': '6.0',
'GCC_GENERATE_DEBUGGING_SYMBOLS': 'NO',
'USE_HEADERMAP': 'NO',
'OTHER_CFLAGS': [
'-fno-strict-aliasing',
'-fno-standalone-debug'
],
'OTHER_CPLUSPLUSFLAGS': [
'-fno-strict-aliasing',
'-fno-standalone-debug'
],
'OTHER_LDFLAGS': [
'-s'
],
'WARNING_CFLAGS': [
'-Wall',
'-Wendif-labels',
'-W',
'-Wno-unused-parameter',
],
},
'defines':[ '__IOS__' ],
'conditions': [
['target_arch=="ia32"', {
'xcode_settings': {'ARCHS': ['i386']},
}],
['target_arch=="x64"', {
'xcode_settings': {'ARCHS': ['x86_64']},
}],
[ 'target_arch in "arm64 arm armv7s"', {
'xcode_settings': {
'OTHER_CFLAGS': [
'-fembed-bitcode'
],
'OTHER_CPLUSPLUSFLAGS': [
'-fembed-bitcode'
],
}
}],
[ 'target_arch=="arm64"', {
'xcode_settings': {'ARCHS': ['arm64']},
}],
[ 'target_arch=="arm"', {
'xcode_settings': {'ARCHS': ['armv7']},
}],
[ 'target_arch=="armv7s"', {
'xcode_settings': {'ARCHS': ['armv7s']},
}],
[ 'target_arch=="x64" or target_arch=="ia32"', {
'xcode_settings': { 'SDKROOT': 'iphonesimulator' },
}, {
'xcode_settings': { 'SDKROOT': 'iphoneos', 'ENABLE_BITCODE': 'YES'},
}]
],
}],
['OS == "android"', {
'defines': [
'HAVE_USLEEP=1',
'SQLITE_DEFAULT_JOURNAL_SIZE_LIMIT=1048576',
'SQLITE_DEFAULT_AUTOVACUUM=1',
'SQLITE_TEMP_STORE=3',
'SQLITE_ENABLE_FTS3_BACKWARDS',
'SQLITE_DEFAULT_FILE_FORMAT=4',
],
}],
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
'cflags': [
# SQLite doesn't believe in compiler warnings,
# preferring testing.
# http://www.sqlite.org/faq.html#q17
'-Wno-int-to-pointer-cast',
'-Wno-pointer-to-int-cast',
],
}],
['clang==1', {
'xcode_settings': {
'WARNING_CFLAGS': [
# sqlite does `if (*a++ && *b++);` in a non-buggy way.
'-Wno-empty-body',
# sqlite has some `unsigned < 0` checks.
'-Wno-tautological-compare',
],
},
'cflags': [
'-Wno-empty-body',
'-Wno-tautological-compare',
],
}],
],
}],
],
},
],
}
|
n, m = [int(x) for x in input().split()]
while n:
ans = 1
for x in range(n, n+m):
ans *= x
for x in range(2, m+1):
ans //= x
print(ans)
n, m = [int(x) for x in input().split()]
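# A minimal cross-check sketch (not part of the original solution): the loop above
# computes the binomial coefficient C(n + m - 1, m) = n * (n + 1) * ... * (n + m - 1) / m!.
# Assumes Python 3.8+ for math.comb; the helper is defined but never called here.
def _check_binomial(n, m):
    from math import comb
    ans = 1
    for x in range(n, n + m):
        ans *= x
    for x in range(2, m + 1):
        ans //= x
    return ans == comb(n + m - 1, m)  # True for all n >= 1, m >= 0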
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Jason'
import re
def injection(input_path, weibo_dir_path):
ret = []
with open(input_path, 'r') as fr:
for line in fr:
if line.strip() == '':
continue
temp_list = re.split(r'\s', line.strip())
ret.append((temp_list[0], temp_list[1]))
# build "[('a', 'b'), ('c', 'd'), ...]"; join() also handles an empty input file
input_str = '[' + ', '.join("('{0:s}', '{1:s}')".format(first, second) for first, second in ret) + ']'
new_doc = ''
with open(weibo_dir_path + 'WeiboSpider/settings.py', 'r') as fr:
for line in fr:
if re.search('SPEC_WEIBO_ENABLED', line.strip()):
new_doc += 'SPEC_WEIBO_ENABLED = True\n'
elif re.search('SPEC_WEIBO_LIST', line.strip()):
new_doc += 'SPEC_WEIBO_LIST = {0:s}\n'.format(input_str)
else:
new_doc += line
with open(weibo_dir_path + 'WeiboSpider/settings.py', 'w') as fw:
fw.write(new_doc)
if __name__ == '__main__':
input_path = '/home/cuckootan/Desktop/sample.txt'
weibo_dir_path = '/home/cuckootan/Desktop/WeiboSpider/'
injection(input_path, weibo_dir_path)
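# Illustrative input format (field meanings are an assumption; injection() only
# requires two whitespace-separated tokens per non-empty line of sample.txt):
#
#     <first_field>   <second_field>
#     <first_field>   <second_field>
#
# Each line becomes one ('first', 'second') tuple in SPEC_WEIBO_LIST.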
|
# Copyright 2015 Fortinet Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from netaddr import IPAddress
from netaddr import IPNetwork
from oslo_log import log as logging
from oslo_utils import excutils
from neutron_lib import constants as n_consts
from neutron_lib.plugins import directory
from neutron.api import extensions as neutron_extensions
from neutron import context as neutron_context
from neutron.db.models import l3 as l3_db
from neutron.plugins.common import constants as const
from neutron_fwaas.db.firewall import firewall_db
from neutron_fwaas.db.firewall import firewall_router_insertion_db
import neutron_fwaas.extensions as extensions
from neutron_fwaas.extensions import firewall as fw_ext
from bell_fortinet._i18n import _LE
from bell_fortinet.common import config
from bell_fortinet.common import constants as constants
from bell_fortinet.common import utils as utils
from bell_fortinet.db import models as fortinet_db
from bell_fortinet.tasks import constants as t_consts
from bell_fortinet.tasks import tasks
LOG = logging.getLogger(__name__)
FORTINET_FW = "fortinet_fw"
FORTINET_FW_PLUGIN = "fortinet_fw_plugin"
class FortinetFirewallPlugin(
firewall_db.Firewall_db_mixin,
firewall_router_insertion_db.FirewallRouterInsertionDbMixin):
"""Implementation of the Neutron Firewall Service Plugin.
This class manages the workflow of FWaaS request/response.
Most DB-related work is implemented in the class
firewall_db.Firewall_db_mixin.
"""
neutron_extensions.append_api_extensions_path(extensions.__path__)
supported_extension_aliases = ["fwaas", "fwaasrouterinsertion"]
path_prefix = fw_ext.FIREWALL_PREFIX
def __init__(self):
"""Do the initialization for the firewall service plugin here."""
self._fortigate = config.fgt_info
self._driver = config.get_apiclient()
self.task_manager = tasks.TaskManager()
self.task_manager.start()
firewall_db.subscribe()
def _rpc_update_firewall(self, context, firewall_id):
status_update = {"firewall": {"status": const.PENDING_UPDATE}}
super(FortinetFirewallPlugin, self).update_firewall(
context, firewall_id, status_update)
fw_with_rules = self._make_firewall_dict_with_rules(context,
firewall_id)
# this is triggered on an update to fw rule or policy, no
# change in associated routers.
fw_with_rules['add-router-ids'] = self.get_firewall_routers(
context, firewall_id)
fw_with_rules['del-router-ids'] = []
self._apply_firewall(context, **fw_with_rules)
def _rpc_update_firewall_policy(self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
if firewall_policy:
for firewall_id in firewall_policy['firewall_list']:
self._rpc_update_firewall(context, firewall_id)
def _ensure_update_firewall(self, context, firewall_id):
fwall = self.get_firewall(context, firewall_id)
if fwall['status'] in [const.PENDING_CREATE,
const.PENDING_UPDATE,
const.PENDING_DELETE]:
raise fw_ext.FirewallInPendingState(firewall_id=firewall_id,
pending_state=fwall['status'])
def _ensure_update_firewall_policy(self, context, firewall_policy_id):
firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
if firewall_policy and 'firewall_list' in firewall_policy:
for firewall_id in firewall_policy['firewall_list']:
self._ensure_update_firewall(context, firewall_id)
def _ensure_update_firewall_rule(self, context, firewall_rule_id):
fw_rule = self.get_firewall_rule(context, firewall_rule_id)
if 'firewall_policy_id' in fw_rule and fw_rule['firewall_policy_id']:
self._ensure_update_firewall_policy(context,
fw_rule['firewall_policy_id'])
def _get_routers_for_create_firewall(self, tenant_id, context, firewall):
# pop router_id as this goes in the router association db
# and not firewall db
LOG.debug("# _get_routers_for_create_firewall called Fortinet_plugin")
router_ids = firewall['firewall'].pop('router_ids', None)
if router_ids == n_consts.ATTR_NOT_SPECIFIED:
# old semantics: if the router-ids keyword is not specified, pick up
# all routers on the tenant.
l3_plugin = directory.get_plugin(n_consts.L3)
ctx = neutron_context.get_admin_context()
routers = l3_plugin.get_routers(ctx)
router_ids = [
router['id']
for router in routers
if router['tenant_id'] == tenant_id]
# validation can still fail if there is another fw
# associated with one of these routers.
self.validate_firewall_routers_not_in_use(context, router_ids)
return router_ids
else:
if not router_ids:
# This indicates that user specifies no routers.
return []
else:
# some router(s) provided.
self.validate_firewall_routers_not_in_use(context, router_ids)
return router_ids
def create_firewall(self, context, firewall):
LOG.debug("create_firewall() called Fortinet_plugin")
tenant_id = firewall['firewall']['tenant_id']
fw_new_rtrs = self._get_routers_for_create_firewall(
tenant_id, context, firewall)
if not fw_new_rtrs:
# no messaging to agent needed, and fw needs to go
# to INACTIVE(no associated rtrs) state.
status = const.INACTIVE
fw = super(FortinetFirewallPlugin, self).create_firewall(
context, firewall, status)
fw['router_ids'] = []
return fw
else:
fw = super(FortinetFirewallPlugin, self).create_firewall(
context, firewall)
fw['router_ids'] = fw_new_rtrs
fw_with_rules = (
self._make_firewall_dict_with_rules(context, fw['id']))
fw_with_rtrs = {'fw_id': fw['id'], 'router_ids': fw_new_rtrs}
self.set_routers_for_firewall(context, fw_with_rtrs)
fw_with_rules['add-router-ids'] = fw_new_rtrs
fw_with_rules['del-router-ids'] = []
self._apply_firewall(context, **fw_with_rules)
return fw
def update_firewall(self, context, id, firewall):
LOG.debug("Fortinet_plugin update_firewall() called, "
"id is %(id)s, firewall is %(fw)s",
{'id': id, 'fw': firewall})
self._ensure_update_firewall(context, id)
# pop router_id as this goes in the router association db
# and not firewall db
router_ids = firewall['firewall'].pop('router_ids', None)
fw_current_rtrs = self.get_firewall_routers(context, id)
if router_ids is not None:
if router_ids == []:
# This indicates that the user specified no routers.
fw_new_rtrs = []
else:
self.validate_firewall_routers_not_in_use(
context, router_ids, id)
fw_new_rtrs = router_ids
self.update_firewall_routers(context, {'fw_id': id,
'router_ids': fw_new_rtrs})
else:
# router-ids keyword not specified for the update; pick up
# the existing routers.
fw_new_rtrs = self.get_firewall_routers(context, id)
if not fw_new_rtrs and not fw_current_rtrs:
# no messaging to agent needed, and we need to continue
# in INACTIVE state
firewall['firewall']['status'] = const.INACTIVE
fw = super(FortinetFirewallPlugin, self).update_firewall(
context, id, firewall)
fw['router_ids'] = []
return fw
else:
firewall['firewall']['status'] = const.PENDING_UPDATE
fw = super(FortinetFirewallPlugin, self).update_firewall(
context, id, firewall)
fw['router_ids'] = fw_new_rtrs
fw_with_rules = (
self._make_firewall_dict_with_rules(context, fw['id']))
# determine rtrs to add fw to and del from
fw_with_rules['add-router-ids'] = fw_new_rtrs
fw_with_rules['del-router-ids'] = list(
set(fw_current_rtrs).difference(set(fw_new_rtrs)))
# last-router drives agent to ack with status to set state to INACTIVE
fw_with_rules['last-router'] = not fw_new_rtrs
LOG.debug("## update_firewall %s: Add Routers: %s, Del Routers: %s",
fw['id'],
fw_with_rules['add-router-ids'],
fw_with_rules['del-router-ids'])
self._apply_firewall(context, **fw_with_rules)
#self.agent_rpc.update_firewall(context, fw_with_rules)
return fw
def update_firewall_for_delete_router(self, context, router_id):
LOG.debug("fwaas delete_router() called, router_id: %(rtid)s",
{'rtid': router_id})
cls = firewall_router_insertion_db.FirewallRouterAssociation
db_fw_rt = fortinet_db.query_record(context, cls, router_id=router_id)
if not db_fw_rt:
return None
firewall = {u'firewall': {'router_ids': []}}
return self.update_firewall(context, db_fw_rt.fw_id, firewall)
def delete_db_firewall_object(self, context, id):
super(FortinetFirewallPlugin, self).delete_firewall(context, id)
def delete_firewall(self, context, id):
LOG.debug("Fortinet_plugin delete_firewall() called, fw_id %(id)s",
{'id': id})
fw_with_rules = (
self._make_firewall_dict_with_rules(context, id))
status = {"firewall": {"status": const.PENDING_DELETE}}
super(FortinetFirewallPlugin, self).update_firewall(
context, id, status)
# Reflect state change in fw_with_rules
fw_with_rules['del-router-ids'] = self.get_firewall_routers(
context, id)
self._apply_firewall(context, **fw_with_rules)
self.delete_db_firewall_object(context, id)
def update_firewall_policy(self, context, id, firewall_policy):
LOG.debug("update_firewall_policy called, "
"id =%(id)s, firewall_policy=%(fp)s",
{'id': id, 'fp': firewall_policy})
self._ensure_update_firewall_policy(context, id)
firewall_policy_old = self.get_firewall_policy(context, id)
firewall_rule_ids = firewall_policy_old.get('firewall_rules', [])
tenant_id = firewall_policy_old.get('tenant_id', None)
fwp = super(FortinetFirewallPlugin,
self).update_firewall_policy(context, id, firewall_policy)
for fwr_id in firewall_rule_ids:
fw_rule = self.get_firewall_rule(context, fwr_id)
self._delete_firewall_rule(context, tenant_id, **fw_rule)
self._rpc_update_firewall_policy(context, id)
return fwp
def create_firewall_rule(self, context, firewall_rule):
"""
:param context:
:param firewall_rule:
firewall_rule={'firewall_rule': {... }}
:return:
"""
LOG.debug("create_firewall_rule() firewall_rule=%(fwr)s",
{'fwr': firewall_rule})
return super(FortinetFirewallPlugin,
self).create_firewall_rule(context, firewall_rule)
def delete_firewall_rule(self, context, id):
super(FortinetFirewallPlugin, self).delete_firewall_rule(context, id)
def update_firewall_rule(self, context, id, firewall_rule):
LOG.debug("update_firewall_rule() id: %(id)s, "
"firewall_rule: %(firewall_rule)s",
{'id': id, 'firewall_rule': firewall_rule})
try:
fwr = self._update_firewall_rule_dict(context, id, firewall_rule)
self._update_firewall_rule(context, id, fwr)
self._ensure_update_firewall_rule(context, id)
fwr = super(FortinetFirewallPlugin,
self).update_firewall_rule(context, id, firewall_rule)
utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
return fwr
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("update_firewall_rule %(fwr)s failed"),
{'fwr': firewall_rule})
utils._rollback_on_err(self, context, e)
def insert_rule(self, context, id, rule_info):
self._ensure_update_firewall_policy(context, id)
try:
fwp = super(FortinetFirewallPlugin,
self).insert_rule(context, id, rule_info)
self._rpc_update_firewall_policy(context, id)
utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
return fwp
except Exception as e:
with excutils.save_and_reraise_exception():
self.remove_rule(context, id, rule_info)
utils._rollback_on_err(self, context, e)
def remove_rule(self, context, id, rule_info):
LOG.debug("Fortinet_plugin remove_rule() called")
self._ensure_update_firewall_policy(context, id)
if rule_info.get('firewall_rule_id', None):
firewall_rule = self._get_firewall_rule(
context, rule_info['firewall_rule_id'])
fwr = self._make_firewall_rule_dict(firewall_rule)
self._delete_firewall_rule(context, fwr['tenant_id'], **fwr)
fwp = super(FortinetFirewallPlugin, self).remove_rule(
context, id, rule_info)
self._rpc_update_firewall_policy(context, id)
return fwp
def get_firewalls(self, context, filters=None, fields=None):
LOG.debug("fwaas get_firewalls() called, filters=%(filters)s, "
"fields=%(fields)s",
{'filters': filters, 'fields': fields})
fw_list = super(FortinetFirewallPlugin, self).get_firewalls(
context, filters, fields)
for fw in fw_list:
fw_current_rtrs = self.get_firewall_routers(context, fw['id'])
fw['router_ids'] = fw_current_rtrs
return fw_list
def get_firewall(self, context, id, fields=None):
LOG.debug("fwaas get_firewall() called")
res = super(FortinetFirewallPlugin, self).get_firewall(
context, id, fields)
fw_current_rtrs = self.get_firewall_routers(context, id)
res['router_ids'] = fw_current_rtrs
return res
def _apply_firewall(self, context, **fw_with_rules):
tenant_id = fw_with_rules['tenant_id']
default_fwr = self._make_default_firewall_rule_dict(tenant_id)
try:
if fw_with_rules.get('del-router-ids', None):
for fwr in list(fw_with_rules.get('firewall_rule_list', None)):
self._delete_firewall_rule(context, tenant_id, **fwr)
if default_fwr:
self._delete_firewall_rule(
context, tenant_id, **default_fwr)
self.update_firewall_status(
context, fw_with_rules['id'], const.INACTIVE)
if fw_with_rules.get('add-router-ids', None):
vdom = getattr(
fortinet_db.Fortinet_ML2_Namespace.query_one(
context, tenant_id=tenant_id), 'vdom', None)
if not vdom:
raise fw_ext.FirewallInternalDriverError(
driver='Fortinet_fwaas_plugin')
if default_fwr:
self._add_firewall_rule(context, tenant_id, **default_fwr)
for fwr in reversed(
list(fw_with_rules.get('firewall_rule_list', None))):
self._add_firewall_rule(context, tenant_id, **fwr)
self.update_firewall_status(
context, fw_with_rules['id'], const.ACTIVE)
else:
self.update_firewall_status(
context, fw_with_rules['id'], const.INACTIVE)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("apply_firewall %(fws)s failed"),
{'fws': fw_with_rules})
utils._rollback_on_err(self, context, e)
utils.update_status(self, context, t_consts.TaskStatus.COMPLETED)
def _add_firewall_rule(self, context, fwp_tenant_id, **fwr):
"""
:param obj:
:param context:
:param kwargs: dictionary, firewall rule
firewall_rule: {'source_ip_address': u'192.176.10.0/24',... }
:return:
"""
LOG.debug("# _add_firewall_rule() called")
namespace = fortinet_db.Fortinet_ML2_Namespace.query_one(
context, tenant_id=fwp_tenant_id)
vdom = getattr(namespace, 'vdom', None)
if not vdom or not fwr:
return None
inf_int, inf_ext = utils.get_vlink_intf(
self, context, vdom=namespace.vdom)
srcaddr = self._add_fwr_ip_address(
context, vdom, place='source_ip_address', **fwr)
dstaddr = self._add_fwr_ip_address(
context, vdom, place='destination_ip_address', **fwr)
service = self._add_fwr_service(context, vdom, **fwr)
action = self._get_fwr_action(**fwr)
profiles = self._get_fwp_profiles(action)
match_vip = 'enable'
name = fwr.get('name', '')
# add a basic firewall rule('accept': incoming, 'deny': bidirectional)
fortinet_fwp = utils.add_fwpolicy_to_head(self, context,
vdom=vdom,
srcaddr=srcaddr['name'],
srcintf='any',
dstaddr=dstaddr['name'],
dstintf='any',
service=service['name'],
match_vip=match_vip,
action=action,
comments=name,
**profiles)
utils.add_record(self, context,
fortinet_db.Fortinet_FW_Rule_Association,
fwr_id=fwr['id'],
fortinet_pid=fortinet_fwp.id,
type=constants.TYPE_INT)
if action in ['accept']:
# if the action is 'accept', NAT needs to be enabled for the outgoing traffic
fortinet_fwp = utils.add_fwpolicy_to_head(self, context,
vdom=vdom,
srcaddr=srcaddr['name'],
srcintf='any',
dstaddr=dstaddr['name'],
dstintf=inf_int,
nat='enable',
service=service['name'],
action=action,
comments=name,
**profiles)
utils.add_record(self, context,
fortinet_db.Fortinet_FW_Rule_Association,
fwr_id=fwr['id'],
fortinet_pid=fortinet_fwp.id,
type=constants.TYPE_EXT)
def _update_firewall_rule(self, context, id, firewall_rule):
LOG.debug("# _add_firewall_rule() called")
fwps_int = fortinet_db.Fortinet_FW_Rule_Association.query_all(
context, fwr_id=id, type=constants.TYPE_INT)
fwps_ext = fortinet_db.Fortinet_FW_Rule_Association.query_all(
context, fwr_id=id, type=constants.TYPE_EXT)
if fwps_ext and fwps_int:
fwps = fwps_int + fwps_ext
else:
fwps = fwps_int or fwps_ext
if not fwps:
return
firewall_rule.setdefault('id', id)
srcaddr = self._make_fortinet_fwaddress_dict(
place='source_ip_address', **firewall_rule)
dstaddr = self._make_fortinet_fwaddress_dict(
place='destination_ip_address', **firewall_rule)
service = self._make_fortinet_fwservice_dict(**firewall_rule)
action = self._get_fwr_action(**firewall_rule)
profiles = self._get_fwp_profiles(action)
for fwp in fwps_int:
vdom = fwp.fortinet_policy.vdom
if service['name'] != 'ALL':
utils.set_fwservice(self, context, vdom=vdom, **service)
if srcaddr['name'] != 'all':
utils.set_fwaddress(self, context, vdom=vdom, **srcaddr)
if dstaddr['name'] != 'all':
utils.set_fwaddress(self, context, vdom=vdom, **dstaddr)
# check whether related firewall policies need to update
fwp = fwps_int[0].fortinet_policy
name = firewall_rule.setdefault('name', fwp.comments)
if fwp.srcaddr == srcaddr['name'] and fwp.action == action and \
fwp.dstaddr == dstaddr['name'] and fwp.service == service['name']:
return
if action in ['accept']:
for fwp in fwps:
fortinet_fwp = utils.set_fwpolicy(self, context,
id=fwp.fortinet_pid,
srcaddr=srcaddr['name'],
dstaddr=dstaddr['name'],
service=service['name'],
action=action,
comments=name,
**profiles)
if not fwps_ext:
inf_int, inf_ext = utils.get_vlink_intf(
self, context, vdom=fortinet_fwp.vdom)
utils.add_fwaas_subpolicy(self, context,
before=fortinet_fwp.edit_id,
vdom=fortinet_fwp.vdom,
srcaddr=srcaddr['name'],
dstaddr=dstaddr['name'],
dstintf=inf_int,
nat='enable',
service=service['name'],
action=action,
comments=name,
fwr_id=id,
type=constants.TYPE_EXT,
**profiles)
elif action in ['deny']:
for fwp_ext in fwps_ext:
utils.delete_fwaas_subpolicy(self, context,
fwr_id=fwp_ext.fwr_id,
fortinet_pid=fwp_ext.fortinet_pid)
for fwp in fwps_int:
utils.set_fwpolicy(self, context,
id=fwp.fortinet_pid,
srcaddr=srcaddr['name'],
dstaddr=dstaddr['name'],
service=service['name'],
action=action,
comments=name,
**profiles)
for fwp in fwps_int:
vdom = fwp.fortinet_policy.vdom
if service['name'] == 'ALL':
# delete the old service if it exists
utils.delete_fwservice(self, context, vdom=vdom, name=id)
if srcaddr['name'] == 'all':
name = constants.PREFIX['source_ip_address'] + id
utils.delete_fwaddress(self, context, vdom=vdom, name=name)
if dstaddr['name'] == 'all':
name = constants.PREFIX['destination_ip_address'] + id
utils.delete_fwaddress(self, context, vdom=vdom, name=name)
def _delete_firewall_rule(self, context, fwp_tenant_id, **fwr):
"""
:param obj:
:param context:
:param kwargs: dictionary, firewall rule
firewall_rule: {'source_ip_address': u'192.176.10.0/24',... }
:return:
"""
# need to consider shared firewall rules
LOG.debug("# _delete_firewall_rule() called")
namespace = fortinet_db.Fortinet_ML2_Namespace.query_one(
context, tenant_id=fwp_tenant_id)
if not namespace:
return None
fwp_assed = fortinet_db.Fortinet_FW_Rule_Association.query_all(
context, fwr_id=fwr['id'])
for fwp in fwp_assed:
fortinet_db.delete_record(
context, fortinet_db.Fortinet_FW_Rule_Association,
fwr_id=fwp.fwr_id, fortinet_pid=fwp.fortinet_pid)
utils.delete_fwpolicy(
self, context, id=fwp.fortinet_pid, vdom=namespace.vdom)
if fwr.get('source_ip_address', None):
srcaddr = constants.PREFIX['source_ip_address'] + fwr['id']
utils.delete_fwaddress(
self, context, vdom=namespace.vdom, name=srcaddr)
if fwr.get('destination_ip_address', None):
dstaddr = constants.PREFIX['destination_ip_address'] + fwr['id']
utils.delete_fwaddress(
self, context, vdom=namespace.vdom, name=dstaddr)
self._delete_fwr_service(context, namespace.vdom, **fwr)
def _update_firewall_rule_dict(self, context, id, firewall_rule):
fwr = firewall_rule.get('firewall_rule', firewall_rule)
fwr_db = self._get_firewall_rule(context, id)
if fwr_db.firewall_policy_id:
fwp_db = self._get_firewall_policy(context,
fwr_db.firewall_policy_id)
if 'shared' in fwr and not fwr['shared']:
if fwr_db['tenant_id'] != fwp_db['tenant_id']:
raise fw_ext.FirewallRuleInUse(firewall_rule_id=id)
fwr.setdefault('source_port',
self._get_port_range_from_min_max_ports(
fwr_db['source_port_range_min'],
fwr_db['source_port_range_max']))
fwr.setdefault('destination_port',
self._get_port_range_from_min_max_ports(
fwr_db['destination_port_range_min'],
fwr_db['destination_port_range_max']))
keys = ['name', 'protocol', 'action', 'shared', 'ip_version',
'source_ip_address', 'destination_ip_address', 'enabled']
for key in keys:
fwr.setdefault(key, fwr_db[key])
return fwr
def _make_default_firewall_rule_dict(self, tenant_id):
if tenant_id and self._fortigate["enable_default_fwrule"]:
return {'id': tenant_id,
'tenant_id': tenant_id,
'name': '_default_rule_deny_all',
'description': '_default_rule_deny_all',
'protocol': None,
'source_ip_address': None,
'source_port': None,
'destination_ip_address': None,
'destination_port': None,
'action': 'deny'}
else:
return {}
def _add_fwr_ip_address(self, context, vdom,
place='source_ip_address', **fwr):
fwaddress = self._make_fortinet_fwaddress_dict(
place=place, vdom=vdom, **fwr)
utils.add_fwaddress(self, context, **fwaddress)
return fwaddress
def _make_fortinet_fwaddress_dict(self, place='source_ip_address',
vdom=None, **fwr):
fwaddress = {}
if place not in ['source_ip_address', 'destination_ip_address']:
raise ValueError("_make_fortinet_fwaddress_dict() received an invalid 'place' value")
if vdom:
fwaddress.setdefault('vdom', vdom)
if fwr.get(place, None):
fwaddress.setdefault('name', constants.PREFIX[place] + fwr['id'])
fwaddress.setdefault('subnet', utils.get_subnet(fwr[place]))
else:
fwaddress.setdefault('name', 'all')
return fwaddress
def _add_fwr_service(self, context, vdom, **fwr):
kw_service = self._make_fortinet_fwservice_dict(vdom=vdom, **fwr)
utils.add_fwservice(self, context, **kw_service)
return kw_service
def _make_fortinet_fwservice_dict(self, vdom=None, **fwr):
LOG.debug("_make_fortinet_fwservice_dict() fwr=%(fwr)s", {'fwr': fwr})
kw_service = {}
if vdom:
kw_service.setdefault('vdom', vdom)
if fwr['protocol'] in ['any', None] and \
not fwr['destination_port'] and not fwr['source_port']:
# SamSu: The firewall service 'all' was already added by default
kw_service.setdefault('name', 'ALL')
else:
portrange = ':'.join([
utils.port_range(fwr['destination_port']),
utils.port_range(fwr['source_port'])])
if fwr['protocol'] in ['tcp']:
kw_service.setdefault('tcp_portrange', portrange)
elif fwr['protocol'] in ['udp']:
kw_service.setdefault('udp_portrange', portrange)
elif fwr['protocol'] in ['icmp']:
kw_service.setdefault('protocol', 'ICMP')
kw_service.setdefault('name', fwr['id'])
kw_service.setdefault('comment', fwr.get('name', ''))
return kw_service
def _delete_fwr_service(self, context, vdom, **fwr):
LOG.debug("# _delete_fwr_service() fwr=%(fwr)s", {'fwr': fwr})
if fwr['protocol'] in ['any', None] and \
not fwr['destination_port'] and not fwr['source_port']:
return None
else:
return utils.delete_fwservice(
self, context, vdom=vdom, name=fwr['id'])
def _get_fwr_action(self, **fwr):
if fwr.get('action', None) in ['allow']:
action = 'accept'
else:
action = 'deny'
return action
def _get_fwp_profiles(self, action):
profiles = {
'av_profile': None,
'webfilter_profile': None,
'ips_sensor': None,
'application_list': None,
'ssl_ssh_profile': None
}
if action in ['allow', 'accept']:
for key in profiles:
profiles[key] = self._fortigate[key]
return profiles
def _get_fip_before_id(self, context, fwr_id):
fwp_assed = fortinet_db.Fortinet_FW_Rule_Association.query_one(
context, type=constants.TYPE_EXT, fwr_id=fwr_id)
if not fwp_assed:
fwp_assed = fortinet_db.Fortinet_FW_Rule_Association.query_one(
context, type=constants.TYPE_INT, fwr_id=fwr_id)
fwp = fortinet_db.query_record(context,
fortinet_db.Fortinet_Firewall_Policy,
id=fwp_assed.fortinet_pid)
return getattr(fwp, 'edit_id', None)
def _get_fips_in_fw(self, context, tenant_id, fw_net):
fw_fips = []
if not fw_net:
return fw_fips
namespace = fortinet_db.Fortinet_ML2_Namespace.query_one(
context, tenant_id=tenant_id)
if not namespace:
return fw_fips
db_fips = fortinet_db.query_records(
context, l3_db.FloatingIP, tenant_id=tenant_id,
status=n_consts.FLOATINGIP_STATUS_ACTIVE)
for fip in db_fips:
if getattr(fip, 'fixed_ip_address', None) and \
IPAddress(fip.fixed_ip_address) in IPNetwork(fw_net):
fw_fips.append((fip.id, fip.floating_ip_address))
return fw_fips
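# Illustrative sketch only (values are hypothetical, not taken from a live
# deployment): the rule dicts that _add_firewall_rule()/_update_firewall_rule()
# receive above follow the Neutron FWaaS v1 schema, roughly:
_EXAMPLE_FWAAS_V1_RULE = {
    'id': 'hypothetical-rule-uuid',
    'name': 'allow-web',
    'protocol': 'tcp',
    'ip_version': 4,
    'source_ip_address': '192.176.10.0/24',
    'destination_ip_address': None,
    'source_port': None,
    'destination_port': '80:443',
    'action': 'allow',   # mapped to FortiGate 'accept' by _get_fwr_action()
    'enabled': True,
    'shared': False,
}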
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import fiber
class TestFiber(unittest.TestCase):
def test_fib(self):
@fiber.fiber(locals=locals())
def fib(n):
if n == 0:
return 0
if n == 1:
return 1
return fib(n-1) + fib(n=n-2)
self.maxDiff = None
want = """
def __fiberfn_fib(frame):
if frame['__pc'] == 0:
if frame['n'] == 0:
if frame['__pc'] == 0:
frame['__pc'] = 1
return RetOp(value=0)
if frame['n'] == 1:
if frame['__pc'] == 0:
frame['__pc'] = 1
return RetOp(value=1)
frame['__pc'] = 1
return CallOp(func='fib', args=[frame['n'] - 1], kwargs={}, ret_variable='__tmp0__')
if frame['__pc'] == 1:
frame['__pc'] = 2
return CallOp(func='fib', args=[], kwargs={'n': frame['n'] - 2}, ret_variable='__tmp1__')
if frame['__pc'] == 2:
frame['__pc'] = 3
return RetOp(value=frame['__tmp0__'] + frame['__tmp1__'])
""".strip()
self.assertEqual(want, fib.__fibercode__)
def test_sum(self):
@fiber.fiber(locals=locals())
def sum(lst, acc):
if not lst:
return acc
return sum(lst[1:], acc + lst[0])
want = """
def __fiberfn_sum(frame):
if frame['__pc'] == 0:
if not frame['lst']:
if frame['__pc'] == 0:
frame['__pc'] = 1
return RetOp(value=frame['acc'])
frame['__pc'] = 1
return TailCallOp(func='sum', args=[frame['lst'][1:], frame['acc'] + frame['lst'][0]], kwargs={})
""".strip()
self.assertEqual(want, sum.__fibercode__)
if __name__ == '__main__':
unittest.main()
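# Reading the expected output strings above (an editor's summary of the want
# strings, not library documentation): fiber.fiber compiles a recursive function
# into a step function over an explicit frame dict. frame['__pc'] acts as a
# program counter; RetOp returns a value, CallOp suspends to call another fiber
# and stores the result under ret_variable, and TailCallOp replaces the current
# frame (a tail call), presumably so deep recursion does not grow the Python stack.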
|
import unittest
from matrox import Matrix, DimensionError, fill_matrix
from matrox.linalg import *
class TestMatrixFactorizations(unittest.TestCase):
def test_permute_matrix(self):
matrix = Matrix([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
permute = permute_matrix(matrix)
self.assertEqual(repr(permute),
"Matrix([['1', '0', '0'], ['0', '1', '0'], ['0', '0', '1']])")
matrix = Matrix([[0, 1, 1], [1, 2, 1], [2, 7, 9]])
permute = permute_matrix(matrix)
self.assertEqual(repr(permute),
"Matrix([['0', '1', '0'], ['1', '0', '0'], ['0', '0', '1']])")
matrix = fill_matrix(2, 3, 1)
with self.assertRaises(DimensionError) as c:
permute = permute_matrix(matrix)
self.assertTrue("Matrix must be a square matrix." in str(c.exception))
def test_upper_triangular(self):
matrix = Matrix([[2, 1], [6, 8]], fraction=True)
upper, history, inverse_history = \
upper_triangular(matrix, history=True, inverse_history=True)
self.assertEqual(repr(upper), "Matrix([['2', '1'], ['0', '5']])")
self.assertEqual(repr(history), "[Matrix([['1', '0'], ['-3', '1']])]")
self.assertEqual(repr(inverse_history),
"[Matrix([['1', '0'], ['3', '1']])]")
matrix = fill_matrix(2, 3, 1)
with self.assertRaises(DimensionError) as c:
upper = upper_triangular(matrix)
self.assertTrue("Matrix must be a square matrix." in str(c.exception))
def test_lower_triangular(self):
matrix = Matrix([[2, 1], [6, 8]], fraction=True)
lower, history, inverse_history = \
lower_triangular(matrix, history=True, inverse_history=True)
self.assertEqual(repr(lower), "Matrix([['5/4', '0'], ['6', '8']])")
self.assertEqual(repr(history), "[Matrix([['1', '-1/8'], ['0', '1']])]")
self.assertEqual(repr(inverse_history),
"[Matrix([['1', '1/8'], ['0', '1']])]")
matrix = fill_matrix(2, 3, 1)
with self.assertRaises(DimensionError) as c:
lower = lower_triangular(matrix)
self.assertTrue("Matrix must be a square matrix." in str(c.exception))
def test_lu_factorization(self):
matrix = Matrix([[2, 1], [6, 8]], fraction=True)
lower, upper = lu_factorization(matrix)
self.assertEqual(repr(lower), "Matrix([['1', '0'], ['3', '1']])")
self.assertEqual(repr(upper), "Matrix([['2', '1'], ['0', '5']])")
self.assertEqual(repr(lower * upper), repr(matrix))
matrix = fill_matrix(2, 3, 1)
with self.assertRaises(DimensionError) as c:
lower, upper = lu_factorization(matrix)
self.assertTrue("Matrix must be a square matrix." in str(c.exception))
def test_ldu_factorization(self):
matrix = Matrix([[1, 5, 4], [3, 6, 4], [8, 2, 3]], fraction=True)
lower, diag, upper = ldu_factorization(matrix)
self.assertEqual(repr(lower),
"Matrix([['1', '0', '0'], ['3', '1', '0'], ['8', '38/9', '1']])")
self.assertEqual(repr(diag),
"Matrix([['1', '0', '0'], ['0', '-9', '0'], ['0', '0', '43/9']])")
self.assertEqual(repr(upper),
"Matrix([['1', '5', '4'], ['0', '1', '8/9'], ['0', '0', '1']])")
self.assertEqual(repr(lower * diag * upper), repr(matrix))
matrix = fill_matrix(2, 3, 1)
with self.assertRaises(DimensionError) as c:
lower, diag, upper = ldu_factorization(matrix)
self.assertTrue("Matrix must be a square matrix." in str(c.exception))
def test_plu_factorization(self):
matrix = Matrix([[1, 5, 4], [3, 6, 4], [8, 2, 3]], fraction=True)
permuted, lower, upper = plu_factorization(matrix)
self.assertEqual(repr(permuted),
"Matrix([['1', '0', '0'], ['0', '1', '0'], ['0', '0', '1']])")
self.assertEqual(repr(lower),
"Matrix([['1', '0', '0'], ['3', '1', '0'], ['8', '38/9', '1']])")
self.assertEqual(repr(upper),
"Matrix([['1', '5', '4'], ['0', '-9', '-8'], ['0', '0', '43/9']])")
self.assertEqual(repr(lower * upper), repr(permuted * matrix))
matrix = Matrix([[0, 5, 4], [3, 0, 4], [8, 2, 3]], fraction=True)
permuted, lower, upper = plu_factorization(matrix)
self.assertEqual(repr(permuted),
"Matrix([['0', '1', '0'], ['1', '0', '0'], ['0', '0', '1']])")
self.assertEqual(repr(lower),
"Matrix([['1', '0', '0'], ['0', '1', '0'], ['8/3', '2/5', '1']])")
self.assertEqual(repr(upper),
"Matrix([['3', '0', '4'], ['0', '5', '4'], ['0', '0', '-139/15']])")
self.assertEqual(repr(lower * upper), repr(permuted * matrix))
matrix = fill_matrix(2, 3, 1)
with self.assertRaises(DimensionError) as c:
permuted, lower, upper = plu_factorization(matrix)
self.assertTrue("Matrix must be a square matrix." in str(c.exception))
def test_ldlt_factorization(self):
matrix = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], fraction=True)
lower, diag, lower_t = ldlt_factorization(matrix)
self.assertEqual(repr(lower), "None")
self.assertEqual(repr(diag), "None")
self.assertEqual(repr(lower_t), "None")
matrix = Matrix([[1, 2, 3], [2, 1, 4], [3, 4, 1]], fraction=True)
lower, diag, lower_t = ldlt_factorization(matrix)
self.assertEqual(repr(lower),
"Matrix([['1', '0', '0'], ['2', '1', '0'], ['3', '2/3', '1']])")
self.assertEqual(repr(diag),
"Matrix([['1', '0', '0'], ['0', '-3', '0'], ['0', '0', '-20/3']])")
self.assertEqual(repr(lower_t),
"Matrix([['1', '2', '3'], ['0', '1', '2/3'], ['0', '0', '1']])")
self.assertEqual(repr(lower * diag * lower_t), repr(matrix))
matrix = fill_matrix(2, 3, 1)
with self.assertRaises(DimensionError) as c:
lower, diag, lower_t = ldlt_factorization(matrix)
self.assertTrue("Matrix must be a square matrix." in str(c.exception))
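# A minimal numerical cross-check of the 2x2 LU example used above (assumes
# numpy is available; matrox itself is not needed for this sanity check).
def _lu_sanity_check():
    import numpy as np
    L = np.array([[1, 0], [3, 1]])
    U = np.array([[2, 1], [0, 5]])
    return np.array_equal(L @ U, np.array([[2, 1], [6, 8]]))  # expected: True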
|
#! /usr/bin/python3
import boto3
from vectorize import vectorize
import io
import numpy as np
from copy import deepcopy
import random
from queue import Queue
from threading import Thread
import time
s3 = boto3.client('s3')
image_paths = Queue()
np_arrs = Queue(maxsize=100)
unsuccessful = []
with open('images.txt', 'r') as in_f:
lines = in_f.readlines()
random.shuffle(lines)
for line in lines:
image_paths.put(line.strip())
def get_data(key):
print('Got ' + key)
obj = io.BytesIO()
s3.download_fileobj('isitanime-data-clean', key, obj)
return np.asarray(vectorize(obj, scale_size=512)) / 256
def fetch_thread():
while not image_paths.empty():
im = image_paths.get()
try:
np_arrs.put((get_data(im), im))
except Exception:
unsuccessful.append(im)
def store_thread():
fcount = 0
while (not image_paths.empty()) or (not np_arrs.empty()):
if np_arrs.qsize() < 100:
time.sleep(3)
if (image_paths.empty()):
pass
else:
print('Queued: ' + str(np_arrs.qsize()))
continue
arrs = []
names = []
while len(arrs) < 100 and not np_arrs.empty():
arr, name = np_arrs.get()
arrs.append(arr)
names.append(name)
np.savez_compressed('efs/' + str(fcount), *arrs)
with open('efs/' + str(fcount) + '.txt', 'w') as out_f:
for line in names:
out_f.write(line)
out_f.write('\n')
fcount += 1
for _ in range(10):
t = Thread(target=fetch_thread)
t.start()
t = Thread(target=store_thread)
t.start()
t.join()
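# Pipeline summary (an editor's note on the code above, not project docs): ten
# fetch_thread workers pull S3 keys from image_paths and push (array, key) pairs
# into the bounded np_arrs queue; a single store_thread drains it in batches of
# up to 100, writing efs/<n>.npz plus a matching efs/<n>.txt listing of keys.
# Note that only the store thread is joined before the script exits.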
|
import os
import numpy as np
import tensorflow as tf
from utils import save_images, make_dirs
from utils import data_loader
class Experiments(object):
def __init__(self, config, model):
self.config = config
self.model = model
_, self.testloader = data_loader(config)
# directory to save experimental results
self.dir = make_dirs(os.path.join(self.config.result_path, self.config.experiment_path))
def image_generation(self):
# the number of samples
nx = ny = 20
num_samples = nx * ny
# sampling z from N(0, 1)
z_samples = np.random.normal(0, 1, (num_samples, self.config.latent_dim))
# generate images
_, samples = self.model.dec(z_samples)
# save images
path = make_dirs(os.path.join(self.dir, 'image_generation')) + '/epoch_{}.png'.format(self.model.global_epoch.numpy())
save_images(samples, path)
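# Hedged usage sketch (attribute names are inferred from this file, not from
# package documentation): Experiments only needs a config exposing result_path,
# experiment_path and latent_dim, a model exposing dec() and global_epoch, and a
# data_loader(config) that returns (trainloader, testloader).
#
#     experiments = Experiments(config, model)
#     experiments.image_generation()   # saves 400 decoded samples under result_path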
|
"""
Premium Question
"""
from bisect import bisect_left
from collections import defaultdict
import sys
__author__ = 'Daniel'
class WordDistance:
#
# @param {string[]} words
def __init__(self, words):
"""
initialize your data structure here.
:type words: list[str]
"""
self.word_dict = defaultdict(list)
for i, w in enumerate(words):
self.word_dict[w].append(i)
def shortest(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
mini = sys.maxsize
for i in self.word_dict[word1]:
idx = bisect_left(self.word_dict[word2], i)
for nei in (-1, 0):
if 0 <= idx+nei < len(self.word_dict[word2]):
mini = min(mini, abs(i-self.word_dict[word2][idx+nei]))
return mini
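if __name__ == '__main__':
    # Hedged usage example; the word list is the classic sample input for this
    # problem, not data taken from this file.
    wd = WordDistance(["practice", "makes", "perfect", "coding", "makes"])
    print(wd.shortest("coding", "practice"))  # expected: 3
    print(wd.shortest("makes", "coding"))     # expected: 1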
|
import tkinter as tk
from typing import TYPE_CHECKING, Optional
from core.gui.frames.base import DetailsFrame, InfoFrameBase
from core.gui.utils import bandwidth_text
from core.gui.wrappers import Interface
if TYPE_CHECKING:
from core.gui.app import Application
from core.gui.graph.edges import CanvasEdge
from core.gui.graph.node import CanvasNode
from core.gui.graph.edges import CanvasWirelessEdge
def get_iface(canvas_node: "CanvasNode", net_id: int) -> Optional[Interface]:
iface = None
for edge in canvas_node.edges:
link = edge.link
if link.node1_id == net_id:
iface = link.iface2
elif link.node2_id == net_id:
iface = link.iface1
return iface
class EdgeInfoFrame(InfoFrameBase):
def __init__(
self, master: tk.BaseWidget, app: "Application", edge: "CanvasEdge"
) -> None:
super().__init__(master, app)
self.edge: "CanvasEdge" = edge
def draw(self) -> None:
self.columnconfigure(0, weight=1)
link = self.edge.link
options = link.options
src_node = self.app.core.session.nodes[link.node1_id]
dst_node = self.app.core.session.nodes[link.node2_id]
frame = DetailsFrame(self)
frame.grid(sticky=tk.EW)
frame.add_detail("Source", src_node.name)
iface1 = link.iface1
if iface1:
mac = iface1.mac if iface1.mac else "auto"
frame.add_detail("MAC", mac)
ip4 = f"{iface1.ip4}/{iface1.ip4_mask}" if iface1.ip4 else ""
frame.add_detail("IP4", ip4)
ip6 = f"{iface1.ip6}/{iface1.ip6_mask}" if iface1.ip6 else ""
frame.add_detail("IP6", ip6)
frame.add_separator()
frame.add_detail("Destination", dst_node.name)
iface2 = link.iface2
if iface2:
mac = iface2.mac if iface2.mac else "auto"
frame.add_detail("MAC", mac)
ip4 = f"{iface2.ip4}/{iface2.ip4_mask}" if iface2.ip4 else ""
frame.add_detail("IP4", ip4)
ip6 = f"{iface2.ip6}/{iface2.ip6_mask}" if iface2.ip6 else ""
frame.add_detail("IP6", ip6)
if link.options:
frame.add_separator()
bandwidth = bandwidth_text(options.bandwidth)
frame.add_detail("Bandwidth", bandwidth)
frame.add_detail("Delay", f"{options.delay} us")
frame.add_detail("Jitter", f"\u00B1{options.jitter} us")
frame.add_detail("Loss", f"{options.loss}%")
frame.add_detail("Duplicate", f"{options.dup}%")
class WirelessEdgeInfoFrame(InfoFrameBase):
def __init__(
self, master: tk.BaseWidget, app: "Application", edge: "CanvasWirelessEdge"
) -> None:
super().__init__(master, app)
self.edge: "CanvasWirelessEdge" = edge
def draw(self) -> None:
link = self.edge.link
src_canvas_node = self.app.canvas.nodes[self.edge.src]
src_node = src_canvas_node.core_node
dst_canvas_node = self.app.canvas.nodes[self.edge.dst]
dst_node = dst_canvas_node.core_node
# find interface for each node connected to network
net_id = link.network_id
iface1 = get_iface(src_canvas_node, net_id)
iface2 = get_iface(dst_canvas_node, net_id)
frame = DetailsFrame(self)
frame.grid(sticky=tk.EW)
frame.add_detail("Source", src_node.name)
if iface1:
mac = iface1.mac if iface1.mac else "auto"
frame.add_detail("MAC", mac)
ip4 = f"{iface1.ip4}/{iface1.ip4_mask}" if iface1.ip4 else ""
frame.add_detail("IP4", ip4)
ip6 = f"{iface1.ip6}/{iface1.ip6_mask}" if iface1.ip6 else ""
frame.add_detail("IP6", ip6)
frame.add_separator()
frame.add_detail("Destination", dst_node.name)
if iface2:
mac = iface2.mac if iface2.mac else "auto"
frame.add_detail("MAC", mac)
ip4 = f"{iface2.ip4}/{iface2.ip4_mask}" if iface2.ip4 else ""
frame.add_detail("IP4", ip4)
ip6 = f"{iface2.ip6}/{iface2.ip6_mask}" if iface2.ip6 else ""
frame.add_detail("IP6", ip6)
|
from abc import ABC, abstractmethod
class BaseExplainer(ABC):
def __init__(self, model_to_explain, graphs, features, task):
self.model_to_explain = model_to_explain
self.graphs = graphs
self.features = features
self.type = task
@abstractmethod
def prepare(self, args):
"""Prepars the explanation method for explaining.
Can for example be used to train the method"""
pass
@abstractmethod
def explain(self, index):
"""
Main method for explaining samples
:param index: index of node/graph in self.graphs
:return: explanation for sample
"""
pass
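# Minimal concrete sketch (illustrative only; this class is not part of the
# package): it shows the contract a subclass has to satisfy.
class IdentityExplainer(BaseExplainer):
    def prepare(self, args):
        # nothing to train for this trivial explainer
        pass

    def explain(self, index):
        # return the raw features of the requested node/graph as a stand-in
        # "explanation"
        return self.features[index]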
|
# This script contains all of the code necessary for galcv.
# We want a function that returns the cosmic variance values for input apparent magnitudes
import numpy as np
import pandas as pd
import os
from scipy import integrate
from scipy import interpolate
# A dictionary of the parameters for which I have the exact interp files
fitParams = dict(
    mag=np.array([22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]),
    z=np.array([5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.]),
    zW=np.array([0.1, 0.15, 0.25, 0.5, 0.75, 1., 1.5, 2.]))
# This is a tuple of int and float types to check inputs against
intOrFloat = (int, np.int8, np.int16, np.int32, np.int64, float, np.float16, np.float32, np.float64)
# How many decimal places to round the outputs to
roundTo = 4
def getcv(mag, area, z, zW=1., appOrAbs='apparent', CMF_method='nu-scaling', interpWarning=1, goFast='No'):
'''
This function returns relative cosmic variance results. It is a wrapper that handles input formatting; the actual calculation happens in singlecv()
Parameters
-------------------------
mag : int, float, list, or numpy.ndarray
The magnitude(s) to consider. This must be in rest-UV (1500 - 2800 Angstroms) AB magnitude (default apparent magnitude; see appOrAbs param)
area : int or float
The area of a survey in arcmin^2 (square survey pattern only)
z : int or float
The central redshift of the survey
zW : int or float
The width of the redshift bin the survey is considering. Default is 1.
appOrAbs: 'apparent' or 'absolute'
Whether the mag input(s) are in apparent magnitudes or in absolute magnitudes
CMF_method: 'nu-scaling' or 'PS-scaling'
The method used for generating the conditional mass function. See Trapp & Furlanetto (2020) for details.
interpWarning: int or float
Flag for displaying interpolation warning message. 0 for no message, 1 for short message (Default), 2 for long message
Returns
-------------------------
A Python list of cosmic variance values of the same length as the mag input
'''
# My code uses apparent magnitudes, so if they are given in absolute magnitudes, convert to apparent first
if appOrAbs == 'absolute':
mag = absToApp(Mabs=mag, z=z)
# Check to make sure the keywords have the correct formats
if goFast == 'No':
checkVars(mag=mag, area=area, z=z, zW=zW, appOrAbs=appOrAbs, CMF_method=CMF_method, interpWarning=interpWarning)
# Now, if mag is just an int or float, return an int or float
if isinstance(mag, intOrFloat):
return singlecv(mag=mag, area=area, z=z, zW=zW, CMF_method=CMF_method, interpWarning=interpWarning, goFast=goFast)
else: # else, return a list of the cv values
answer = [singlecv(mag=a_mag, area=area, z=z, zW=zW, CMF_method=CMF_method, interpWarning=interpWarning, goFast=goFast) for a_mag in mag]
if any(np.isnan(a_answer) for a_answer in answer):
if goFast == 'No':
print('\nSome mag values are too bright to estimate cosmic variance. Those values are returned as np.nan objects.')
return answer
# If we want other variables able to be passed as an array, we need a nest of
# if statements for the ways to properly call singlecv()
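# Hedged usage sketch (values are illustrative only; see the ranges enforced by
# checkVars() below):
#     import galcv
#     galcv.getcv(mag=[29, 30, 31], area=100, z=9)    # -> list of three cv values
#     galcv.getcv(mag=30, area=500, z=7, zW=0.5)      # -> a single float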
def singlecv(mag, area, z, zW, CMF_method, interpWarning, goFast):
'''
This function returns relative cosmic variance results by reading in interp files of cosmic variance for parameters near those given, and interpolating between them.
Parameters
-------------------------
mag : int or float
The apparent magnitude to consider
area : int or float
The area of a survey in arcmin^2
z : int or float
The central redshift of the survey
zW : int or float
The width of the redshift bin the survey is considering. Default is 1
interpWarning: int or float
Flag for displaying interpolation warning message
'''
kwargs_fitMatches = dict(mag=mag, z=z, zW=zW)
# For each parameter, check to see if the provided one matches one of the fit files exactly
isExact, interpBetween = fitMatches(kwargs_fitMatches, fitParams)
# isExact is a boolean that tells us if the parameters ALL have exact fit files for them
# if so, this function becomes very simple.
if isExact:
# Read in the fit files and output the answer
thecv = readcv(mag=mag, area=area, z=z, zW=zW, CMF_method=CMF_method)
return round(thecv, roundTo)
##############
else: # Gotta do some interpolation
if interpWarning == 1:
print('Parameter combination requires interpolation.')
elif interpWarning == 2:
print('This code does not provide results for the specific combination of \'z\' and \'zW\' parameters. In this case, we interpolate between nearest valid parameters, which are:')
print('mag: ', fitParams['mag'])
print('z: ', fitParams['z'])
print('zW: ', fitParams['zW'])
print('We interpolate with log10(cosmic variance) between parameters, as cosmic variance is close to a powerlaw with these parameters.')
print('To shorten this message, set interpWarning = 1. To stop this message, set interpWarning = 0')
# interpBetween has all of the combination of parameters we need.
# Create empty arrays to hold the neighboring parameter values and their cv results
mags = np.zeros(2)
zs = np.zeros((2, 2))
zWs = np.zeros((2, 2, 2))
zW_epcvs = np.zeros((2, 2, 2))
# Read in all of the fit files adjacent to the desired parameters.
# If one or more of the parameters are an exact match for a fit file, read it in twice
for i in range(2):
i_mag = interpBetween['mag'][i]
mags[i] = i_mag
for j in range(2):
j_z = interpBetween['z'][j]
zs[i][j] = j_z
for k in range(2):
k_zW = interpBetween['zW'][k]
zWs[i][j][k] = k_zW
zW_epcvs[i][j][k] = readcv(mag=i_mag, area=area, z=j_z, zW=k_zW, CMF_method=CMF_method, verbose=False)
if np.any(np.isnan(zW_epcvs)): # If any of the epcv values required are np.nan, can't do the interpolation
if goFast == 'No':
print('Apparent magnitude {:.2f} is too bright for a cosmic variance interpolation estimate at this area, z, and zW'.format(mag))
return np.nan
# Now we interpolate between constant mag and z, but differing zW epcvs
z_epcvs = np.zeros((2, 2))
for i in range(2):
for j in range(2):
j_epcvs = zW_epcvs[i][j]
j_zWs = zWs[i][j]
z_epcvs[i][j] = interpcv(zW, ixs=j_zWs, iys=j_epcvs)
# now interpolate between constant mag but differing values of z
mag_epcvs = np.zeros(2)
for i in range(2):
i_epcvs = z_epcvs[i]
i_zs = zs[i]
mag_epcvs[i] = interpcv(z, ixs=i_zs, iys=i_epcvs)
# Finally, interpolate between magnitudes
final_epcvs = interpcv(mag, ixs=mags, iys=mag_epcvs)
return round(final_epcvs, roundTo)
def fitMatches(kwargs, fitParams):
'''
This function takes two dictionaries, one is the parameters that are being called, the other is the parameters for which I have exact fit files. If I have exact fit files for all parameters, then isExact is returned True, and interpBetween is returned None. Otherwise, isExact is returned False and interpBetween is a dict of the parameters of fit files that I should use to interp between
Parameters
-------------------------
kwargs : dict
A dictionary of all the parameters passed to singlecv
fitParams : dict
A dictionary of all the parameters for which I have exact fit files
'''
mag = kwargs['mag']
mags = fitParams['mag']
z = kwargs['z']
zs = fitParams['z']
zW = kwargs['zW']
zWs = fitParams['zW']
# If all of the parameters have exact fit files, return True and None
if (mag in mags) and (z in zs) and (zW in zWs):
return (True, None)
else:
# Check to see if mag is one of the ones I have an fit file for.
# If so, return itself twice for the singlecv function to use
if mag in mags:
interpBetween = dict(mag=[mag, mag])
else: # Otherwise, append the closest values on either side
maglo = mags[mags < mag].max()
maghi = mags[mags > mag].min()
interpBetween = dict(mag=[maglo, maghi])
# Now the same for the z parameters
if z in zs:
interpBetween['z'] = [z, z]
else:
zlo = zs[zs < z].max()
zhi = zs[zs > z].min()
interpBetween['z'] = [zlo, zhi]
# Finally the zW param
if zW in zWs:
interpBetween['zW'] = [zW, zW]
else:
zWlo = zWs[zWs < zW].max()
zWhi = zWs[zWs > zW].min()
interpBetween['zW'] = [zWlo, zWhi]
return (False, interpBetween)
def readcv(mag, area, z, zW, CMF_method, verbose=True):
'''
This function reads in the cv fits and returns the cv value, or np.nan if there is no estimate
Parameters
-------------------------
mag : int or float
The apparent magnitude to consider
area : int or float
The survey area in arcmin^2
z : int or float
The redshift
zW : int or float
The redshift bin width
'''
if mag not in fitParams['mag']:
raise Exception('Error, readcv should only be receiving magnitudes I know are in the fit files.')
# Which CMF method file to read in
if CMF_method == 'nu-scaling':
CMF_string = 'stdScale'
elif CMF_method == 'PS-scaling':
CMF_string = 'PSfrac'
# Read in the file of fits
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
my_file = os.path.join(THIS_FOLDER, 'varepsilonCV_' + CMF_string + '_z{:.0f}_zW{:.2f}.pkl'.format(z, zW))
dffit = pd.read_pickle(my_file)
mapps = dffit['mapp'].values
Psis = dffit['Psi'].values
gammas = dffit['gamma'].values
bs = dffit['b'].values
minAs = dffit['minA'].values
maxAs = dffit['maxA'].values
if mag not in mapps:
raise Exception('Error, the mag supplied to readcv is not in the mapps array from the fit file, but it should be.')
# What index is the mag in the mapps array?
whichIndex = np.where(mapps == mag)[0][0]
# Check to see if there is an estimate at this survey area,
# and also make sure there is an estimate at this magnitude at all
if (area >= minAs[whichIndex]) and ~np.isnan(minAs[whichIndex]):
return 10**log10Eps(area, Psis[whichIndex], gammas[whichIndex], bs[whichIndex])
else:
if verbose:
print('apparent magnitude {:.2f} is too bright to calculate cosmic variance.'.format(mag))
i = 1
while (mag + i) < max(mapps): # Finding the brightest mag that has an estimate at this area
checkIdx = np.where(mapps == (mag + i))[0][0]
if (area >= minAs[checkIdx]) and minAs[checkIdx] != -9999:
print('minimum magnitude for this area, z, and zW: mag = ', (mag + i))
break
i += 1
print('Returning np.nan object.')
return np.nan
def interpcv(x, ixs, iys):
'''
This function takes 2 x values (ixs) and 2 y values (iys), interpolates a line between them (in log10(y)), and then evaluates x on that line
Parameters
-------------------------
x : int or float
The value to evaluate at
ixs : int or float
The x values for the interpolation
iys : int or float
The epcv values
'''
if ixs[0] > ixs[1]:
raise Exception('The two ixs put into interpcv should be sorted low to high')
if x < ixs[0] or x > ixs[1]:
raise Exception('The x value given to interpcv should be between the two ixs values')
iys = np.log10(iys)
rise = iys[1] - iys[0]
run = ixs[1] - ixs[0]
# If the same x and y values are given, just return one of the y values.
if (rise == 0) or (run == 0):
return 10**iys[1]
else:
slope = rise / run
ans = iys[0] + slope * (x - ixs[0])
return 10**ans
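# Worked example (illustrative): interpcv(2.5, ixs=[2, 3], iys=[10.0, 100.0])
# interpolates log10(y) between (2, 1.0) and (3, 2.0), giving 10**1.5 ~= 31.62.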
def inRange(value, theRange):
'''
This function checks if the value is within the range theRange
Parameters
-------------------------
value : int or float
The numerical value
theRange: list
The lower and upper bounds of the range
'''
if not isinstance(value, intOrFloat):
raise Exception('The argument of inRange() must be a single int or float')
if type(theRange) != list:
raise Exception('theRange must be a list with two values')
elif len(theRange) != 2:
raise Exception('theRange must be a list with two values')
if (value >= theRange[0]) and (value <= theRange[1]):
return True
else:
return False
def checkVars(mag, area, z, zW, appOrAbs, CMF_method, interpWarning):
# Check if the variables have the correct dtype and are in the correct ranges. If not, raise an exception.
magRange = [min(fitParams['mag']), max(fitParams['mag'])]
areaRange = [1, 3.16e4]
zRange = [min(fitParams['z']), max(fitParams['z'])]
zWRange = [min(fitParams['zW']), max(fitParams['zW'])]
# Check the mag variable
if type(mag) in (list, np.ndarray):
if any([not isinstance(a_mag, intOrFloat) for a_mag in mag]):
raise Exception('All values in the list/array \'mag\' must be a int or float')
elif any([not inRange(a_mag, magRange) for a_mag in mag]):
raise Exception('at least one mag value is outside of apparent mag range: {}'.format(magRange))
elif isinstance(mag, intOrFloat):
if not inRange(mag, magRange):
raise Exception('mag value is outside of apparent mag range: {}'.format(magRange))
else:
raise Exception('\'mag\' must be a float, int, list, or numpy array')
# Now the area variable
if isinstance(area, intOrFloat):
if not inRange(area, areaRange):
raise Exception('area value outside of area range: {}'.format(areaRange))
else:
raise Exception('area must be a float or an int')
# Now the z variable
if isinstance(z, intOrFloat):
if not inRange(z, zRange):
raise Exception('z value outside of z range: {}'.format(zRange))
else:
raise Exception('z must be a float or an int')
# Now the zW variable
if isinstance(zW, intOrFloat):
if not inRange(zW, zWRange):
raise Exception('zW value outside of zW range: {}'.format(zWRange))
else:
raise Exception('zW must be a float or an int')
# Now the appOrAbs variable
if type(appOrAbs) != str:
raise Exception('appOrAbs must be \'apparent\' or \'absolute\'')
# Now the CMF_method variable
if type(CMF_method) != str:
raise Exception('CMF_method must be \'nu-scaling\' or \'PS-scaling\'')
# Now for interpWarning
if isinstance(interpWarning, intOrFloat):
if interpWarning not in [0, 1, 2]:
raise Exception('interpWarning must be equal to 0, 1, or 2')
else:
raise Exception('interpWarning must be int or float')
def log10Eps(area, Psi, gamma, b):
'''
This function returns log10(epsilon_cv) given the fit parameters Psi, gamma, and b
Parameters
-------------------------
area : int or float
The area of the survey
Psi, gamma, b: int or float
The parameters of the varepsiloncv fit
'''
return Psi * area**gamma + b
def lincv(mass, area, z, zW=1, message='yes'):
'''
    Warning! Use with caution and only if outside the bounds of 'galcv.getcv()'.
    This function is designed to be used at larger areas and larger masses (brighter
    galaxies) than galcv.getcv(). For additional questions see the README on GitHub.
    This function returns the 1-sigma linear approximation of cosmic variance for
    haloes of the chosen mass (in solar masses) in the chosen volume at the chosen
    redshift. Also, this method assumes the survey volume is a sphere. If your survey
    volume is actually very elongated in some direction, this method will overestimate
    cosmic variance.
Parameters
-------------------------
mass : int or float or array-like
Mass of a halo (in units of solar mass)
area : int or float
Survey area in square arcminutes
z : int or float
Central redshift (range: 4 - 15)
zW : int or float
Redshift width (default = 1; range: 0.1 - 2)
message : 'yes' or 'no'
Whether or not to print the warning message
Returns
-------------------------
    A NumPy array of cosmic variance values with the same length as the mass input
'''
if message == 'yes':
print('Warning! Use with caution and only if outside the bounds of \'galcv.getcv()\'. This function is designed to be used at larger areas and larger masses (brighter galaxies) than galcv.getcv(). For additional questions see the README on GitHub. This function returns the 1-sigma linear approximation of cosmic variance for haloes of the chosen mass (in solar masses) in the chosen volume at the chosen redshift. Also, this method assumes the survey volume is a sphere. If your survey volume is actually very elongated in some direction, this method will overestimate cosmic variance.')
tckS = np.load(os.path.dirname(os.path.abspath(__file__)) + '/sigma0fM_interp.npy', allow_pickle=True)
# sigma associated with the halo mass
sigM = 10**interpolate.splev(np.log10(mass), tckS) * growthFac(z=z)
alpha = -1.36
beta = -1.14
# Calculating the Trac linear bias
bTR = 1
bTR += (alpha / delCrit0(z=z)) * (sigM / 2.54)**alpha / (1 + (sigM / 2.54)**alpha)
bTR -= (2 * beta) / (sigM**2 * delCrit0(z=z))
CRITDENMSOLMPC = 2.7755e11
# Calculating the sigma associated with the survey area
s_sr = arcmin2ToSr(arcmin2=area)
s_Vol = volCM(z1=z - 0.5 * zW, z2=z + 0.5 * zW, Omega=s_sr)
s_RegMass = s_Vol * CRITDENMSOLMPC * om0hh
sigM_Reg = 10**interpolate.splev(np.log10(s_RegMass), tckS) * growthFac(z=z)
# The cosmic variance is the survey-wide 1-sigma fluctuation times the bias factor
if z < 4 or z > 15:
print('\n\n\nAnother Warning! You have chosen a redshift outside the recommended range of 4 - 15. I\'ll still output an answer but be wary.')
if area < 3e4:
print('\n\n\nAnother Warning! This area is covered in getcv(); you may be able to use that instead.')
if zW/z > 0.25:
print('\n\n\nAnother Warning! The redshift bin width is getting large compared to the redshift.')
if zW > 2:
print('\n\n\nAnother Warning! The redshift bin width is getting large. Recommend to keep it below 2')
if np.any(mass > 1e14):
print('\n\n\nAnother Warning! There is a very massive halo here! Are you sure that is what you wanted?')
if np.any(mass < 4e8):
print('\n\n\nAnother Warning! There is a very small-mass halo here... Are you sure that is what you wanted?')
return bTR * sigM_Reg
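# Minimal usage sketch (illustrative addition, not from the package docs): one call of
# lincv for a massive halo over a wide area. The numbers are examples only; the call
# also relies on the packaged sigma0fM_interp.npy data file loaded inside lincv.
def _example_lincv():
    cv = lincv(mass=1e12, area=5e4, z=6, zW=1, message='no')
    print(cv)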
###Now to define various cosmology equations for use in the main methods
def absToApp(Mabs='ER', z='ER'):
'''
This function converts absolute magnitude into apparent
Parameters
-------------------------
Mabs : int or float or array-like
absolute magnitudes
z : int or float
redshift
'''
DL_parsec = lum_dist(z=z)
# Convert to parsecs
DL_parsec = DL_parsec * 1.e6
mapp = Mabs
mapp = mapp + 5.0 * np.log10(DL_parsec / 10)
mapp = mapp - 2.5 * np.log10(1.0 + z)
return mapp
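# Illustrative sketch (not part of the module): converting an absolute magnitude of
# -21 at z = 7 to an apparent magnitude with the cosmology constants defined below.
def _example_abs_to_app():
    print(round(absToApp(Mabs=-21.0, z=7.0), 2))  # roughly 26 for these parameters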
def lum_dist(z='ER', **kwargs):
'''
This function returns the luminosity distance to some redshift
Parameters
-------------------------
z : int or float
redshift
'''
ans = comv_dist(z=z) * (1.0 + z)
return ans
HPARAM = 0.678 # hubble constant today/100
OMEGA0 = 0.308 # matter fraction today
OMEGANU = 0.0 # radiation fraction today
LAMBDA0 = 0.692 # dark energy fraction today
SPEEDOFLIGHTMPCPERSEC = 9.71561e-15
TCMB = 2.726 # CMB temp today
thetaCMB = TCMB / 2.7
om0hh = OMEGA0 * HPARAM * HPARAM
zEquality = 2.50e4 * om0hh * pow(thetaCMB, -4.0) - 1.0
def comv_dist(z='ER', **kwargs):
'''
This function returns the comoving distance to some redshift
Parameters
-------------------------
z : int or float
redshift
'''
def wrapper_comv_dist(x):
# What is the hubble constant today in 1/s
Hnot = 100.0 * HPARAM * 3.24078e-20
Hofz = Hnot * np.sqrt(OMEGA0 * (1. + x)**3. + OMEGANU * (1. + x)**4. + LAMBDA0)
ansWR = SPEEDOFLIGHTMPCPERSEC / Hofz
return ansWR
ans, abserr = integrate.quad(wrapper_comv_dist, 0.0, z)
if abs(abserr / ans) > 1e-4:
print('Warning! Comoving distance calculation err is high')
print('err/value = ' + str(abs(abserr / ans)))
return ans
def growthFac(z):
omZ = omegaZ(z=z)
lamZ = lambdaZ(z=z)
D = ((1.0 + zEquality) / (1.0 + z) * 5.0 * omZ / 2.0 * pow(pow(omZ, 4.0 / 7.0) - lamZ + (1.0 + omZ / 2.0) * (1.0 + lamZ / 70.0), -1.0))
D = D / ((1.0 + zEquality) * 5.0 / 2.0 * OMEGA0 * pow(pow(OMEGA0, 4.0 / 7.0) - LAMBDA0 + (1.0 + OMEGA0 / 2.0) * (1.0 + LAMBDA0 / 70.0), -1.0))
return D
def omegaZ(z):
z = z + 1.
temp = OMEGA0 * z**3.0
temp = temp / (temp + LAMBDA0 + (1. - OMEGA0 - LAMBDA0) * z**2)
return temp
def lambdaZ(z):
z = z + 1.
temp = LAMBDA0
temp = temp / (temp + OMEGA0 * z**3 + ((1. - OMEGA0 - LAMBDA0) * z**2))
return temp
def delCrit0(z):
t1 = 0.15 * (12 * np.pi)**(2. / 3.)
omZ = 0 + omegaZ(z=z)
if abs(omZ - 1) < 1.0e-5:
return t1
elif abs(LAMBDA0) < 1.0e-5:
t1 = t1 * omZ**0.0185
return t1
else:
t1 = t1 * omZ**0.0055
return t1
def arcmin2ToSr(arcmin2):
steradians = arcmin2 * (1. / (60. * 180. / np.pi))**2.
return steradians
def volCM(z1='ER', z2='ER', Omega='ER'):
if z1 == 0:
print('Warning 8623478: You can\'t have z1=0. Pick some small value instead if you must.')
# First, interpolate the comoving radius between the redshift bounds
the_dz = 0.1
the_zs = np.arange(z1, z2 + 2 * the_dz, the_dz)
the_rcoms = np.zeros(len(the_zs))
for i in range(len(the_zs)):
the_rcoms[i] = comv_dist(the_zs[i])
tckRCOM = interpolate.splrep(the_zs, the_rcoms, k=1, s=0)
def wrapper_volCM(x):
Hnot = 100.0 * HPARAM * 3.24078e-20
Hofz = Hnot * np.sqrt(OMEGA0 * (1. + x)**3. + OMEGANU * (1. + x)**4. + LAMBDA0)
c_over_Hofz = SPEEDOFLIGHTMPCPERSEC / Hofz
ansWR = c_over_Hofz
ansWR = ansWR * interpolate.splev(x, tckRCOM)**2
return ansWR
ans, abserr = integrate.quad(wrapper_volCM, z1, z2, limit=100)
if abs(abserr / ans) > 1e-4:
        print('Warning! Volume (comoving) calculation err is high')
print('err/value = ' + str(abs(abserr / ans)))
return ans * Omega
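# Illustrative sketch (not part of the module): the comoving volume, in Mpc^3, of a
# 100 square-arcminute field spanning z = 5.5 to 6.5, using the helpers above.
def _example_volCM():
    omega_sr = arcmin2ToSr(arcmin2=100)
    print(volCM(z1=5.5, z2=6.5, Omega=omega_sr))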
|
#self-hosting guide: https://gist.github.com/emxsys/a507f3cad928e66f6410e7ac28e2990f
#updated to use scratchcloud
from scratchcloud import CloudClient, CloudChange
import scEncoder
import os, time, json
from mcstatus import JavaServer
from dotenv import load_dotenv
from random import randint
#get .env passcodes
load_dotenv()
username = os.environ['USERNAME']
password = os.environ['PASSWORD']
client = CloudClient(username=username, project_id='690926913')
encoder = scEncoder.Encoder()
chars = scEncoder.ALL_CHARS
def lookup(server_ip):
try:
server = JavaServer(server_ip)
status = server.status()
#query = server.query()
modt = status.description
print(modt)
description = ''.join([i for i in modt if i in chars])
print(description)
raw = status.raw
rawjson = json.dumps(raw)
rawdata = json.loads(str(rawjson))
players = []
for name in rawdata['players']['sample']:
print(name['name'])
players.append(name['name'])
print(players)
players1 = players[0:6]
playersample=' '.join([str(item) for item in players1])
print(playersample)
return randint(0, 10000), server_ip, "{0}/{1}".format(status.players.online, status.players.max), status.version.name, status.latency, description, playersample
    except Exception:  # fall back to a reduced response if full status parsing fails
try:
server = JavaServer(server_ip)
status = server.status()
return randint(0, 10000), server_ip, "{0}/{1}".format(status.players.online, status.players.max), status.version.name, status.latency
        except Exception:
return "Error"
@client.event
async def on_connect():
print('Project connected.')
@client.event
async def on_disconnect():
print('Project disconnected!')
@client.cloud_event('REQUEST')
async def on_request(var: CloudChange):
print(f'The {var.name} variable was changed to {var.value}!')
server_ip = encoder.decode(var.value)
response = lookup(server_ip)
if response == "Error":
await client.set_cloud('RESPONSE', '400')
print("Error response sent")
await client.set_cloud('REQUEST', '400')
else:
print(response) #response object needs to be sent in two pieces to avoid scratch cloud character limit
print(encoder.encode_list(list(response)))
await client.set_cloud('RESPONSE', encoder.encode_list(list(response[0:5])))
await client.set_cloud('modt', encoder.encode(response[5]))
await client.set_cloud('playersample', encoder.encode(response[6]))
await client.set_cloud('REQUEST', '200')
client.run(password)
|
import re
from openstackinabox.services.cinder.v1 import base
from openstackinabox.models.cinder import model
from openstackinabox.services.cinder.v1 import volumes
class CinderV1Service(base.CinderV1ServiceBase):
def __init__(self, keystone_service):
super(CinderV1Service, self).__init__(keystone_service, 'cinder/v1')
self.log_info('initializing cinder v1.0 services...')
self.model = model.CinderModel(keystone_service.model)
self.__subservices = [
{
'path': re.compile('^/volumes'),
'service': volumes.CinderV1Volumes(
self.model,
keystone_service
)
}
]
for subservice in self.__subservices:
self.register_subservice(
subservice['path'],
subservice['service']
)
self.log_info('initialized')
|
from enum import Enum, auto
"""
Enum of Task status
Represents all possible statuses of task
"""
class TaskStatus(Enum):
COMPLETED = auto()
UNCOMPLETED = auto()
PLANNED = auto()
RUNNING = auto()
FINISHED = auto()
def __json__(self):
"""
JSON serialization for TaskStatus
:return: Task status name
"""
return self.name
for_json = __json__ # supported by simplejson
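# Minimal serialization sketch (illustrative addition): __json__/for_json return the
# status name, so a status can be embedded in JSON output, e.g. via simplejson's
# for_json hook.
def _example_task_status_json():
    status = TaskStatus.RUNNING
    print(status.for_json())  # 'RUNNING'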
|
import numpy as np
import pandas as pd
import random
from segmentation.utils import visualize
import importlib
importlib.reload(visualize)
def test_visualize_z_scores():
# Create random Z score
z = np.random.uniform(low=-5, high=5, size=(26, 26))
# Assign random phenotype titles
pheno_titles = [chr(i) for i in range(ord('a'), ord('z') + 1)]
plot = visualize.visualize_z_scores(z, pheno_titles)
def test_visualize_cells():
rand_type = []
ids = []
for x in range(0,1000):
rand_type.append(chr(random.randint(0,25) + 97))
ids.append(random.randint(1,5))
print(rand_type, ids)
rand_dict = {"PatientID": ids, "cell_type": rand_type}
df = pd.DataFrame.from_dict(rand_dict)
print(df)
    # these plotting helpers are assumed to live in the imported visualize module
    visualize.visualize_cell_distribution_in_all_patients(df, "cell_type")
    visualize.visualize_distribution_of_cell_count(df, "PatientID", "cell_type")
    visualize.visualize_proportion_of_cell_count(df, "PatientID", "cell_type")
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import DetailView
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, DeleteView, UpdateView, \
FormMixin, FormView
from django.urls import reverse_lazy
from .models import Machine, Device, Service
from .forms import DeviceSearchForm, MachineSetupForm, ServiceForm
import json
import time
class MachineIndex(LoginRequiredMixin, ListView):
"""
host list
"""
template_name = 'machines/index.html'
context_object_name = 'machines'
def get_queryset(self):
return Machine.objects.prefetch_related('location').order_by('pk')
class MachineDetail(DetailView):
"""
host details
"""
model = Machine
template_name = 'machines/detail.html'
queryset = Machine.objects.prefetch_related('device').all()
class MachineCreate(CreateView):
"""
create a host
"""
model = Machine
fields = ['name', 'location']
class MachineUpdate(UpdateView):
"""
edit machine
"""
model = Machine
fields = '__all__'
class MachineDelete(DeleteView):
"""
remove a host
"""
model = Machine
success_url = reverse_lazy('machines:index')
class ServiceIndexView(LoginRequiredMixin, ListView):
"""
service list
"""
template_name = 'machines/service_index.html'
context_object_name = 'services'
paginate_by = 25
def get_queryset(self):
query = Service.objects.prefetch_related('machine', 'device')\
.all()
return query.order_by('-date')
class ServiceCreate(LoginRequiredMixin, CreateView):
"""
service add
"""
model = Service
fields = ['machine', 'date', 'description', 'device']
def get_initial(self, **kwargs):
return {'machine': self.request.GET.get("machine_id", None),
'date': time.strftime('%Y-%m-%d')}
class ServiceUpdate(LoginRequiredMixin, UpdateView):
"""
service update
"""
model = Service
form_class = ServiceForm
class ServiceDelete(LoginRequiredMixin, DeleteView):
"""
service delete
"""
model = Service
def get_success_url(self):
machine = self.object.machine
return reverse_lazy('machines:detail', kwargs={'pk': machine.pk})
class DeviceIndexView(LoginRequiredMixin, FormMixin, ListView):
"""
component list
"""
template_name = 'machines/device_index.html'
form_class = DeviceSearchForm
context_object_name = 'device'
model = Device
paginate_by = 25
def get_queryset(self):
query = Device.objects.prefetch_related('type', 'location').all()
if self.device_type:
query = query.filter(type=self.device_type)
if self.location:
query = query.filter(location=self.location)
return query
def get_initial(self):
initials = {}
if self.device_type:
initials['device_type'] = self.device_type
if self.location:
initials['location'] = self.location
return initials
def dispatch(self, request, *args, **kwargs):
self.device_type = request.GET.get('device_type', None)
self.location = request.GET.get('location', None)
return super().dispatch(request, *args, **kwargs)
class DeviceDetailView(LoginRequiredMixin, DetailView):
"""
component detail
"""
model = Device
template_name = 'machines/device_detail.html'
class DeviceCreate(LoginRequiredMixin, CreateView):
"""
component add
"""
model = Device
fields = ['type', 'location', 'date', 'name', 'price', 'company',
'invoice', 'serial', 'machine', 'invoice_pdf']
def get_initial(self, **kwargs):
return {'date': time.strftime('%Y-%m-%d')}
class DeviceUpdate(LoginRequiredMixin, UpdateView):
"""
component update
"""
model = Device
fields = ['type', 'location', 'date', 'name', 'price', 'company',
'invoice', 'serial', 'machine', 'invoice_pdf']
class DeviceDelete(LoginRequiredMixin, DeleteView):
"""
component delete
"""
model = Device
success_url = reverse_lazy('machines:device_index')
class MachineSetupUpload(FormView):
""" upload ansile setup json """
form_class = MachineSetupForm
success_url = reverse_lazy('machines:index')
template_name = 'machines/setup_upload.html'
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
files = request.FILES.getlist('file_field')
if form.is_valid():
            for f in files:
                content = f.file.read().decode()
                j = json.loads(content)
if 'failed' in j:
continue
fqdn = j['ansible_facts']['ansible_fqdn']
try:
obj = Machine.objects.get(FQDN=fqdn)
except Machine.DoesNotExist:
return self.form_invalid(form)
obj.date = j['ansible_facts']['ansible_date_time']['date']
obj.form = j['ansible_facts']['ansible_form_factor']
obj.bios = j['ansible_facts']['ansible_bios_date']
obj.prod = j['ansible_facts']['ansible_product_name']
obj.vendor = j['ansible_facts']['ansible_system_vendor']
obj.OS = "%s %s" % (j['ansible_facts']['ansible_distribution'],
j['ansible_facts']['ansible_distribution_version']) # noqa
obj.kernel = j['ansible_facts']['ansible_kernel']
obj.CPU = j['ansible_facts']['ansible_processor'][2]
obj.cores = j['ansible_facts']['ansible_processor_cores']
obj.arch = j['ansible_facts']['ansible_architecture']
obj.mem = j['ansible_facts']['ansible_memtotal_mb']
# obj.disk = j ['ansible_facts']['ansible_devices']['sda']['model'] # noqa
obj.IPs = ', '.join(j['ansible_facts']['ansible_all_ipv4_addresses']) # noqa
                obj.gateway = j['ansible_facts']['ansible_default_ipv4']['gateway']  # noqa
                obj.gate_iface = j['ansible_facts']['ansible_default_ipv4']['interface']  # noqa
obj.save()
return self.form_valid(form)
else:
return self.form_invalid(form)
|
name = 'bah'
version = '2.1'
authors = ["joe.bloggs"]
uuid = "3c027ce6593244af947e305fc48eec96"
description = "bah humbug"
private_build_requires = ["build_util"]
variants = [
["foo-1.0"],
["foo-1.1"]]
hashed_variants = True
build_command = 'python {root}/build.py {install}'
|
""" Day 08 of the 2021 Advent of Code
https://adventofcode.com/2021/day/8
https://adventofcode.com/2021/day/8/input """
def load_data(path):
data_list = []
with open(path, "r") as file:
for line in file:
ref_values, values = line.split(" | ")
ref_values = ref_values.split(" ")
ref_values = [i.strip() for i in ref_values]
ref_values = sorted(ref_values, key=len)
ref_values = ["".join(sorted(i, key=str.lower)) for i in ref_values]
assert len(ref_values) == 10
values = values.split(" ")
values = [i.strip() for i in values]
values = ["".join(sorted(i, key=str.lower)) for i in values]
assert len(values) == 4
data_list.append((ref_values, values))
return data_list
def value_in_value(ref, value):
for i in ref:
if not i in value:
return False
return True
def value_minus_value(value, min_value):
for i in min_value:
value = value.replace(i, "")
return value
def order_ref_values(ref_values):
values_order = [None] * 10
len_5 = ref_values[3 : 5 + 1]
len_6 = ref_values[6 : 8 + 1]
# 1
rechts = ref_values[0]
values_order[1] = rechts
values_order[4] = ref_values[2]
values_order[7] = ref_values[1]
values_order[8] = ref_values[9]
# 2
values_order[3] = [i for i in len_5 if value_in_value(values_order[1], i)][0]
values_order[6] = [i for i in len_6 if not value_in_value(values_order[1], i)][0]
# 3
rechts_boven = value_minus_value(values_order[8], values_order[6])
values_order[2] = [i for i in len_5 if not value_in_value(values_order[1], i) and value_in_value(rechts_boven, i)][0]
values_order[5] = [i for i in len_5 if not value_in_value(values_order[1], i) and not value_in_value(rechts_boven, i)][0]
# 4
links = value_minus_value(values_order[8], values_order[3])
values_order[9] = [i for i in len_6 if value_in_value(values_order[1], i) and not value_in_value(links, i)][0]
values_order[0] = [i for i in len_6 if value_in_value(values_order[1], i) and value_in_value(links, i)][0]
return values_order
def part_1(data):
solution = 0
for i in data:
for value in i[1]:
if len(value) in [2, 4, 3, 7]:
solution = solution + 1
return solution
def part_2(data):
solution = 0
for line in data:
line_number = ""
ordered_ref = order_ref_values(line[0])
for value in line[1]:
for index in range(10):
if value == ordered_ref[index]:
line_number = line_number + str(index)
break
solution = solution + int(line_number)
return solution
def main():
data = load_data("..//Data//Prod.txt")
data_test = load_data("..//Data//Test.txt")
assert part_1(data_test) == 26
assert part_1(data) == 445
assert part_2(data_test) == 61229
assert part_2(data) == 1043101
if __name__ == "__main__":
main()
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test citest.reporting.html_renderer module."""
import os
import shutil
import tempfile
import unittest
from citest.base import (
Journal,
StreamJournalNavigator)
class StreamNavigatorTest(unittest.TestCase):
# pylint: disable=missing-docstring
@classmethod
def setUpClass(cls):
cls.temp_dir = tempfile.mkdtemp(prefix='journal_nav_test')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.temp_dir)
def test_iterator(self):
journal = Journal()
path = os.path.join(self.temp_dir, 'test_iterator.journal')
expect = []
journal.open_with_path(path, TestString='TestValue', TestNum=123)
expect.append({'_type': 'JournalMessage',
'_value': 'Starting journal.',
'TestString': 'TestValue',
'TestNum': 123})
journal.write_message('Initial Message')
expect.append({'_type': 'JournalMessage', '_value': 'Initial Message'})
journal.begin_context('OUTER', TestProperty='BeginOuter')
expect.append({'_type': 'JournalContextControl',
'control': 'BEGIN',
'_title': 'OUTER',
'TestProperty': 'BeginOuter'})
journal.write_message('Context Message', format='pre')
expect.append({'_type': 'JournalMessage', '_value': 'Context Message',
'format': 'pre'})
journal.end_context(TestProperty='END OUTER')
expect.append({'_type': 'JournalContextControl',
'control': 'END',
'TestProperty': 'END OUTER'})
journal.terminate(EndProperty='xyz')
expect.append({'_type': 'JournalMessage',
'_value': 'Finished journal.',
'EndProperty': 'xyz'})
# We're going to pop off expect, so reverse it
# so that we can check in order.
expect.reverse()
navigator = StreamJournalNavigator.new_from_path(path)
for record in navigator:
del(record['_thread'])
del(record['_timestamp'])
self.assertEquals(record, expect.pop())
self.assertEquals([], expect)
if __name__ == '__main__':
unittest.main()
|
# Definition for singly-linked list.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
def rotateRight(self, head: ListNode, k: int) -> ListNode:
        if head is None or head.next is None:
return head
count = 1
p = head
while p.next:
count+=1
p=p.next
rot = k%count
temp = head
p.next = head
for i in range(count-rot-1):
temp = temp.next
answer = temp.next
temp.next = None
return answer
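# Minimal usage sketch (illustrative addition, not part of the original submission):
# rotate the list 1 -> 2 -> 3 -> 4 -> 5 right by 2 places, using the ListNode
# definition above.
def _demo_rotate_right():
    head = ListNode(1)
    node = head
    for val in range(2, 6):
        node.next = ListNode(val)
        node = node.next
    rotated = Solution().rotateRight(head, 2)
    values = []
    while rotated:
        values.append(rotated.val)
        rotated = rotated.next
    print(values)  # expected: [4, 5, 1, 2, 3]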
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for OSS-Fuzz infrastructure."""
import os
import re
import stat
import helper
ALLOWED_FUZZ_TARGET_EXTENSIONS = ['', '.exe']
FUZZ_TARGET_SEARCH_STRING = 'LLVMFuzzerTestOneInput'
VALID_TARGET_NAME = re.compile(r'^[a-zA-Z0-9_-]+$')
def chdir_to_root():
"""Changes cwd to OSS-Fuzz root directory."""
# Change to oss-fuzz main directory so helper.py runs correctly.
if os.getcwd() != helper.OSSFUZZ_DIR:
os.chdir(helper.OSSFUZZ_DIR)
def is_fuzz_target_local(file_path):
"""Returns whether |file_path| is a fuzz target binary (local path).
Copied from clusterfuzz src/python/bot/fuzzers/utils.py
with slight modifications.
"""
filename, file_extension = os.path.splitext(os.path.basename(file_path))
if not VALID_TARGET_NAME.match(filename):
# Check fuzz target has a valid name (without any special chars).
return False
if file_extension not in ALLOWED_FUZZ_TARGET_EXTENSIONS:
# Ignore files with disallowed extensions (to prevent opening e.g. .zips).
return False
if not os.path.exists(file_path) or not os.access(file_path, os.X_OK):
return False
if filename.endswith('_fuzzer'):
return True
if os.path.exists(file_path) and not stat.S_ISREG(os.stat(file_path).st_mode):
return False
with open(file_path, 'rb') as file_handle:
return file_handle.read().find(FUZZ_TARGET_SEARCH_STRING.encode()) != -1
def get_fuzz_targets(path):
"""Get list of fuzz targets in a directory.
Args:
path: A path to search for fuzz targets in.
Returns:
A list of paths to fuzzers or an empty list if None.
"""
if not os.path.exists(path):
return []
fuzz_target_paths = []
  for root, _, filenames in os.walk(path):
    for filename in filenames:
file_path = os.path.join(root, filename)
if is_fuzz_target_local(file_path):
fuzz_target_paths.append(file_path)
return fuzz_target_paths
def get_container_name():
"""Gets the name of the current docker container you are in.
/proc/self/cgroup can be used to check control groups e.g. Docker.
See: https://docs.docker.com/config/containers/runmetrics/ for more info.
Returns:
Container name or None if not in a container.
"""
with open('/proc/self/cgroup') as file_handle:
if 'docker' not in file_handle.read():
return None
with open('/etc/hostname') as file_handle:
return file_handle.read().strip()
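# Illustrative sketch (not part of OSS-Fuzz): listing fuzz target binaries found
# under a build output directory. The directory path here is only a placeholder.
def _example_list_fuzz_targets():
  for target_path in get_fuzz_targets('/tmp/oss-fuzz-build-out'):
    print(os.path.basename(target_path))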
|
import sys
import subprocess
import shlex
from os import path
args = sys.argv
usage = '''
Usage: python %s -inkey [public.key] -in [signed.bin] -out [output.bin]
''' % args[0]
not_there = False
not_there_list = []
for arg in ["-inkey", "-out", "-in"]:
if arg not in args:
not_there = True
not_there_list.append(arg)
if not_there:
    print('Missing arg(s): %s.' % not_there_list)
    print(usage)
sys.exit(1)
inkey = args[args.index("-inkey")+1]
out = args[args.index("-out")+1]
_in = args[args.index("-in")+1]
command = "openssl rsautl -verify -pubin -inkey %s -in %s -out %s" % (inkey, _in, out)
print(command)
split = shlex.split(command)
call = subprocess.call(split)
sys.exit(call)
|
# Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
class DistributedSampler(_DistributedSampler):
def __init__(self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
seed=0):
super().__init__(
dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
# for the compatibility from PyTorch 1.3+
self.seed = seed if seed is not None else 0
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch + self.seed)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
# in case that indices is shorter than half of total_size
indices = (indices *
math.ceil(self.total_size / len(indices)))[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
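# Minimal usage sketch (illustrative addition, not part of the OpenMMLab code):
# wiring the sampler into a DataLoader with an explicit rank/num_replicas so it can
# run without initializing a distributed process group.
def _example_distributed_sampler():
    from torch.utils.data import DataLoader, TensorDataset
    dataset = TensorDataset(torch.arange(10))
    sampler = DistributedSampler(dataset, num_replicas=2, rank=0, shuffle=True, seed=0)
    sampler.set_epoch(0)  # keeps the shuffle deterministic per epoch
    loader = DataLoader(dataset, batch_size=2, sampler=sampler)
    for batch in loader:
        print(batch)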
|
# A* ALGORITHM
class Node:
def __init__(self, row, col, value):
self.id = str(row) + "-" + str(col)
self.row = row
self.col = col
self.value = value
self.distanceFromStart = float("inf")
self.estimatedDistanceToEnd = float("inf")
self.cameFrom = None
# O(W * H * log(W * H)) time and O(W * H) space
# W -> Width, H -> Height
def aStarAlgorithm(startRow, startCol, endRow, endCol, graph):
# Write your code here.
nodes = initializeNodes(graph)
startNode = nodes[startRow][startCol]
endNode = nodes[endRow][endCol]
startNode.distanceFromStart = 0
startNode.estimatedDistanceToEnd = calculateManhattanDistance(startNode, endNode)
nodesToVisit = MinHeap([startNode])
while not nodesToVisit.isEmpty():
currentMinDistanceNode = nodesToVisit.remove()
if currentMinDistanceNode == endNode:
break
neighbors = getNeighboringNodes(currentMinDistanceNode, nodes)
for neighbor in neighbors:
if neighbor.value == 1:
continue
tentativeDistanceToNeighbor = currentMinDistanceNode.distanceFromStart + 1
if tentativeDistanceToNeighbor >= neighbor.distanceFromStart:
continue
neighbor.cameFrom = currentMinDistanceNode
neighbor.distanceFromStart = tentativeDistanceToNeighbor
neighbor.estimatedDistanceToEnd = tentativeDistanceToNeighbor + calculateManhattanDistance(
neighbor, endNode
)
if not nodesToVisit.containsNode(neighbor):
nodesToVisit.insert(neighbor)
else:
nodesToVisit.update(neighbor)
return reconstructPath(endNode)
def initializeNodes(graph):
nodes = []
for i, row in enumerate(graph):
nodes.append([])
for j, value in enumerate(row):
nodes[i].append(Node(i, j, value))
return nodes
def calculateManhattanDistance(currentNode, endNode):
currentRow = currentNode.row
currentCol = currentNode.col
endRow = endNode.row
endCol = endNode.col
return abs(currentRow - endRow) + abs(currentCol - endCol)
def getNeighboringNodes(node, nodes):
neighbors = []
numRows = len(nodes)
numCols = len(nodes[0])
row = node.row
col = node.col
if row < numRows - 1:
neighbors.append(nodes[row + 1][col])
if row > 0:
neighbors.append(nodes[row - 1][col])
if col < numCols - 1:
neighbors.append(nodes[row][col + 1])
if col > 0:
neighbors.append(nodes[row][col - 1])
return neighbors
def reconstructPath(endNode):
if not endNode.cameFrom:
return []
currentNode = endNode
path = []
while currentNode is not None:
path.append([currentNode.row, currentNode.col])
currentNode = currentNode.cameFrom
return path[::-1]
class MinHeap:
def __init__(self, array):
self.nodePositionsInHeap = {node.id: idx for idx, node in enumerate(array)}
self.heap = self.buildHeap(array)
def isEmpty(self):
return len(self.heap) == 0
def buildHeap(self, array):
firstParentIdx = (len(array) - 2) // 2
for currentIdx in reversed(range(firstParentIdx + 1)):
self.siftDown(currentIdx, len(array) - 1, array)
return array
def siftDown(self, currentIdx, endIdx, heap):
childOneIdx = currentIdx * 2 + 1
while childOneIdx <= endIdx:
childTwoIdx = currentIdx * 2 + 2 if currentIdx * 2 + 2 <= endIdx else -1
if (
childTwoIdx != -1
and heap[childTwoIdx].estimatedDistanceToEnd < heap[childOneIdx].estimatedDistanceToEnd
):
idxToSwap = childTwoIdx
else:
idxToSwap = childOneIdx
if heap[idxToSwap].estimatedDistanceToEnd < heap[currentIdx].estimatedDistanceToEnd:
self.swap(currentIdx, idxToSwap, heap)
currentIdx = idxToSwap
childOneIdx = currentIdx * 2 + 1
else:
return
def siftUp(self, currentIdx, heap):
parentIdx = (currentIdx - 1) // 2
while currentIdx > 0 and heap[currentIdx].estimatedDistanceToEnd < heap[parentIdx].estimatedDistanceToEnd:
self.swap(currentIdx, parentIdx, heap)
currentIdx = parentIdx
parentIdx = (currentIdx - 1) // 2
def remove(self):
if self.isEmpty():
return
self.swap(0, len(self.heap) - 1, self.heap)
node = self.heap.pop()
del self.nodePositionsInHeap[node.id]
self.siftDown(0, len(self.heap) - 1, self.heap)
return node
def insert(self, node):
self.heap.append(node)
self.nodePositionsInHeap[node.id] = len(self.heap) - 1
self.siftUp(len(self.heap) - 1, self.heap)
def swap(self, i, j, heap):
self.nodePositionsInHeap[heap[i].id] = j
self.nodePositionsInHeap[heap[j].id] = i
heap[i], heap[j] = heap[j], heap[i]
def containsNode(self, node):
return node.id in self.nodePositionsInHeap
def update(self, node):
self.siftUp(self.nodePositionsInHeap[node.id], self.heap)
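# Minimal usage sketch (illustrative addition): run the algorithm on a small grid
# where 1-cells are walls; the exact route may differ but it is a shortest path.
def _demo_a_star():
    graph = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    path = aStarAlgorithm(0, 0, 2, 2, graph)
    print(path)  # e.g. [[0, 0], [0, 1], [0, 2], [1, 2], [2, 2]] (5 cells)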
|
#!/usr/bin/env python
import re
import setuptools
def get_info():
with open("README.md", "r") as fh:
long_description = fh.read()
version = re.search(
r'^__version__\s*=\s*"(.*)"',
open("experimenting/__init__.py").read(),
re.MULTILINE,
).group(1)
return long_description, version
long_description, version = get_info()
setuptools.setup(
name='lifting_events_to_3d_hpe',
version=version,
description='Code for 2021 CVPRw "Lifting Monocular Events to 3D Human Poses"',
long_description=long_description,
author='Gianluca Scarpellini',
author_email='gianluca.scarpellini@iit.it',
url='https://github.com/iit-pavis/lifting_events_to_3d_hpe',
install_requires=[
'albumentations',
'opencv-python==4.2.0.34',
'h5py',
'scikit-image',
'scikit-learn',
'scikit-video',
'scipy',
'torch>1.4',
'kornia',
'hydra-core==1.0.0rc1',
'omegaconf',
'pytorch-lightning==1.1.6',
'torchvision',
'tqdm',
'segmentation_models_pytorch',
],
packages=setuptools.find_packages(exclude=["tests", "tests/*"]),
test_suite="tests",
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GPL License",
"Operating System :: Linux",
],
)
|
import factory
from .models import Order, Payment
class OrderFactory(factory.DjangoModelFactory):
class Meta:
model = Order
name = factory.Sequence(lambda n: "order {}".format(n))
class PaymentFactory(factory.DjangoModelFactory):
class Meta:
model = Payment
order = factory.SubFactory(OrderFactory)
amount = 200
currency = "PLN"
backend = "getpaid.backends.payu"
|
"""Installation script for coex using setuptools."""
from setuptools import setup
setup(
name='coex',
version='1.0',
packages=['coex'],
install_requires=['numpy', 'scipy'],
author='Adam Rall',
author_email='arall@buffalo.edu',
license='BSD',
keywords='molecular simulation',
)
|
#! python3
# __author__ = "YangJiaHao"
# date: 2018/2/13
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2019 John Dewey
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import pytest
import ucl
from konfig import config
@pytest.fixture
def _instance():
return config.Config()
@pytest.fixture
def _daemonset():
return """
apiVersion = "apps/v1"
kind = "DaemonSet"
metadata {
name = "name"
namespace = "namespace"
labels = {
foo = "bar"
}
}
spec {
selector {
matchLabels {
name = "name"
}
}
}
"""
def test_config_loads(_instance, _daemonset):
_instance._data = _daemonset
got = _instance.loads()
want = {
'apiVersion': 'apps/v1',
'kind': 'DaemonSet',
'metadata': {
'name': 'name',
'namespace': 'namespace',
'labels': {
'foo': 'bar',
}
},
'spec': {
'selector': {
'matchLabels': {
'name': 'name'
}
}
}
}
assert want == got
|
"""
rec_secret.py
Recoveres a secret using the Fuzzy Key Recovery scheme
"""
import click
from fuzzy import RecoverSecret, bytes_to_hex, FuzzyError, FuzzyState
def work(words, key_count, secret) -> None:
"""
1. re-create the state object from json file
    2. call RecoverSecret on the recovery words (words)
    3. print the recovered keys
    A FuzzyError exception is thrown upon failure
"""
with open(secret, 'r') as fobj:
repn = fobj.read()
state = FuzzyState.Loads(repn)
recovery_words = [int(word) for word in words.split()]
keys = RecoverSecret(state, recovery_words, key_count)
print("keys:")
for key in keys:
print("-", bytes_to_hex(key))
@click.command()
@click.option('--words',
type=str,
prompt="words",
required=True,
help='recovery words as integers eg. "8 6 0 3"')
@click.option('--key-count',
type=int,
default=1,
help='number of keys to be generated [default=1]')
@click.option('--secret',
type=str,
default='secret.json',
help='path to JSON file holding the secret (FuzzyState)')
def rec_secret(words, key_count, secret) -> None:
"""
recover a secret
example:
python3 rec_secret.py --words "1 2 3" [--secret secret.json] [--key-count 1]
"""
work(words, key_count, secret)
if __name__ == '__main__':
try:
# pylint: disable=no-value-for-parameter
rec_secret()
# pylint: enable=no-value-for-parameter
except FuzzyError as error:
print("\nKey Recovery Failed:")
print(" ", error.message)
# work("1 2 3", 1, "output.json")
|
"""
Configuration file for Flask and Flask-SQLAlchemy modules.
All environment variables are stored in local .env file.
"""
import os
from dotenv import load_dotenv
load_dotenv() #load environment variables from .env file
class Config(object):
db_host = os.environ.get('DB_HOST')
db_name = os.environ.get('DB_NAME')
db_password = os.environ.get('DB_PASSWORD')
db_port = os.environ.get('DB_PORT')
db_user = os.environ.get('DB_USERNAME')
SQLALCHEMY_DATABASE_URI = f"postgresql://{db_user}:{db_password}@{db_host}:{db_port}/{db_name}"
SECRET_KEY = os.environ.get('FLASK_SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
PROPAGATE_EXCEPTIONS = True
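# Minimal usage sketch (illustrative addition, not part of this project): attaching
# the Config object to a Flask application. The factory function name is an
# assumption, not something defined elsewhere in this project.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(Config)
    return app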
|
""" Find a nearby root of the coupled radial/angular Teukolsky equations.
TODO Documentation.
"""
from __future__ import division, print_function, absolute_import
import logging
import numpy as np
from scipy import optimize
from .angular import sep_const_closest, C_and_sep_const_closest
from . import radial
# TODO some documentation here, better documentation throughout
class NearbyRootFinder(object):
"""Object to find and store results from simultaneous roots of
radial and angular QNM equations, following the
Leaver and Cook-Zalutskiy approach.
Parameters
----------
a: float [default: 0.]
Dimensionless spin of black hole, 0 <= a < 1.
s: int [default: -2]
Spin of field of interest
m: int [default: 2]
Azimuthal number of mode of interest
A_closest_to: complex [default: 4.+0.j]
Complex value close to desired separation constant. This is
intended for tracking the l-number of a sequence starting
from the analytically-known value at a=0
l_max: int [default: 20]
Maximum value of l to include in the spherical-spheroidal
matrix for finding separation constant and mixing
coefficients. Must be sufficiently larger than l of interest
that angular spectral method can converge. The number of
l's needed for convergence depends on a.
omega_guess: complex [default: .5-.5j]
Initial guess of omega for root-finding
tol: float [default: sqrt(double epsilon)]
Tolerance for root-finding omega
    cf_tol: float [default: 1e-10]
Tolerance for continued fraction calculation
n_inv: int [default: 0]
Inversion number of radial infinite continued fraction,
which selects overtone number of interest
Nr: int [default: 300]
Truncation number of radial infinite continued
fraction. Must be sufficiently large for convergence.
Nr_min: int [default: 300]
Floor for Nr (for dynamic control of Nr)
Nr_max: int [default: 4000]
Ceiling for Nr (for dynamic control of Nr)
r_N: complex [default: 1.]
Seed value taken for truncation of infinite continued
fraction. UNUSED, REMOVE
"""
def __init__(self, *args, **kwargs):
# Set defaults before using values in kwargs
self.a = 0.
self.s = -2
self.m = 2
self.A0 = 4.+0.j
self.l_max = 20
self.omega_guess = .5-.5j
self.tol = np.sqrt(np.finfo(float).eps)
self.cf_tol = 1e-10
self.n_inv = 0
self.Nr = 300
self.Nr_min = 300
self.Nr_max = 4000
self.r_N = 1.
self.set_params(**kwargs)
def set_params(self, *args, **kwargs):
"""Set the parameters for root finding. Parameters are
described in the class documentation. Finally calls
:meth:`clear_results`.
"""
# TODO This violates DRY, do better.
self.a = kwargs.get('a', self.a)
self.s = kwargs.get('s', self.s)
self.m = kwargs.get('m', self.m)
self.A0 = kwargs.get('A_closest_to', self.A0)
self.l_max = kwargs.get('l_max', self.l_max)
self.omega_guess = kwargs.get('omega_guess', self.omega_guess)
self.tol = kwargs.get('tol', self.tol)
self.cf_tol = kwargs.get('cf_tol', self.cf_tol)
self.n_inv = kwargs.get('n_inv', self.n_inv)
self.Nr = kwargs.get('Nr', self.Nr)
self.Nr_min = kwargs.get('Nr_min', self.Nr_min)
self.Nr_max = kwargs.get('Nr_max', self.Nr_max)
self.r_N = kwargs.get('r_N', self.r_N)
# Optional pole factors
self.poles = np.array([])
# TODO: Check that values make sense
self.clear_results()
def clear_results(self):
"""Clears the stored results from last call of :meth:`do_solve`"""
self.solved = False
self.opt_res = None
self.omega = None
self.A = None
self.C = None
self.cf_err = None
self.n_frac = None
self.poles = np.array([])
def __call__(self, x):
"""Internal function for usage with optimize.root, for an
instance of this class to act like a function for
root-finding. optimize.root only works with reals so we pack
and unpack complexes into float[2]
"""
omega = x[0] + 1.j*x[1]
# oblateness parameter
c = self.a * omega
# Separation constant at this a*omega
A = sep_const_closest(self.A0, self.s, c, self.m,
self.l_max)
# We are trying to find a root of this function:
# inv_err = radial.leaver_cf_trunc_inversion(omega, self.a,
# self.s, self.m, A,
# self.n_inv,
# self.Nr, self.r_N)
# TODO!
# Determine the value to use for cf_tol based on
# the Jacobian, cf_tol = |d cf(\omega)/d\omega| tol.
inv_err, self.cf_err, self.n_frac = radial.leaver_cf_inv_lentz(omega, self.a,
self.s, self.m, A,
self.n_inv, self.cf_tol,
self.Nr_min, self.Nr_max)
# logging.info("Lentz terminated with cf_err={}, n_frac={}".format(self.cf_err, self.n_frac))
# Insert optional poles
pole_factors = np.prod(omega - self.poles)
supp_err = inv_err / pole_factors
return [np.real(supp_err), np.imag(supp_err)]
def do_solve(self):
"""Try to find a root of the continued fraction equation,
using the parameters that have been set in :meth:`set_params`."""
# For the default (hybr) method, tol sets 'xtol', the
# tolerance on omega.
self.opt_res = optimize.root(self,
[np.real(self.omega_guess), np.imag(self.omega_guess)],
method = 'hybr', tol = self.tol)
if (not self.opt_res.success):
tmp_opt_res = self.opt_res
self.clear_results()
self.opt_res = tmp_opt_res
return None
self.solved = True
self.omega = self.opt_res.x[0] + 1.j*self.opt_res.x[1]
c = self.a * self.omega
# As far as I can tell, scipy.linalg.eig already normalizes
# the eigenvector to unit norm, and the coefficient with the
# largest norm is real
self.A, self.C = C_and_sep_const_closest(self.A0,
self.s, c,
self.m, self.l_max)
return self.omega
def get_cf_err(self):
"""Return the continued fraction error and the number of
iterations in the last evaluation of the continued fraction.
Returns
-------
cf_err: float
n_frac: int
"""
return self.cf_err, self.n_frac
def set_poles(self, poles=[]):
""" Set poles to multiply error function.
Parameters
----------
poles: array_like as complex numbers [default: []]
"""
self.poles = np.array(poles).astype(complex)
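# Minimal usage sketch (an illustrative assumption, not taken from the package docs):
# search for the fundamental s=-2, l=2, m=2 mode of a slowly spinning black hole,
# starting from a guess near the known a=0 frequency (~0.37 - 0.09i in units of 1/M).
def _example_nearby_root_finder():
    finder = NearbyRootFinder(a=0.1, s=-2, m=2,
                              A_closest_to=4.+0.j,
                              omega_guess=0.37-0.09j,
                              n_inv=0)
    omega = finder.do_solve()
    if omega is not None:
        print("omega =", omega)
        print("A     =", finder.A)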
|
import base64
import datetime
import logging
import os
import traceback
from io import BytesIO
import pdfkit
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from django.db import transaction
from django.db.models import Sum
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from PIL import Image
from rest_framework import exceptions as rf_exceptions
from rest_framework import serializers, status
from waldur_core.core import models as core_models
from waldur_core.core import serializers as core_serializers
from waldur_core.core import utils as core_utils
from waldur_core.structure import filters as structure_filters
from waldur_core.structure import models as structure_models
from waldur_mastermind.common.utils import create_request
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices import registrators
from waldur_mastermind.invoices.utils import get_full_days
from waldur_mastermind.marketplace import attribute_types
from . import models, plugins
logger = logging.getLogger(__name__)
def get_order_item_processor(order_item):
if order_item.resource:
offering = order_item.resource.offering
else:
offering = order_item.offering
if order_item.type == models.RequestTypeMixin.Types.CREATE:
return plugins.manager.get_processor(offering.type, 'create_resource_processor')
elif order_item.type == models.RequestTypeMixin.Types.UPDATE:
return plugins.manager.get_processor(offering.type, 'update_resource_processor')
elif order_item.type == models.RequestTypeMixin.Types.TERMINATE:
return plugins.manager.get_processor(offering.type, 'delete_resource_processor')
def process_order_item(order_item: models.OrderItem, user):
processor = get_order_item_processor(order_item)
if not processor:
order_item.error_message = (
'Skipping order item processing because processor is not found.'
)
order_item.set_state_erred()
order_item.save(update_fields=['state', 'error_message'])
return
try:
order_item.set_state_executing()
order_item.save(update_fields=['state'])
processor(order_item).process_order_item(user)
except Exception as e:
# Here it is necessary to catch all exceptions.
# If this is not done, then the order will remain in the executed status.
order_item.error_message = str(e)
order_item.error_traceback = traceback.format_exc()
order_item.set_state_erred()
logger.error(f'Error processing order item { order_item }.')
order_item.save(update_fields=['state', 'error_message', 'error_traceback'])
def validate_order_item(order_item, request):
processor = get_order_item_processor(order_item)
if processor:
try:
processor(order_item).validate_order_item(request)
except NotImplementedError:
# It is okay if validation is not implemented yet
pass
def create_screenshot_thumbnail(screenshot):
pic = screenshot.image
fh = storage.open(pic.name, 'rb')
image = Image.open(fh)
image.thumbnail(settings.WALDUR_MARKETPLACE['THUMBNAIL_SIZE'], Image.ANTIALIAS)
fh.close()
thumb_extension = os.path.splitext(pic.name)[1]
thumb_extension = thumb_extension.lower()
thumb_name = os.path.basename(pic.name)
if thumb_extension in ['.jpg', '.jpeg']:
FTYPE = 'JPEG'
elif thumb_extension == '.gif':
FTYPE = 'GIF'
elif thumb_extension == '.png':
FTYPE = 'PNG'
else:
return
temp_thumb = BytesIO()
image.save(temp_thumb, FTYPE)
temp_thumb.seek(0)
screenshot.thumbnail.save(thumb_name, ContentFile(temp_thumb.read()), save=True)
temp_thumb.close()
def create_order_pdf(order):
logo_path = settings.WALDUR_CORE['SITE_LOGO']
if logo_path:
with open(logo_path, 'rb') as image_file:
deployment_logo = base64.b64encode(image_file.read()).decode("utf-8")
else:
deployment_logo = None
context = dict(
order=order,
currency=settings.WALDUR_CORE['CURRENCY_NAME'],
deployment_name=settings.WALDUR_CORE['SITE_NAME'],
deployment_address=settings.WALDUR_CORE['SITE_ADDRESS'],
deployment_email=settings.WALDUR_CORE['SITE_EMAIL'],
deployment_phone=settings.WALDUR_CORE['SITE_PHONE'],
deployment_logo=deployment_logo,
)
html = render_to_string('marketplace/order.html', context)
pdf = pdfkit.from_string(html, False)
return pdf
def import_resource_metadata(resource):
instance = resource.scope
fields = {'action', 'action_details', 'state', 'runtime_state'}
for field in fields:
if field == 'state':
value = instance.get_state_display()
else:
value = getattr(instance, field, None)
        resource.backend_metadata[field] = value
if instance.backend_id:
resource.backend_id = instance.backend_id
resource.name = instance.name
resource.save(
update_fields=['backend_metadata', 'attributes', 'name', 'backend_id']
)
def get_service_provider_info(source):
try:
resource = models.Resource.objects.get(scope=source)
customer = resource.offering.customer
service_provider = getattr(customer, 'serviceprovider', None)
return {
'service_provider_name': customer.name,
'service_provider_uuid': ''
if not service_provider
else service_provider.uuid.hex,
}
except models.Resource.DoesNotExist:
return {}
def get_offering_details(offering):
if not isinstance(offering, models.Offering):
return {}
return {
'offering_type': offering.type,
'offering_name': offering.name,
'offering_uuid': offering.uuid.hex,
'service_provider_name': offering.customer.name,
'service_provider_uuid': offering.customer.uuid.hex,
}
def format_list(resources):
"""
Format comma-separated list of IDs from Django queryset.
"""
return ', '.join(map(str, sorted(resources.values_list('id', flat=True))))
def get_order_item_url(order_item):
return core_utils.format_homeport_link(
'projects/{project_uuid}/marketplace-order-item-details/{order_item_uuid}/',
order_item_uuid=order_item.uuid.hex,
project_uuid=order_item.order.project.uuid,
)
def fill_activated_field(apps, schema_editor):
    # We cannot use RequestTypeMixin.Types.CREATE and OrderItem.States.Done because this function is called in migrations
state_done = 3
type_create = 1
OrderItem = apps.get_model('marketplace', 'OrderItem')
for order_item in OrderItem.objects.filter(type=type_create, state=state_done):
if not order_item.activated and order_item.resource:
order_item.activated = order_item.resource.created
order_item.save()
def get_info_about_missing_usage_reports():
now = timezone.now()
billing_period = core_utils.month_start(now)
whitelist_types = [
offering_type
for offering_type in plugins.manager.get_offering_types()
if plugins.manager.enable_usage_notifications(offering_type)
]
offering_ids = models.OfferingComponent.objects.filter(
billing_type=models.OfferingComponent.BillingTypes.USAGE,
offering__type__in=whitelist_types,
).values_list('offering_id', flat=True)
resource_with_usages = models.ComponentUsage.objects.filter(
billing_period=billing_period
).values_list('resource', flat=True)
resources_without_usages = models.Resource.objects.filter(
state=models.Resource.States.OK, offering_id__in=offering_ids
).exclude(id__in=resource_with_usages)
result = []
for resource in resources_without_usages:
rows = list(
filter(lambda x: x['customer'] == resource.offering.customer, result)
)
if rows:
rows[0]['resources'].append(resource)
else:
result.append(
{'customer': resource.offering.customer, 'resources': [resource],}
)
return result
def get_public_resources_url(customer):
return core_utils.format_homeport_link(
'organizations/{organization_uuid}/marketplace-public-resources/',
organization_uuid=customer.uuid,
)
def validate_limits(limits, offering):
if not plugins.manager.can_update_limits(offering.type):
raise serializers.ValidationError(
{'limits': _('Limits update is not supported for this resource.')}
)
valid_component_types = set(
offering.components.filter(
billing_type=models.OfferingComponent.BillingTypes.LIMIT
).values_list('type', flat=True)
)
invalid_types = set(limits.keys()) - valid_component_types
if invalid_types:
raise serializers.ValidationError(
{'limits': _('Invalid types: %s') % ', '.join(invalid_types)}
)
# Validate max and min limit value.
components_map = {
component.type: component
for component in offering.components.filter(type__in=valid_component_types)
}
for key, value in limits.items():
component = components_map.get(key)
if not component:
continue
if component.max_value and value > component.max_value:
raise serializers.ValidationError(
_('The limit %s value cannot be more than %s.')
% (value, component.max_value)
)
if component.min_value and value < component.min_value:
raise serializers.ValidationError(
_('The limit %s value cannot be less than %s.')
% (value, component.min_value)
)
def validate_attributes(attributes, category):
category_attributes = models.Attribute.objects.filter(section__category=category)
required_attributes = category_attributes.filter(required=True).values_list(
'key', flat=True
)
missing_attributes = set(required_attributes) - set(attributes.keys())
if missing_attributes:
raise rf_exceptions.ValidationError(
{
'attributes': _(
'These attributes are required: %s'
% ', '.join(sorted(missing_attributes))
)
}
)
for attribute in category_attributes:
value = attributes.get(attribute.key)
if value is None:
# Use default attribute value if it is defined
if attribute.default is not None:
attributes[attribute.key] = attribute.default
continue
validator = attribute_types.get_attribute_type(attribute.type)
if not validator:
continue
try:
validator.validate(
value, list(attribute.options.values_list('key', flat=True))
)
except ValidationError as e:
raise rf_exceptions.ValidationError({attribute.key: e.message})
def create_offering_components(offering, custom_components=None):
fixed_components = plugins.manager.get_components(offering.type)
category_components = {
component.type: component
for component in models.CategoryComponent.objects.filter(
category=offering.category
)
}
for component_data in fixed_components:
models.OfferingComponent.objects.create(
offering=offering,
parent=category_components.get(component_data.type, None),
**component_data._asdict(),
)
if custom_components:
for component_data in custom_components:
models.OfferingComponent.objects.create(offering=offering, **component_data)
def get_resource_state(state):
SrcStates = core_models.StateMixin.States
DstStates = models.Resource.States
mapping = {
SrcStates.CREATION_SCHEDULED: DstStates.CREATING,
SrcStates.CREATING: DstStates.CREATING,
SrcStates.UPDATE_SCHEDULED: DstStates.UPDATING,
SrcStates.UPDATING: DstStates.UPDATING,
SrcStates.DELETION_SCHEDULED: DstStates.TERMINATING,
SrcStates.DELETING: DstStates.TERMINATING,
SrcStates.OK: DstStates.OK,
SrcStates.ERRED: DstStates.ERRED,
}
return mapping.get(state, DstStates.ERRED)
def get_marketplace_offering_uuid(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.uuid
except ObjectDoesNotExist:
return
def get_marketplace_offering_name(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.name
except ObjectDoesNotExist:
return
def get_marketplace_category_uuid(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.category.uuid
except ObjectDoesNotExist:
return
def get_marketplace_category_name(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.category.title
except ObjectDoesNotExist:
return
def get_marketplace_resource_uuid(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).uuid
except ObjectDoesNotExist:
return
def get_marketplace_plan_uuid(serializer, scope):
try:
resource = models.Resource.objects.get(scope=scope)
if resource.plan:
return resource.plan.uuid
except ObjectDoesNotExist:
return
def get_marketplace_resource_state(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).get_state_display()
except ObjectDoesNotExist:
return
def get_is_usage_based(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.is_usage_based
except ObjectDoesNotExist:
return
def get_is_limit_based(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.is_limit_based
except ObjectDoesNotExist:
return
def add_marketplace_offering(sender, fields, **kwargs):
fields['marketplace_offering_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_offering_uuid', get_marketplace_offering_uuid)
fields['marketplace_offering_name'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_offering_name', get_marketplace_offering_name)
fields['marketplace_category_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_category_uuid', get_marketplace_category_uuid)
fields['marketplace_category_name'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_category_name', get_marketplace_category_name)
fields['marketplace_resource_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_resource_uuid', get_marketplace_resource_uuid)
fields['marketplace_plan_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_plan_uuid', get_marketplace_plan_uuid)
fields['marketplace_resource_state'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_resource_state', get_marketplace_resource_state)
fields['is_usage_based'] = serializers.SerializerMethodField()
setattr(sender, 'get_is_usage_based', get_is_usage_based)
fields['is_limit_based'] = serializers.SerializerMethodField()
setattr(sender, 'get_is_limit_based', get_is_limit_based)
def get_offering_costs(offering, active_customers, start, end):
costs = []
date = start
while date <= end:
year = date.year
month = date.month
invoice_items = invoice_models.InvoiceItem.objects.filter(
details__offering_uuid=offering.uuid.hex,
project__customer__in=active_customers,
invoice__year=year,
invoice__month=month,
)
stats = {
'tax': 0,
'total': 0,
'price': 0,
'price_current': 0,
'period': '%s-%02d' % (year, month),
}
for item in invoice_items:
stats['tax'] += item.tax
stats['total'] += item.total
stats['price'] += item.price
stats['price_current'] += item.price_current
costs.append(stats)
date += relativedelta(months=1)
return costs
def get_offering_customers(offering, active_customers):
resources = models.Resource.objects.filter(
offering=offering, project__customer__in=active_customers,
)
customers_ids = resources.values_list('project__customer_id', flat=True)
return structure_models.Customer.objects.filter(id__in=customers_ids)
def get_start_and_end_dates_from_request(request):
serializer = core_serializers.DateRangeFilterSerializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
today = datetime.date.today()
default_start = datetime.date(year=today.year - 1, month=today.month, day=1)
start_year, start_month = serializer.validated_data.get(
'start', (default_start.year, default_start.month)
)
end_year, end_month = serializer.validated_data.get(
'end', (today.year, today.month)
)
end = datetime.date(year=end_year, month=end_month, day=1)
start = datetime.date(year=start_year, month=start_month, day=1)
return start, end
def get_active_customers(request, view):
customers = structure_models.Customer.objects.all()
return structure_filters.AccountingStartDateFilter().filter_queryset(
request, customers, view
)
def get_offering_component_stats(offering, active_customers, start, end):
component_stats = []
resources = models.Resource.objects.filter(
offering=offering, project__customer__in=active_customers,
)
resources_ids = resources.values_list('id', flat=True)
date = start
while date <= end:
year = date.year
month = date.month
period = '%s-%02d' % (year, month)
        # for consistency with resource usage reporting, assume values at the beginning of the last day
period_visible = (
core_utils.month_end(date)
.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
)
invoice_items = invoice_models.InvoiceItem.objects.filter(
resource_id__in=resources_ids, invoice__year=year, invoice__month=month,
)
for item in invoice_items:
# Case when invoice item details includes plan component data.
plan_component_id = item.details.get('plan_component_id')
if not plan_component_id:
continue
try:
plan_component = models.PlanComponent.objects.get(pk=plan_component_id)
offering_component = plan_component.component
if (
offering_component.billing_type
== models.OfferingComponent.BillingTypes.LIMIT
):
component_stats.append(
{
'usage': item.quantity,
'description': offering_component.description,
'measured_unit': offering_component.measured_unit,
'type': offering_component.type,
'name': offering_component.name,
'period': period,
'date': period_visible,
'offering_component_id': offering_component.id,
}
)
if (
offering_component.billing_type
== models.OfferingComponent.BillingTypes.USAGE
):
                    if any(
                        x['period'] == period
                        and x['offering_component_id'] == offering_component.id
                        for x in component_stats
                    ):
                        continue
usages = models.ComponentUsage.objects.filter(
component=offering_component, billing_period=date
).aggregate(usage=Sum('usage'))['usage']
component_stats.append(
{
'usage': usages,
'description': offering_component.description,
'measured_unit': offering_component.measured_unit,
'type': offering_component.type,
'name': offering_component.name,
'period': period,
'date': period_visible,
'offering_component_id': offering_component.id,
}
)
if (
offering_component.billing_type
== models.OfferingComponent.BillingTypes.FIXED
):
                    other = [
                        x
                        for x in component_stats
                        if x['period'] == period
                        and x['offering_component_id'] == offering_component.id
                    ]
if other:
other[0]['usage'] += item.get_factor()
continue
component_stats.append(
{
'usage': item.get_factor(),
'description': offering_component.description,
'measured_unit': offering_component.measured_unit,
'type': offering_component.type,
'name': offering_component.name,
'period': period,
'date': period_visible,
'offering_component_id': offering_component.id,
}
)
except models.PlanComponent.DoesNotExist:
logger.error(
'PlanComponent with id %s is not found.' % plan_component_id
)
date += relativedelta(months=1)
# delete internal data
    for s in component_stats:
        s.pop('offering_component_id', None)
return component_stats
class MoveResourceException(Exception):
pass
@transaction.atomic
def move_resource(resource: models.Resource, project):
if project.customer.blocked:
        raise rf_exceptions.ValidationError('New customer must not be blocked')
old_project = resource.project
resource.project = project
resource.save(update_fields=['project'])
if resource.scope:
resource.scope.project = project
resource.scope.save(update_fields=['project'])
for service_settings in structure_models.ServiceSettings.objects.filter(
scope=resource.scope
):
models.Offering.objects.filter(scope=service_settings).update(
project=project
)
order_ids = resource.orderitem_set.values_list('order_id', flat=True)
for order in models.Order.objects.filter(pk__in=order_ids):
if order.items.exclude(resource=resource).exists():
            raise MoveResourceException(
                'Resource moving is not possible '
                'because its orders also include other resources.'
            )
order.project = project
order.save(update_fields=['project'])
for invoice_item in invoice_models.InvoiceItem.objects.filter(
resource=resource,
invoice__state=invoice_models.Invoice.States.PENDING,
project=old_project,
):
start_invoice = invoice_item.invoice
target_invoice, _ = registrators.RegistrationManager.get_or_create_invoice(
project.customer,
date=datetime.date(
year=start_invoice.year, month=start_invoice.month, day=1
),
)
if target_invoice.state != invoice_models.Invoice.States.PENDING:
raise MoveResourceException(
'Resource moving is not possible, '
'because invoice items moving is not possible.'
)
invoice_item.project = project
invoice_item.project_uuid = project.uuid.hex
invoice_item.project_name = project.name
invoice_item.invoice = target_invoice
invoice_item.save(
update_fields=['project', 'project_uuid', 'project_name', 'invoice']
)
start_invoice.update_current_cost()
target_invoice.update_current_cost()
def get_invoice_item_for_component_usage(component_usage):
if not component_usage.plan_period:
# Field plan_period is optional if component_usage is not connected with billing
return
else:
if component_usage.plan_period.end:
plan_period_end = component_usage.plan_period.end
else:
plan_period_end = core_utils.month_end(component_usage.billing_period)
if component_usage.plan_period.start:
plan_period_start = component_usage.plan_period.start
else:
plan_period_start = component_usage.billing_period
try:
item = invoice_models.InvoiceItem.objects.get(
invoice__year=component_usage.billing_period.year,
invoice__month=component_usage.billing_period.month,
resource=component_usage.resource,
start__gte=plan_period_start,
end__lte=plan_period_end,
details__offering_component_type=component_usage.component.type,
)
return item
except invoice_models.InvoiceItem.DoesNotExist:
pass
def serialize_resource_limit_period(period):
billing_periods = get_full_days(period['start'], period['end'])
return {
'start': period['start'].isoformat(),
'end': period['end'].isoformat(),
'quantity': period['quantity'],
'billing_periods': billing_periods,
'total': str(period['quantity'] * billing_periods),
}
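# Illustrative sketch (not part of the original module), assuming get_full_days simply
# counts the whole days covered by the period:
#
#   period = {
#       'start': datetime.date(2023, 1, 1),
#       'end': datetime.date(2023, 1, 10),
#       'quantity': 2,
#   }
#   serialize_resource_limit_period(period)
#   # e.g. -> {'start': '2023-01-01', 'end': '2023-01-10', 'quantity': 2,
#   #          'billing_periods': 10, 'total': '20'}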
def check_customer_blocked_for_terminating(resource):
try:
project = resource.project
except structure_models.Project.DoesNotExist:
project = structure_models.Project.all_objects.get(pk=resource.project_id)
if project.customer.blocked:
raise rf_exceptions.ValidationError(_('Blocked organization is not available.'))
def schedule_resources_termination(resources):
from waldur_mastermind.marketplace import views
if not resources:
return
view = views.ResourceViewSet.as_view({'post': 'terminate'})
user = core_utils.get_system_robot()
if not user:
        logger.error(
            'Staff user with username system_robot, which is required to terminate '
            'resources of projects with an expired due date, does not exist.'
        )
return
for resource in resources:
response = create_request(view, user, {}, uuid=resource.uuid.hex)
if response.status_code != status.HTTP_200_OK:
logger.error(
'Terminating resource %s has failed. %s'
% (resource.uuid.hex, response.content)
)
def create_local_resource(order_item, scope):
resource = models.Resource(
project=order_item.order.project,
offering=order_item.offering,
plan=order_item.plan,
limits=order_item.limits,
attributes=order_item.attributes,
name=order_item.attributes.get('name') or '',
        scope=scope if scope and not isinstance(scope, str) else None,
        backend_id=scope if scope and isinstance(scope, str) else '',
)
resource.init_cost()
resource.save()
resource.init_quotas()
order_item.resource = resource
order_item.save(update_fields=['resource'])
return resource
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_effective_area.ipynb (unless otherwise specified).
__all__ = ['EffectiveArea']
# Cell
import os
import numpy as np
from astropy.io import fits
class EffectiveArea(object):
""" manage the effective area calculation from a CALDB FITS file
    Must specify either a file_path, a value for CALDB, or have the CALDB environment variable set.
"""
def __init__(self,
irf: 'IRF to use'= 'P8R2_SOURCE_V6',
file_path: 'folder to find the AEFF file'=None,
CALDB: 'path to override environment variable'=None,
                 use_phidep: 'use azimuthal dependence for effective area'=False,
):
"""
"""
#ct0_file,ct1_file = get_irf_file(self.irf,CALDB=self.CALDB)
        # remember the CALDB location: an explicit argument overrides the environment variable
        self.CALDB = CALDB if CALDB is not None else os.environ.get('CALDB', None)
        assert (file_path or self.CALDB), 'No path given for effective area'
        if not file_path:
            if os.path.exists(f'{self.CALDB}/data'):
                self.CALDB += '/data'
            file_path = f'{self.CALDB}/glast/lat/bcf/ea'
        self.file_path = file_path
        self.aeff_file = os.path.expandvars(f'{file_path}/aeff_{irf}_FB.fits')
        assert os.path.exists(self.aeff_file), f'Effective area file {self.aeff_file} not found'
        # the same file holds both front (ct0) and back (ct1) effective areas
        self.ct0_file = self.ct1_file = self.aeff_file
        self._read_aeff(self.ct0_file, self.ct1_file)
        if use_phidep:
            self._read_phi(self.ct0_file, self.ct1_file)
def _read_file(self,filename,tablename,columns):
with fits.open(filename) as hdu:
table = hdu[tablename]
cbins = np.append(table.data.field('CTHETA_LO')[0],table.data.field('CTHETA_HI')[0][-1])
ebins = np.append(table.data.field('ENERG_LO')[0],table.data.field('ENERG_HI')[0][-1])
images = [np.asarray(table.data.field(c)[0],dtype=float).reshape(len(cbins)-1,len(ebins)-1) for c in columns]
return ebins,cbins,images
def _read_aeff(self,ct0_file,ct1_file):
try:
ebins,cbins,feffarea = self._read_file(ct0_file,'EFFECTIVE AREA',['EFFAREA'])
ebins,cbins,beffarea = self._read_file(ct1_file,'EFFECTIVE AREA',['EFFAREA'])
except KeyError:
ebins,cbins,feffarea = self._read_file(ct0_file,'EFFECTIVE AREA_FRONT',['EFFAREA'])
ebins,cbins,beffarea = self._read_file(ct1_file,'EFFECTIVE AREA_BACK',['EFFAREA'])
self.ebins,self.cbins = ebins,cbins
self.feffarea = feffarea[0]*1e4;self.beffarea = beffarea[0]*1e4
self.aeff = _InterpTable(np.log10(ebins),cbins)
self.faeff_aug = self.aeff.augment_data(self.feffarea)
self.baeff_aug = self.aeff.augment_data(self.beffarea)
def _read_phi(self,ct0_file,ct1_file):
try:
ebins,cbins,fphis = self._read_file(ct0_file,'PHI_DEPENDENCE',['PHIDEP0','PHIDEP1'])
ebins,cbins,bphis = self._read_file(ct1_file,'PHI_DEPENDENCE',['PHIDEP0','PHIDEP1'])
except KeyError:
ebins,cbins,fphis = self._read_file(ct0_file,'PHI_DEPENDENCE_FRONT',['PHIDEP0','PHIDEP1'])
ebins,cbins,bphis = self._read_file(ct1_file,'PHI_DEPENDENCE_BACK',['PHIDEP0','PHIDEP1'])
self.fphis = fphis; self.bphis = bphis
self.phi = _InterpTable(np.log10(ebins),cbins,augment=False)
def _phi_mod(self,e,c,event_class,phi):
# assume phi has already been reduced to range 0 to pi/2
if phi is None: return 1
tables = self.fphis if event_class==0 else self.bphis
par0 = self.phi(e,c,tables[0],bilinear=False)
par1 = self.phi(e,c,tables[1],bilinear=False,reset_indices=False)
norm = 1. + par0/(1. + par1)
phi = 2*abs((2./np.pi)*phi - 0.5)
return (1. + par0*phi**par1)/norm
def __call__(self,e,c,phi=None,event_class=-1,bilinear=True):
""" Return bilinear (or nearest-neighbour) interpolation.
Input:
e -- bin energy; potentially array
c -- bin cos(theta); potentially array
NB -- if e and c are both arrays, they must be of the same
size; in other words, no outer product is taken
"""
#print(f'Eff call: ({e,c})'); #return
e = np.log10(e)
at = self.aeff
if event_class == -1:
return (at(e,c,self.faeff_aug,bilinear=bilinear)*self._phi_mod(e,c,0,phi),
at(e,c,self.baeff_aug,bilinear=bilinear,reset_indices=False)*self._phi_mod(e,c,1,phi))
elif event_class == 0:
return at(e,c,self.faeff_aug)*self._phi_mod(e,c,0,phi)
return at(e,c,self.baeff_aug)*self._phi_mod(e,c,1,phi)
def get_file_names(self):
return self.ct0_file,self.ct1_file
def plots(self, fignum=1):
"""
"""
import matplotlib.pyplot as plt
ee = np.logspace(2,6)
ct = np.linspace(0.2,1)
plt.rc('font', size=14)
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(10,4), num=fignum)
for et, etname in enumerate('Front Back'.split()):
            ax1.semilogx(ee, [self(e, 1)[et] for e in ee], '-', lw=2, label=etname)
            ax2.plot(ct, [self(1000, x)[et]/self(1000, 1)[et] for x in ct], '-', lw=2, label=etname)
ax1.legend()
        ax1.set(xlabel='Energy [MeV]', ylabel='effective area', ylim=(0,None))
ax1.grid(alpha=0.5)
ax2.legend()
ax2.set(xlabel =r'$\cos\ \theta$',ylabel='relative to normal at 1 GeV')
ax2.grid(alpha=0.5)
fig.set_facecolor('white')
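# Illustrative usage sketch (not part of the original notebook code); it assumes a
# standard Fermi CALDB installation reachable via the CALDB environment variable:
#
#   ea = EffectiveArea(irf='P8R2_SOURCE_V6')
#   front_aeff, back_aeff = ea(1000.0, 0.8)  # effective areas at 1 GeV, cos(theta) = 0.8
#   ea.plots()                               # quick-look plots of the energy/angle dependence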
class _InterpTable(object):
"""Helper class -- does 2-d interpolation
"""
def __init__(self,xbins,ybins,augment=True):
""" Interpolation bins in energy and cos(theta)."""
self.xbins_0,self.ybins_0 = xbins,ybins
self.augment = augment
if augment:
x0 = xbins[0] - (xbins[1]-xbins[0])/2
x1 = xbins[-1] + (xbins[-1]-xbins[-2])/2
y0 = ybins[0] - (ybins[1]-ybins[0])/2
y1 = ybins[-1] + (ybins[-1]-ybins[-2])/2
self.xbins = np.concatenate(([x0],xbins,[x1]))
self.ybins = np.concatenate(([y0],ybins,[y1]))
else:
self.xbins = xbins; self.ybins = ybins
self.xbins_s = (self.xbins[:-1]+self.xbins[1:])/2
self.ybins_s = (self.ybins[:-1]+self.ybins[1:])/2
def augment_data(self,data):
""" Build a copy of data with outer edges replicated."""
d = np.empty([data.shape[0]+2,data.shape[1]+2])
d[1:-1,1:-1] = data
d[0,1:-1] = data[0,:]
d[1:-1,0] = data[:,0]
d[-1,1:-1] = data[-1,:]
d[1:-1,-1] = data[:,-1]
d[0,0] = data[0,0]
d[-1,-1] = data[-1,-1]
d[0,-1] = data[0,-1]
d[-1,0] = data[-1,0]
return d
def set_indices(self,x,y,bilinear=True):
if bilinear and (not self.augment):
print('Not equipped for bilinear, going to nearest neighbor.')
bilinear = False
self.bilinear = bilinear
if not bilinear:
i = np.searchsorted(self.xbins,x)-1
j = np.searchsorted(self.ybins,y)-1
else:
i = np.searchsorted(self.xbins_s,x)-1
j = np.searchsorted(self.ybins_s,y)-1
self.indices = i,j
def value(self,x,y,data):
i,j = self.indices
# NB transpose here
if not self.bilinear: return data[j,i]
x2,x1 = self.xbins_s[i+1],self.xbins_s[i]
y2,y1 = self.ybins_s[j+1],self.ybins_s[j]
f00 = data[j,i]
f11 = data[j+1,i+1]
f01 = data[j+1,i]
f10 = data[j,i+1]
norm = (x2-x1)*(y2-y1)
return ( (x2-x)*(f00*(y2-y)+f01*(y-y1)) + (x-x1)*(f10*(y2-y)+f11*(y-y1)) )/norm
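    # Note: the expression above is standard bilinear interpolation on the cell
    # [x1, x2] x [y1, y2],
    #   f(x, y) ~ ((x2-x)*(f00*(y2-y) + f01*(y-y1)) + (x-x1)*(f10*(y2-y) + f11*(y-y1)))
    #             / ((x2-x1)*(y2-y1)),
    # which reproduces the corner values f00, f10, f01, f11 exactly at the corners.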
def __call__(self,x,y,data,bilinear=True,reset_indices=True):
if reset_indices:
self.set_indices(x,y,bilinear=bilinear)
return self.value(x,y,data)
|
import bpy
op = bpy.context.active_operator
export_selected = True
export_apply = True
op.export_format = 'GLB'
op.ui_tab = 'GENERAL'
op.export_copyright = ''
op.export_image_format = 'NAME'
op.export_texture_dir = ''
op.export_texcoords = True
op.export_normals = True
op.export_draco_mesh_compression_enable = False
op.export_draco_mesh_compression_level = 6
op.export_draco_position_quantization = 14
op.export_draco_normal_quantization = 10
op.export_draco_texcoord_quantization = 12
op.export_draco_generic_quantization = 12
op.export_tangents = False
op.export_materials = True
op.export_colors = True
op.export_cameras = False
op.export_extras = False
op.export_yup = True
op.export_apply = True
op.export_animations = True
op.export_frame_range = True
op.export_frame_step = 1
op.export_force_sampling = True
op.export_nla_strips = True
op.export_def_bones = False
op.export_current_frame = False
op.export_skins = True
op.export_all_influences = False
op.export_morph = True
op.export_morph_normal = True
op.export_morph_tangent = False
op.export_lights = False
op.export_displacement = False
op.will_save_settings = False
|
from gym.envs.registration import register
register(
id='maze-v0',
entry_point='gym_maze.envs:MazeEnvSample5x5',
max_episode_steps=2000,
)
register(
id='maze-sample-5x5-v0',
entry_point='gym_maze.envs:MazeEnvSample5x5',
max_episode_steps=10000000000000,
)
register(
id='maze-empty-5x5-v0',
entry_point='gym_maze.envs:MazeEnvEmpty5x5',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-5x5-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse5x5',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-8x8-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse8x8',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-11x11-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse11x11',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-14x14-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse14x14',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-17x17-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse17x17',
max_episode_steps=10000000000000,
)
register(
id='maze-warehouse-20x20-v0',
entry_point='gym_maze.envs:MazeEnvWarehouse20x20',
max_episode_steps=10000000000000,
)
register(
id='maze-random-5x5-v0',
entry_point='gym_maze.envs:MazeEnvRandom5x5',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-8x8-v0',
entry_point='gym_maze.envs:MazeEnvRandom8x8',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-11x11-v0',
entry_point='gym_maze.envs:MazeEnvRandom11x11',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-14x14-v0',
entry_point='gym_maze.envs:MazeEnvRandom14x14',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-17x17-v0',
entry_point='gym_maze.envs:MazeEnvRandom17x17',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
    id='maze-random-20x20-v0',
entry_point='gym_maze.envs:MazeEnvRandom20x20',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-sample-10x10-v0',
entry_point='gym_maze.envs:MazeEnvSample10x10',
max_episode_steps=10000000000000,
)
register(
id='maze-random-10x10-v0',
entry_point='gym_maze.envs:MazeEnvRandom10x10',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-sample-3x3-v0',
entry_point='gym_maze.envs:MazeEnvSample3x3',
max_episode_steps=10000000000000,
)
register(
id='maze-random-3x3-v0',
entry_point='gym_maze.envs:MazeEnvRandom3x3',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-25x25-v0',
entry_point='gym_maze.envs:MazeEnvRandom25x25',
max_episode_steps=10000000000000,
)
register(
id='maze-random-50x50-v0',
entry_point='gym_maze.envs:MazeEnvRandom50x50',
max_episode_steps=10000000000000,
)
register(
id='maze-random-60x60-v0',
entry_point='gym_maze.envs:MazeEnvRandom60x60',
max_episode_steps=10000000000000,
)
register(
id='maze-random-75x75-v0',
entry_point='gym_maze.envs:MazeEnvRandom75x75',
max_episode_steps=10000000000000,
)
register(
id='maze-sample-100x100-v0',
entry_point='gym_maze.envs:MazeEnvSample100x100',
max_episode_steps=10000000000000,
)
register(
id='maze-random-100x100-v0',
entry_point='gym_maze.envs:MazeEnvRandom100x100',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-10x10-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom10x10Plus',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-20x20-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom20x20Plus',
max_episode_steps=10000000000000,
nondeterministic=True,
)
register(
id='maze-random-30x30-plus-v0',
entry_point='gym_maze.envs:MazeEnvRandom30x30Plus',
max_episode_steps=10000000000000,
nondeterministic=True,
)
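# Illustrative usage sketch (not part of the original module): once this package is
# imported the registrations above take effect, and an environment can be created with
# the standard gym API, e.g.
#
#   import gym
#   import gym_maze  # noqa: F401  (triggers the register() calls above)
#   env = gym.make('maze-sample-5x5-v0')
#   observation = env.reset()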
|
from django.views.generic import View
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from stronghold.decorators import public
from stronghold.views import StrongholdPublicMixin
class ProtectedView(View):
"""A view we want to be private"""
def get(self, request, *args, **kwargs):
return HttpResponse("ProtectedView")
class PublicView(View):
""" A view we want to be public"""
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(PublicView, self).dispatch(*args, **kwargs)
def get(self, request, *args, **kwargs):
return HttpResponse("PublicView")
class PublicView2(StrongholdPublicMixin, View):
""" A view we want to be public, using the StrongholdPublicMixin"""
def get(self, request, *args, **kwargs):
return HttpResponse("PublicView")
@public
def public_view3(request):
""" A function view we want to be public"""
return HttpResponse("PublicView")
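# Illustrative sketch (not part of the original test module): these views would be wired
# into a hypothetical urls.py roughly like (Django >= 2.0 routing syntax assumed)
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('protected/', views.ProtectedView.as_view()),
#       path('public/', views.PublicView.as_view()),
#       path('public2/', views.PublicView2.as_view()),
#       path('public3/', views.public_view3),
#   ]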
|
"""
Library of linear regressions.
"""
import numpy as np
from scipy.stats import linregress
from astroML.linear_model import TLS_logL
from scipy.odr import Model, Data, RealData, ODR
import scipy.optimize as op
import emcee
def mcmc_linear_model(x, y, xerr, yerr, nwalkers=100, nruns=2000,
cut=100):
"""
This was built using the following references:
* http://dan.iel.fm/emcee/current/user/line/
* http://www.astroml.org/book_figures/chapter8/fig_total_least_squares.html
* https://github.com/astroML/astroML/blob/master/astroML/linear_model/TLS.py
    The linear model is $y = a*x + b$.
Returns (a,b), ((a_lower, a_upper), (b_lower, b_upper))
"""
# translate between typical slope-intercept representation,
# and the normal vector representation
def get_a_b(theta):
b = np.dot(theta, theta) / theta[1]
a = -theta[0] / theta[1]
return a, b
def get_beta(a, b):
denom = (1 + a * a)
return np.array([-b * a / denom, b / denom])
# Define the log-maximum likelihood
def lnlike(theta, x, y, xerr, yerr):
arr = np.column_stack((x, y))
arr_err = np.column_stack((xerr, yerr))
return TLS_logL(theta, arr, arr_err**2)
    # Get a first estimate of the parameters
# based on the maximum likelihood
nll = lambda *args: -lnlike(*args)
x0 = get_beta(*linregress(x, y)[:2]) # Initial guesses from
# Ordinary Least Squares
result = op.minimize(nll, x0=x0, args=(x, y, xerr, yerr),
method='Nelder-Mead')
theta = result["x"]
# Define the log-prior
def lnprior(theta):
a, b = get_a_b(theta)
if -5.0 < a < 0.5 and 0.0 < b < 10.0:
return 0.0
return -np.inf
# Define the full log-probability function
def lnprob(theta, x, y, xerr, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, xerr, yerr)
# Set the MCMC walkers
ndim = 2
pos = [theta + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
# Set and Run
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y, xerr, yerr))
sampler.run_mcmc(pos, nruns)
# Get the samples. Do not get the first `cut` ones.
samples = sampler.chain[:, cut:, :].reshape((-1, ndim))
coeffs = np.array([get_a_b(bt) for bt in samples])
# Get the a, b and their errors
    # The values of a and b are the medians of the
    # sampling distribution, while the lower and
    # upper bounds are the 16th and 84th percentiles.
a_mcmc, b_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(coeffs, [16, 50, 84], axis=0)))
return (a_mcmc[0], b_mcmc[0]), ((a_mcmc[1], a_mcmc[2]),
(b_mcmc[1], b_mcmc[2]))
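# Illustrative usage sketch (not part of the original library), with synthetic data chosen
# so that the true slope and intercept fall inside the flat prior (-5 < a < 0.5, 0 < b < 10):
#
#   rng = np.random.RandomState(0)
#   x = np.linspace(0, 10, 50)
#   y = -1.2 * x + 5.0 + rng.normal(scale=0.3, size=x.size)
#   xerr = np.full_like(x, 0.1)
#   yerr = np.full_like(y, 0.3)
#   (a, b), ((a_lo, a_hi), (b_lo, b_hi)) = mcmc_linear_model(x, y, xerr, yerr)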
def odr_linear_fit(x, y, xerr, yerr):
"""
Obtained from Scipy ODR webpage with small modifications
https://docs.scipy.org/doc/scipy/reference/odr.html
"""
def f(B, x):
'''Linear function y = m*x + b'''
# B is a vector of the parameters.
# x is an array of the current x values.
# x is in the same format as the x passed to Data or RealData.
#
# Return an array in the same format as y passed to Data or RealData.
return B[0]*x + B[1]
# Create a model
linear = Model(f)
# Create a Data instance
mydata = Data(x, y, wd=1./np.power(xerr,2), we=1./np.power(yerr,2))
    # Instantiate ODR with the data, model and an initial parameter estimate.
beta0 = linregress(x, y)[:2]
myodr = ODR(mydata, linear, beta0=beta0)
    # Run the fit.
myoutput = myodr.run()
a, b = myoutput.beta
a_error, b_error = myoutput.sd_beta
return a, b, a_error, b_error
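# Illustrative usage sketch (not part of the original library): odr_linear_fit accepts the
# same inputs as mcmc_linear_model and returns point estimates with symmetric errors,
#
#   a, b, a_err, b_err = odr_linear_fit(x, y, xerr, yerr)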
|
"""Unit test package for siepic_analysis_package."""
|
'''
Created on 08.04.2019
@author: mort
ipywidget interface to the GEE for IR-MAD
'''
import ee, time, warnings, math
import ipywidgets as widgets
from IPython.display import display
from ipyleaflet import (Map,DrawControl,TileLayer,
basemaps,basemap_to_tiles,
LayersControl,
MeasureControl,
FullScreenControl)
from auxil.eeMad import imad,radcal
from geopy.geocoders import photon
ee.Initialize()
geolocator = photon.Photon(timeout=10)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
poly = ee.Geometry.MultiPolygon([])
# poly = ee.Geometry.Polygon([[6.30154, 50.948329], [6.293307, 50.877329],
# [6.427091, 50.875595], [6.417486, 50.947464],
# [6.30154, 50.948329]])
def chi2cdf(chi2,df):
''' Chi square cumulative distribution function '''
return ee.Image(chi2.divide(2)).gammainc(ee.Number(df).divide(2))
def makefeature(data):
''' for exporting as CSV to Drive '''
return ee.Feature(None, {'data': data})
def handle_draw(self, action, geo_json):
global poly
coords = geo_json['geometry']['coordinates']
if action == 'created':
poly = ee.Geometry.MultiPolygon(poly.coordinates().add(coords))
w_preview.disabled = True
w_export.disabled = True
w_collect.disabled = False
elif action == 'deleted':
poly1 = ee.Geometry.MultiPolygon(coords)
poly = poly.difference(poly1)
if len(poly.coordinates().getInfo()) == 0:
w_collect.disabled = True
dc = DrawControl(polyline={},circle={})
dc.on_draw(handle_draw)
# def GetTileLayerUrl(ee_image_object):
# map_id = ee.Image(ee_image_object).getMapId()
# tile_url_template = "https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}"
# return tile_url_template.format(**map_id)
def GetTileLayerUrl(ee_image_object):
map_id = ee.Image(ee_image_object).getMapId()
return map_id["tile_fetcher"].url_format
w_text = widgets.Textarea(
layout = widgets.Layout(width='75%'),
value = 'Algorithm output',
rows = 4,
disabled = False
)
w_platform = widgets.RadioButtons(
options=['SENTINEL/S2(VNIR/SWIR)','SENTINEL/S2(NIR/SWIR)','LANDSAT LC08','LANDSAT LE07','LANDSAT LT05'],
value='SENTINEL/S2(VNIR/SWIR)',
description='Platform:',
disabled=False
)
w_startdate1 = widgets.Text(
value='2020-05-01',
placeholder=' ',
description='Start T1:',
disabled=False
)
w_enddate1 = widgets.Text(
value='2020-07-01',
placeholder=' ',
description='End T1:',
disabled=False
)
w_startdate2 = widgets.Text(
value='2020-08-01',
placeholder=' ',
description='Start T2:',
disabled=False
)
w_enddate2 = widgets.Text(
value='2020-10-01',
placeholder=' ',
description='End T2:',
disabled=False
)
w_iterations = widgets.IntText(
value=30,
placeholder=' ',
description='Max Iter:',
disabled=False
)
w_scale = widgets.IntText(
value=30,
placeholder=' ',
description='Scale:',
disabled=False
)
w_exportname = widgets.Text(
value='users/<username>/<path>',
placeholder=' ',
disabled=False
)
w_location = widgets.Text(
value='Jülich',
placeholder=' ',
description='',
disabled=False
)
w_goto = widgets.Button(description="GoTo",disabled=False)
w_collect = widgets.Button(description="Collect",disabled=True)
w_preview = widgets.Button(description="Preview",disabled=True)
w_export = widgets.Button(description='Export to assets',disabled=True)
w_dates1 = widgets.VBox([w_startdate1,w_enddate1,w_iterations])
w_dates2 = widgets.VBox([w_startdate2,w_enddate2,w_scale])
w_dates = widgets.HBox([w_platform,w_dates1,w_dates2])
w_exp = widgets.HBox([w_export,w_exportname])
w_go = widgets.HBox([w_collect,w_preview,w_exp])
w_txt = widgets.HBox([w_text,w_goto,w_location])
box = widgets.VBox([w_txt,w_dates,w_go])
def on_widget_change(b):
w_preview.disabled = True
w_export.disabled = True
w_platform.observe(on_widget_change,names='value')
w_startdate1.observe(on_widget_change,names='value')
w_enddate1.observe(on_widget_change,names='value')
w_startdate2.observe(on_widget_change,names='value')
w_enddate2.observe(on_widget_change,names='value')
def on_goto_button_clicked(b):
try:
location = geolocator.geocode(w_location.value)
m.center = (location.latitude,location.longitude)
m.zoom = 11
except Exception as e:
print('Error: %s'%e)
w_goto.on_click(on_goto_button_clicked)
def on_collect_button_clicked(b):
global result,m,collection,count, \
w_startdate1,w_enddate1,w_startdate2, \
        w_platform,w_enddate2,w_changemap, \
scale,nbands, \
image1,image2, \
madnames,coords,timestamp1,timestamp2
try:
coords = ee.List(poly.bounds().coordinates().get(0))
w_text.value = 'collecting, please wait ...'
cloudcover = 'CLOUD_COVER'
scale = 30.0
rgb = ['B4','B5','B7']
if w_platform.value=='SENTINEL/S2(VNIR/SWIR)':
collectionid = 'COPERNICUS/S2'
scale = 10.0
bands = ['B2','B3','B4','B8']
rgb = ['B8','B4','B3']
cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
elif w_platform.value=='SENTINEL/S2(NIR/SWIR)':
collectionid = 'COPERNICUS/S2'
scale = 20.0
bands = ['B5','B6','B7','B8A','B11','B12']
rgb = ['B5','B7','B11']
cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
elif w_platform.value=='LANDSAT LC08':
collectionid = 'LANDSAT/LC08/C01/T1_RT_TOA'
bands = ['B2','B3','B4','B5','B6','B7']
rgb = ['B5','B6','B7']
elif w_platform.value=='LANDSAT LE07':
collectionid = 'LANDSAT/LE07/C01/T1_RT_TOA'
bands = ['B1','B2','B3','B4','B5','B7']
else:
collectionid = 'LANDSAT/LT05/C01/T1_TOA'
bands = ['B1','B2','B3','B4','B5','B7']
collection1 = ee.ImageCollection(collectionid) \
.filterBounds(ee.Geometry.Point(coords.get(0))) \
.filterBounds(ee.Geometry.Point(coords.get(1))) \
.filterBounds(ee.Geometry.Point(coords.get(2))) \
.filterBounds(ee.Geometry.Point(coords.get(3))) \
.filterDate(ee.Date(w_startdate1.value), ee.Date(w_enddate1.value)) \
.sort(cloudcover, True)
count = collection1.size().getInfo()
if count==0:
raise ValueError('No images found for first time interval: '+collectionid)
collection2 = ee.ImageCollection(collectionid) \
.filterBounds(ee.Geometry.Point(coords.get(0))) \
.filterBounds(ee.Geometry.Point(coords.get(1))) \
.filterBounds(ee.Geometry.Point(coords.get(2))) \
.filterBounds(ee.Geometry.Point(coords.get(3))) \
.filterDate(ee.Date(w_startdate2.value), ee.Date(w_enddate2.value)) \
.sort(cloudcover, True)
count = collection2.size().getInfo()
if count==0:
raise ValueError('No images found for second time interval: '+collectionid)
image1 = ee.Image(collection1.first()).select(bands)
timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
timestamp1 = time.gmtime(int(timestamp1['value'])/1000)
timestamp1 = time.strftime('%c', timestamp1)
systemid1 = image1.get('system:id').getInfo()
cloudcover1 = image1.get(cloudcover).getInfo()
image2 = ee.Image(collection2.first()).select(bands)
timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
timestamp2 = time.gmtime(int(timestamp2['value'])/1000)
timestamp2 = time.strftime('%c', timestamp2)
systemid2 = image2.get('system:id').getInfo()
cloudcover2 = image2.get(cloudcover).getInfo()
txt = 'Image1: %s \n'%systemid1
txt += 'Acquisition date: %s, Cloud cover: %f \n'%(timestamp1,cloudcover1)
txt += 'Image2: %s \n'%systemid2
txt += 'Acquisition date: %s, Cloud cover: %f \n'%(timestamp2,cloudcover2)
w_text.value = txt
nbands = image1.bandNames().length()
madnames = ['MAD'+str(i+1) for i in range(nbands.getInfo())]
# co-register
image2 = image2.register(image1,60)
w_preview.disabled = False
w_export.disabled = False
# display first image
if len(m.layers)>3:
m.remove_layer(m.layers[3])
img = image1.clip(poly).select(rgb).rename('r','g','b')
ps = img.reduceRegion(ee.Reducer.percentile([2,98]),maxPixels=1e10).getInfo()
mn = [ps['r_p2'],ps['g_p2'],ps['b_p2']]
mx = [ps['r_p98'],ps['g_p98'],ps['b_p98']]
m.add_layer(TileLayer(url=GetTileLayerUrl(img.visualize(min=mn,max=mx))))
except Exception as e:
w_text.value = 'Error: %s'%e
w_collect.on_click(on_collect_button_clicked)
def on_preview_button_clicked(b):
global nbands
try:
w_text.value = 'iteration started, please wait ...\n'
# iMAD
inputlist = ee.List.sequence(1,w_iterations.value)
first = ee.Dictionary({'done':ee.Number(0),
'scale':ee.Number(w_scale.value),
'niter':ee.Number(0),
'image':image1.addBands(image2).clip(poly),
'allrhos': [ee.List.sequence(1,nbands)],
'chi2':ee.Image.constant(0),
'MAD':ee.Image.constant(0)})
result = ee.Dictionary(inputlist.iterate(imad,first))
MAD = ee.Image(result.get('MAD')).rename(madnames)
niter = ee.Number(result.get('niter')).getInfo()
# threshold
nbands = MAD.bandNames().length()
chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
pval = chi2cdf(chi2,nbands).subtract(1).multiply(-1)
tst = pval.gt(ee.Image.constant(0.0001))
MAD = MAD.where(tst,ee.Image.constant(0))
allrhos = ee.Array(result.get('allrhos')).toList()
txt = 'Canonical correlations: %s \nIterations: %i\n'%(str(allrhos.get(-1).getInfo()),niter)
w_text.value += txt
if len(m.layers)>3:
m.remove_layer(m.layers[3])
MAD2 = MAD.select(1).rename('b')
ps = MAD2.reduceRegion(ee.Reducer.percentile([1,99])).getInfo()
mn = ps['b_p1']
mx = ps['b_p99']
m.add_layer(TileLayer(url=GetTileLayerUrl( MAD2.visualize(min=mn,max=mx))))
except Exception as e:
w_text.value = 'Error: %s\n Retry collect/preview or export to assets'%e
w_preview.on_click(on_preview_button_clicked)
def on_export_button_clicked(b):
global w_exportname, nbands
try:
# iMAD
inputlist = ee.List.sequence(1,w_iterations.value)
first = ee.Dictionary({'done':ee.Number(0),
'scale':ee.Number(w_scale.value),
'niter':ee.Number(0),
'image':image1.addBands(image2).clip(poly),
'allrhos': [ee.List.sequence(1,nbands)],
'chi2':ee.Image.constant(0),
'MAD':ee.Image.constant(0)})
result = ee.Dictionary(inputlist.iterate(imad,first))
MAD = ee.Image(result.get('MAD')).rename(madnames)
# threshold
chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
pval = chi2cdf(chi2,nbands).subtract(1).multiply(-1)
tst = pval.gt(ee.Image.constant(0.0001))
MAD = MAD.where(tst,ee.Image.constant(0))
allrhos = ee.Array(result.get('allrhos')).toList().slice(1,-1)
# radcal
ncmask = chi2cdf(chi2,nbands).lt(ee.Image.constant(0.05)).rename(['invarpix'])
inputlist1 = ee.List.sequence(0,nbands.subtract(1))
first = ee.Dictionary({'image':image1.addBands(image2),
'ncmask':ncmask,
'nbands':nbands,
'scale':ee.Number(w_scale.value),
'rect':poly,
'coeffs': ee.List([]),
'normalized':ee.Image()})
result1 = ee.Dictionary(inputlist1.iterate(radcal,first))
coeffs = ee.List(result1.get('coeffs'))
sel = ee.List.sequence(1,nbands)
normalized = ee.Image(result1.get('normalized')).select(sel)
MADs = ee.Image.cat(MAD,chi2,ncmask,image1.clip(poly),image2.clip(poly),normalized)
assexport = ee.batch.Export.image.toAsset(MADs,
description='assetExportTask',
assetId=w_exportname.value,scale=scale,maxPixels=1e9)
assexport.start()
assexportid = str(assexport.id)
w_text.value= 'Exporting change map, chisqr, original images and normalized image to %s\n task id: %s'%(w_exportname.value,assexportid)
except Exception as e:
w_text.value = 'Error: %s'%e
# export metadata to drive
ninvar = ee.String(ncmask.reduceRegion(ee.Reducer.sum().unweighted(),
scale=scale,maxPixels= 1e9).toArray().project([0]))
metadata = ee.List(['IR-MAD: '+time.asctime(),
'Platform: '+w_platform.value,
'Asset export name: '+w_exportname.value,
'Timestamps: %s %s'%(timestamp1,timestamp2)]) \
.cat(['Canonical Correlations:']) \
.cat(allrhos) \
.cat(['Radiometric Normalization, Invariant Pixels:']) \
.cat([ninvar]) \
.cat(['Slope, Intercept, R:']) \
.cat(coeffs)
fileNamePrefix=w_exportname.value.replace('/','-')
gdexport = ee.batch.Export.table.toDrive(ee.FeatureCollection(metadata.map(makefeature)).merge(ee.Feature(poly)),
description='driveExportTask_meta',
folder = 'gee',
fileNamePrefix=fileNamePrefix )
gdexport.start()
w_text.value += '\n Exporting metadata to Drive/EarthEngineImages/%s\n task id: %s'%(fileNamePrefix,str(gdexport.id))
w_export.on_click(on_export_button_clicked)
def run():
global m,center
center = [51.0,6.4]
osm = basemap_to_tiles(basemaps.OpenStreetMap.Mapnik)
ews = basemap_to_tiles(basemaps.Esri.WorldStreetMap)
ewi = basemap_to_tiles(basemaps.Esri.WorldImagery)
dc = DrawControl(polyline={},circlemarker={})
dc.rectangle = {"shapeOptions": {"fillColor": "#0000ff","color": "#0000ff","fillOpacity": 0.05}}
dc.polygon = {"shapeOptions": {"fillColor": "#0000ff","color": "#0000ff","fillOpacity": 0.05}}
dc.on_draw(handle_draw)
lc = LayersControl(position='topright')
fs = FullScreenControl(position='topleft')
mc = MeasureControl(position='topright',primary_length_unit = 'kilometers')
m = Map(center=center, zoom=11, layout={'height':'500px'},layers=(ewi,ews,osm),controls=(mc,dc,lc,fs))
# m = Map(center=center, zoom=11, layout={'height':'500px'},controls=(lc,dc,fs,mc,sm_control))
display(m)
return box
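# Illustrative usage sketch (not part of the original module): in a Jupyter notebook,
# after importing this module, the interface is typically started with
#
#   box = run()   # displays the interactive map and returns the widget box
#   box           # render the controls; draw a polygon on the map, then use
#                 # Collect / Preview / Export to assets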
|
# This script tests LED display and speaker of calliope mini
from calliope_mini import *
import music
# list of pitch values. Every value
# represents a frequency (Hz)
melody = [200, 300, 200, 300, 200,
300, 100, 300, 100, 300, 100, 300,
400, 300, 400, 300, 500, 600, 700, 300, 200]
pom = True
# an Image object can be created and then passed to
# the display to be drawn. Each group of five digits
# between colons represents one row of the display
img_1 = Image('09090:90909:09090:90909:09090:')
img_2 = Image('90909:09090:90909:09090:90909:')
# this function plays a melody and switches between
# two display patterns. This function can be improved
# using multiprocessing
def play(pom, img_1, img_2):
music.stop()
for p in melody:
music.pitch(p)
if pom:
display.show(img_1)
pom = False
else:
display.show(img_2)
pom = True
sleep(200)
while True:
play(pom, img_1, img_2)
|
""" schedule
cog that handles all scheduling tasks
which includes scheduled announcements for members """
from discord.ext import commands
import random
import json
import aiocron
class Schedule(commands.Cog):
def __init__(self, bot, anilist_api, emojis, config):
self.bot = bot
self.anilist_api = anilist_api
self.emojis = emojis
self.channel_id = config['CHANNEL_ID']
self.role_id = config['ROLE_ID']
# set up skip parameter
self.skip = False
        @aiocron.crontab('0 17 * * 0')
        async def on_sunday_reminder():
            await self.sunday_message()
        @aiocron.crontab('0 17 * * 1')
        async def on_monday_reminder():
            await self.monday_message()
@commands.command()
async def schedule(self, ctx, anilist_url_1, anilist_url_2, anilist_url_3):
""" schedules the three anime sessions - must provide 3 anilist urls """
# build emojis
positive_emoji = self.bot.get_emoji(random.choice(self.emojis['positive']))
# build a dictionary for the scheduled anime
schedule = {
'anime_1': anilist_url_1,
'anime_2': anilist_url_2,
'anime_3': anilist_url_3,
}
# dump it to a temporary file
with open('data/schedule.json', 'w') as fn:
json.dump(schedule, fn)
usr_msg = "かしこまりました、ご主人様。"
usr_msg += "\nI have successfully updated the scheduled messages!"
await ctx.send(usr_msg)
await ctx.send(positive_emoji)
@commands.command()
async def skip(self, ctx):
""" has bot skip the pinging and scheduling until reactivated again """
        # build emojis
        positive_emoji = self.bot.get_emoji(random.choice(self.emojis['positive']))
        # set up the skip parameter
        self.skip = True
usr_msg = "かしこまりました、ご主人様。"
usr_msg += "\nI have deactivated the scheduling upon your request!"
await ctx.send(usr_msg)
await ctx.send(positive_emoji)
@commands.command()
async def activate(self, ctx):
""" has bot activate the pinging and scheduling until deactivated again """
        # build emojis
        positive_emoji = self.bot.get_emoji(random.choice(self.emojis['positive']))
        # set up the skip parameter
        self.skip = False
usr_msg = "かしこまりました、ご主人様。"
usr_msg += "\nI have activated the scheduling upon your request!"
await ctx.send(usr_msg)
await ctx.send(positive_emoji)
async def sunday_message(self):
""" sunday scheduled message """
# do not run method if skip parameter is true
if self.skip:
return
# set up the channel to post in
channel = self.bot.get_channel(int(self.channel_id))
# build emojis
positive_emoji = self.bot.get_emoji(random.choice(self.emojis['positive']))
# load the requested scheduled anime
schedule = {}
with open('data/schedule.json', 'r') as fn:
schedule = json.load(fn)
# set up the anime urls
anime_1_url = schedule.get('anime_1', '')
anime_2_url = schedule.get('anime_2', '')
# set up the titles
anime_1_title = self.anilist_api.get_title(anime_1_url)
anime_2_title = self.anilist_api.get_title(anime_2_url)
# set up the progress
anime_1_progress = self.anilist_api.get_current_progress(anime_1_url)
anime_2_progress = self.anilist_api.get_current_progress(anime_2_url)
# set up the message to the user
usr_msg = f'<@&{self.role_id}> おはようございます、ご主人様たち!'
usr_msg += '\nWe have the following anime scheduled for today.'
usr_msg += f'\n **2:00 PM PST**: {anime_1_title}. Currently on episode {anime_1_progress}.'
usr_msg += f'\n **7:30 PM PST**: {anime_2_title}. Currently on episode {anime_2_progress}.'
usr_msg += f'\nHope to see you there!'
await channel.send(usr_msg)
await channel.send(positive_emoji)
async def monday_message(self):
""" monday scheduled message """
# do not run method if skip parameter is true
if self.skip:
return
# set up the channel to post in
channel = self.bot.get_channel(int(self.channel_id))
# build emojis
positive_emoji = self.bot.get_emoji(random.choice(self.emojis['positive']))
# load the requested scheduled anime
schedule = {}
with open('data/schedule.json', 'r') as fn:
schedule = json.load(fn)
# set up the anime urls
anime_3_url = schedule.get('anime_3', '')
# set up the titles
anime_3_title = self.anilist_api.get_title(anime_3_url)
# set up the progress
anime_3_progress = self.anilist_api.get_current_progress(anime_3_url)
# set up the message to the user
usr_msg = f'<@&{self.role_id}> おはようございます、ご主人様たち!'
usr_msg += '\nWe have the following anime scheduled for today.'
usr_msg += f'\n **7:30 PM PST**: {anime_3_title}. Currently on episode {anime_3_progress}.'
usr_msg += f'\nHope to see you there!'
await channel.send(usr_msg)
await channel.send(positive_emoji)
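# Illustrative setup sketch (not part of the original cog): the surrounding bot code is
# assumed to register the cog roughly like
#
#   bot.add_cog(Schedule(bot, anilist_api, emojis, config))
#
# (awaited on discord.py 2.x), where config supplies the CHANNEL_ID and ROLE_ID keys used above.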
|
import unittest
from zeppos_data_manager.data_cleaner import DataCleaner
from datetime import datetime
class TestTheProjectMethods(unittest.TestCase):
def test_to_date_method(self):
self.assertEqual(DataCleaner.to_date('01/01/2020'), datetime.strptime('2020-01-01 00:00:00', '%Y-%m-%d %H:%M:%S'))
def test_to_numeric_method(self):
self.assertEqual(DataCleaner.to_numeric('0'), 0)
def test_to_string_method(self):
self.assertEqual(DataCleaner.to_string(0), '0')
def test_is_numeric_method(self):
self.assertEqual(DataCleaner.is_numeric(1), True)
self.assertEqual(DataCleaner.is_numeric("1"), True)
self.assertEqual(DataCleaner.is_numeric(1.0), True)
self.assertEqual(DataCleaner.is_numeric("1.0"), True)
self.assertEqual(DataCleaner.is_numeric("nan"), False)
self.assertEqual(DataCleaner.is_numeric("a"), False)
self.assertEqual(DataCleaner.is_numeric(None), False)
def test_is_integer_method(self):
self.assertEqual(DataCleaner.is_integer(1), True)
self.assertEqual(DataCleaner.is_integer(1.0), False)
self.assertEqual(DataCleaner.is_integer(1.1), False)
self.assertEqual(DataCleaner.is_integer("1"), True)
self.assertEqual(DataCleaner.is_integer("1.0"), False)
self.assertEqual(DataCleaner.is_integer("1.1"), False)
self.assertEqual(DataCleaner.is_integer(None), False)
def test_is_decimal_method(self):
self.assertEqual(DataCleaner.is_decimal(1.1), True)
self.assertEqual(DataCleaner.is_decimal(1.0), True)
self.assertEqual(DataCleaner.is_decimal("1.1"), True)
self.assertEqual(DataCleaner.is_decimal("1.0"), True)
self.assertEqual(DataCleaner.is_decimal("1"), False)
self.assertEqual(DataCleaner.is_decimal(1), False)
self.assertEqual(DataCleaner.is_decimal("a.1"), False)
def test_is_alpha_only_method(self):
self.assertEqual(DataCleaner.is_alpha_only("test"), True)
self.assertEqual(DataCleaner.is_alpha_only(""), False)
self.assertEqual(DataCleaner.is_alpha_only(None), False)
self.assertEqual(DataCleaner.is_alpha_only(1), False)
self.assertEqual(DataCleaner.is_alpha_only("1"), False)
self.assertEqual(DataCleaner.is_alpha_only("test1"), False)
self.assertEqual(DataCleaner.is_alpha_only("test test"), False)
def test_is_nan_method(self):
self.assertEqual(DataCleaner.is_nan(0), False)
def test_is_date_method(self):
self.assertEqual(DataCleaner.is_date('1/1/1900'), True)
def test_lreplace_method(self):
self.assertEqual(DataCleaner.lreplace("0", "_", "011"), "_11")
def test_rreplace_method(self):
self.assertEqual(DataCleaner.rreplace("0", "_", "110"), "11_")
def test_drop_first_characters_method(self):
self.assertEqual(DataCleaner.drop_first_characters("12345", 2), "345")
def test_replace_alpha_numeric_value_only_method(self):
self.assertEqual(DataCleaner.replace_alpha_numeric_value_only(" $231aA "), "231aA")
def test_pad_left_method(self):
self.assertEqual(DataCleaner.pad_left('2', '0', 2), '02')
self.assertEqual(DataCleaner.pad_left('02', '0', 2), '02')
self.assertEqual(DataCleaner.pad_left('002', '0', 2), '02')
def test_get_date_format_method(self):
self.assertEqual(DataCleaner.get_date_format('1900'), '%Y')
self.assertEqual(DataCleaner.get_date_format('1/1/1900'), '%m/%d/%Y')
self.assertEqual(DataCleaner.get_date_format('111900'), None)
def test_clean_filename_method(self):
self.assertEqual(DataCleaner.clean_filename("$my_File_123"), "_my_File_123")
def test_adjust_escape_character_method(self):
self.assertEqual('\\"', DataCleaner.adjust_escape_character('"'))
self.assertEqual("\\'", DataCleaner.adjust_escape_character("'"))
self.assertEqual('\\r', DataCleaner.adjust_escape_character('\r'))
self.assertEqual('\\n', DataCleaner.adjust_escape_character('\n'))
def test_strip_content_method(self):
# don't adjust alignment. The unit test will fail!
self.assertEqual("test", DataCleaner.strip_content(" test"))
self.assertEqual("""test1
test2""", DataCleaner.strip_content("""
test1
test2
"""))
self.assertEqual("""test1
test2
""", DataCleaner.strip_content("""
test1
test2
""", remove_last_line_seperator=False))
if __name__ == '__main__':
unittest.main()
|
import unittest2
from django import test
from django.core import exceptions, serializers
from django_bitmask_field import BitmaskField, BitmaskFormField
from .models import TestModel, ContributingModel, TestForm
class TestCase(test.TestCase, unittest2.TestCase):
pass
class BitmaskFieldTestCase(TestCase):
def test_bitmaskfield_return_error_on_invalid_choices(self):
cases = dict(
none=[(None, 'choice')],
str=[('foo', 'choice')],
negative=[(-1, 'choice')],
optgroup=[('optgroup', [(None, 'choice')])],
)
for case, choices in cases.items():
with self.subTest(case=case):
field = BitmaskField(choices=choices)
field.contribute_to_class(ContributingModel, 'bitmask')
errors = field.check()
self.assertEqual(1, len(errors))
error = errors[0]
self.assertEqual("all 'choices' must be of integer type.", error.msg)
def test_bitmaskfield_max_length_validation(self):
field = BitmaskField(max_length=1)
field.clean(256, None)
with self.assertRaises(exceptions.ValidationError):
field.clean(257, None)
def test_bitmaskfield_cleans_valid_choice(self):
field = BitmaskField(choices=[(1, 'choice 0'), ('optgroup', [(4, 'choice 1')])])
cases = dict(
first_choice=dict( # 0001
choice=1,
expected_cleaned=1,
),
second_choice=dict( # 0100
choice=4,
expected_cleaned=4,
),
combo=dict( # 0101
choice=5,
expected_cleaned=5,
),
)
for case, data in cases.items():
with self.subTest(case=case):
self.assertEqual(
data['expected_cleaned'],
field.clean(data['choice'], None),
)
def test_bitmaskfield_works_with_multibit_choices(self):
field = BitmaskField(choices=[(1, 'choice 0'), (4, 'choice 1'), ('optgroup', [(5, 'choice 2')])])
self.assertEqual(5, field.clean(5, None))
def test_bitmaskfield_raises_error_on_invalid_choice(self):
field = BitmaskField(choices=[(1, 'choice 0'), ('optgroup', [(4, 'choice 1')])])
cases = dict(
none=None,
single_invalid_bit=2, # 0010
two_invalid_bits=10, # 1010
partly_invalid_1=3, # 0011
partly_invalid_2=6, # 0110
partly_invalid_3=7, # 0111
)
for case, value in cases.items():
with self.subTest(case=case):
with self.assertRaises(exceptions.ValidationError):
field.clean(value, None)
def test_bitmaskfield_write_and_read_from_db(self):
cases = dict(
empty=0,
single=1,
double=5,
null=None,
)
for case, value in cases.items():
with self.subTest(case=case):
test_model = TestModel(bitmask=value)
test_model.save()
self.assertEqual(value, TestModel.objects.get(id=test_model.id).bitmask)
def test_bitmaskfield_serialization_deserialization(self):
cases = dict(
none=None,
            regular=42,
)
for case, expected_value in cases.items():
with self.subTest(case=case):
model = TestModel(bitmask=expected_value)
serialized_data = serializers.serialize("xml", [model])
deserialized_data = list(serializers.deserialize('xml', serialized_data))
deserialized_model = deserialized_data[0].object
self.assertEqual(expected_value, deserialized_model.bitmask)
class BitmaskFormFieldTestCase(TestCase):
def test_is_valid(self):
cases = dict(
empty=dict(
data={},
is_valid=False,
errors={'bitmask', 'bitmask_int'},
expected={},
),
blank=dict(
data={'bitmask': [], 'bitmask_int': ''},
is_valid=False,
errors={'bitmask', 'bitmask_int'},
expected={},
),
regular=dict(
data={'bitmask': ['1'], 'bitmask_int': '42'},
is_valid=True,
errors=set(),
expected={'bitmask': 1, 'bitmask_int': 42},
),
)
for case, test_data in cases.items():
with self.subTest(case=case):
form = TestForm(test_data['data'])
self.assertEqual(
test_data['is_valid'],
form.is_valid(),
form.errors.as_text(),
)
self.assertFalse(test_data['errors'] ^ set(form.errors))
self.assertEqual(test_data['expected'], form.cleaned_data)
def test_has_changed(self):
cases = dict(
empty=dict(
initial=None,
data={},
has_changed=False,
),
scratch=dict(
initial=None,
data={'bitmask': ['1'], 'bitmask_int': '42'},
has_changed=True,
),
changed=dict(
initial={'bitmask': 1, 'bitmask_int': 42},
data={'bitmask': ['1', '4'], 'bitmask_int': '42'},
has_changed=True,
),
not_changed=dict(
initial={'bitmask': 1, 'bitmask_int': 42},
data={'bitmask': ['1'], 'bitmask_int': '42'},
has_changed=False,
),
)
for case, test_data in cases.items():
with self.subTest(case=case):
form = TestForm(test_data['data'], initial=test_data['initial'])
self.assertEqual(test_data['has_changed'], form.has_changed())
def test_prepare_value(self):
cases = dict(
none=dict(
initial_value=None,
prepared_value=None,
),
zero=dict(
initial_value=0,
prepared_value=0,
),
prepared=dict(
initial_value=['1', '4'],
prepared_value=['1', '4'],
),
single_value=dict(
initial_value=32,
prepared_value=[32],
),
double_value=dict(
initial_value=33,
prepared_value=[1, 32],
),
)
for case, test_data in cases.items():
with self.subTest(case=case):
form_field = BitmaskFormField()
self.assertEqual(
test_data['prepared_value'],
form_field.prepare_value(test_data['initial_value']),
)
|
from flask import current_app as app
from config.extensions import db
class DFCluster(db.Model):
"""
DF Cluster uniquely identifies a Deepfence on-prem deployment.
    A unique ID based on the MAC address is created following RFC 4122 recommendations.
    Once created, it is persisted in the database and does not change for the entire
    installation session.
"""
id = db.Column(db.BigInteger, primary_key=True)
def save(self, commit=True):
db.session.add(self)
if commit:
try:
db.session.commit()
except:
db.session.rollback()
raise
def __repr__(self):
return "<DFCluster {}>".format(self.id)
|
def get_keytaps(target_set, key_capacity):
le = len(target_set)
if le>sum(key_capacity):
return -1
buttons = len(key_capacity)
target_set = sorted(target_set, reverse=True)
key_capacity = sorted(key_capacity)
multipliers = [ 1 for i in range(buttons) ]
pointer = 0
keytaps = 0
for i in range(le):
keytaps += multipliers[pointer]*target_set[i]
multipliers[pointer] += 1
pointer += 1
if pointer==buttons :
pointer = 0
return keytaps
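# Worked example (illustrative, not part of the original script): for letter frequencies
# target_set = [5, 3, 2] and two keys with key_capacity = [2, 2], the letters are assigned
# greedily in decreasing frequency order, so the per-press multipliers become 1, 1, 2 and
# get_keytaps returns 1*5 + 1*3 + 2*2 = 12. Note that beyond the initial feasibility check,
# individual key capacities are not enforced by the loop.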
def main():
target_set = list(map(int,input().split()))
key_capacity = list(map(int,input().split()))
print(get_keytaps(target_set,key_capacity))
if __name__=='__main__':
main()
|
def test_program_units(library):
for program_unit in library.program_units:
assert program_unit.program_unit_text
def test_library_has_program_units(library):
assert library.program_units
|
# -*- coding: utf-8 -*-
"""Export / Import of generic python models.
This module defines generic filesystem format for python models and provides utilities
for saving and loading to and from this format. The format is self contained in a sense
that it includes all necessary information for anyone to load it and use it. Dependencies
are either stored directly with the model or referenced via a conda environment.
The convention for pyfunc models is to have a predict method or function with the following
signature
predict(data: pandas.DataFrame) -> pandas.DataFrame
This convention is relied upon by other mlflow components.
Pyfunc model format is defined as a directory structure containing all required data, code and
configuration:
./dst-path/
./MLmodel - config
<code> - any code packaged with the model (specified in the conf file, see below)
<data> - any data packaged with the model (specified in the conf file, see below)
<env> - conda environment definition (specified in the conf file, see below)
It must contain MLmodel file in its root with "python_function" format with the following
parameters:
- loader_module [required]:
Python module that can load the model. Expected as module identifier
e.g. ``mlflow.sklearn``, it will be imported via importlib.import_module.
The imported module must contain function with the following signature:
load_pyfunc(path: string) -> <pyfunc model>
The path argument is specified by the data parameter and may refer to a file or directory.
- code [optional]:
relative path to a directory containing the code packaged with this model.
All files and directories inside this directory are added to the python path
prior to importing the model loader.
- data [optional]:
relative path to a file or directory containing model data.
the path is passed to the model loader.
- env [optional]:
relative path to an exported conda environment. If present this environment
should be activated prior to running the model.
Example:
```
>tree example/sklearn_iris/mlruns/run1/outputs/linear-lr
├── MLmodel
├── code
│ ├── sklearn_iris.py
│
├── data
│ └── model.pkl
└── mlflow_env.yml
>cat example/sklearn_iris/mlruns/run1/outputs/linear-lr/MLmodel
python_function:
code: code
data: data/model.pkl
env: mlflow_env.yml
main: sklearn_iris
```
Todo:
* Get default conda_env of the project.
"""
import importlib
import os
import shutil
import sys
import pandas
from mlflow import tracking
from mlflow.models import Model
from mlflow.utils.file_utils import TempDir
FLAVOR_NAME = "python_function"
MAIN = "loader_module"
CODE = "code"
DATA = "data"
ENV = "env"
def add_to_model(model, loader_module, data=None, code=None, env=None):
""" Add pyfunc spec to the model configuration.
Defines pyfunc configuration schema. Caller can use this to create a valid pyfunc model flavor
out of an existing directory structure. For example, other model flavors can use this to specify
how to use their output as a pyfunc.
NOTE: all paths are relative to the exported model root directory.
    :param loader_module: python module that can load the model
    :param model: existing servable (model configuration) to update
    :param data: path to the model data
:param code: path to the code dependencies
:param env: conda environment
:return: updated model configuration.
"""
parms = {MAIN: loader_module}
if code:
parms[CODE] = code
if data:
parms[DATA] = data
if env:
parms[ENV] = env
return model.add_flavor(FLAVOR_NAME, **parms)
def load_pyfunc(path, run_id=None):
""" Load model stored in python-function format.
"""
if run_id:
path = tracking._get_model_log_dir(path, run_id)
conf_path = os.path.join(path, "MLmodel")
model = Model.load(conf_path)
if FLAVOR_NAME not in model.flavors:
raise Exception("Format '{format}' not found not in {path}.".format(format=FLAVOR_NAME,
path=conf_path))
conf = model.flavors[FLAVOR_NAME]
if CODE in conf and conf[CODE]:
code_path = os.path.join(path, conf[CODE])
sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
data_path = os.path.join(path, conf[DATA]) if (DATA in conf) else path
return importlib.import_module(conf[MAIN]).load_pyfunc(data_path)
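# Illustrative usage sketch (not part of the original module); the path below is hypothetical:
#
#   model = load_pyfunc("/path/to/exported/pyfunc/model")
#   predictions = model.predict(input_pandas_dataframe)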
def _get_code_dirs(src_code_path, dst_code_path=None):
if not dst_code_path:
dst_code_path = src_code_path
return [(os.path.join(dst_code_path, x))
for x in os.listdir(src_code_path) if not x.endswith(".py") and not x.endswith(".pyc")
and not x == "__pycache__"]
def spark_udf(spark, path, run_id=None, result_type="double"):
"""Returns a Spark UDF that can be used to invoke the python-function formatted model.
Note that parameters passed to the UDF will be forwarded to the model as a DataFrame
where the names are simply ordinals (0, 1, ...).
Example:
predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
df.withColumn("prediction", predict("name", "age")).show()
Args:
spark (SparkSession): a SparkSession object
path (str): A path containing a pyfunc model.
result_type (str): Spark UDF type returned by the model's prediction method. Default double
"""
# Scope Spark import to this method so users don't need pyspark to use non-Spark-related
# functionality.
from mlflow.pyfunc.spark_model_cache import SparkModelCache
from pyspark.sql.functions import pandas_udf
if run_id:
path = tracking._get_model_log_dir(path, run_id)
archive_path = SparkModelCache.add_local_model(spark, path)
def predict(*args):
model = SparkModelCache.get_or_load(archive_path)
schema = {str(i): arg for i, arg in enumerate(args)}
pdf = pandas.DataFrame(schema)
result = model.predict(pdf)
return pandas.Series(result)
return pandas_udf(predict, result_type)
def _copy_file_or_tree(src, dst, dst_dir):
name = os.path.join(dst_dir, os.path.basename(os.path.abspath(src)))
    if dst_dir:
        os.makedirs(os.path.join(dst, dst_dir), exist_ok=True)
if os.path.isfile(src):
shutil.copy(src=src, dst=os.path.join(dst, name))
else:
        shutil.copytree(src=src, dst=os.path.join(dst, name))
return name
def save_model(dst_path, loader_module, data_path=None, code_path=(), conda_env=None,
model=Model()):
"""Export model as a generic python-function model.
Args:
dst_path (str): path where the model is gonna be stored.
loader_module (str): the module to be used to load the model.
data_path (str): path to a file or directory containing model data.
code_path (list[str]): list of paths (file or dir)
contains code dependencies not present in the environment.
every path in the code_path is added to the python path
before the model is loaded.
conda_env (str): path to the conda environment definition (.yml).
This environment will be activated prior to running model code.
Returns:
model config (Servable) containing model info.
:param dst_path:
:param loader_module:
:param data_path:
:param code_path:
:param conda_env:
:param model:
"""
if os.path.exists(dst_path):
raise Exception("Path '{}' already exists".format(dst_path))
os.makedirs(dst_path)
code = None
data = None
env = None
if data_path:
model_file = _copy_file_or_tree(src=data_path, dst=dst_path, dst_dir="data")
data = model_file
if code_path:
for path in code_path:
_copy_file_or_tree(src=path, dst=dst_path, dst_dir="code")
code = "code"
if conda_env:
shutil.copy(src=conda_env, dst=os.path.join(dst_path, "mlflow_env.yml"))
env = "mlflow_env.yml"
add_to_model(model, loader_module=loader_module, code=code, data=data, env=env)
model.save(os.path.join(dst_path, 'MLmodel'))
return model
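# Usage sketch (added for illustration, never called anywhere in this module). It shows how
# the pieces above fit together; the destination path and the "sklearn_iris" loader module,
# data file and code directory are hypothetical, borrowed from the docstring example at the
# top of this file.
def _example_save_model(dst="/tmp/example_pyfunc_model"):
    # The loader module is expected to expose load_pyfunc(data_path) returning an object
    # with a predict(pandas.DataFrame) method.
    return save_model(dst_path=dst,
                      loader_module="sklearn_iris",        # hypothetical loader module
                      data_path="data/model.pkl",          # copied to <dst>/data/model.pkl
                      code_path=["example/sklearn_iris"],  # copied under <dst>/code
                      conda_env="mlflow_env.yml")          # copied as <dst>/mlflow_env.yml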
def log_model(artifact_path, **kwargs):
"""Export the model in python-function form and log it with current mlflow tracking service.
Model is exported by calling @save_model and logs the result with @tracking.log_output_files
"""
with TempDir() as tmp:
local_path = tmp.path(artifact_path)
run_id = tracking.active_run().info.run_uuid
if 'model' in kwargs:
raise Exception("Unused argument 'model'. log_model creates a new model object")
save_model(dst_path=local_path, model=Model(artifact_path=artifact_path, run_id=run_id),
**kwargs)
tracking.log_artifacts(local_path, artifact_path)
def get_module_loader_src(src_path, dst_path):
""" Generate python source of the model loader.
    The generated loader contains a load_pyfunc method with no parameters. It hardcodes the loading
    of the given model into python source so that the exported model has no unnecessary dependencies
    on mlflow or any other configuration file format or parsing library.
:param src_path: current path to the model
:param dst_path: relative or absolute path where the model will be stored
in the deployment environment
:return: python source code of the model loader as string.
"""
conf_path = os.path.join(src_path, "MLmodel")
model = Model.load(conf_path)
if FLAVOR_NAME not in model.flavors:
raise Exception("Format '{format}' not found not in {path}.".format(format=FLAVOR_NAME,
path=conf_path))
conf = model.flavors[FLAVOR_NAME]
update_path = ""
if CODE in conf and conf[CODE]:
src_code_path = os.path.join(src_path, conf[CODE])
dst_code_path = os.path.join(dst_path, conf[CODE])
code_path = ["os.path.abspath('%s')" % x
for x in [dst_code_path] + _get_code_dirs(src_code_path, dst_code_path)]
update_path = "sys.path = {} + sys.path; ".format("[%s]" % ",".join(code_path))
data_path = os.path.join(dst_path, conf[DATA]) if (DATA in conf) else dst_path
return loader_template.format(update_path=update_path, main=conf[MAIN], data_path=data_path)
loader_template = """
import importlib
import os
import sys
def load_pyfunc():
{update_path}return importlib.import_module('{main}').load_pyfunc('{data_path}')
"""
|
#!/usr/bin/env python3
import __future__
import __main__
import _thread
import abc
import aifc
import argparse
import array
import ast
import asynchat
import asyncio
import asyncore
import atexit
import audioop
import base64
import bdb
import binascii
import binhex
import bisect
import builtins
import bz2
import calendar
import cgi
import cgitb
import chunk
import cmath
import cmd
import code
import codecs
import codeop
import collections
import collections.abc
import colorsys
import compileall
import concurrent.futures
import configparser
import contextlib
import contextvars
import copy
import copyreg
import crypt
import csv
import ctypes
import curses
import curses.ascii
import curses.panel
import curses.textpad
import dataclasses
import datetime
import dbm
import decimal
import difflib
import dis
import distutils
import doctest
import email
import ensurepip
import enum
import errno
import faulthandler
import fcntl
import filecmp
import fileinput
import fnmatch
import formatter
import fractions
import ftplib
import functools
import gc
import getopt
import getpass
import gettext
import glob
import graphlib
import grp
import gzip
import hashlib
import heapq
import hmac
import html
import html.entities
import html.parser
import http
import http.client
import http.cookiejar
import http.cookies
import http.server
import imaplib
import imghdr
import imp
import importlib
import inspect
import io
import ipaddress
import itertools
import json
import keyword
import linecache
import locale
import logging
import logging.config
import logging.handlers
import lzma
import mailbox
import mailcap
import marshal
import math
import mimetypes
import mmap
import modulefinder
import msilib
import msvcrt
import multiprocessing
import multiprocessing.shared_memory
import netrc
import nis
import nntplib
import numbers
import operator
import optparse
import os
import os.path
import ossaudiodev
import parser
import pathlib
import pdb
import pickle
import pickletools
import pipes
import pkgutil
import platform
import plistlib
import poplib
import posix
import posixpath
import pprint
import pty
import pwd
import py_compile
import pyclbr
import pydoc
import queue
import quopri
import random
import re
import readline
import reprlib
import resource
import rlcompleter
import runpy
import sched
import secrets
import select
import selectors
import shelve
import shlex
import shutil
import signal
import site
import smtpd
import smtplib
import sndhdr
import socket
import socketserver
import spwd
import sqlite3
import ssl
import stat
import statistics
import string
import stringprep
import struct
import subprocess
import sunau
import symbol
import symtable
import sys
import sysconfig
import syslog
import tabnanny
import tarfile
import telnetlib
import tempfile
import termios
import test
import test.support
import test.support.bytecode_helper
import test.support.script_helper
import test.support.socket_helper
import textwrap
import threading
import time
import timeit
import tkinter
import tkinter.colorchooser
import tkinter.dnd
import tkinter.font
import tkinter.messagebox
import tkinter.scrolledtext
import tkinter.tix
import tkinter.ttk
import token
import tokenize
import trace
import traceback
import tracemalloc
import tty
import turtle
import types
import typing
import unicodedata
import unittest
import unittest.mock
import urllib
import urllib.error
import urllib.parse
import urllib.request
import urllib.response
import urllib.robotparser
import uu
import uuid
import venv
import warnings
import wave
import weakref
import webbrowser
import winreg
import winsound
import wsgiref
import xdrlib
import xml.dom
import xml.dom.minidom
import xml.dom.pulldom
import xml.etree.ElementTree
import xml.parsers.expat
import xml.sax
import xml.sax.handler
import xml.sax.saxutils
import xml.sax.xmlreader
import xmlrpc
import xmlrpc.client
import xmlrpc.server
import zipapp
import zipfile
import zipimport
import zlib
import zoneinfo
|
import getopt
import sys
import string
import os.path
import jsonpickle
import random
from model.contact import Contact
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number=", "file="])
except getopt.GetoptError as err:
    print(err)
    sys.exit(2)
n = 3
f = "data/contact.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, max_length):
symbols = string.ascii_letters + string.digits + string.punctuation + " " * 20
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(max_length))])
# Using special pattern for phone numbers
def random_phone(prefix):
symbols = string.digits + " " * 10 + "(" * 10 + ")" * 10 + "-" * 10
return prefix + "".join([random.choice(symbols) for i in range(1, 21)])
# Using special pattern for emails
def random_email(prefix, max_length):
symbols = string.ascii_letters + string.digits + "_" * 10
username = "".join([random.choice(symbols) for i in range(random.randrange(max_length))])
domain = "".join([random.choice(symbols) for i in range(2, 21)])
extension = "".join([random.choice(string.ascii_letters) for i in range(3)])
return prefix + username + "@" + domain + "." + extension
test_data = [Contact(first_name="", last_name="", address="", home_phone="",
work_phone="", mobile_phone="", secondary_phone="", email="",
email2="", email3="")] + [
Contact(first_name=random_string("firstname", 20),
last_name=random_string("lastname", 20),
address=random_string("address", 50),
home_phone=random_phone("home_phone"),
mobile_phone=random_phone("mobile_phone"),
work_phone=random_phone("work_phone"),
secondary_phone=random_phone("secondary_phone"),
email=random_email("email", 30),
email2=random_email("email2", 30),
email3=random_email("email3", 30)) for i in range(n)]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(test_data))
|
#-*- encoding:utf-8 -*-
#Modules needed to build the model
import tensorflow as tf
import os
import h5py
import numpy as np
def encoder_lstm_layer(layer_inputs,sequence_lengths,num_units,initial_state,is_training = 'True',direction = 'bidirectional'):
    '''
    Build a layer of lstm
    Parameter explanation:
    layer_inputs: [batch_size, T, ?], input of this layer
    sequence_lengths: [batch_size], true length of each input sequence
    num_units: number of hidden nodes in each lstm layer
    initial_state: initial state
    direction: lstm direction of this layer
    '''
    #The environment is set up on GPU, so CudnnLSTM is used
    #CudnnLSTM defaults to time-major, so the input dimensions need to be transposed
inputs = tf.transpose(layer_inputs,[1,0,2])
if direction == 'bidirectional':
cudnnlstm_layer = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers = 1,num_units = num_units//2,direction = direction,kernel_initializer = tf.keras.initializers.Orthogonal())
else:
cudnnlstm_layer = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers = 1,num_units = num_units,direction = direction,kernel_initializer = tf.keras.initializers.Orthogonal())
outputs,state = cudnnlstm_layer(inputs,initial_state = initial_state,sequence_lengths = sequence_lengths)
#Transform the dimensions of the generated outputs
outputs = tf.transpose(outputs,[1,0,2])
return outputs,state
def reshape_pyramidal(inputs,sequence_lengths):
'''
Overlay the input frames so that the entire sequence can be downsampled
    Parameter explanation:
    inputs: [batch_size, T, ?], feature input
    sequence_lengths: [batch_size], the true length of each feature sequence
This is proposed in the "Listen Attend and Spell" paper
'''
shape = tf.shape(inputs)
#Draw three dimensions
batch_size,max_time = shape[0], shape[1]
inputs_dim = inputs.get_shape().as_list()[-1]
    #Zero-pad in case the sequence length is odd and not evenly divisible by 2
pads = [[0,0],[0,tf.floormod(max_time,2)],[0,0]]
padded_inputs = tf.pad(inputs, pads)
#Reshape for frame overlay
concat_outputs = tf.reshape(padded_inputs, (batch_size, -1, inputs_dim * 2))
concat_sequence_lengths = tf.floordiv(sequence_lengths, 2) + tf.floormod(sequence_lengths, 2)
return concat_outputs, concat_sequence_lengths
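#Worked example (illustrative): for inputs of shape [batch_size, 7, 40] the function above
#zero-pads to [batch_size, 8, 40], reshapes to [batch_size, 4, 80], and a true length of 7
#becomes 7 // 2 + 7 % 2 = 4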
def rnn_cell(num_units):
'''
Build a single layer of lstm
num_units:Number of hidden nodes in this layer of lstm
'''
cell = tf.nn.rnn_cell.LSTMCell(num_units)
#cell = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers = 1,num_units = num_units,direction = "unidirectional")
return cell
def attention_cell(decoder_cell,num_units,encoder_outputs,encoder_sequence_lengths):
'''
Encapsulate decoder_cell using high-level api in tensorflow
    Parameter explanation:
    decoder_cell: the lstm cell used in the decoder
    num_units: dimensionality used when computing attention
    encoder_outputs: encoded sequence from the encoder
    encoder_sequence_lengths: the actual length of the encoded sequence from the encoder
'''
attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units = num_units,
memory = encoder_outputs,
memory_sequence_length = encoder_sequence_lengths,
name = 'BahdanauAttention')
#attention_layer_size:After generating the context and concat with the hidden state, it enters a fully connected layer with attention_layer_size
return tf.contrib.seq2seq.AttentionWrapper(cell = decoder_cell,
alignment_history = True,
attention_mechanism = attention_mechanism,
attention_layer_size = 256,
output_attention = False)
def compute_loss(logits,labels,labels_lengths,max_time):
'''
Loss calculation during training
    Parameter explanation:
    logits: [batch_size, T, num_classes]
    labels: [batch_size, T]
    max_time: maximum true length among the labels
    sequence_loss is used, so a mask must be set according to the given sequence lengths
'''
#First set a mask
with tf.variable_scope('Loss'):
target_weights = tf.sequence_mask(lengths = labels_lengths,
maxlen = max_time,
dtype = tf.float32,
name = "loss_mask")
loss = tf.contrib.seq2seq.sequence_loss(logits = logits,
targets = labels,
weights = target_weights,
average_across_timesteps = True,
average_across_batch = True)
return loss
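#Worked example (illustrative): with labels_lengths = [3, 1] and max_time = 4 the mask is
#[[1, 1, 1, 0], [1, 0, 0, 0]], so padded timesteps contribute nothing to the averaged loss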
def dense_to_sparse(tensor,eos_id):
'''
Convert tensor to a specific sparse format
Because when calculating tf.edit_distance, only sparse tensor is received
'''
added_values = tf.cast(tf.fill((tf.shape(tensor)[0],1), eos_id), tensor.dtype)
#Add eos to the entire tensor
concat_tensor = tf.concat((tensor, added_values), axis = -1)
#Find duplicate phonemes
diff = tf.cast(concat_tensor[:,1:] - concat_tensor[:,:-1], tf.bool)
eos_indices = tf.where(tf.equal(concat_tensor, eos_id))
#Find the position of the first eos in each decoded sequence
first_eos = tf.segment_min(eos_indices[:,1],eos_indices[:, 0])
    #Mask out positions at and after the first eos
mask = tf.sequence_mask(first_eos,maxlen = tf.shape(tensor)[1])
indices = tf.where(diff & mask & tf.not_equal(tensor, -1))
values = tf.gather_nd(tensor, indices)
shape = tf.shape(tensor, out_type = tf.int64)
return tf.SparseTensor(indices, values, shape)
def compute_ler(logits,labels,eos_id):
'''
During training, calculate the label error rate for each batch
    Parameter explanation:
    logits: [batch_size, T, num_classes]
    labels: [batch_size, T]
'''
with tf.variable_scope('Ler'):
predicted_ids = tf.to_int32(tf.arg_max(logits,-1))
hypothesis = dense_to_sparse(predicted_ids,eos_id)
truth = dense_to_sparse(labels,eos_id)
label_error_rate = tf.edit_distance(hypothesis, truth, normalize = True)
return label_error_rate
def feature_extract(path):
'''
    Extract features from a file, usually from an h5py file, with key = 'feature'
'''
audio_filename = os.path.abspath(path)
feature_filename = audio_filename.split('.')[0] + '.feat'
f = h5py.File(feature_filename, 'r')
data = f.get('feature')
data = np.array(data)
f.close()
return data
def label_extract(path):
'''
    Extract the label from a file, usually from an h5py file, with key = 'label'
'''
audio_filename = os.path.abspath(path)
label_filename = audio_filename.split('.')[0] + '.label'
f = h5py.File(label_filename,'r')
label = f.get('label')
label = np.array(label)
f.close()
return label
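#Illustrative note (hypothetical path): assuming no other '.' appears in the absolute path,
#feature_extract('data/utt001.wav') reads 'data/utt001.feat' and
#label_extract('data/utt001.wav') reads 'data/utt001.label', since both helpers swap the
#extension of the given audio file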
|
# Generated by Django 3.0.7 on 2020-06-27 20:57
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('entrymeasures', '0003_auto_20200623_2348'),
]
operations = [
migrations.AlterUniqueTogether(
name='entrymeasure',
unique_together={('user', 'date_measure')},
),
]
|
"""Python Script Template."""
from .adversarial_policy import AdversarialPolicy
class JointPolicy(AdversarialPolicy):
"""Given a protagonist and an antagonist policy, combine to give a joint policy."""
def __init__(self, dim_action, action_scale, *args, **kwargs) -> None:
super().__init__(
dim_action=dim_action, action_scale=action_scale, *args, **kwargs
)
def forward(self, state):
"""Forward compute the policy."""
p_dim = self.protagonist_policy.dim_action[0]
a_dim = self.antagonist_policy.dim_action[0]
p_mean, p_scale_tril = self.protagonist_policy(state)
a_mean, a_scale_tril = self.antagonist_policy(state)
p_std = p_scale_tril.diagonal(dim1=-1, dim2=-2)
a_std = a_scale_tril.diagonal(dim1=-1, dim2=-2)
if self.protagonist:
h_mean = p_mean[..., p_dim:]
h_std = p_std[..., p_dim:]
elif self.antagonist:
h_mean = a_mean[..., a_dim:]
h_std = a_std[..., a_dim:]
else:
raise NotImplementedError
if p_dim + a_dim < self.dim_action[0]:
h_mean, h_scale_tril = self.hallucination_policy(state)
h_std = h_scale_tril.diagonal(dim1=-1, dim2=-2)
p_mean, p_std = p_mean[..., :p_dim], p_std[..., :p_dim]
a_mean, a_std = a_mean[..., :a_dim], a_std[..., :a_dim]
return self.stack_policies((p_mean, a_mean, h_mean), (p_std, a_std, h_std))
|
print ' -->> Lordran pack was invoked <<-- '
|
#!/usr/bin/env python
import time
import os
import sys
import numpy as np
import socket
from distributed import Client, LocalCluster
import dask
from collections import Counter
# get our start time
global st
st=time.time()
def test(i,j=10):
# get pid, start time, host and sleep for j seconds
pid=os.getpid()
when=time.time()-st
print("%6d %6.2f" % (pid,when))
time.sleep(j)
back="%s %6.2f %s" % (str(os.getpid()),when,socket.gethostname())
return back
def main():
    #get command line arguments controlling launch
threads=1
workers=8
for x in sys.argv[1:] :
if x.find("threads") > -1 :
z=x.split("=")
threads=int(z[1])
if x.find("workers") > -1 :
z=x.split("=")
workers=int(z[1])
    # launch with either threads and/or workers specified (0 = default)
    if threads == 0 and workers != 0 :
        print("launching %d workers, default threads" % (workers))
        cluster = LocalCluster(n_workers=workers)
    if threads != 0 and workers == 0 :
        print("launching %d threads, default workers" % (threads))
        cluster = LocalCluster(threads_per_worker=threads)
    if threads != 0 and workers != 0 :
        print("launching %d workers with %d threads" % (workers,threads))
        cluster = LocalCluster(n_workers=workers,threads_per_worker=threads)
    if threads == 0 and workers == 0 :
        print("launching default workers and threads")
        cluster = LocalCluster()
print(cluster)
client = Client(cluster)
print(client)
# do serial
# NOTE: it is possible to launch an asynchronous client
# but here we just do serial synchronous. See:
# https://distributed.dask.org/en/latest/asynchronous.html
result = []
print(" pid Start T")
for i in range (0,5):
j=2
result.append(client.submit(test,i,j).result())
print(result)
print (Counter(result))
#do parallel
n=15
np.random.seed(1234)
x=np.random.random(n)*20
#set to uniform nonzero to get uniform run times for each task
x=np.ones(n)*10
print(x)
print(" pid Start T")
L=client.map(test,range(n),x)
mylist=client.gather(L)
pids=[]
for m in mylist:
x=m.split()[0]
pids.append(x)
print(m)
pids=sorted(set(pids))
print(len(pids),pids)
if __name__ == '__main__':
main()
|
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
from file_processing import write_file, read_lines
app = Flask(__name__)
CORS(app)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/save_result', methods = ['POST'])
def save_result():
params = request.get_json()
write_file(f"{params['player']} {params['bet']} {params['result']}")
return jsonify(status='ok')
@app.route('/show_results')
def show_results():
return render_template('results.html', lines = read_lines())
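# Example request (illustrative; the field names match save_result above):
#   app.test_client().post('/save_result',
#                          json={'player': 'alice', 'bet': 10, 'result': 'win'})
# passes the line "alice 10 win" to write_file() and returns {"status": "ok"}.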
|
import subprocess
from acquisition import AcquisitionStep
class AcquisitionForkStep(AcquisitionStep):
"""
Class to describe a fork acquisition step.
Attributes:
command_template (string): command template to execute
"""
command_template = None
def add_extra_arguments(self, parser):
parser.add_argument('--command-template', action='store',
default=None,
help='command template to execute ({PATH} string'
'will be replaced by the file fullpath, '
'{PLUGIN_DIR} by the full plugin dir path')
def init(self):
if self.args.command_template is None:
raise Exception('you have to set a command-template')
if "{PATH}" not in self.args.command_template:
raise Exception('{PATH} is not in the command template')
def get_command(self, filepath):
return self.args.command_template.replace('{PATH}', filepath).\
replace('{PLUGIN_DIR}', self.get_plugin_directory_path())
def process(self, xaf):
cmd = self.get_command(xaf.filepath)
self.info("Calling %s ...", cmd)
return_code = subprocess.call(cmd, shell=True)
if return_code != 0:
self.warning("%s returned a bad return code: %i",
cmd, return_code)
return (return_code == 0)
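# Illustrative configuration (hypothetical command): launching the step with
#   --command-template="gzip {PATH}"
# makes process() run "gzip <incoming file path>" for each incoming file and report success
# only when the command exits with return code 0.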
|
from .check import Check
from .is_cls import Is
__all__ = ["Check", "Is"]
|
"""@package config
Contains all config files necessary for simulator
Rigid body configuration data
"""
from config.protobuf import *
# Rigid Bodies ---------------------------------------------------------------------------------------------------------
# Single Joint Robot
# Base Link
points_0 = [Point(x=0, y=0, z=0), Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_0 = [Line(point_0=points_0[1], point_1=points_0[2])]
graphics_0 = Graphics(points=points_0, lines=lines_0)
joints_0 = [Joint(type=JointType.REVOLUTE, index=0)]
rigid_body_0 = RigidBody(index=0, name='base', graphics=graphics_0, is_fixed=True, joints=joints_0)
# First Link
points_1 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_1 = [Line(point_0=points_1[0], point_1=points_1[1])]
graphics_1 = Graphics(points=points_1, lines=lines_1)
joints_1 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_1 = RigidBody(index=1, name='first', graphics=graphics_1, joints=joints_1)
# Second Link
points_2 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_2 = [Line(point_0=points_2[0], point_1=points_2[1])]
graphics_2 = Graphics(points=points_2, lines=lines_2)
joints_2 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_2 = RigidBody(index=2, name='second', graphics=graphics_2, joints=joints_2)
# Third Link
points_3 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_3 = [Line(point_0=points_3[0], point_1=points_3[1])]
graphics_3 = Graphics(points=points_3, lines=lines_3)
joints_3 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_3 = RigidBody(index=3, name='third', graphics=graphics_3, joints=joints_3)
# Fourth Link
points_4 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_4 = [Line(point_0=points_4[0], point_1=points_4[1])]
graphics_4 = Graphics(points=points_4, lines=lines_4)
joints_4 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_4 = RigidBody(index=4, name='fourth', graphics=graphics_4, joints=joints_4)
# Fifth Link
points_5 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_5 = [Line(point_0=points_5[0], point_1=points_5[1])]
graphics_5 = Graphics(points=points_5, lines=lines_5)
joints_5 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_5 = RigidBody(index=5, name='fifth', graphics=graphics_5, joints=joints_5)
# Sixth Link
points_6 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_6 = [Line(point_0=points_6[0], point_1=points_6[1])]
graphics_6 = Graphics(points=points_6, lines=lines_6)
joints_6 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_6 = RigidBody(index=6, name='sixth', graphics=graphics_6, joints=joints_6)
# Seventh Link
points_7 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_7 = [Line(point_0=points_7[0], point_1=points_7[1])]
graphics_7 = Graphics(points=points_7, lines=lines_7)
joints_7 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_7 = RigidBody(index=7, name='seventh', graphics=graphics_7, joints=joints_7)
# Eighth Link
points_8 = [Point(x=-100, y=0, z=0), Point(x=100, y=0, z=0)]
lines_8 = [Line(point_0=points_8[0], point_1=points_8[1])]
graphics_8 = Graphics(points=points_8, lines=lines_8)
joints_8 = [Joint(type=JointType.REVOLUTE, index=0, xfm=Xfm(tx=-100)),
Joint(type=JointType.REVOLUTE, index=1, xfm=Xfm(tx=100))]
rigid_body_8 = RigidBody(index=8, name='eight', graphics=graphics_8, joints=joints_8)
# Rigid Body System ----------------------------------------------------------------------------------------------------
rigid_bodies = [rigid_body_0, rigid_body_1, rigid_body_2, rigid_body_3,
rigid_body_4, rigid_body_5, rigid_body_6, rigid_body_7, rigid_body_8]
mates = [
Mate(index=0,
bearing_rigid_body_index=0, bearing_joint_index=0,
shaft_rigid_body_index=1, shaft_joint_index=0),
Mate(index=1,
bearing_rigid_body_index=1, bearing_joint_index=1,
shaft_rigid_body_index=2, shaft_joint_index=0),
Mate(index=2,
bearing_rigid_body_index=1, bearing_joint_index=1,
shaft_rigid_body_index=3, shaft_joint_index=0),
Mate(index=3,
bearing_rigid_body_index=3, bearing_joint_index=1,
shaft_rigid_body_index=4, shaft_joint_index=0),
Mate(index=4,
bearing_rigid_body_index=2, bearing_joint_index=1,
shaft_rigid_body_index=5, shaft_joint_index=0),
Mate(index=5,
bearing_rigid_body_index=3, bearing_joint_index=1,
shaft_rigid_body_index=6, shaft_joint_index=0),
Mate(index=6,
bearing_rigid_body_index=2, bearing_joint_index=1,
shaft_rigid_body_index=7, shaft_joint_index=0),
Mate(index=7,
bearing_rigid_body_index=8, bearing_joint_index=1,
shaft_rigid_body_index=0, shaft_joint_index=0)]
sample_robot = RigidBodySystem(index=0, name='robot', rigid_bodies=rigid_bodies, mates=mates)
|
import sys, os
from time import sleep
import numpy
############
projectroot=os.path.split(os.getcwd())[0]
sys.path.append(projectroot)
sys.path.append(os.path.join(projectroot,'QtForms'))
sys.path.append(os.path.join(projectroot,'AuxPrograms'))
sys.path.append(os.path.join(projectroot,'OtherApps'))
from CreateExperimentApp import expDialog
from CalcFOMApp import calcfomDialog
from VisualizeDataApp import visdataDialog
from VisualizeBatchFcns import choosexyykeys
from fcns_io import *
from fcns_ui import *
from SaveImagesApp import *
from DBPaths import *
class MainMenu(QMainWindow):
def __init__(self, previousmm, execute=True):#, TreeWidg):
super(MainMenu, self).__init__(None)
self.setWindowTitle('HTE Experiment and FOM Data Processing')
self.expui=expDialog(self, title='Create/Edit an Experiment')
self.calcui=calcfomDialog(self, title='Calculate FOM from EXP', guimode=False)
self.visdataui=visdataDialog(self, title='Visualize Raw, Intermediate and FOM data', GUIMODE=False)
def visui_exec(self, show=True):
if self.visdataui is None:
self.visdataui=visdataDialog(self, title='Visualize Raw, Intermediate and FOM data')
if show:
self.visdataui.show()
def visexpana(self, anafiledict=None, anafolder=None, experiment_path=None, show=True):
self.visui_exec(show=show)
if not (anafiledict is None or anafolder is None):
self.visdataui.importana(anafiledict=anafiledict, anafolder=anafolder)
elif not experiment_path is None:
self.visdataui.importexp(experiment_path=experiment_path)
# def openwindow():
# form.show()
# form.setFocus()
# form.calcui.show()
# mainapp.exec_()
# expui=form.expui
# visdataui=form.visdataui
def select_ana_fcn(calcui, analabel):
calcui.FOMProcessNamesComboBox.setCurrentIndex(0)
cb = calcui.AnalysisNamesComboBox
# print cb.count()
for i in range(1, int(cb.count())):
# print (str(cb.itemText(i)).partition('(')[0].partition('__')[2])
if (str(cb.itemText(i)).partition('(')[0].partition('__')[2]) == analabel:
cb.setCurrentIndex(i)
calcui.getactiveanalysisclass()
return True
return False
def select_procana_fcn(calcui, analabel):
cb = calcui.FOMProcessNamesComboBox
# print cb.count()
for i in range(1, int(cb.count())):
# print (str(cb.itemText(i)).partition('(')[0].partition('__')[2])
if (str(cb.itemText(i)).partition('(')[0].partition('__')[2]) == analabel:
cb.setCurrentIndex(i)
calcui.getactiveanalysisclass()
return True
return False
def updateanalysisparams(calcui, paramd):
calcui.analysisclass.params.update(paramd)
calcui.processeditedparams()
# calcui.analysisclass.processnewparams(calcFOMDialogclass=calcui)
def select_techtype(searchstr):
qlist = calcui.TechTypeButtonGroup.buttons()
typetechfound = False
for button in qlist:
if searchstr in str(button.text()).strip():
button.setChecked(True)
typetechfound = True
break
calcui.fillanalysistypes(calcui.TechTypeButtonGroup.checkedButton())
    if not typetechfound:
        calcui.exec_()
        raise ValueError('technique type matching "%s" not found' % searchstr)
def plot_new_fom(visdataui, fom_name):
cb = visdataui.fomplotchoiceComboBox
# print cb.count()
for i in range(0, int(cb.count())):
# print(str(cb.itemText(i)).partition('(')[0].partition('__')[2])
if str(cb.itemText(i)) == fom_name:
cb.setCurrentIndex(i)
visdataui.filterandplotfomdata()
return True
return False
runfoldername = None
expsaveextension = '.done'
anasaveextension = '.run'
# providing these paths will skip the generation of the exp/ana
expname = None
expdestchoice = 'eche'
anadestchoice = 'eche'
# explst = [
# ('L:/processes/experiment/eche/20190501.125714.copied-20190501221215867PDT/20190501.125714.exp', (0.48, 1.0), (0.5, 1.0)), # 7
# ('L:/processes/experiment/eche/20190416.140535.copied-20190416220707052PDT/20190416.140535.exp', (0.48, 1.0), (0.5, 1.0)), # 7
# ('L:/processes/experiment/eche/20190415.115538.copied-20190415220556175PDT/20190415.115538.exp', (0.48, 1.0), (0.5, 1.0)), # 7
# ('L:/processes/experiment/eche/20180411.141724.copied-20180411220901744PDT/20180411.141724.exp', (0.48, 1.0), (0.5, 1.0)), # 3
# ('L:/processes/experiment/eche/20180411.152214.copied-20180411220901744PDT/20180411.152214.exp', (0.48, 1.0), (0.5, 1.0)), # 3
# ('L:/processes/experiment/eche/20180411.154249.copied-20180411220901744PDT/20180411.154249.exp', (0.48, 1.0), (0.5, 1.0)), # 3
# ('L:/processes/experiment/eche/20170828.170010.copied-20170828220902243PDT/20170828.170010.exp', (0.48, 0.9), (0.5, 0.9)), # 9
# ('L:/processes/experiment/eche/20170828.165552.copied-20170828220902243PDT/20170828.165552.exp', (0.48, 0.9), (0.5, 0.9)), # 9
# ('L:/processes/experiment/eche/20170828.165831.copied-20170828220902243PDT/20170828.165831.exp', (0.48, 0.9), (0.5, 0.9)), # 9
# ('L:/processes/experiment/eche/20170823.145121.copied-20170823194838230PDT/20170823.145121.exp', (0.32, 0.6), (0.35, 0.65)), # 13
# ('L:/processes/experiment/eche/20170823.143138.copied-20170823194838230PDT/20170823.143138.exp', (0.32, 0.6), (0.35, 0.65)), # 13
# ('L:/processes/experiment/eche/20170823.151056.copied-20170823194838230PDT/20170823.151056.exp', (0.32, 0.6), (0.35, 0.65)) # 13
# ]
# explst = [
# ('L:/processes/experiment/eche/20190814.210551.done/20190814.210551.exp', (0.32, 0.6), (0.35, 0.65)), # 13
# ]
# explst = [
# ('L:/processes/experiment/eche/20190819.120802.done/20190819.120802.exp', (0.48, 1.0), (0.5, 1.0)),
# ('L:/processes/experiment/eche/20190819.120931.done/20190819.120931.exp', (0.48, 1.0), (0.5, 1.0))
# ]
# 9/20 update TRI dataset
# explst = [
# ('L:/processes/experiment/eche/20161208.093513.copied-20161208221238642PST/20161208.093513.exp',
# (0.32, 0.6), (0.35, 0.65)),
# ('L:/processes/experiment/eche/20160705.085609.copied-20160705220726116PDT/20160705.085609.exp',
# (0.32, 0.6), (0.35, 0.65)),
# ('L:/processes/experiment/eche/20170301.101901.copied-20170308132955800PST/20170301.101901.exp',
# (0.32, 0.6), (0.35, 0.65)),
# ('L:/processes/experiment/eche/20190520.161947.copied-20190520221254058PDT/20190520.161947.exp',
# (0.32, 0.6), (0.35, 0.65)),
# ('L:/processes/experiment/eche/20170823.145121.copied-20170823194838230PDT/20170823.145121.exp',
# (0.32, 0.6), (0.35, 0.65)),
# ]
# 9/20 reprocess 4098
# explst = [
# ('L:/processes/experiment/eche/20190920.110322.done/20190920.110322.exp',
# (0.32, 0.6), (0.35, 0.65)),
# ]
# 9/20 reprocess pH < 13
# explst = [
# ('L:/processes/experiment/eche/20160314.112931.copied-20160314220326441PDT/20160314.112931.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20160721.085158.copied-20160727063023665PDT/20160721.085158.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190520.162751.copied-20190520221254058PDT/20190520.162751.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190304.154816.copied-20190304221211247PST/20190304.154816.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20180411.141724.copied-20180411220901744PDT/20180411.141724.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20170301.110115.copied-20170308132955800PST/20170301.110115.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20180411.154249.copied-20180411220901744PDT/20180411.154249.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20180411.152214.copied-20180411220901744PDT/20180411.152214.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190819.120802.copied-20190821054206716PDT/20190819.120802.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190913.110534.copied-20190913220646905PDT/20190913.110534.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190819.120931.copied-20190821054206716PDT/20190819.120931.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20161114.153016.copied-20161114220507745PST/20161114.153016.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20160314.151855.copied-20160314220326441PDT/20160314.151855.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190304.151422.copied-20190304221211247PST/20190304.151422.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20160720.163219.copied-20160727063023665PDT/20160720.163219.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20170228.142848.copied-20170301220342337PST/20170228.142848.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20170328.101615.copied-20170328220334394PDT/20170328.101615.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190920.225000.done/20190920.225000.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190430.140031.copied-20190430220754109PDT/20190430.140031.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190812.142046.copied-20190814002507232PDT/20190812.142046.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190812.140146.copied-20190814002507232PDT/20190812.140146.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20190920.163109.done/20190920.163109.exp', (0.48, 1), (0.5, 1)),
# ('L:/processes/experiment/eche/20160308.105918.copied-20160308220226709PST/20160308.105918.exp', (0.48, 0.9), (0.5, 0.9)),
# ('L:/processes/experiment/eche/20161114.133636.copied-20161114220507745PST/20161114.133636.exp', (0.48, 0.9), (0.5, 0.9)),
# ('L:/processes/experiment/eche/20190520.161823.copied-20190520221254058PDT/20190520.161823.exp', (0.48, 0.9), (0.5, 0.9)),
# ('L:/processes/experiment/eche/20170828.165831.copied-20170828220902243PDT/20170828.165831.exp', (0.48, 0.9), (0.5, 0.9)),
# ('L:/processes/experiment/eche/20190920.162750.done/20190920.162750.exp', (0.48, 0.9), (0.5, 0.9)),
# ('L:/processes/experiment/eche/20160418.172947.copied-20160418220703150PDT/20160418.172947.exp', (0.32, 0.6), (0.35, 0.65)),
# ('L:/processes/experiment/eche/20170823.143138.copied-20170823194838230PDT/20170823.143138.exp', (0.32, 0.6), (0.35, 0.65)),
# ('L:/processes/experiment/eche/20190814.210551.copied-20190816111512496PDT/20190814.210551.exp', (0.32, 0.6), (0.35, 0.65)),
# ('L:/processes/experiment/eche/20190920.163245.done/20190920.163245.exp', (0.32, 0.6), (0.35, 0.65))
# ]
explst = [
('L:/processes/experiment/eche/20190920.234643.done/20190920.234643.exp', (0.48, 0.9), (0.5, 0.9)),
('L:/processes/experiment/eche/20190920.235421.done/20190920.235421.exp', (0.48, 1.0), (0.5, 1.0))
]
for exp, lim1, lim2 in explst:
mainapp = QApplication(sys.argv)
form = MainMenu(None)
calcui = form.calcui
visdataui=form.visdataui
try:
calcui.importexp(exppath=exp)
select_techtype('CP3')
select_procana_fcn(calcui, '')
select_ana_fcn(calcui, 'Etaave')
calcui.analysisclass.params['num_std_dev_outlier'] = 1.5
calcui.processeditedparams()
calcui.analyzedata()
select_techtype('CP3')
select_procana_fcn(calcui, 'FOM_Merge_PlatemapComps')
calcui.analysisclass.params['select_ana'] = 'ana__1'
calcui.processeditedparams()
calcui.analyzedata()
select_techtype('CP3')
select_procana_fcn(calcui, 'CDEF')
calcui.analysisclass.params['select_ana'] = 'ana__2'
calcui.processeditedparams()
calcui.batch_process_allsubspace()
select_techtype('CP4')
select_procana_fcn(calcui, '')
select_ana_fcn(calcui, 'Etaave')
calcui.analysisclass.params['num_std_dev_outlier'] = 1.5
calcui.processeditedparams()
calcui.analyzedata()
select_techtype('CP4')
select_procana_fcn(calcui, 'FOM_Merge_PlatemapComps')
calcui.analysisclass.params['select_ana'] = 'ana__18'
calcui.processeditedparams()
calcui.analyzedata()
select_techtype('CP4')
select_procana_fcn(calcui, 'CDEF')
calcui.analysisclass.params['select_ana'] = 'ana__19'
calcui.processeditedparams()
calcui.batch_process_allsubspace()
anasavefolder = calcui.saveana(
dontclearyet=True, anatype=anadestchoice, rundone=anasaveextension)
calcui.viewresult(anasavefolder=anasavefolder, show=False)
comboind_strlist=[]
for i in range(1, visdataui.numStdPlots+1):
visdataui.stdcsvplotchoiceComboBox.setCurrentIndex(i)
comboind_strlist+=[(i, str(visdataui.stdcsvplotchoiceComboBox.currentText()))]
batchidialog=saveimagesbatchDialog(visdataui, comboind_strlist)
batchidialog.plotstyleoverrideCheckBox.setChecked(1)
inds=numpy.where(numpy.logical_not(numpy.isnan(visdataui.fomplotd['fom'])))[0]
if len(inds)>0:
samplestoplot=list(visdataui.fomplotd['sample_no'][inds])
filterinds=[ind for ind, smp in enumerate(visdataui.fomplotd['sample_no']) if smp in samplestoplot]
for k in visdataui.fomplotd.keys():
if isinstance(visdataui.fomplotd[k], numpy.ndarray):
visdataui.fomplotd[k]=visdataui.fomplotd[k][filterinds]
for i in range(1, 35):
visdataui.stdcsvplotchoiceComboBox.setCurrentIndex(i)
visdataui.numcompintervalsSpinBox.setValue(10)
if i<18:
vmin, vmax = lim1
else:
vmin, vmax = lim2
visdataui.vminmaxLineEdit.setText('%.3f,%.3f' %(vmin, vmax))
if i == 2 or i == 19:
continue
if i==1 or i==18:
filenamesearchlist=['plate_id']
else:
filenamesearchlist=['code__-1']
filenamesearchlist=[s.strip() for s in filenamesearchlist if (len(s.strip())>0) and s!='&']
if len(filenamesearchlist)==0:
filenamesearchlist=None
else:
filenamesearchlist=[[sv.strip() for sv in s.split('&') if len(sv.strip())>0] for s in filenamesearchlist if len(s.strip())>0]
visdataui.plot_preparestandardplot(loadstyleoptions=True)# or logic for ,-delim and and logic within each or block with &-delim
visdataui.savefigs(save_all_std_bool=True, batchidialog=batchidialog, filenamesearchlist=filenamesearchlist, lastbatchiteration=(i==32))#for std plots all foms will be from same ana__ and prepend str will be filled in automatically
visdataui.close()
sleep(0.5)
calcui.close()
sleep(0.5)
mainapp.quit()
sleep(0.5)
del visdataui
del calcui
del mainapp
except:
visdataui.close()
sleep(0.5)
calcui.close()
sleep(0.5)
mainapp.quit()
sleep(0.5)
del visdataui
del calcui
del mainapp
|
#!/usr/bin/env python3
import wpilib
import ctre
from magicbot import MagicRobot
from robotpy_ext.autonomous.selector import AutonomousModeSelector
from networktables import NetworkTables
from networktables.util import ntproperty
from rev.color import ColorSensorV3, ColorMatch
from components import swervedrive, swervemodule, shooter, wof
from common import color_sensor, vision
from collections import namedtuple
# Get the config preset from the swervemodule
ModuleConfig = swervemodule.ModuleConfig
class MyRobot(MagicRobot):
"""
After creating low-level components like "shooter", use component's name and an underscore
to inject objects to the component.
e.g.
    Using a variable annotation like "shooter_beltMotor: ctre.WPI_VictorSPX" declares the type of the variable.
    When beltMotor is called from the shooter component, it's going to be a VictorSPX object.
    Using the equal sign for variable declaration like "shooter_beltMotor = ctre.WPI_VictorSPX(11)" creates the actual object.
    When beltMotor is called from the shooter component, it's going to be a VictorSPX at CAN port 11.
Use the equal sign (mostly) in the #createObjects function so they can be correctly injected to their parent components.
For more info refer to https://robotpy.readthedocs.io/en/stable/frameworks/magicbot.html
"""
# Create low-level object
drive: swervedrive.SwerveDrive
shooter: shooter.Shooter
wof: wof.WheelOfFortune
frontLeftModule: swervemodule.SwerveModule
frontRightModule: swervemodule.SwerveModule
rearLeftModule: swervemodule.SwerveModule
rearRightModule: swervemodule.SwerveModule
# Create configs for each module. This is before #createObjects because modules need these configs to be initialized.
frontLeftModule_cfg = ModuleConfig(sd_prefix='FrontLeft_Module', zero=2.97, inverted=True, allow_reverse=True)
frontRightModule_cfg = ModuleConfig(sd_prefix='FrontRight_Module', zero=2.69, inverted=False, allow_reverse=True)
rearLeftModule_cfg = ModuleConfig(sd_prefix='RearLeft_Module', zero=0.18, inverted=True, allow_reverse=True)
rearRightModule_cfg = ModuleConfig(sd_prefix='RearRight_Module', zero=4.76, inverted=False, allow_reverse=True)
    # Declare motors for the shooter component
shooter_leftShooterMotor: ctre.WPI_VictorSPX
shooter_rightShooterMotor: ctre.WPI_VictorSPX
shooter_intakeMotor: ctre.WPI_VictorSPX
shooter_beltMotor: ctre.WPI_VictorSPX
# Create common components
vision: vision.Vision
colorSensor: color_sensor.ColorSensor
def createObjects(self):
"""
This is where all the components are actually created with "=" sign.
Components with a parent prefix like "shooter_" will be injected.
"""
# SmartDashboard
self.sd = NetworkTables.getTable('SmartDashboard')
# Gamepad
self.gamempad = wpilib.Joystick(0)
self.gamempad2 = wpilib.Joystick(1)
# Drive Motors
self.frontLeftModule_driveMotor = ctre.WPI_VictorSPX(5)
self.frontRightModule_driveMotor = ctre.WPI_VictorSPX(8)
self.rearLeftModule_driveMotor = ctre.WPI_VictorSPX(4)
self.rearRightModule_driveMotor = ctre.WPI_VictorSPX(9)
# Rotate Motors
self.frontLeftModule_rotateMotor = ctre.WPI_VictorSPX(3)
self.frontRightModule_rotateMotor = ctre.WPI_VictorSPX(14)
self.rearLeftModule_rotateMotor = ctre.WPI_VictorSPX(2)
self.rearRightModule_rotateMotor = ctre.WPI_VictorSPX(15)
# Encoders
self.frontLeftModule_encoder = wpilib.AnalogInput(0)
self.frontRightModule_encoder = wpilib.AnalogInput(3)
self.rearLeftModule_encoder = wpilib.AnalogInput(1)
self.rearRightModule_encoder = wpilib.AnalogInput(2)
# Shooter
self.shooter_leftShooterMotor = ctre.WPI_VictorSPX(6)
self.shooter_rightShooterMotor = ctre.WPI_VictorSPX(7)
self.shooter_beltMotor = ctre.WPI_VictorSPX(11)
self.shooter_intakeMotor = ctre.WPI_VictorSPX(0)
# Wheel of Fortune
self.wof_motor = ctre.WPI_VictorSPX(13)
# Climber
self.climbingMotor = ctre.WPI_VictorSPX(10)
self.hookMotor = ctre.WPI_VictorSPX(1)
# Color Sensor
self.colorSensor = color_sensor.ColorSensor()
# Vision
self.vision = vision.Vision()
# Limit Switch
self.switch = wpilib.DigitalInput(0)
# PDP
self.pdp = wpilib.PowerDistributionPanel(0)
def disabledPeriodic(self):
# Update the dashboard, even when the robot is disabled.
self.update_sd()
def autonomousInit(self):
# Reset the drive when the auto starts.
self.drive.flush()
self.drive.threshold_input_vectors = True
def autonomous(self):
# For auto, use MagicBot's auto mode.
# This will load the ./autonomous folder.
super().autonomous()
def teleopInit(self):
# Reset the drive when the teleop starts.
self.drive.flush()
self.drive.squared_inputs = True
self.drive.threshold_input_vectors = True
def move(self, x, y, rcw):
"""
        This function is meant to be used by teleOp.
:param x: Velocity in x axis [-1, 1]
:param y: Velocity in y axis [-1, 1]
:param rcw: Velocity in z axis [-1, 1]
"""
if self.gamempad.getRawButton(3):
# If the button is pressed, lower the rotate speed.
rcw *= 0.7
self.drive.move(x, y, rcw)
def teleopPeriodic(self):
# Drive
self.move(self.gamempad.getRawAxis(5), self.gamempad.getRawAxis(4), self.gamempad.getRawAxis(0))
# Lock
if self.gamempad.getRawButton(1):
self.drive.request_wheel_lock = True
# Vectoral Button Drive
if self.gamempad.getPOV() == 0:
self.drive.set_raw_fwd(-0.35)
elif self.gamempad.getPOV() == 180:
self.drive.set_raw_fwd(0.35)
elif self.gamempad.getPOV() == 90:
self.drive.set_raw_strafe(0.35)
elif self.gamempad.getPOV() == 270:
self.drive.set_raw_strafe(-0.35)
# Climber
if self.gamempad2.getRawButton(1):
self.climbingMotor.set(1)
else:
self.climbingMotor.set(0)
# Hook
if self.gamempad2.getRawAxis(5) < 0 and not self.switch.get():
self.hookMotor.set(self.gamempad2.getRawAxis(5))
elif self.gamempad2.getRawAxis(5) > 0:
self.hookMotor.set(self.gamempad2.getRawAxis(5))
else:
self.hookMotor.set(0)
# Shooter
if self.gamempad.getRawAxis(3) > 0:
self.shooter.shoot()
elif self.gamempad.getRawButton(6):
self.shooter.align()
elif self.gamempad.getRawButton(5) or self.gamempad2.getRawAxis(2) > 0:
self.shooter.unload()
elif self.gamempad.getRawAxis(2) > 0 or self.gamempad2.getRawAxis(3) > 0:
self.shooter.intake()
else:
self.shooter.stop()
# WoF
if self.gamempad2.getRawButton(3):
self.wof.handleFirstStage()
elif self.gamempad2.getRawButton(2):
self.wof.handleSecondStage()
elif self.gamempad2.getRawButton(4):
self.wof.reset()
elif self.gamempad2.getRawButton(5):
self.wof.manualTurn(1)
elif self.gamempad2.getRawButton(6):
self.wof.manualTurn(-1)
else:
self.wof.manualTurn(0)
# Update smartdashboard
self.update_sd()
def update_sd(self):
"""
Calls each component's own update function
and puts data to the smartdashboard.
"""
self.sd.putNumber('Climb_Current_Draw', self.pdp.getCurrent(10))
self.drive.update_smartdash()
self.colorSensor.updateSD()
self.wof.updateSD()
self.vision.updateTable()
if __name__ == "__main__":
wpilib.run(MyRobot)
|
# N = int(input())
# A = [0] * N
# B = [0] * N
# for k in range(N):
# A[k] = int(input())
# for k in range(N):
# B[k] = A[k]
#
# print(B)
#
def array_search(A: list, N: int, x: int):
""" Осуществляет поиск числа x в массиве A
от 0 до N-1 индекса включительно.
Возвращает индекс элемента x в массиве A.
Или -1, если такого нет.
Если в массиве несколько одинаковых элементов,
равных x, то вернуть индекс первого по счёту.
:param A:
:param N:
:param x:
:return:
"""
for k in range(N):
if A[k] == x:
return k
return -1
def test_array_search():
A1 = [1, 2, 3, 4, 5]
m = array_search(A1, 5, 8)
if m == -1:
print("#test1 - ok")
else:
print("#test1 - false")
A2 = [-1, -2, -3, -4, -5]
m = array_search(A2, 5, -3)
if m == 2:
print("#test2 - ok")
else:
print("#test2 - false")
A3 = [10, 20, 30, 10, 10]
m = array_search(A3, 5, 10)
if m == 0:
print("#test3 - ok")
else:
print("#test3 - false")
test_array_search()
|
"""
SPDX-License-Identifier: Apache-2.0
Copyright 2021 Sergio Correia (scorreia@redhat.com), Red Hat, Inc.
"""
import json as json_module
from typing import IO, Any, Dict, List, Union
JSONType = Union[str, int, float, bool, None, Dict[str, Any], List[Any]]
_list_types = [list, tuple]
try:
from sqlalchemy.engine.row import Row # sqlalchemy >= 1.4.
_list_types.append(Row)
except ModuleNotFoundError:
try:
from sqlalchemy.engine import RowProxy
_list_types.append(RowProxy)
except ModuleNotFoundError:
pass
def bytes_to_str(data: Any) -> Any:
if isinstance(data, (bytes, bytearray)):
data = data.decode("utf-8")
elif isinstance(data, dict):
for _k, _v in data.items():
data[_k] = bytes_to_str(_v)
elif isinstance(data, tuple(_list_types)):
_l = list(data)
for _k, _v in enumerate(_l):
_l[_k] = bytes_to_str(_v)
data = _l
return data
def dumps(obj: JSONType, **kwargs: Any) -> str:
try:
ret = json_module.dumps(obj, **kwargs)
except TypeError:
        # dumps() from the built-in json module does not work with bytes,
        # so let's convert those to str if we get a TypeError exception.
ret = json_module.dumps(bytes_to_str(obj), **kwargs)
return ret
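# Minimal sketch (illustrative; this helper is not used elsewhere in the module). It
# demonstrates the TypeError fallback above by serializing a payload that contains bytes.
def _example_dumps_bytes() -> str:
    # The stdlib encoder rejects b"value", so dumps() retries with the bytes_to_str() copy.
    return dumps({"key": b"value"})  # -> '{"key": "value"}'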
def dump(obj: JSONType, fp: IO[str], **kwargs: Any) -> None:
try:
json_module.dump(obj, fp, **kwargs)
except TypeError:
        # dump() from the built-in json module does not work with bytes,
        # so let's convert those to str if we get a TypeError exception.
json_module.dump(bytes_to_str(obj), fp, **kwargs)
def load(fp: Any, **kwargs: Any) -> Any:
return json_module.load(fp, **kwargs)
def loads(s: Union[str, bytes], **kwargs: Any) -> Any:
return json_module.loads(s, **kwargs)
# JSON pickler that fulfills SQLAlchemy requirements, from
# social-storage-sqlalchemy.
# https://github.com/python-social-auth/social-storage-sqlalchemy/commit/39d129
class JSONPickler:
"""JSON pickler wrapper around json lib since SQLAlchemy invokes
dumps with extra positional parameters"""
@classmethod
def dumps(cls, value: JSONType, *_args: Any, **_kwargs: Any) -> str:
# pylint: disable=unused-argument
"""Dumps the python value into a JSON string"""
return dumps(value)
@classmethod
def loads(cls, value: Union[str, bytes]) -> Any:
"""Parses the JSON string and returns the corresponding python value"""
return loads(value)
|
import logging
import os
from configuration_overrider.abstract_overrider import AbstractOverrider
log = logging.getLogger(__name__)
class EnvironmentOverrider(AbstractOverrider):
def get(self, key) -> str:
log.info(f"[get|in] ({key})")
result = None
try:
result = os.environ[key]
except KeyError as x:
log.debug(f"environment variable not found: {key}", exc_info=x)
log.info(f"[get|out] => {result if result is not None else 'None'}")
return result
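# Usage sketch (illustrative; "SOME_ENV_VAR" is a hypothetical variable name):
#   EnvironmentOverrider().get("SOME_ENV_VAR")
# returns the variable's value, or None when it is not set (the KeyError is logged
# at debug level).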
|
# pylint: skip-file
# flake8: noqa
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
import ruamel.yaml as yaml
from ansible.module_utils.basic import AnsibleModule
|
import warnings
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from gisutils.projection import get_proj_str, get_authority_crs
from gisutils.projection import project as project_fn
from gisutils.raster import get_values_at_points, get_raster_crs, write_raster
from gisutils.shapefile import df2shp, shp2df, get_shapefile_crs
def __getattr__(name):
if name == 'project':
warnings.warn("The 'project' module was renamed to 'projection' "
"to avoid confusion with the project() function.",
DeprecationWarning)
return project_fn
raise AttributeError('No module named ' + name)
|
from lenstronomy.Plots.model_plot import ModelPlot
import numpy as np
from lenstronomywrapper.LensSystem.LensSystemExtensions.chain_post_processing import ChainPostProcess
from lenstronomywrapper.LensSystem.LensSystemExtensions.lens_maps import ResidualLensMaps
import random
class MCMCchainNew(object):
def __init__(self, original_chain):
self.mcmc_samples = original_chain.mcmc_samples
self.lensModel = original_chain.lensModel
self.kwargs_lens = original_chain.kwargs_lens
self.param_class = original_chain.param_class
self.lens_system_fit = original_chain.lens_system_fit
self.lens = original_chain.lens
self.modelPlot = original_chain.modelPlot
self.kwargs_result = original_chain.kwargs_result
self.lensModel_full = original_chain.lensModel_full
self.kwargs_lens_full = original_chain.kwargs_lens_full
self.window_size = original_chain.window_size
self.kwargs_data_setup = original_chain.kwargs_data_setup
self.save_name_path = original_chain.save_name_path
def extract_observables(self, n_burn_frac, n_keep):
nsamples_total = int(len(self.mcmc_samples[:, 0]))
n_start = round(nsamples_total * (1 - n_burn_frac))
if n_start < 0:
raise Exception('n burn too large, length of array is ' + str(nsamples_total))
chain_samples = self.mcmc_samples[n_start:nsamples_total, :]
keep_inds = random.sample(list(np.arange(1, chain_samples.shape[0])), n_keep)
chain_samples = self.mcmc_samples[keep_inds, :]
chain_process = ChainPostProcess(self.lensModel, chain_samples, self.param_class,
background_quasar=self.lens_system_fit.background_quasar)
flux_ratios, source_x, source_y = chain_process.flux_ratios(self.lens.x, self.lens.y)
macro_params = chain_process.macro_params()
arrival_times = chain_process.arrival_times(self.lens.x, self.lens.y)
relative_arrival_times = np.empty((n_keep, 3))
for row in range(0, n_keep):
relative_arrival_times[row, :] = self.lens.relative_time_delays(arrival_times[row, :])
return_kwargs_data = {'flux_ratios': flux_ratios,
'time_delays': relative_arrival_times,
'source_x': source_x,
'source_y': source_y,
'kwargs_lens_macro_fit': macro_params}
return return_kwargs_data, self.kwargs_data_setup
def maps(self):
logL = self.modelPlot._imageModel.likelihood_data_given_model(
source_marg=False, linear_prior=None, **self.kwargs_result)
ndata_points = self.modelPlot._imageModel.num_data_evaluate
chi2_imaging = logL * 2 / ndata_points
observed_lens = self.modelPlot._select_band(0)._data
modeled_lens = self.modelPlot._select_band(0)._model
normalized_residuals = self.modelPlot._select_band(0)._norm_residuals
reconstructed_source, coord_transform = \
self.modelPlot._select_band(0).source(numPix=250, deltaPix=0.025)
reconstructed_source_log = np.log10(reconstructed_source)
vmin, vmax = max(np.min(reconstructed_source_log), -5), min(np.max(reconstructed_source_log), 10)
reconstructed_source_log[np.where(reconstructed_source_log < vmin)] = vmin
reconstructed_source_log[np.where(reconstructed_source_log > vmax)] = vmax
residual_maps = ResidualLensMaps(self.lensModel_full, self.lensModel, self.kwargs_lens_full, self.kwargs_lens)
kappa = residual_maps.convergence(self.window_size, 250)
tdelay_res_geo, tdelay_res_grav = residual_maps.time_delay_surface_geoshapiro(self.window_size, 250,
self.lens.x[0], self.lens.y[0])
tdelay_res_map = tdelay_res_geo + tdelay_res_grav
return_kwargs = {'chi2_imaging': chi2_imaging,
'mean_kappa': np.mean(kappa),
'residual_convergence': kappa,
'time_delay_residuals': tdelay_res_map,
'reconstructed_source': reconstructed_source,
'observed_lens': observed_lens,
'modeled_lens': modeled_lens,
'normalized_residuals': normalized_residuals,
}
return return_kwargs
class MCMCchain(object):
def __init__(self, save_name_path, lens_system_fit, lens, mcmc_samples, kwargs_result, kwargs_model,
multi_band_list, kwargs_special, param_class, lensModel, kwargs_lens,
lensModel_full, kwargs_lens_full, window_size, kwargs_data_setup):
self.mcmc_samples = mcmc_samples
self.kwargs_result = kwargs_result
self.kwargs_model = kwargs_model
self.multi_band_list = multi_band_list
self.kwargs_special = kwargs_special
self.param_class = param_class
self.lensModel = lensModel
self.kwargs_lens = kwargs_lens
self.kwargs_data_setup = kwargs_data_setup
self.save_name_path = save_name_path
self.lens = lens
self.lensModel_full = lensModel_full
self.kwargs_lens_full = kwargs_lens_full
self.lens_system_fit = lens_system_fit
self.window_size = window_size
self.modelPlot = ModelPlot(multi_band_list, kwargs_model, kwargs_result, arrow_size=0.02, cmap_string="gist_heat")
def get_output(self, n_burn_frac, n_keep):
assert n_burn_frac < 1
logL = self.modelPlot._imageModel.likelihood_data_given_model(
source_marg=False, linear_prior=None, **self.kwargs_result)
ndata_points = self.modelPlot._imageModel.num_data_evaluate
chi2_imaging = logL * 2 / ndata_points
observed_lens = self.modelPlot._select_band(0)._data
modeled_lens = self.modelPlot._select_band(0)._model
normalized_residuals = self.modelPlot._select_band(0)._norm_residuals
reconstructed_source, coord_transform = \
self.modelPlot._select_band(0).source(numPix=250, deltaPix=0.025)
reconstructed_source_log = np.log10(reconstructed_source)
vmin, vmax = max(np.min(reconstructed_source_log), -5), min(np.max(reconstructed_source_log), 10)
reconstructed_source_log[np.where(reconstructed_source_log < vmin)] = vmin
reconstructed_source_log[np.where(reconstructed_source_log > vmax)] = vmax
residual_maps = ResidualLensMaps(self.lensModel_full, self.lensModel, self.kwargs_lens_full, self.kwargs_lens)
kappa = residual_maps.convergence(self.window_size, 250)
tdelay_res_geo, tdelay_res_grav = residual_maps.time_delay_surface_geoshapiro(self.window_size, 250,
self.lens.x[0], self.lens.y[0])
tdelay_res_map = tdelay_res_geo + tdelay_res_grav
nsamples_total = int(len(self.mcmc_samples[:,0]))
n_start = round(nsamples_total * (1 - n_burn_frac))
if n_start < 0:
raise Exception('n burn too large, length of array is '+str(nsamples_total))
chain_samples = self.mcmc_samples[n_start:nsamples_total, :]
        keep_inds = random.sample(list(np.arange(1, chain_samples.shape[0])), n_keep)
        # sample without replacement from the post-burn-in chain, not the full sample array
        chain_samples = chain_samples[keep_inds, :]
chain_process = ChainPostProcess(self.lensModel, chain_samples, self.param_class,
background_quasar=self.lens_system_fit.background_quasar)
flux_ratios, source_x, source_y = chain_process.flux_ratios(self.lens.x, self.lens.y)
macro_params = chain_process.macro_params()
arrival_times = chain_process.arrival_times(self.lens.x, self.lens.y)
relative_arrival_times = np.empty((n_keep, 3))
for row in range(0, n_keep):
relative_arrival_times[row, :] = self.lens.relative_time_delays(arrival_times[row, :])
return_kwargs_data = {'flux_ratios': flux_ratios,
'time_delays': relative_arrival_times,
'source_x': source_x,
'source_y': source_y}
return_kwargs = {'chi2_imaging': chi2_imaging,
'kwargs_lens_macro_fit': macro_params, 'mean_kappa': np.mean(kappa),
'residual_convergence': kappa, 'time_delay_residuals': tdelay_res_map,
'reconstructed_source': reconstructed_source,
'observed_lens': observed_lens, 'modeled_lens': modeled_lens,
'normalized_residuals': normalized_residuals,
'source_x': self.lens_system_fit.source_centroid_x,
'source_y': self.lens_system_fit.source_centroid_y, 'zlens': self.lens_system_fit.zlens,
'zsource': self.lens_system_fit.zsource}
return return_kwargs, return_kwargs_data, self.kwargs_data_setup
|
# Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from regex import compile
from thingsboard_gateway.connectors.ftp.file import File
COMPATIBLE_FILE_EXTENSIONS = ('json', 'txt', 'csv')
class Path:
def __init__(self, path: str, delimiter: str, telemetry: list, device_name: str, attributes: list,
txt_file_data_view: str, poll_period=60, with_sorting_files=True, device_type='Device', max_size=5,
read_mode='FULL'):
self._path = path
self._with_sorting_files = with_sorting_files
self._poll_period = poll_period
self._files: [File] = []
self._delimiter = delimiter
self._last_polled_time = 0
self._telemetry = telemetry
self._attributes = attributes
self._device_name = device_name
self._device_type = device_type
self._txt_file_data_view = txt_file_data_view
self.__read_mode = File.ReadMode[read_mode]
self.__max_size = max_size
@staticmethod
def __is_file(ftp, filename):
current = ftp.pwd()
try:
ftp.cwd(filename)
except Exception:
ftp.cwd(current)
return True
ftp.cwd(current)
return False
def __get_files(self, ftp, paths, file_name, file_ext):
kwargs = {}
pattern = compile(file_name.replace('*', '.*'))
for item in paths:
ftp.cwd(item)
folder_and_files = ftp.nlst()
for ff in folder_and_files:
                # rpartition keeps names with multiple dots intact and yields an empty
                # extension for entries without a dot (e.g. directories)
                cur_file_name, _, cur_file_ext = ff.rpartition('.')
if cur_file_ext in COMPATIBLE_FILE_EXTENSIONS and self.__is_file(ftp, ff) and ftp.size(ff):
if (file_name == file_ext == '*') \
or pattern.fullmatch(cur_file_name) \
or (cur_file_ext == file_ext and file_name == cur_file_name) \
or (file_name != '*' and cur_file_name == file_name and (
file_ext == cur_file_ext or file_ext == '*')):
kwargs[ftp.voidcmd(f"MDTM {ff}")] = (item + '/' + ff)
if self._with_sorting_files:
return [File(path_to_file=val, read_mode=self.__read_mode, max_size=self.__max_size) for (_, val) in
sorted(kwargs.items(), reverse=True)]
return [File(path_to_file=val, read_mode=self.__read_mode, max_size=self.__max_size) for val in kwargs.values()]
def find_files(self, ftp):
final_arr = []
current_dir = ftp.pwd()
dirname, basename = os.path.split(self._path)
filename, fileex = basename.split('.')
for (index, item) in enumerate(dirname.split('/')):
if item == '*':
current = ftp.pwd()
arr = []
for x in final_arr:
ftp.cwd(x)
node_paths = ftp.nlst()
for node in node_paths:
if not self.__is_file(ftp, node):
arr.append(ftp.pwd() + node)
final_arr = arr
ftp.cwd(current)
else:
if len(final_arr) > 0:
current = ftp.pwd()
for (j, k) in enumerate(final_arr):
ftp.cwd(k)
if not self.__is_file(ftp, item):
final_arr[j] = str(final_arr[j]) + '/' + item
else:
final_arr = []
ftp.cwd(current)
else:
if not self.__is_file(ftp, item):
final_arr.append(item)
final_arr = self.__get_files(ftp, final_arr, filename, fileex)
ftp.cwd(current_dir)
self._files = final_arr
@property
def config(self):
return {
'delimiter': self.delimiter,
'devicePatternName': self.device_name,
'devicePatternType': self.device_type,
'timeseries': self.telemetry,
'attributes': self.attributes,
'txt_file_data_view': self.txt_file_data_view
}
@property
def files(self):
return self._files
@property
def delimiter(self):
return self._delimiter
@property
def telemetry(self):
return self._telemetry
@property
def device_name(self):
return self._device_name
@property
def device_type(self):
return self._device_type
@property
def attributes(self):
return self._attributes
@property
def txt_file_data_view(self):
return self._txt_file_data_view
@property
def last_polled_time(self):
return self._last_polled_time
@property
def path(self):
return self._path
@property
def poll_period(self):
return self._poll_period
@last_polled_time.setter
def last_polled_time(self, value):
self._last_polled_time = value
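# A minimal usage sketch (hypothetical host, credentials, path pattern and config values;
# not part of the connector itself):
#
#   from ftplib import FTP
#   ftp = FTP('ftp.example.com')
#   ftp.login('user', 'password')
#   path = Path(path='/data/*/telemetry_*.csv', delimiter=',', telemetry=[], device_name='Device 1',
#               attributes=[], txt_file_data_view='TABLE')
#   path.find_files(ftp)
#   print(path.files)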
|
import unittest
from .addr import *
class TestAddrParts(unittest.TestCase):
def test_st_num_is_valid(self):
addrs = [
("1234 North Fake Street", 1234),
("5678 North Fake Street", 5678),
("9012 North Fake Street", 9012)
]
for addr, exp in addrs:
with self.subTest(addr=addr, exp=exp):
parts = AddrParts.from_text(addr)
self.assertEqual(parts.st_num, exp)
def test_st_num_is_invalid(self):
addrs = [
"A123 North Fake Street",
"678Z North Fake Street",
"ABCD North Fake Street",
"ABCD North Fake Street",
]
for addr in addrs:
with self.subTest(addr=addr), self.assertRaises(AddrError):
AddrParts.from_text(addr)
def test_st_dir_is_valid(self):
addrs = [
("1234 N Fake Street", AddrDir.NORTH),
("1234 North Fake Street", AddrDir.NORTH),
("1234 S Fake Street", AddrDir.SOUTH),
("1234 South Fake Street", AddrDir.SOUTH),
("1234 E Fake Street", AddrDir.EAST),
("1234 East Fake Street", AddrDir.EAST),
("1234 W Fake Street", AddrDir.WEST),
("1234 West Fake Street", AddrDir.WEST)
]
for addr, exp in addrs:
with self.subTest(addr=addr, exp=exp):
parts = AddrParts.from_text(addr)
self.assertEqual(parts.st_dir, exp)
def test_st_dir_is_invalid(self):
addrs = [
"1234 Zamboni Fake Street",
"1234 4567 Fake Street",
"1234 Cat Fake Street",
]
for addr in addrs:
with self.subTest(addr=addr), self.assertRaises(AddrError):
AddrParts.from_text(addr)
def test_st_name_is_valid(self):
addrs = [
("1234 N Fake Street", "FAKE"),
("1234 North Blah Blorg Street", "BLAH BLORG"),
("1234 S Fake Terrace Ave", "FAKE TERRACE"),
("1234 North 51st Street", "51ST"),
]
for addr, exp in addrs:
with self.subTest(addr=addr, exp=exp):
parts = AddrParts.from_text(addr)
self.assertEqual(parts.st_name, exp)
def test_st_name_is_invalid(self):
addrs = [
"1234 North Street",
]
for addr in addrs:
with self.subTest(addr=addr), self.assertRaises(AddrError):
AddrParts.from_text(addr)
def test_st_name_suffix_is_added(self):
addrs = [
# Try some of the single-digit streets
("1234 North 1 Street", "1ST"),
("1234 North 2 Street", "2ND"),
("1234 North 3 Street", "3RD"),
("1234 North 4 Street", "4TH"),
# 10-19 always end with "TH"
("1234 North 10 Street", "10TH"),
("1234 North 11 Street", "11TH"),
("1234 North 12 Street", "12TH"),
("1234 North 13 Street", "13TH"),
("1234 North 14 Street", "14TH"),
("1234 North 15 Street", "15TH"),
("1234 North 16 Street", "16TH"),
("1234 North 17 Street", "17TH"),
("1234 North 18 Street", "18TH"),
("1234 North 19 Street", "19TH"),
# Try each of the last digits from 0-9
("1234 North 20 Street", "20TH"),
("1234 North 21 Street", "21ST"),
("1234 North 22 Street", "22ND"),
("1234 North 33 Street", "33RD"),
("1234 North 44 Street", "44TH"),
("1234 North 55 Street", "55TH"),
("1234 North 66 Street", "66TH"),
("1234 North 77 Street", "77TH"),
("1234 North 88 Street", "88TH"),
("1234 North 99 Street", "99TH"),
# Likewise 110-119 also end with "TH"
("1234 North 110 Street", "110TH"),
("1234 North 111 Street", "111TH"),
("1234 North 112 Street", "112TH"),
("1234 North 113 Street", "113TH"),
("1234 North 114 Street", "114TH"),
("1234 North 115 Street", "115TH"),
("1234 North 116 Street", "116TH"),
("1234 North 117 Street", "117TH"),
("1234 North 118 Street", "118TH"),
("1234 North 119 Street", "119TH"),
]
for addr, exp in addrs:
with self.subTest(addr=addr, exp=exp):
parts = AddrParts.from_text(addr)
self.assertEqual(parts.st_name, exp)
def test_st_suffix_is_valid(self):
addrs = [
("1234 North Fake Ave", AddrSuffix.AVE),
("1234 North Fake Avenue", AddrSuffix.AVE),
("1234 North Fake Boulevard", AddrSuffix.BLVD),
("1234 North Fake BLVD", AddrSuffix.BLVD),
("1234 North Fake Circle", AddrSuffix.CIRCLE),
("1234 North Fake CIR", AddrSuffix.CIRCLE),
("1234 North Fake Court", AddrSuffix.COURT),
("1234 North Fake CT", AddrSuffix.COURT),
("1234 North Fake Drive", AddrSuffix.DRIVE),
("1234 North Fake DR", AddrSuffix.DRIVE),
("1234 North Fake Lane", AddrSuffix.LANE),
("1234 North Fake LN", AddrSuffix.LANE),
("1234 North Fake PKWY", AddrSuffix.PKWY),
("1234 North Fake Place", AddrSuffix.PLACE),
("1234 North Fake PL", AddrSuffix.PLACE),
("1234 North Fake Road", AddrSuffix.ROAD),
("1234 North Fake RD", AddrSuffix.ROAD),
("1234 North Fake Square", AddrSuffix.SQUARE),
("1234 North Fake SQ", AddrSuffix.SQUARE),
("1234 North Fake Street", AddrSuffix.STREET),
("1234 North Fake ST", AddrSuffix.STREET),
("1234 North Fake Terrace", AddrSuffix.TERRACE),
("1234 North Fake TER", AddrSuffix.TERRACE),
("1234 North Fake Way", AddrSuffix.WAY)
]
for (addr, exp) in addrs:
with self.subTest(addr=addr, exp=exp):
parts = AddrParts.from_text(addr)
self.assertEqual(parts.st_suffix, exp)
def test_st_suffix_is_invalid(self):
addrs = [
"1234 North Fake",
"1234 North Fake Stroll",
]
for addr in addrs:
with self.subTest(addr=addr), self.assertRaises(AddrError):
AddrParts.from_text(addr)
|
from ete3 import NCBITaxa
# The first time this runs it will download the NCBI taxonomy database and save a parsed
# version of it in `~/.etetoolkit/taxa.sqlite`. This may take a few minutes.
ncbi = NCBITaxa()
print("ncbi.dbfile", ncbi.dbfile)
with open(snakemake.input[0], 'r', encoding='utf8') as fh:
genus_list = fh.read().strip().split('\n')
genus_to_taxid = ncbi.get_name_translator(genus_list)
tax_id_vals = genus_to_taxid.values()
tree = ncbi.get_topology([genus_id for subls in tax_id_vals for genus_id in subls], intermediate_nodes=True)
# `get_ascii()` has a bug: it prints the taxa above genus level without any separation between
# them. A way to avoid that is to use extra attributes; `dist` seems to be the least invasive.
# The '1.0,' values coming from 'dist' are then replaced.
with open(snakemake.output[0], mode='w', encoding='utf8') as fh:
print(tree.get_ascii(attributes=["dist", "sci_name"]).replace('1.0,','-'), file=fh)
|
import numpy as np
class MatrixFactorization:
@classmethod
def run(cls, R, P, Q, K, steps=5000, alpha=0.0002, beta=0.02, threshold=0.001):
"""
Args:
R (numpy.ndarray): Rating
P (numpy.ndarray): m x K のユーザ行列
Q (numpy.ndarray): n x K のアイテム行列
K (int): Latent factor(潜在変数)数
alpha (flaot): 学習率
beta (float): 正則化項の学習率
"""
m, n = R.shape
Q = Q.T
for step in range(steps):
for i in range(m):
for j in range(n):
if R[i][j] == 0:
continue
                    # Compute the error for this rating
err = cls._calc_rating_err(R[i][j], P[i, :], Q[:, j])
                    # Update P and Q
for k in range(K):
P[i][k] = P[i][k] + alpha * (2 * err * Q[k][j] - beta * P[i][k])
Q[k][j] = Q[k][j] + alpha * (2 * err * P[i][k] - beta * Q[k][j])
            # Compute the overall error
e = cls._calc_error(R, P, Q, m, n, beta)
if e < threshold:
break
return P, Q
@classmethod
def _calc_error(cls, R, P, Q, m, n, beta):
"""レーティング行列Rと推定レーティング行列との差を算出する
Args:
R (numpy.ndarray): 評価値行列
P (numpy.ndarray): ユーザ行列 K x m
Q (numpy.ndarray): アイテム行列 K x n
beta (flaot): Learning rate
"""
err = 0.0
for i in range(m):
for j in range(n):
current_rating = R[i][j]
if current_rating == 0:
continue
                # Sum of squared errors over the whole matrix
err += pow(cls._calc_rating_err(current_rating, P[i, :], Q[:, j]), 2)
# L2 regularization
l2_term = (beta / 2) * (np.linalg.norm(P) + np.linalg.norm(Q))
err += l2_term
return err
@classmethod
def _calc_rating_err(cls, r, p, q):
"""実際の評価値と内積との差を算出する
Args:
r (int): 実際の評価値
p (numpy.ndarray): m列のユーザ行列
q (numpy.ndarray): n列のアイテム行列
Return:
(int) 評価値との誤差
"""
return r - np.dot(p, q)
if __name__ == '__main__':
    # Number of latent factors
K = 2
    # m x n rating matrix
R = np.array([
[5, 3, 0, 1],
[4, 0, 0, 1],
[1, 1, 0, 5],
[1, 0, 0, 4],
[0, 1, 5, 4]
])
m, n = R.shape
    # m x K user matrix P
P = np.random.rand(m, K)
    # n x K item matrix Q
Q = np.random.rand(n, K)
print('========================================')
print('Old P: {}'.format(P))
print('Old Q: {}'.format(Q))
print()
P_new, Q_new = MatrixFactorization.run(R, P, Q, K, steps=5000, alpha=0.0002, beta=0.2, threshold=0.001)
print('========================================')
print('New P: {}'.format(P_new))
print('New Q: {}'.format(Q_new))
print('----------------------------------------')
print(R)
print(np.round(np.dot(P, Q.T)))
|
from keras import backend as K
from keras.layers import Layer
class ExpandLayer(Layer):
def __init__(self, axis, **kwargs):
super().__init__(**kwargs)
self._axis = axis
def call(self, inputs, **kwargs):
return K.expand_dims(inputs, axis=self._axis)
def compute_output_shape(self, input_shape):
if self._axis < 0:
axis = self._axis + len(input_shape) + 1
else:
axis = self._axis
output_shape = input_shape[:axis] + (1,) + input_shape[axis:]
return tuple(output_shape)
def get_config(self):
config = {
"index": self._index
}
base_config = super().get_config()
config.update(base_config)
return config
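# A minimal usage sketch (hypothetical shapes and imports, not part of the original layer):
#
#   from keras.layers import Input
#   from keras.models import Model
#   inp = Input(shape=(10,))
#   out = ExpandLayer(axis=-1)(inp)   # output shape: (None, 10, 1)
#   Model(inp, out).summary()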
|
import abc
class Response(abc.ABC):
def __init__(self):
self.output = ""
def with_output(self, output):
self.output = output
return self
@abc.abstractmethod
def print(self):
pass
def __eq__(self, other):
return isinstance(other, self.__class__) \
and self.output == other.output
def __hash__(self):
return hash(('output', self.output))
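# A minimal sketch of a concrete subclass (hypothetical, for illustration only):
#
#   class TextResponse(Response):
#       def print(self):
#           return self.output
#
#   TextResponse().with_output("hello").print()  # -> "hello"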
|
# makes KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Application dependent names and paths
import KratosMultiphysics as KM
from KratosDemStructuresCouplingApplication import *
application = KratosDemStructuresCouplingApplication()
application_name = "KratosDemStructuresCouplingApplication"
application_folder = "DemStructuresCouplingApplication"
KM._ImportApplicationAsModule(application, application_name, application_folder, __path__)
|
from engine import *
from grid import Grid
class Maze:
    '''
    Class that generates the maze.
    This is the environment of this program,
    built with PyGame.
    '''
def __init__(self, title, dim):
self.window = Window(title, dim, FPS_CAP=60)
self.window.background_color = pygame.Color('white')
self.id = "maze"
self.running = False
self.ready_loading = False
# Grid
self.grid = Grid(dim, self.window)
# Event handler
self.eventListener = EventListener()
# Setup
self.setup()
def setup(self):
# Grid setup
self.window.addWidget(self.grid)
self.grid.cell_maz_size = 15
cellx, celly = self.grid.setup()
        # Draw instruction labels
label1 = Label((0, (celly*self.grid.cell_dim)+100),
(self.window.dimension[1], self.grid.cell_dim),
"S start point, E end point,SPACE start simulation")
label1.text_size = 25
self.window.addWidget(label1)
label2 = Label((0, (celly * self.grid.cell_dim) + 150),
(self.window.dimension[1], self.grid.cell_dim),
"C to clear the grid, M_Sx to build walls, L to load")
label2.text_size = 25
self.window.addWidget(label2)
# Add event listener
self.eventListener.bind('onKeyDown', function)
self.window.addWidget(self.eventListener)
# Add maze to objects
self.window.addObject(self, self.id)
def start(self):
self.window.start()
# Function event listener
def function(self, win, key):
if key == ' ' and not win.objects["maze"].running and win.objects["maze"].grid.exist_start_point() and win.objects["maze"].grid.exist_end_point():
win.objects["maze"].grid.updateMatrixWidg()
win.objects["maze"].running = True
elif key == ' ' and win.objects["maze"].running:
win.objects["maze"].grid.updateMatrixWidg()
win.objects["maze"].running = False
elif key == 'c' and not win.objects["maze"].running:
win.objects["maze"].grid.clear()
win.objects["maze"].grid.updateMatrixAi()
elif key == 'l':
win.objects["maze"].ready_loading = not win.objects["maze"].ready_loading
|
from datetime import datetime
import unittest
import time
import six
from salesmachine.version import VERSION
from salesmachine.client import Client
class TestClient(unittest.TestCase):
def fail(self, e, batch):
"""Mark the failure handler"""
self.failed = True
def setUp(self):
self.failed = False
self.client = Client("key", "secret", debug=True, on_error=self.fail)
def test_requires_write_key(self):
self.assertRaises(AssertionError, Client)
def test_empty_flush(self):
self.client.flush()
def test_basic_track_event(self):
client = self.client
success, msg = client.track_event('contact_uid', 'event_uid')
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['contact_uid'], 'contact_uid')
self.assertEqual(msg['event_uid'], 'event_uid')
self.assertEqual(msg['method'], 'event')
def test_basic_set_contact(self):
client = self.client
success, msg = client.set_contact('contact_uid', {'name': 'Jean'})
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['params'], {'name': 'Jean' })
self.assertEqual(msg['contact_uid'], 'contact_uid')
self.assertEqual(msg['method'], 'contact')
def test_basic_set_account(self):
client = self.client
success, msg = client.set_account('account_uid', {'name': 'Jean Corp.'})
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['params'], {'name': 'Jean Corp.' })
self.assertEqual(msg['account_uid'], 'account_uid')
self.assertEqual(msg['method'], 'account')
def test_basic_track_pageview(self):
client = self.client
success, msg = client.track_pageview('contact_uid', {'display_name': 'Jean has seen page 2.'})
client.flush()
self.assertTrue(success)
self.assertFalse(self.failed)
self.assertEqual(msg['params'], {'display_name': 'Jean has seen page 2.'})
self.assertEqual(msg['contact_uid'], 'contact_uid')
self.assertEqual(msg['method'], 'event')
def test_flush(self):
client = self.client
# send a few more requests than a single batch will allow
for i in range(60):
success, msg = client.set_contact('contact_uid', { 'name': 'value' })
self.assertFalse(client.queue.empty())
client.flush()
self.assertTrue(client.queue.empty())
def test_overflow(self):
client = Client('testkey', 'testsecret', max_queue_size=1)
client.consumer.pause()
time.sleep(5.1) # allow time for consumer to exit
for i in range(10):
client.set_contact('contact_uid')
success, msg = client.set_contact('contact_uid')
self.assertFalse(success)
def test_error_on_invalid_write_key(self):
client = Client('bad_key', 'bad_secret', on_error=self.fail)
client.set_contact('contact_uid')
client.flush()
self.assertTrue(self.failed)
def test_unicode(self):
Client('tetskey', six.u('unicode_key'))
def test_numeric_user_id(self):
self.client.set_contact(789)
self.client.flush()
self.assertFalse(self.failed)
def test_debug(self):
Client('bad_key', 'bad_secret', debug=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# :Author: snxx
# :Copyright: (c) 2021 snxx
# For license and copyright information please follow this link:
# https://github.com/snxx-lppxx/Cloud-vk-bot/blob/master/LICENSE
''' GitHub: snxx-lppxx/Cloud-vk-bot '''
# Importing modules
import vk_api
import time
import json
from vk_api.bot_longpoll import VkBotEventType, VkBotLongPoll
from source.resource.word import err
from source.betypes.config import token, version, gid
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
vk = vk_api.VkApi(token=token)
vk._auth_token()
vk.get_api()
longpoll = VkBotLongPoll(vk, gid)
settings = dict(one_time=False, inline=True)
# Callback payload types handled below (assumed from the payloads used in this file)
CALLBACK_TYPES = ('show_snackbar', 'open_link')
class Server(object):
    # Send starting message. Entry point
print('{}\n{}{}\n'.format('Server started...', 'API: ', version))
    # IFNDEF MAIN
def __init__(self, keyboard):
''' Creating keyboards '''
# Keyboard number-1
keyboard.add_callback_button(label='1',
color=vk_api.keyboard.VkKeyboardColor.SECONDARY,
payload={"type": "show_snackbar", "text": "1"}
)
keyboard.add_line()
# Keyboard number-2
keyboard.add_callback_button(label='2',
color=vk_api.keyboard.VkKeyboardColor.POSITIVE,
payload={"type": "open_link", "link": "https://github.com/snxx-lppxx"}
)
keyboard.add_line()
# Keyboard number-3
keyboard.add_callback_button(label='3',
color=vk_api.keyboard.VkKeyboardColor.SECONDARY,
payload={"type": "open_link", "link": "https://github.com/snxx-lppxx"}
)
# ENDIF MAIN
# IFNDEF LOGICS
f_toggle: bool = False
for event in longpoll.listen():
keyboard = vk_api.keyboard.VkKeyboard(**settings)
try:
if event.type == vk_api.bot_longpoll.VkBotEventType.MESSAGE_NEW:
if event.from_user:
# Checking correctness
if event.obj.message['text'] != '':
if 'callback' not in event.obj.client_info['button_actions']:
print(f'Client does not supported: {event.obj.message["from_id"]}')
vk.messages.send(user_id = event.obj.message['from_id'], peer_id = event.obj.message['from_id'],
message = 'Здравствуйте, я Ваш консультант, могу чем-то помочь?', random_id = 0, keyboard=keyboard.get_keyboard()
)
elif event.type == VkBotEventType.MESSAGE_EVENT:
if event.object.payload.get('type') in CALLBACK_TYPES:
r = vk.messages.sendMessageEventAnswer(
event_id = event.object.event_id,
user_id = event.object.user_id,
peer_id = event.object.peer_id,
event_data = json.dumps(event.object.payload))
f_toggle = not f_toggle
if 'Negative' in event.obj.text:
vk.messages.send(peer_id = event.obj.message['peer_id'], message = '1', random_id = 0)
if 'Primary' in event.obj.text:
vk.messages.send(peer_id = event.obj.message['peer_id'], message = '2', random_id = 0)
if 'Secondary' in event.obj.text:
vk.messages.send(peer_id = event.obj.message['peer_id'], message = '3', random_id = 0)
except Exception as e:
time.sleep(0.75)
# ENDIF LOGICS
if __name__ == '__main__':
    # start the bot with a fresh keyboard
    Server(vk_api.keyboard.VkKeyboard(**settings))
|
#!/bin/env python
# -*- coding: UTF-8 -*-
"""
High level helper methods to load Argo data from any source
The facade should be able to work with all available data access point,
Usage for LOCALFTP:
from argopy import DataFetcher as ArgoDataFetcher
argo_loader = ArgoDataFetcher(backend='localftp', ds='phy')
or
argo_loader = ArgoDataFetcher(backend='localftp', ds='bgc')
argo_loader.float(6902746).to_xarray()
argo_loader.float([6902746, 6902747, 6902757, 6902766]).to_xarray()
Usage for ERDDAP (default backend):
from argopy import DataFetcher as ArgoDataFetcher
argo_loader = ArgoDataFetcher(backend='erddap')
or
argo_loader = ArgoDataFetcher(backend='erddap', cachedir='tmp', cache=True)
or
argo_loader = ArgoDataFetcher(backend='erddap', ds='ref')
argo_loader.profile(6902746, 34).to_xarray()
argo_loader.profile(6902746, np.arange(12,45)).to_xarray()
argo_loader.profile(6902746, [1,12]).to_xarray()
or
argo_loader.float(6902746).to_xarray()
argo_loader.float([6902746, 6902747, 6902757, 6902766]).to_xarray()
argo_loader.float([6902746, 6902747, 6902757, 6902766], CYC=1).to_xarray()
or
argo_loader.region([-85,-45,10.,20.,0,1000.]).to_xarray()
argo_loader.region([-85,-45,10.,20.,0,1000.,'2012-01','2014-12']).to_xarray()
"""
import os
import sys
import glob
import pandas as pd
import xarray as xr
import numpy as np
import warnings
from argopy.options import OPTIONS, _VALIDATORS
from .errors import InvalidFetcherAccessPoint, InvalidFetcher
from .utilities import list_available_data_backends
AVAILABLE_BACKENDS = list_available_data_backends()
# Import plotters :
from .plotters import plot_trajectory, plot_dac, plot_profilerType
# Highest level API / Facade:
class ArgoDataFetcher(object):
""" Fetch and process Argo data.
Can return data selected from:
- one or more float(s), defined by WMOs
- one or more profile(s), defined for one WMO and one or more CYCLE NUMBER
- a space/time rectangular domain, defined by lat/lon/pres/time range
Can return data from the regular Argo dataset ('phy': temperature, salinity) and the Argo referenced
dataset used in DMQC ('ref': temperature, salinity).
This is the main API facade.
Specify here all options to data_fetchers
"""
def __init__(self,
mode: str = "",
backend : str = "",
ds: str = "",
**fetcher_kwargs):
# Facade options:
self._mode = OPTIONS['mode'] if mode == '' else mode
self._dataset_id = OPTIONS['dataset'] if ds == '' else ds
self._backend = OPTIONS['datasrc'] if backend == '' else backend
_VALIDATORS['mode'](self._mode)
_VALIDATORS['datasrc'](self._backend)
_VALIDATORS['dataset'](self._dataset_id)
# Load backend access points:
if self._backend not in AVAILABLE_BACKENDS:
raise ValueError("Data fetcher '%s' not available" % self._backend)
else:
Fetchers = AVAILABLE_BACKENDS[self._backend]
# Auto-discovery of access points for this fetcher:
# rq: Access point names for the facade are not the same as the access point of fetchers
self.valid_access_points = ['profile', 'float', 'region']
self.Fetchers = {}
for p in Fetchers.access_points:
if p == 'wmo': # Required for 'profile' and 'float'
self.Fetchers['profile'] = Fetchers.Fetch_wmo
self.Fetchers['float'] = Fetchers.Fetch_wmo
if p == 'box': # Required for 'region'
self.Fetchers['region'] = Fetchers.Fetch_box
# Init sub-methods:
self.fetcher = None
if ds is None:
ds = Fetchers.dataset_ids[0]
self.fetcher_options = {**{'ds':ds}, **fetcher_kwargs}
self.postproccessor = self.__empty_processor
# Dev warnings
#Todo Clean-up before each release
if self._dataset_id == 'bgc' and self._mode == 'standard':
warnings.warn(" 'BGC' dataset fetching in 'standard' user mode is not reliable. "
"Try to switch to 'expert' mode if you encounter errors.")
def __repr__(self):
if self.fetcher:
summary = [self.fetcher.__repr__()]
summary.append("Backend: %s" % self._backend)
summary.append("User mode: %s" % self._mode)
else:
summary = ["<datafetcher 'Not initialised'>"]
summary.append("Backend: %s" % self._backend)
summary.append("Fetchers: %s" % ", ".join(self.Fetchers.keys()))
summary.append("User mode: %s" % self._mode)
return "\n".join(summary)
def __empty_processor(self, xds):
""" Do nothing to a dataset """
return xds
def __getattr__(self, key):
""" Validate access points """
# print("key", key)
valid_attrs = ['Fetchers', 'fetcher', 'fetcher_options', 'postproccessor']
if key not in self.valid_access_points and key not in valid_attrs:
raise InvalidFetcherAccessPoint("'%s' is not a valid access point" % key)
pass
def float(self, wmo, **kw):
""" Load data from a float, given one or more WMOs """
if "CYC" in kw or "cyc" in kw:
raise TypeError("float() got an unexpected keyword argument 'cyc'. Use 'profile' access "
"point to fetch specific profile data.")
if 'float' in self.Fetchers:
self.fetcher = self.Fetchers['float'](WMO=wmo, **self.fetcher_options)
else:
raise InvalidFetcherAccessPoint("'float' not available with '%s' backend" % self._backend)
if self._mode == 'standard' and self._dataset_id != 'ref':
def postprocessing(xds):
xds = self.fetcher.filter_data_mode(xds)
xds = self.fetcher.filter_qc(xds)
xds = self.fetcher.filter_variables(xds, self._mode)
return xds
self.postproccessor = postprocessing
return self
def profile(self, wmo, cyc):
""" Load data from a profile, given one or more WMOs and CYCLE_NUMBER """
if 'profile' in self.Fetchers:
self.fetcher = self.Fetchers['profile'](WMO=wmo, CYC=cyc, **self.fetcher_options)
else:
raise InvalidFetcherAccessPoint("'profile' not available with '%s' backend" % self._backend)
if self._mode == 'standard' and self._dataset_id != 'ref':
def postprocessing(xds):
xds = self.fetcher.filter_data_mode(xds)
xds = self.fetcher.filter_qc(xds)
xds = self.fetcher.filter_variables(xds, self._mode)
return xds
self.postproccessor = postprocessing
return self
def region(self, box):
""" Load data from a rectangular region, given latitude, longitude, pressure and possibly time bounds """
if 'region' in self.Fetchers:
self.fetcher = self.Fetchers['region'](box=box, **self.fetcher_options)
else:
raise InvalidFetcherAccessPoint("'region' not available with '%s' backend" % self._backend)
if self._mode == 'standard' and self._dataset_id != 'ref':
def postprocessing(xds):
xds = self.fetcher.filter_data_mode(xds)
xds = self.fetcher.filter_qc(xds)
xds = self.fetcher.filter_variables(xds, self._mode)
return xds
self.postproccessor = postprocessing
return self
def deployments(self, box):
""" Retrieve deployment locations in a specific space/time region """
warnings.warn("This access point is to be used with an index fetcher")
pass
def to_xarray(self, **kwargs):
""" Fetch and post-process data, return xarray.DataSet """
if not self.fetcher:
raise InvalidFetcher(" Initialize an access point (%s) first." %
",".join(self.Fetchers.keys()))
xds = self.fetcher.to_xarray(**kwargs)
xds = self.postproccessor(xds)
return xds
class ArgoIndexFetcher(object):
"""
Specs discussion :
https://github.com/euroargodev/argopy/issues/8
https://github.com/euroargodev/argopy/pull/6)
Usage :
from argopy import ArgoIndexFetcher
idx = ArgoIndexFetcher.region([-75, -65, 10, 20])
idx.plot.trajectories()
idx.to_dataframe()
Fetch and process Argo index.
Can return metadata from index of :
- one or more float(s), defined by WMOs
- one or more profile(s), defined for one WMO and one or more CYCLE NUMBER
- a space/time rectangular domain, defined by lat/lon/pres/time range
idx object can also be used as an input :
argo_loader = ArgoDataFetcher(index=idx)
Specify here all options to data_fetchers
"""
def __init__(self,
mode: str = "",
backend : str = "",
**fetcher_kwargs):
# Facade options:
self._mode = OPTIONS['mode'] if mode == '' else mode
self._backend = OPTIONS['datasrc'] if backend == '' else backend
_VALIDATORS['mode'](self._mode)
_VALIDATORS['datasrc'](self._backend)
# Load backend access points:
if self._backend not in AVAILABLE_BACKENDS:
raise ValueError("Fetcher '%s' not available" % self._backend)
else:
Fetchers = AVAILABLE_BACKENDS[self._backend]
# Auto-discovery of access points for this fetcher:
# rq: Access point names for the facade are not the same as the access point of fetchers
self.valid_access_points = ['float', 'region']
self.Fetchers = {}
for p in Fetchers.access_points:
if p == 'wmo': # Required for 'profile' and 'float'
self.Fetchers['float'] = Fetchers.IndexFetcher_wmo
if p == 'box': # Required for 'region'
self.Fetchers['region'] = Fetchers.IndexFetcher_box
# Init sub-methods:
self.fetcher = None
self.fetcher_options = {**fetcher_kwargs}
self.postproccessor = self.__empty_processor
def __repr__(self):
if self.fetcher:
summary = [self.fetcher.__repr__()]
summary.append("User mode: %s" % self._mode)
else:
summary = ["<indexfetcher 'Not initialised'>"]
summary.append("Fetchers: 'float' or 'region'")
summary.append("User mode: %s" % self._mode)
return "\n".join(summary)
def __empty_processor(self, xds):
""" Do nothing to a dataset """
return xds
def __getattr__(self, key):
""" Validate access points """
valid_attrs = ['Fetchers', 'fetcher', 'fetcher_options', 'postproccessor']
if key not in self.valid_access_points and key not in valid_attrs:
raise InvalidFetcherAccessPoint("'%s' is not a valid access point" % key)
pass
def float(self, wmo):
""" Load index for one or more WMOs """
if 'float' in self.Fetchers:
self.fetcher = self.Fetchers['float'](WMO=wmo, **self.fetcher_options)
else:
raise InvalidFetcherAccessPoint("'float' not available with '%s' backend" % self._backend)
return self
def region(self, box):
""" Load index for a rectangular region, given latitude, longitude, and possibly time bounds """
if 'region' in self.Fetchers:
self.fetcher = self.Fetchers['region'](box=box, **self.fetcher_options)
else:
raise InvalidFetcherAccessPoint("'region' not available with '%s' backend" % self._backend)
return self
def to_dataframe(self, **kwargs):
""" Fetch index and return pandas.Dataframe """
if not self.fetcher:
raise InvalidFetcher(" Initialize an access point (%s) first." %
",".join(self.Fetchers.keys()))
return self.fetcher.to_dataframe(**kwargs)
def to_xarray(self, **kwargs):
""" Fetch index and return xr.dataset """
if not self.fetcher:
raise InvalidFetcher(" Initialize an access point (%s) first." %
",".join(self.Fetchers.keys()))
return self.fetcher.to_xarray(**kwargs)
def to_csv(self, file: str='output_file.csv'):
""" Fetch index and return csv """
if not self.fetcher:
raise InvalidFetcher(" Initialize an access point (%s) first." %
",".join(self.Fetchers.keys()))
return self.to_dataframe().to_csv(file)
def plot(self, ptype='trajectory'):
""" Custom plots """
idx=self.to_dataframe()
if ptype=='dac':
plot_dac(idx)
elif ptype=='profiler':
plot_profilerType(idx)
elif ptype=='trajectory':
plot_trajectory(idx.sort_values(['file']))
else:
raise ValueError("Type of plot unavailable. Use: 'dac', 'profiler' or 'trajectory' (default)")
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet
from rest_framework.mixins import RetrieveModelMixin, CreateModelMixin
from django_redis import get_redis_connection
from wechat.core.bot import SaveModelMessageBot
from wechat.core.authentication import AccessTokenAuthentication
from wechat.core import pkl_path
from wechat import serializers, models
from wechat.core import utils
import threading
import time
import os
class LoginView(APIView):
bot_class = SaveModelMessageBot
authentication_classes = [AccessTokenAuthentication]
def get(self, request, *args, **kwargs):
        assert self.bot_class is not None, 'bot_class cannot be `None`'
        # Create the login thread
flag = time.time()
login = threading.Thread(target=self.bot_login, args=(flag,))
login.start()
        # Recursively fetch the QR code
response = self.get_response(flag)
        # Convert bytes to strings
response = {k.decode(): v.decode() for k, v in response.items()}
        # Rename the Redis key so the login status can be queried later
self.rename_redis_key(flag)
return Response(response)
def bot_login(self, flag):
obj = self.bot_class(request=self.request, flag=flag)
        # Persist the puid permanently
obj.enable_puid(os.path.join(pkl_path, f'{self.request.auth.app_id}.pkl'))
def get_response(self, flag):
time.sleep(0.01)
coon = get_redis_connection('default')
qrcode = coon.hgetall(flag)
return qrcode if qrcode else self.get_response(flag)
@staticmethod
def rename_redis_key(flag):
coon = get_redis_connection('default')
uuid = coon.hget(flag, 'uuid')
coon.rename(flag, uuid)
class CheckLoginView(APIView):
"""检查登陆状态"""
authentication_classes = [AccessTokenAuthentication]
def get(self, request, *args, **kwargs):
serializer = serializers.CheckLoginSerializer(data=request.GET, context={'request': request})
serializer.is_valid(raise_exception=True)
return Response(serializer.validated_data)
class FriendsReadOnlyModelViewSet(ReadOnlyModelViewSet):
"""好友列表查询接口"""
serializer_class = serializers.WxUserModelModelSerializer
authentication_classes = [AccessTokenAuthentication]
filterset_fields = ['puid', 'name', 'nick_name', 'user_name', 'remark_name', 'signature', 'sex', 'province', 'city']
def get_queryset(self):
return self.request.user.friends.all()
class GroupsReadOnlyModelViewSet(ReadOnlyModelViewSet):
"""群查询接口"""
authentication_classes = [AccessTokenAuthentication]
serializer_class = serializers.WxGroupModelModelSerializer
filterset_fields = ['puid', 'name', 'nick_name', 'user_name', ]
def get_queryset(self):
return self.request.user.wxgroupmodel_set.all()
class GroupsMembersRetrieveModelMixinViewSet(RetrieveModelMixin, GenericViewSet):
"""群成员查询接口"""
serializer_class = serializers.WxGroupMembersSerializer
authentication_classes = [AccessTokenAuthentication]
def get_queryset(self):
return self.request.user.wxgroupmodel_set.all()
def get_object(self):
"""修改`get_object`的返回结果,让`retrieve`调用"""
instance = super().get_object()
return instance.members.all()
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
queryset = self.filter_queryset(instance)
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
class MpReadOnlyModelViewSet(ReadOnlyModelViewSet):
"""微信公众号查询接口"""
serializer_class = serializers.WxMpsModelModelSerializer
authentication_classes = [AccessTokenAuthentication]
filterset_fields = ['puid', 'name', 'nick_name', 'province', 'city', 'signature']
def get_queryset(self):
return self.request.user.wxmpsmodel_set.all()
class MessageReadOnlyModelViewSet(ReadOnlyModelViewSet):
"""聊天记录查询接口"""
serializer_class = serializers.MessageReadModelSerializer
authentication_classes = [AccessTokenAuthentication]
queryset = models.MessageModel.objects.all()
filterset_fields = ['type', 'create_time', 'receive_time', 'is_at', 'sender_puid', 'receiver_puid']
def get_queryset(self):
return self.queryset.filter(owner=self.request.user)
class AccessTokenView(APIView):
"""获取access token"""
def get(self, request, *args, **kwargs):
serializer = serializers.AccessTokenSerializer(data=request.GET)
serializer.is_valid(raise_exception=True)
return Response(serializer.validated_data)
class SendMessageView(CreateModelMixin, GenericViewSet):
"""主动发送消息"""
serializer_class = serializers.SendMessageSerializer
authentication_classes = [AccessTokenAuthentication]
class UpdateUserInfoView(APIView):
"""主动更新列表信息"""
authentication_classes = [AccessTokenAuthentication]
def get(self, request, *args, **kwargs):
try:
utils.update_app_info_by_view(request)
except AssertionError as e:
return Response({'errmsg': e.__str__()})
return Response({'msg': '更新成功!'}, status=201)
|
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:complexType name="tAddress">
<xs:choice>
<xs:sequence>
<xs:element name="Line1" type="xs:string"/>
<xs:element name="Line2" type="xs:string"/>
</xs:sequence>
<xs:sequence>
<xs:element name="Missing" type="xs:string"/>
</xs:sequence>
</xs:choice>
</xs:complexType>
<xs:complexType name="tOther">
<xs:sequence>
<xs:element name="Header" type="xs:string"/>
<xs:choice>
<xs:sequence>
<xs:element name="Special" type="tAddress"/>
<xs:element name="Common" type="tAddress" minOccurs="0"/>
</xs:sequence>
<xs:sequence>
<xs:element name="Common" type="tAddress"/>
</xs:sequence>
</xs:choice>
</xs:sequence>
</xs:complexType>
<xs:element name="elt" type="tOther"/>
</xs:schema>'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
open('code.py', 'w').write(code)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0033a (unittest.TestCase):
def test (self):
xml = '<elt><Header/><Common><Line1/><Line2/></Common></elt>'
instance = CreateFromDocument(xml)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
import threading
import time
from gps3 import gps3
latitude=0
longitude=0
gps_socket = gps3.GPSDSocket()
data_stream = gps3.DataStream()
gps_socket.connect()
gps_socket.watch()
def pos_update():
    global latitude, longitude
    while True:
        for new_data in gps_socket:
            if new_data:
                data_stream.unpack(new_data)
                latitude = data_stream.TPV['lat']
                longitude = data_stream.TPV['lon']
                # gpsd reports 'n/a' (a string) until it has a fix; keep waiting in that case
                if isinstance(latitude, str) or isinstance(longitude, str):
                    continue
                return latitude, longitude
def gpub():
pubcor=rospy.Publisher("coordinates",String, queue_size=1)
#publong=rospy.Publisher("longitude",String, queue_size=1)
rospy.init_node("GPS",anonymous=True)
rate=rospy.Rate(10)
while not rospy.is_shutdown():
latitude,longitude=pos_update()
pubcor.publish(str(latitude)+','+str(longitude))
rate.sleep()
if __name__=='__main__':
try:
gpub()
except rospy.ROSInterruptException:
pass
|
# -*- coding: utf-8 -*-
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from p2ner.core.namespace import autoNS
from p2ner.abstract.message import Message
from weakref import ref
class MessageSent(Exception):
def __init__(self, peer= None):
self.peer = peer
class MessageError(Exception):
def __init__(self, peer=None):
self.peer = peer
def probe_ack(f,func=None,*args):
f.trap(MessageSent,MessageError)
if f.check(MessageSent):
return True
if f.check(MessageError):
if func:
func(peer=f.value.peer,*args)
return False
def probe_rec(f,func=None,*args):
f.trap(MessageSent,MessageError)
if f.check(MessageSent):
if func:
func(peer=f.value.peer,*args)
return False
if f.check(MessageError):
return True
def probe_all(f,suc_func=None,err_func=None,**kwargs):
f.trap(MessageSent,MessageError)
if f.check(MessageSent):
if suc_func:
suc_func(f.value.peer,**kwargs)
return False
if f.check(MessageError):
if err_func:
err_func(f.value.peer,**kwargs)
return False
def trap_sent(f):
f.trap(MessageSent,MessageError)
class ControlMessage(Message):
__instances__ = []
@property
def code(self):
return self.__class__.code
@property
def type(self):
return self.__class__.type
@property
def ack(self):
return self.__class__.ack
@classmethod
def _cleanref(cls, r):
#print 'del',r
#cls.Log().error('removing %s',str(r))
ControlMessage.__instances__.remove(r)
#print ControlMessage.__instances__
@classmethod
def remove_refs(cls,inst):
#cls.Log.error('in remove refs')
i=0
for msg_ref in ControlMessage.__instances__:
if msg_ref() is inst:
found=i
#cls.Log.debug('removing control message instance:%s',msg_ref())
#cls.__instances__.remove(msg_ref)
break
i+=1
ControlMessage.__instances__.pop(found)
@autoNS
def __init__(self, *args, **kwargs):
ControlMessage.__instances__.append(ref(self, ControlMessage._cleanref))
#if not 'Log' in ControlMessage.__dict__:
# ControlMessage.Log=ref(self.logger.getLoggerChild('messages',self.interface))
#ControlMessage.Log().debug('registering message %s',str(self))
self.initMessage(*args, **kwargs)
def initMessage(self, *args, **kwargs):
pass
@classmethod
def codefilter(cls, code):
msglist = []
for msg_ref in cls.__instances__:
msg = msg_ref()
            if msg is None:
continue
if msg.type == "messageheader":
continue
elif msg.code == code or msg.code == "*":
msglist.append(msg)
if len(msglist)==0:
print "No matches for msg code", code
#cls.Log.warning("No matches for msg code:%s", code)
# print "Message instances: ", [m() for m in cls.__instances__]
#cls.Log.warning("Message instances:%s ",str( [m() for m in cls.__instances__]))
return msglist
@classmethod
def fallbackmsgs(cls):
msglist = []
for msg_ref in cls.__instances__:
msg = msg_ref()
if msg:
if msg.code == "-":
msglist.append(msg)
return msglist
@classmethod
def trig(cls, msgs, triggers):
triggered = []
for msg in msgs:
if msg.trigger(triggers[msg.type]):
triggered.append((msg, triggers[msg.type]))
return triggered
def __str__(self):
return "CODE: %d TYPE: %s ACK: %d" % (self.__class__.code, self.__class__.type, self.__class__.ack)
def __repr__(self):
return "%s()" % self.__class__.__name__
class BaseControlMessage(ControlMessage):
@autoNS
def __init__(self, *args, **kwargs):
pass
def trigger(self,message):
pass
def action(self,message,peer):
pass
|
"""
Detect and recognize faces using dlib served by zerorpc
Should be called from a zerorpc client with ZoneMinder
alarm image metadata from zm-s3-upload.js.
This program should be run in the 'cv' virtual python environment, i.e.,
$ /home/lindo/.virtualenvs/cv/bin/python ./face_detect_server.py
This is part of the smart-zoneminder project.
See https://github.com/goruck/smart-zoneminder
Copyright (c) 2018, 2019 Lindo St. Angel
"""
import numpy as np
import cv2
import face_recognition
import json
import zerorpc
import logging
import pickle
import gevent
import signal
logging.basicConfig(level=logging.ERROR)
# Get configuration.
with open('./config.json') as fp:
config = json.load(fp)['faceDetServer']
# Heartbeat interval for zerorpc client in ms.
# This must match the zerorpc client config.
ZRPC_HEARTBEAT = config['zerorpcHeartBeat']
# IPC (or TCP) socket for zerorpc.
# This must match the zerorpc client config.
ZRPC_PIPE = config['zerorpcPipe']
# Settings for face classifier.
# The model and label encoder need to be generated by 'train.py' first.
MODEL_PATH = config['modelPath']
LABEL_PATH = config['labelPath']
MIN_PROBA = config['minProba']
# Images with Variance of Laplacian less than this are declared blurry.
FOCUS_MEASURE_THRESHOLD = config['focusMeasureThreshold']
# Faces with width or height less than this are too small for recognition.
# In pixels.
MIN_FACE = config['minFace']
# Factor to scale image when looking for faces.
# May increase the probability of finding a face in the image.
# Use caution setting the value > 1 since you may run out of memory.
# See https://github.com/ageitgey/face_recognition/wiki/Face-Recognition-Accuracy-Problems.
NUMBER_OF_TIMES_TO_UPSAMPLE = config['numFaceImgUpsample']
# Face detection model to use. Can be either 'cnn' or 'hog'.
FACE_DET_MODEL = config['faceDetModel']
# How many times to re-sample when calculating face encoding.
NUM_JITTERS = config['numJitters']
# Load face recognition model along with the label encoder.
with open(MODEL_PATH, 'rb') as fp:
recognizer = pickle.load(fp)
with open(LABEL_PATH, 'rb') as fp:
le = pickle.load(fp)
def face_classifier(encoding, min_proba):
# perform classification to recognize the face based on 128D encoding
# note: reshape(1,-1) converts 1D array into 2D
preds = recognizer.predict_proba(encoding.reshape(1, -1))[0]
j = np.argmax(preds)
proba = preds[j]
logging.debug('face classifier proba {} name {}'.format(proba, le.classes_[j]))
if proba >= min_proba:
name = le.classes_[j]
logging.debug('face classifier says this is {}'.format(name))
else:
name = None # prob too low to recog face
logging.debug('face classifier cannot recognize face')
return name, proba
def variance_of_laplacian(image):
# compute the Laplacian of the image and then return the focus
# measure, which is simply the variance of the Laplacian
return cv2.Laplacian(image, cv2.CV_64F).var()
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
# ref: https://stackoverflow.com/questions/44650888/resize-an-image-without-distortion-opencv
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation=inter)
# return the resized image
return resized
# Define zerorpc class.
class DetectRPC(object):
def detect_faces(self, test_image_paths):
# List that will hold all images with any face detection information.
objects_detected_faces = []
# Loop over the images paths provided.
for obj in test_image_paths:
logging.debug('**********Find Face(s) for {}'.format(obj['image']))
for label in obj['labels']:
# If the object detected is a person then try to identify face.
if label['name'] == 'person':
# Read image from disk.
img = cv2.imread(obj['image'])
if img is None:
# Bad image was read.
logging.error('Bad image was read.')
label['face'] = None
continue
# First bound the roi using the coord info passed in.
# The roi is area around person(s) detected in image.
                    # (x1, y2) hold the top-left roi coordinates (xmin, ymin).
                    # (x2, y1) hold the bottom-right roi coordinates (xmax, ymax).
y2 = int(label['box']['ymin'])
x1 = int(label['box']['xmin'])
y1 = int(label['box']['ymax'])
x2 = int(label['box']['xmax'])
roi = img[y2:y1, x1:x2]
#cv2.imwrite('./roi.jpg', roi)
if roi.size == 0:
# Bad object roi...move on to next image.
logging.error('Bad object roi.')
label['face'] = None
continue
# Detect the (x, y)-coordinates of the bounding boxes corresponding
# to each face in the input image.
rgb = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
#cv2.imwrite('./rgb.jpg', rgb)
detection = face_recognition.face_locations(rgb, NUMBER_OF_TIMES_TO_UPSAMPLE,
FACE_DET_MODEL)
if not detection:
# No face detected...move on to next image.
logging.debug('No face detected.')
label['face'] = None
continue
# Carve out face roi and check to see if large enough for recognition.
face_top, face_right, face_bottom, face_left = detection[0]
#cv2.rectangle(rgb, (face_left, face_top), (face_right, face_bottom), (255,0,0), 2)
#cv2.imwrite('./face_rgb.jpg', rgb)
face_roi = roi[face_top:face_bottom, face_left:face_right]
#cv2.imwrite('./face_roi.jpg', face_roi)
(f_h, f_w) = face_roi.shape[:2]
# If face width or height are not sufficiently large then skip.
if f_h < MIN_FACE or f_w < MIN_FACE:
logging.debug('Face too small to recognize.')
label['face'] = None
continue
# Compute the focus measure of the face
# using the Variance of Laplacian method.
# See https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
gray = cv2.cvtColor(face_roi, cv2.COLOR_BGR2GRAY)
fm = variance_of_laplacian(gray)
# If fm below a threshold then face probably isn't clear enough
# for face recognition to work, so skip it.
if fm < FOCUS_MEASURE_THRESHOLD:
logging.debug('Face too blurry to recognize.')
label['face'] = None
continue
# Find the 128-dimension face encoding for face in image.
# face_locations in css order (top, right, bottom, left)
face_location = (face_top, face_right, face_bottom, face_left)
encoding = face_recognition.face_encodings(rgb,
known_face_locations=[face_location], num_jitters=NUM_JITTERS)[0]
logging.debug('face encoding {}'.format(encoding))
# Perform classification on the encodings to recognize the face.
(name, proba) = face_classifier(encoding, MIN_PROBA)
# Add face name to label metadata.
label['face'] = name
# Add face confidence to label metadata.
# (First convert NumPy value to native Python type for json serialization.)
label['faceProba'] = proba.item()
# Add processed image to output list.
objects_detected_faces.append(obj)
# Convert json to string and return data.
return(json.dumps(objects_detected_faces))
s = zerorpc.Server(DetectRPC(), heartbeat=ZRPC_HEARTBEAT)
s.bind(ZRPC_PIPE)
# Register graceful ways to stop server.
gevent.signal(signal.SIGINT, s.stop) # Ctrl-C
gevent.signal(signal.SIGTERM, s.stop) # termination
# Start server.
# This will block until a gevent signal is caught
s.run()
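# A minimal sketch (assumption, not from this repo) of how a zerorpc client such as
# zm-s3-upload.js would call this server; the image path and label metadata are made up:
#
#   import zerorpc
#   client = zerorpc.Client(heartbeat=ZRPC_HEARTBEAT)
#   client.connect(ZRPC_PIPE)
#   alarms = [{'image': '/tmp/alarm.jpg',
#              'labels': [{'name': 'person',
#                          'box': {'ymin': 10, 'xmin': 10, 'ymax': 200, 'xmax': 120}}]}]
#   print(client.detect_faces(alarms))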
|
"""Voice Assistant base NL processor."""
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Optional
from voiceassistant.utils.datastruct import DottedDict
@dataclass
class NlpResult:
"""NLP Result class.
Should be a return type of all NL processors.
"""
intent: str
entities: DottedDict
is_complete: bool
class BaseNLP(ABC):
"""Base Natural Language Processor."""
@property
@abstractmethod
def name(self) -> str:
"""Return NL Processor name."""
raise NotImplementedError
@abstractmethod
def process(self, transcript: str) -> Optional[NlpResult]:
"""Process natural language `transcript`."""
raise NotImplementedError
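# A minimal sketch of a concrete processor (hypothetical, for illustration only):
#
#   class KeywordNLP(BaseNLP):
#       @property
#       def name(self) -> str:
#           return "keyword"
#
#       def process(self, transcript: str) -> Optional[NlpResult]:
#           if "light" in transcript:
#               return NlpResult(intent="lights_on", entities=DottedDict(), is_complete=True)
#           return None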
|
import sys
import click
from subprocess import check_call
DEFAULT_PYTHON_VERSION = "3.6"
PYTHON_VERSIONS = ["3.6"]
ADDITIONAL_CORE_DEPS = [
'pyface>=7.0.0-3',
'traitsui>=7.0.0-2',
'requests==2.18.4-1'
]
ADDITIONAL_PLATFORM_CORE_DEPS = {
'rh6-x86_64': [
'lxml==3.7.3-2'
],
'osx-x86_64': [
'lxml==3.7.3-1'
]
}
@click.group()
def cli():
pass
python_version_option = click.option(
'--python-version',
default=DEFAULT_PYTHON_VERSION,
type=click.Choice(PYTHON_VERSIONS),
show_default=True,
help="Python version for the environment")
@cli.command(name="install", help="Installs the code and its dependencies")
@python_version_option
def install(python_version):
env_name = get_env_name(python_version)
check_call(["edm", "install", "-e", env_name, "--yes"]
+ ADDITIONAL_CORE_DEPS
+ ADDITIONAL_PLATFORM_CORE_DEPS[current_platform()])
check_call([
"edm", "run", "-e", env_name, "--",
"pip", "install", "-e", "."])
@cli.command(name="install-dummy-granta", help="Installs a dummy version "
"of the GRANTA library")
@python_version_option
def install_dummy_granta(python_version):
env_name = get_env_name(python_version)
check_call([
"edm", "run", "-e", env_name, "--",
"pip", "install", "dummy/granta"])
@cli.command(help="Run the tests")
@python_version_option
def test(python_version):
env_name = get_env_name(python_version)
check_call([
"edm", "run", "-e", env_name, "--", "python", "-m", "unittest",
"discover"
])
@cli.command(help="Run flake")
@python_version_option
def flake8(python_version):
env_name = get_env_name(python_version)
check_call(["edm", "run", "-e", env_name, "--", "flake8", "."])
@cli.command(help="Runs the coverage")
@python_version_option
def coverage(python_version):
env_name = get_env_name(python_version)
check_call(["edm", "run", "-e", env_name, "--",
"coverage", "run", "-m", "unittest", "discover"])
@cli.command(help="Builds the documentation")
@python_version_option
def docs(python_version):
env_name = get_env_name(python_version)
check_call(["edm", "run", "-e", env_name, "--", "make", "html"], cwd="doc")
def get_env_name(python_version):
return "force-py{}".format(remove_dot(python_version))
def remove_dot(python_version):
return "".join(python_version.split('.'))
def current_platform():
platform = sys.platform
if platform.startswith("linux"):
return "rh6-x86_64"
elif platform == "darwin":
return "osx-x86_64"
else:
raise RuntimeError("platform {!r} not supported".format(platform))
if __name__ == "__main__":
cli()
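# Typical invocations (assuming this file is saved as ci.py):
#   python ci.py install
#   python ci.py test --python-version 3.6
#   python ci.py flake8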
|
def parse_range(srange):
left, right = [int(e) for e in srange.split('=')[-1].split('..')]
return left, right
def parse_line(line):
line2 = line.strip()
if line2.startswith('on '):
line3 = line2[3:]
light_on = True
elif line2.startswith('off '):
line3 = line2[4:]
light_on = False
else:
raise ValueError('Unknown switch value')
xr, yr, zr = [parse_range(srange) for srange in line3.split(',')]
return light_on, xr, yr, zr
def intersect_interval(I1, I2):
intervals = sorted([I1, I2])
a1, b1 = intervals[0]
a2, b2 = intervals[1]
return b1 >= a2
def intersect_cubes(ci, cj):
_, xir, yir, zir = ci
xi1, xi2 = xir
yi1, yi2 = yir
zi1, zi2 = zir
_, xjr, yjr, zjr = cj
xj1, xj2 = xjr
yj1, yj2 = yjr
zj1, zj2 = zjr
inter_x = intersect_interval((xi1, xi2), (xj1, xj2))
inter_y = intersect_interval((yi1, yi2), (yj1, yj2))
inter_z = intersect_interval((zi1, zi2), (zj1, zj2))
return all([inter_x, inter_y, inter_z])
def get_gross_lights(cube):
_, xr, yr, zr = cube
x1, x2 = xr
xl = x2 - x1 + 1
y1, y2 = yr
yl = y2 - y1 + 1
z1, z2 = zr
zl = z2 - z1 + 1
return xl * yl * zl
def intersected_interval(I1, I2):
intervals = sorted([I1, I2])
a1, b1 = intervals[0]
a2, b2 = intervals[1]
return a2, min(b1, b2)
def intersected_cube(ci, cj):
if not intersect_cubes(ci, cj):
return False, 0
else:
_, xir, yir, zir = ci
xi1, xi2 = xir
yi1, yi2 = yir
zi1, zi2 = zir
_, xjr, yjr, zjr = cj
xj1, xj2 = xjr
yj1, yj2 = yjr
zj1, zj2 = zjr
inter_x = intersected_interval((xi1, xi2), (xj1, xj2))
inter_y = intersected_interval((yi1, yi2), (yj1, yj2))
inter_z = intersected_interval((zi1, zi2), (zj1, zj2))
res_c = True, inter_x, inter_y, inter_z
return True, res_c
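# get_net_lights implements inclusion-exclusion over the cuboids: a cube's net
# contribution is its full volume minus whatever is re-counted by cubes that come
# later in the list, computed recursively on the pairwise intersections.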
def get_net_lights(i, cubes):
current_cube = cubes[i]
future_cubes = cubes[i + 1:]
inter_cubes_results = [intersected_cube(current_cube, future_cube)
for future_cube in future_cubes]
inter_cubes = [c for b, c in inter_cubes_results if b]
inter_cubes_net_lights = [get_net_lights(j, inter_cubes) for j in range(len(inter_cubes))]
sum_icnl = sum(inter_cubes_net_lights)
result = get_gross_lights(current_cube) - sum_icnl
return result
def usecase2(filename):
with open(filename, 'r') as input_file:
cubes = [parse_line(line) for line in input_file]
C = len(cubes)
all_net_lights = [get_net_lights(i, cubes) for i in range(C) if cubes[i][0]]
result = sum(all_net_lights)
print(f'result = {result}')
def main():
usecase2('sample-input2.txt')
usecase2('input.txt')
if __name__ == '__main__':
main()
|
from os import listdir
from pickle import dump
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
import tensorflow as tf
#import theano
import os
#os.environ["CUDA_VISIBLE_DEVICES"]="0"
#from tensorflow.python.client import device_lib
#print (device_lib.list_local_devices())
#import tensorflow as tf
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.5
#session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
#print(session)
#%matplotlib inline
# extract features from each photo in the directory
def extract_features(directory):
# load the model
model = VGG16()
# re-structure the model
model.layers.pop()
model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
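	# with the final classification layer removed, the model outputs the
	# 4096-dimensional fc2 activations that are stored as the photo features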
# summarize
print(model.summary())
# extract features from each photo
features = dict()
for name in listdir(directory):
# load an image from file
filename = directory + '/' + name
image = load_img(filename, target_size=(224, 224))
# convert the image pixels to a numpy array
image = img_to_array(image)
# reshape data for the model
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
# prepare the image for the VGG model
image = preprocess_input(image)
# get features
feature = model.predict(image, verbose=0)
# get image id
image_id = name.split('.')[0]
# store feature
features[image_id] = feature
print('>%s' % name)
return features
# extract features from all images
#directory = 'D:/my_tutorials/fewimages model/Images Dataset'
directory = 'D:/my_tutorials/New Created Database/Dabasets Self Captioning'
features = extract_features(directory)
print('Extracted Features: %d' % len(features))
# save to file
dump(features, open('features1.pkl', 'wb'))
import string
# load doc into memory
def load_doc(filename):
# open the file as read only
file = open(filename, 'r')
# read all text
text = file.read()
# close the file
file.close()
return text
# extract descriptions for images
def load_descriptions(doc):
mapping = dict()
# process lines
for line in doc.split('\n'):
# split line by white space
tokens = line.split()
if len(line) < 2:
continue
# take the first token as the image id, the rest as the description
image_id, image_desc = tokens[0], tokens[1:]
# remove filename from image id
image_id = image_id.split('.')[0]
# convert description tokens back to string
image_desc = ' '.join(image_desc)
# create the list if needed
if image_id not in mapping:
mapping[image_id] = list()
# store description
mapping[image_id].append(image_desc)
return mapping
def clean_descriptions(descriptions):
# prepare translation table for removing punctuation
table = str.maketrans('', '', string.punctuation)
for key, desc_list in descriptions.items():
for i in range(len(desc_list)):
desc = desc_list[i]
# tokenize
desc = desc.split()
# convert to lower case
desc = [word.lower() for word in desc]
# remove punctuation from each token
desc = [w.translate(table) for w in desc]
# remove hanging 's' and 'a'
desc = [word for word in desc if len(word)>1]
# remove tokens with numbers in them
desc = [word for word in desc if word.isalpha()]
# store as string
desc_list[i] = ' '.join(desc)
# convert the loaded descriptions into a vocabulary of words
def to_vocabulary(descriptions):
# build a list of all description strings
all_desc = set()
for key in descriptions.keys():
[all_desc.update(d.split()) for d in descriptions[key]]
return all_desc
# save descriptions to file, one per line
def save_descriptions(descriptions, filename):
lines = list()
for key, desc_list in descriptions.items():
for desc in desc_list:
lines.append(key + ' ' + desc)
data = '\n'.join(lines)
file = open(filename, 'w')
file.write(data)
file.close()
#filename = 'D:/my_tutorials/fewimages model/Tokens/Document1.txt'
filename = 'D:/my_tutorials/New Created Database/tokens.txt'
# load descriptions
doc = load_doc(filename)
# parse descriptions
descriptions = load_descriptions(doc)
print('Loaded: %d ' % len(descriptions))
# clean descriptions
clean_descriptions(descriptions)
# summarize vocabulary
vocabulary = to_vocabulary(descriptions)
print('Vocabulary Size: %d' % len(vocabulary))
# save to file
save_descriptions(descriptions, 'descriptions.txt')
from pickle import load
# load doc into memory
def load_doc(filename):
# open the file as read only
file = open(filename, 'r')
# read all text
text = file.read()
# close the file
file.close()
return text
# load a pre-defined list of photo identifiers
def load_set(filename):
doc = load_doc(filename)
dataset = list()
# process line by line
for line in doc.split('\n'):
# skip empty lines
if len(line) < 1:
continue
# get the image identifier
identifier = line.split('.')[0]
dataset.append(identifier)
return set(dataset)
# load clean descriptions into memory
def load_clean_descriptions(filename, dataset):
# load document
doc = load_doc(filename)
descriptions = dict()
for line in doc.split('\n'):
# split line by white space
tokens = line.split()
# split id from description
image_id, image_desc = tokens[0], tokens[1:]
# skip images not in the set
if image_id in dataset:
# create list
if image_id not in descriptions:
descriptions[image_id] = list()
# wrap description in tokens
desc = 'startseq ' + ' '.join(image_desc) + ' endseq'
# store
descriptions[image_id].append(desc)
return descriptions
# load photo features
def load_photo_features(filename, dataset):
# load all features
all_features = load(open(filename, 'rb'))
# filter features
features = {k: all_features[k] for k in dataset}
return features
# load training dataset
#filename = 'D:/my_tutorials/fewimages model/Tokens/Document2.txt'
filename = 'D:/my_tutorials/New Created Database/train.txt'
train = load_set(filename)
print('Dataset: %d' % len(train))
# descriptions
train_descriptions = load_clean_descriptions('descriptions.txt', train)
print('Descriptions: train=%d' % len(train_descriptions))
# photo features
train_features = load_photo_features('features1.pkl', train)
print('Photos: train=%d' % len(train_features))
from keras.preprocessing.text import Tokenizer
# fit a tokenizer given caption descriptions
def create_tokenizer(descriptions):
lines = to_lines(descriptions)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
return tokenizer
# calculate the length of the description with the most words
def max_length(descriptions):
lines = to_lines(descriptions)
return max(len(d.split()) for d in lines)
def create_sequences(tokenizer, max_length, descriptions, photos):
X1, X2, y = list(), list(), list()
# walk through each image identifier
for key, desc_list in descriptions.items():
# walk through each description for the image
for desc in desc_list:
# encode the sequence
seq = tokenizer.texts_to_sequences([desc])[0]
# split one sequence into multiple X,y pairs
for i in range(1, len(seq)):
# split into input and output pair
in_seq, out_seq = seq[:i], seq[i]
# pad input sequence
in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
# encode output sequence
out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
# store
X1.append(photos[key][0])
X2.append(in_seq)
y.append(out_seq)
return array(X1), array(X2), array(y)
# define the captioning model
def define_model(vocab_size, max_length):
# feature extractor model
inputs1 = Input(shape=(4096,1))
fe1 = Dropout(0.5)(inputs1)
fe2 = Dense(256, activation='relu')(fe1)
# sequence model
inputs2 = Input(shape=(max_length,))
se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
se2 = Dropout(0.5)(se1)
se3 = LSTM(256)(se2)
# decoder model
decoder1 = add([fe2, se3])
decoder2 = Dense(256, activation='relu')(decoder1)
outputs = Dense(vocab_size, activation='softmax')(decoder2)
# tie it together [image, seq] [word]
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# summarize model
print(model.summary())
plot_model(model, to_file='model.png', show_shapes=True)
return model
## Start###################################################################### Above 16GB MEMORY
from numpy import array
from pickle import load
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Embedding
from keras.layers import Dropout
from keras.layers.merge import add
from keras.callbacks import ModelCheckpoint
# load doc into memory
def load_doc(filename):
# open the file as read only
file = open(filename, 'r')
# read all text
text = file.read()
# close the file
file.close()
return text
# load a pre-defined list of photo identifiers
def load_set(filename):
doc = load_doc(filename)
dataset = list()
# process line by line
for line in doc.split('\n'):
# skip empty lines
if len(line) < 1:
continue
# get the image identifier
identifier = line.split('.')[0]
dataset.append(identifier)
return set(dataset)
# load clean descriptions into memory
def load_clean_descriptions(filename, dataset):
# load document
doc = load_doc(filename)
descriptions = dict()
for line in doc.split('\n'):
# split line by white space
tokens = line.split()
# split id from description
image_id, image_desc = tokens[0], tokens[1:]
# skip images not in the set
if image_id in dataset:
# create list
if image_id not in descriptions:
descriptions[image_id] = list()
# wrap description in tokens
desc = 'startseq ' + ' '.join(image_desc) + ' endseq'
# store
descriptions[image_id].append(desc)
return descriptions
# load photo features
def load_photo_features(filename, dataset):
# load all features
all_features = load(open(filename, 'rb'))
# filter features
features = {k: all_features[k] for k in dataset}
return features
# convert a dictionary of clean descriptions to a list of descriptions
def to_lines(descriptions):
all_desc = list()
for key in descriptions.keys():
[all_desc.append(d) for d in descriptions[key]]
return all_desc
# fit a tokenizer given caption descriptions
def create_tokenizer(descriptions):
lines = to_lines(descriptions)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
return tokenizer
# calculate the length of the description with the most words
def max_length(descriptions):
lines = to_lines(descriptions)
return max(len(d.split()) for d in lines)
# create sequences of images, input sequences and output words for an image
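# e.g. the caption "startseq dog runs endseq" produces the training pairs
#   (photo, [startseq]) -> dog, (photo, [startseq, dog]) -> runs, ...
# with each input sequence left-padded to max_length and the output word one-hot encoded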
def create_sequences(tokenizer, max_length, descriptions, photos):
X1, X2, y = list(), list(), list()
# walk through each image identifier
for key, desc_list in descriptions.items():
# walk through each description for the image
for desc in desc_list:
# encode the sequence
seq = tokenizer.texts_to_sequences([desc])[0]
# split one sequence into multiple X,y pairs
for i in range(1, len(seq)):
# split into input and output pair
in_seq, out_seq = seq[:i], seq[i]
# pad input sequence
in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
# encode output sequence
out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
# store
X1.append(photos[key][0])
X2.append(in_seq)
y.append(out_seq)
return array(X1), array(X2), array(y)
# define the captioning model
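# merge-style architecture: the 4096-d photo feature and the LSTM encoding of the
# partial caption are each projected to 256 units, added, and decoded into a
# softmax over the vocabulary to predict the next word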
def define_model(vocab_size, max_length):
# feature extractor model
inputs1 = Input(shape=(4096,))
fe1 = Dropout(0.5)(inputs1)
fe2 = Dense(256, activation='relu')(fe1)
# sequence model
inputs2 = Input(shape=(max_length,))
se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
se2 = Dropout(0.5)(se1)
se3 = LSTM(256)(se2)
# decoder model
decoder1 = add([fe2, se3])
decoder2 = Dense(256, activation='relu')(decoder1)
outputs = Dense(vocab_size, activation='softmax')(decoder2)
# tie it together [image, seq] [word]
model = Model(inputs=[inputs1, inputs2], outputs=outputs)
	model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# summarize model
print(model.summary())
plot_model(model, to_file='model.png', show_shapes=True)
return model
# train dataset
# load training dataset (6K)
filename = 'D:/my_tutorials/New Created Database/train.txt'
train = load_set(filename)
print('Dataset: %d' % len(train))
# descriptions
train_descriptions = load_clean_descriptions('descriptions.txt', train)
print('Descriptions: train=%d' % len(train_descriptions))
# photo features
train_features = load_photo_features('features1.pkl', train)
print('Photos: train=%d' % len(train_features))
# prepare tokenizer
tokenizer = create_tokenizer(train_descriptions)
vocab_size = len(tokenizer.word_index) + 1
print('Vocabulary Size: %d' % vocab_size)
# determine the maximum sequence length
max_length = max_length(train_descriptions)
print('Description Length: %d' % max_length)
# prepare sequences
X1train, X2train, ytrain = create_sequences(tokenizer, max_length, train_descriptions, train_features)
# dev dataset
# load test set
filename = 'D:/my_tutorials/New Created Database/dev.txt'
test = load_set(filename)
print('Dataset: %d' % len(test))
# descriptions
test_descriptions = load_clean_descriptions('descriptions.txt', test)
print('Descriptions: test=%d' % len(test_descriptions))
# photo features
test_features = load_photo_features('features1.pkl', test)
print('Photos: test=%d' % len(test_features))
# prepare sequences
X1test, X2test, ytest = create_sequences(tokenizer, max_length, test_descriptions, test_features)
# fit model
# define the model
model = define_model(vocab_size, max_length)
# define checkpoint callback
filepath = 'D:/my_tutorials/New Created Database/'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
# fit model
# keep the training history so the plotting section at the end of the file can use it
history = model.fit([X1train, X2train], ytrain, epochs=1, verbose=2, batch_size=5)
model.save('modelz'+ '.h5')
from numpy import argmax
from pickle import load
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from nltk.translate.bleu_score import corpus_bleu
# load doc into memory
def load_doc(filename):
# open the file as read only
file = open(filename, 'r')
# read all text
text = file.read()
# close the file
file.close()
return text
# load a pre-defined list of photo identifiers
def load_set(filename):
doc = load_doc(filename)
dataset = list()
# process line by line
for line in doc.split('\n'):
# skip empty lines
if len(line) < 1:
continue
# get the image identifier
identifier = line.split('.')[0]
dataset.append(identifier)
return set(dataset)
# load clean descriptions into memory
def load_clean_descriptions(filename, dataset):
# load document
doc = load_doc(filename)
descriptions = dict()
for line in doc.split('\n'):
# split line by white space
tokens = line.split()
# split id from description
image_id, image_desc = tokens[0], tokens[1:]
# skip images not in the set
if image_id in dataset:
# create list
if image_id not in descriptions:
descriptions[image_id] = list()
# wrap description in tokens
desc = 'startseq ' + ' '.join(image_desc) + ' endseq'
# store
descriptions[image_id].append(desc)
return descriptions
# load photo features
def load_photo_features(filename, dataset):
# load all features
all_features = load(open(filename, 'rb'))
# filter features
features = {k: all_features[k] for k in dataset}
return features
# convert a dictionary of clean descriptions to a list of descriptions
def to_lines(descriptions):
all_desc = list()
for key in descriptions.keys():
[all_desc.append(d) for d in descriptions[key]]
return all_desc
# fit a tokenizer given caption descriptions
def create_tokenizer(descriptions):
lines = to_lines(descriptions)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
return tokenizer
# calculate the length of the description with the most words
def max_length(descriptions):
lines = to_lines(descriptions)
return max(len(d.split()) for d in lines)
# map an integer to a word
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
# seed the generation process
in_text = 'startseq'
# iterate over the whole length of the sequence
for i in range(max_length):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0]
# pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# predict next word
yhat = model.predict([photo,sequence], verbose=0)
# convert probability to integer
yhat = argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# stop if we predict the end of the sequence
if word == 'endseq':
break
return in_text
# evaluate the skill of the model
def evaluate_model(model, descriptions, photos, tokenizer, max_length):
actual, predicted = list(), list()
# step over the whole set
for key, desc_list in descriptions.items():
# generate description
yhat = generate_desc(model, tokenizer, photos[key], max_length)
# store actual and predicted
references = [d.split() for d in desc_list]
actual.append(references)
predicted.append(yhat.split())
# calculate BLEU score
print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))
# prepare tokenizer on train set
# load training dataset (6K)
filename ='D:/my_tutorials/New Created Database/train.txt'
train = load_set(filename)
print('Dataset: %d' % len(train))
# descriptions
train_descriptions = load_clean_descriptions('descriptions.txt', train)
print('Descriptions: train=%d' % len(train_descriptions))
# prepare tokenizer
tokenizer = create_tokenizer(train_descriptions)
vocab_size = len(tokenizer.word_index) + 1
print('Vocabulary Size: %d' % vocab_size)
# determine the maximum sequence length
max_length = max_length(train_descriptions)
print('Description Length: %d' % max_length)
# prepare test set
# load test set
filename = 'D:/my_tutorials/New Created Database/test.txt'
test = load_set(filename)
print('Dataset: %d' % len(test))
# descriptions
test_descriptions = load_clean_descriptions('descriptions.txt', test)
print('Descriptions: test=%d' % len(test_descriptions))
# photo features
test_features = load_photo_features('features1.pkl', test)
print('Photos: test=%d' % len(test_features))
# load the model
filename = 'modelx.h5'
model = load_model(filename)
# evaluate model
evaluate_model(model, test_descriptions, test_features, tokenizer, max_length)
#####################################################################
from keras.preprocessing.text import Tokenizer
from pickle import dump
# load doc into memory
def load_doc(filename):
# open the file as read only
file = open(filename, 'r')
# read all text
text = file.read()
# close the file
file.close()
return text
# load a pre-defined list of photo identifiers
def load_set(filename):
doc = load_doc(filename)
dataset = list()
# process line by line
for line in doc.split('\n'):
# skip empty lines
if len(line) < 1:
continue
# get the image identifier
identifier = line.split('.')[0]
dataset.append(identifier)
return set(dataset)
# load clean descriptions into memory
def load_clean_descriptions(filename, dataset):
# load document
doc = load_doc(filename)
descriptions = dict()
for line in doc.split('\n'):
# split line by white space
tokens = line.split()
# split id from description
image_id, image_desc = tokens[0], tokens[1:]
# skip images not in the set
if image_id in dataset:
# create list
if image_id not in descriptions:
descriptions[image_id] = list()
# wrap description in tokens
desc = 'startseq ' + ' '.join(image_desc) + ' endseq'
# store
descriptions[image_id].append(desc)
return descriptions
# convert a dictionary of clean descriptions to a list of descriptions
def to_lines(descriptions):
all_desc = list()
for key in descriptions.keys():
[all_desc.append(d) for d in descriptions[key]]
return all_desc
# fit a tokenizer given caption descriptions
def create_tokenizer(descriptions):
lines = to_lines(descriptions)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
return tokenizer
# load training dataset (6K)
#filename = 'D:/my_tutorials/New Created Database/train.txt'
filename='D:/my_tutorials/tf_object_detection_api/Flickr8k_text/Flickr_8k.trainImages.txt'
train = load_set(filename)
print('Dataset: %d' % len(train))
# descriptions
train_descriptions = load_clean_descriptions('descriptions.txt', train)
print('Descriptions: train=%d' % len(train_descriptions))
# prepare tokenizer
tokenizer = create_tokenizer(train_descriptions)
# save the tokenizer
dump(tokenizer, open('tokenizer.pkl', 'wb'))
###################################################################################
from keras.models import Sequential
from keras.layers import Dense, Activation
from pickle import load
from numpy import argmax
from keras.preprocessing.sequence import pad_sequences
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.vgg16 import preprocess_input
from keras.models import Model
from keras.models import load_model
import os
# extract features from each photo in the directory
def extract_features(filename):
# load the model
model = VGG16()
# re-structure the model
model.layers.pop()
model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
# load the photo
image = load_img(filename, target_size=(224, 224))
# convert the image pixels to a numpy array
image = img_to_array(image)
# reshape data for the model
image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
# prepare the image for the VGG model
image = preprocess_input(image)
# get features
feature = model.predict(image, verbose=0)
return feature
model = load_model('modelx.h5')
print(model.summary())
photo = extract_features('4.jpg')
print(photo.shape)
# generate description
# map an integer to a word
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
# seed the generation process
in_text = 'startseq'
# iterate over the whole length of the sequence
for i in range(max_length):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0]
# pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# predict next word
yhat = model.predict([photo,sequence], verbose=0)
# convert probability to integer
yhat = argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# stop if we predict the end of the sequence
if word == 'endseq':
break
return in_text
# load the tokenizer
tokenizer = load(open('tokenizer.pkl', 'rb'))
# pre-define the max sequence length (from training)
max_length = 34
# load and prepare the photograph
# generate description
description = generate_desc(model, tokenizer, photo, max_length)
print(description)
############################################################################################################################################
#from numpy import array
#from pickle import load
#from keras.preprocessing.text import Tokenizer
#from keras.preprocessing.sequence import pad_sequences
#from keras.utils import to_categorical
#from keras.utils import plot_model
#from keras.models import Model
#from keras.layers import Input
#from keras.layers import Dense
#from keras.layers import LSTM
#from keras.layers import Embedding
#from keras.layers import Dropout
#from keras.layers.merge import add
#from keras.callbacks import ModelCheckpoint
#
## load doc into memory
#def load_doc(filename):
# # open the file as read only
# file = open(filename, 'r')
# # read all text
# text = file.read()
# # close the file
# file.close()
# return text
#
## load a pre-defined list of photo identifiers
#def load_set(filename):
# doc = load_doc(filename)
# dataset = list()
# # process line by line
# for line in doc.split('\n'):
# # skip empty lines
# if len(line) < 1:
# continue
# # get the image identifier
# identifier = line.split('.')[0]
# dataset.append(identifier)
# return set(dataset)
#
## load clean descriptions into memory
#def load_clean_descriptions(filename, dataset):
# # load document
# doc = load_doc(filename)
# descriptions = dict()
# for line in doc.split('\n'):
# # split line by white space
# tokens = line.split()
# # split id from description
# image_id, image_desc = tokens[0], tokens[1:]
# # skip images not in the set
# if image_id in dataset:
# # create list
# if image_id not in descriptions:
# descriptions[image_id] = list()
# # wrap description in tokens
# desc = 'startseq ' + ' '.join(image_desc) + ' endseq'
# # store
# descriptions[image_id].append(desc)
# return descriptions
#
## load photo features
#def load_photo_features(filename, dataset):
# # load all features
# all_features = load(open(filename, 'rb'))
# # filter features
# features = {k: all_features[k] for k in dataset}
# return features
#
## convert a dictionary of clean descriptions to a list of descriptions
#def to_lines(descriptions):
# all_desc = list()
# for key in descriptions.keys():
# [all_desc.append(d) for d in descriptions[key]]
# return all_desc
#
## fit a tokenizer given caption descriptions
#def create_tokenizer(descriptions):
# lines = to_lines(descriptions)
# tokenizer = Tokenizer()
# tokenizer.fit_on_texts(lines)
# return tokenizer
#
## calculate the length of the description with the most words
#def max_length(descriptions):
# lines = to_lines(descriptions)
# return max(len(d.split()) for d in lines)
#
## create sequences of images, input sequences and output words for an image
#def create_sequences(tokenizer, max_length, desc_list, photo):
# X1, X2, y = list(), list(), list()
# # walk through each description for the image
# for desc in desc_list:
# # encode the sequence
# seq = tokenizer.texts_to_sequences([desc])[0]
# # split one sequence into multiple X,y pairs
# for i in range(1, len(seq)):
# # split into input and output pair
# in_seq, out_seq = seq[:i], seq[i]
# # pad input sequence
# in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
# # encode output sequence
# out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
# # store
# X1.append(photo)
# X2.append(in_seq)
# y.append(out_seq)
# return array(X1), array(X2), array(y)
#
## define the captioning model
#def define_model(vocab_size, max_length):
# # feature extractor model
# inputs1 = Input(shape=(4096,))
# fe1 = Dropout(0.5)(inputs1)
# fe2 = Dense(256, activation='relu')(fe1)
# # sequence model
# inputs2 = Input(shape=(max_length,))
# se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
# se2 = Dropout(0.5)(se1)
# se3 = LSTM(256)(se2)
# # decoder model
# decoder1 = add([fe2, se3])
# decoder2 = Dense(256, activation='relu')(decoder1)
# outputs = Dense(vocab_size, activation='softmax')(decoder2)
# # tie it together [image, seq] [word]
# model = Model(inputs=[inputs1, inputs2], outputs=outputs)
# # compile model
# model.compile(loss='categorical_crossentropy', optimizer='adam')
# # summarize model
# model.summary()
# plot_model(model, to_file='model.png', show_shapes=True)
# return model
#
## data generator, intended to be used in a call to model.fit_generator()
#def data_generator(descriptions, photos, tokenizer, max_length):
# # loop for ever over images
# while 1:
# for key, desc_list in descriptions.items():
# # retrieve the photo feature
# photo = photos[key][0]
# in_img, in_seq, out_word = create_sequences(tokenizer, max_length, desc_list, photo)
# yield [[in_img, in_seq], out_word]
#
## load training dataset (6K)
#filename = 'D:/my_tutorials/New Created Database/train.txt'
#train = load_set(filename)
#print('Dataset: %d' % len(train))
## descriptions
#train_descriptions = load_clean_descriptions('descriptions.txt', train)
#print('Descriptions: train=%d' % len(train_descriptions))
## photo features
#train_features = load_photo_features('features1.pkl', train)
#print('Photos: train=%d' % len(train_features))
## prepare tokenizer
#tokenizer = create_tokenizer(train_descriptions)
#vocab_size = len(tokenizer.word_index) + 1
#print('Vocabulary Size: %d' % vocab_size)
## determine the maximum sequence length
#max_length = max_length(train_descriptions)
#print('Description Length: %d' % max_length)
#
## define the model
#model = define_model(vocab_size, max_length)
## train the model, run epochs manually and save after each epoch
#epochs = 2
#steps = len(train_descriptions)
#for i in range(epochs):
# # create the data generator
# generator = data_generator(train_descriptions, train_features, tokenizer, max_length)
# # fit for one epoch
# model.fit_generator(generator, epochs=1, steps_per_epoch=steps)
# # save model
# model.save('model_' + str(i) + '.h5')
############### DETAILS
import matplotlib.pyplot as plt
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
################################### csv MATLAB
#
#srcFiles = dir('Directory\*.jpg');
#X=length(srcFiles)% the folder in which ur images exists
#for i = 1 : length(srcFiles)
# filename = strcat('Directory\',srcFiles(i).name);
# I = imread(filename);
# csvwrite('bb.csv',I(:));
#end
#
#
#
###############################################################################################
#import numpy
#from sklearn.model_selection import GridSearchCV
#learn_rate = [0.001, 0.01, 0.1, 0.2, 0.3]
#momentum = [0.0, 0.2, 0.4, 0.6, 0.8, 0.9]
#param_grid = dict(learn_rate=learn_rate, momentum=momentum)
#grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1)
#grid_result = grid.fit(X, Y)
## summarize results
#print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
#means = grid_result.cv_results_['mean_test_score']
#stds = grid_result.cv_results_['std_test_score']
#params = grid_result.cv_results_['params']
#for mean, stdev, param in zip(means, stds, params):
# print("%f (%f) with: %r" % (mean, stdev, param))
|
import os, time, errno
import mss #https://python-mss.readthedocs.io/en/dev/examples.html
import numpy as np
import csv
import cv2
import dlib
import IMDBActors as imdb
MONITOR = {'top': 0, 'left': 0, 'width': 1080, 'height': 720}
def capture(sct):
return sct.grab(MONITOR)
KNEWFACES_DIR = 'Resources/KnownPeopleToEncode'
KFACES_DIR = 'Resources/KnownPeople'
KFACES_FILEPATH = 'Resources/KnownEncodings.csv'
KPEOPLE = []
'''
Reads the .csv file that contains all the saved known pairs (name, encoding).
Adds each pair to KPEOPLE array.
input: no input
output: no output
'''
def kFacesRead():
#If the file exists - loads the saved known pairs (name, encoding).
try:
print('Trying to load saved known encodings...')
#Opens the .csv file that contains all the saved known pairs (name, encoding).
with open(KFACES_FILEPATH, 'r') as csvfile:
reader = csv.reader(csvfile) #Inits the reader.
failedFaces = 0
totalFaces = 0
for row in reader:
try:
totalFaces += 1
#Transforms the string of the numpy array of the enconding to
#a numpy array.
encS = row[1]
encS = encS.split()
encS[0] = encS[0][1:]
encS[-1] = encS[-1][:-1]
encSF = []
for i, j in enumerate(encS):
if j == '': continue
f = float(j)
encSF.append(f)
encNP = np.array(encSF, dtype=float)
#Appends the saved known pair (name, encoding) to the array.
KPEOPLE.append([row[0], encNP])
except OSError as e:
failedFaces += 1
print(e)
print('Unable to load face {}'.format(row[0]))
print('Loaded {} of {} known faces.'.format(totalFaces - failedFaces, totalFaces))
print('Done loading saved known encodings')
    #If the file does not exist - there were no saved known pairs (name, encoding).
except FileNotFoundError as filenotfound:
print('Did not find any saved known encodings.')
'''
Encodes all the new people's faces found in KNEWFACES_DIR and creates the pair
(name, encoding).
Adds each pair to KPEOPLE array.
Writes each pair to the .csv file that contains all the saved known pairs.
Moves all the new images (found in KNEWFACES_DIR) to the directory where all
the known, already encoded, people's face images are.
input: no input
output: no output
'''
def kFacesSaveNew():
#Tries to create the directory to store the images of already encoded faces.
try:
os.makedirs(KFACES_DIR)
except OSError as e:
if e.errno != errno.EEXIST:
raise
print('\nEncoding and saving new known people...')
#Opens the .csv file that contains all the saved known pairs (name, encoding).
with open(KFACES_FILEPATH, 'a', newline = '') as csvfile:
writer = csv.writer(csvfile) #Inits the writer.
failedFaces = 0
totalFaces = 0
for img in os.listdir(KNEWFACES_DIR):
totalFaces += 1
pName = (img.split('.'))[0] #Gets the name.
            print('\tEncoding {}'.format(pName))
path = os.path.join(KNEWFACES_DIR, img)
image = cv2.imread(path) #Opens the image.
#Detects the face.
try:
(x, y, w, h) = faceDetector.detectMultiScale(image, 1.3, 5)[0]
                #Transforms the rectangle coordinates to the expected type.
faceRect = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
                #Locate the facial landmarks used to encode the face.
faceAligned = facePosePredictor(image, faceRect)
#Gets the face's encoding.
faceEncoding = uFaceEncode(image, faceAligned, 1)
#Appends the saved known pair (name, encoding) to the array.
KPEOPLE.append([pName, faceEncoding])
#Writes the new known pair (name, encoding) to the .csv file.
writer.writerow([pName, faceEncoding])
except:
failedFaces += 1
print('Couldn\'t find face in {}.'.format(img))
#Moves the image to the directory of images of already encoded faces.
try:
newpath = os.path.join(KFACES_DIR, img)
os.rename(path, newpath)
except:
print('Couldn\'t move file - {}.'.format(img))
print('Encoded {} of {} new faces.'.format(totalFaces - failedFaces, totalFaces))
print('Done encoding and saving new known people.')
'''
Returns the similarity of the unknown face (that is being detected) with all the
known people's faces.
input: <numpy.array(128)>uEncoding Encoding of the unknown detected face.
output: <array(len(KPEOPLE))> Distance between the detected face and
all the know ones.
'''
def kFacesSimilarity(uEncoding):
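    #Euclidean distance between the stored 128-d encodings and the unknown one;
    #lower values mean more similar faces.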
if len(KPEOPLE) == 0:
return [9999]
return [np.linalg.norm(person[1] - uEncoding) for person in KPEOPLE]
'''
Returns the name of the detected face or 'Face not recognized' if it does not
find a match.
input: <numpy.array(128)>uEncoding Encoding of the unknown detected face.
output: <str> Name of the detected person or
'Face not recognized'.
'''
def uFaceGetName(uEncoding):
#Checks if the distance between the unknown encoding and each of the known
#encodings is lower than the maximum distance needed to be considered a match.
#The lower the distance, the more similar the encodings need to be.
l = [i <= 0.6 for i in kFacesSimilarity(uEncoding)]
#if True in l: return 1, KPEOPLE[l.index(True)][0]
#else: return 'Face not recognized.'
names = []
for i, tValue in enumerate(l):
if tValue:
names.append(KPEOPLE[i][0])
break #SOLVE - WHEN MULTIPLE NAMES ARE GIVEN, IMDB CANT FIND ID.
if len(names) == 0:
names.append('Face not recognized.')
return '+'.join(names)
'''
Returns encoding of the detected face.
input: <np.array()>image Frame where the face was detected.
        <dlib.full_object_detection>shape Landmarks of the detected face.
<int>resampling Number of times to resample the face.
output: <numpy.array(128)> Encoding of the unknown detected face.
'''
def uFaceEncode(image, shape, resampling = 1):
return np.array(faceEncoder.compute_face_descriptor(image, shape, resampling))
def faceDD(imageRGB, x, y, w, h):
#Adds a rectangle to the image. Debugging and interface purposes.
cv2.rectangle(imageRGB, (x, y), (x+w, y+h), (255, 255,255), 2)
    #Transforms the rectangle coordinates to the expected type.
faceRect = dlib.rectangle(int(x), int(y), int(x+w), int(y+h))
    #Locate the facial landmarks used to encode the face.
shape = facePosePredictor(imageRGB, faceRect)
#Gets the face's encoding.
uFaceEncoding = uFaceEncode(imageRGB, shape, 1) #Encodes the face.
#Gets the face's name.
uFaceName = uFaceGetName(uFaceEncoding) #Names the face.
#Adds the name of the face on top of the rectangle in the image.
#Debugging and interface purposes.
cv2.putText(imageRGB, uFaceName, (x, y-5), 0, 0.5, (0, 255, 0), 1, cv2.LINE_AA)
return uFaceName
def main():
kFacesRead() #Loads all the saved known pairs (name, encoding).
kFacesSaveNew() #Loads all the new known pairs (name, encoding).
with mss.mss() as sct:
ids = []
previousActorList = []
while True:
lastTime = time.time() #Debugging purposes.
image = np.array(capture(sct)) #Captures the screen.
imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#Detects the faces in the image.
facesDetected = faceDetector.detectMultiScale(image, 1.3, 5)
if len(facesDetected) != 0:
ids = []
                #For each face detected, identifies it.
for (x, y, w, h) in facesDetected:
name = faceDD(imageRGB, x, y, w, h)
print(name)
# actorID = imdb.getActorID(name)
# if actorID != -1:
# ids.append(str(actorID))
# if ids != previousActorList:
# print('\n\n-------------------------------------------------\n')
# previousActorList = ids
# movies = imdb.getActorsMovies(ids)
# print('\nTotal of {} movies.'.format(len(movies)))
# print('-------------------------------------------------')
cv2.imshow('image', cv2.cvtColor(imageRGB, cv2.COLOR_RGB2BGR)) #Shows the image (with the identities)
#print('FPS = {}'.format(1 / (time.time() - lastTime))) #Debugging purposes.
if cv2.waitKey(1) & 0xFF == ord('q'): break #Exits if 'Q' is pressed.
cv2.destroyAllWindows()
faceDetector = cv2.CascadeClassifier('Resources/HaarCascades/haarcascade_frontalface_default_CPU.xml')
predictorModel = 'Resources/TrainedModels/shape_predictor_68_face_landmarks.dat'
facePosePredictor = dlib.shape_predictor(predictorModel)
encoderModel = 'Resources/TrainedModels/dlib_face_recognition_resnet_model_v1.dat'
faceEncoder = dlib.face_recognition_model_v1(encoderModel)
if __name__ == '__main__':
main()
|
from utils import run
def test_list_krake_applications():
cmd = "kubectl get po --all-namespaces"
response = run(cmd)
assert response.returncode == 0
assert "NAMESPACE" in response.output
|
from functools import partial
import jax
from jax.flatten_util import ravel_pytree
import jax.numpy as jnp
import optax
import numpy as np
from absl import flags
from tqdm import tqdm
flags.DEFINE_integer('iter_max', 100,
                     help='number of iterations for the Hutchinson estimator')
FLAGS = flags.FLAGS
@jax.pmap
def acc_batch(state, batch):
pred = state.apply_fn(
{'params': state.params, 'batch_stats': state.batch_stats},
batch['x'],
train=False,
)
acc = jnp.mean(
jnp.argmax(pred, axis=-1) == jnp.argmax(batch['y'],axis=-1)
)
return acc
def acc_dataset(state, dataset):
acc_total = 0.
n_total = 0
for batch in dataset:
batch_shape = batch['x'].shape
n = batch_shape[0] * batch_shape[1]
acc = acc_batch(state, batch)
acc_total += np.mean(jax.device_get(acc)) * n
n_total += n
acc_total /= n_total
return acc_total
@partial(jax.pmap, static_broadcasted_argnums=(0,))
def tr_hess_batch_p(loss_fn, state, batch):
# Hutchinson's method for estimating trace of Hessian
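    # tr(H) = E[v^T H v] for Rademacher vectors v, so averaging v^T (H v) over
    # FLAGS.iter_max random probes estimates the Hessian trace without ever
    # materialising H; only Hessian-vector products are needed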
rng = jax.random.PRNGKey(FLAGS.seed)
# redefine loss for HVP computation
loss_fn_ = lambda params, inputs, targets : loss_fn(
params,
state,
{'x' : inputs, 'y' : targets},
False,
)[0]
def body_fn(_, carrier):
res, rng = carrier
rng, rng_r = jax.random.split(rng)
v = jax.random.rademacher(
rng_r,
(ravel_pytree(state.params)[0].size,),
jnp.float32,
)
Hv = optax.hvp(loss_fn_, v, state.params, batch['x'], batch['y'])
Hv = ravel_pytree(Hv)[0] / batch['x'].shape[0]
vHv = jnp.vdot(v, Hv)
res += vHv / FLAGS.iter_max
return res, rng
    # float initial value keeps the loop-carried accumulator dtype consistent
    res, rng = jax.lax.fori_loop(0, FLAGS.iter_max, body_fn, (0., rng))
return res
def tr_hess_batch(loss_fn, state, batch):
tr_hess = tr_hess_batch_p(loss_fn, state, batch)
tr_hess = np.mean(jax.device_get(tr_hess))
return tr_hess
def tr_hess_dataset(loss_fn, state, dataset):
tr_hess_total = 0.
n_total = 0
for batch in tqdm(dataset):
batch_shape = batch['x'].shape
n = batch_shape[0] * batch_shape[1]
tr_hess = tr_hess_batch(loss_fn, state, batch)
tr_hess_total += tr_hess * n
n_total += n
tr_hess_total /= n_total
return tr_hess_total
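# Illustrative sketch, not part of the original module: the same Hutchinson
# identity checked on a small explicit symmetric matrix with NumPy (np is
# already imported above). Never called by the training code.
def _hutchinson_trace_demo(n_probes=1000, seed=0):
    rng_np = np.random.default_rng(seed)
    A = np.array([[2.0, 0.5], [0.5, 1.0]])  # tr(A) = 3.0
    probes = rng_np.choice([-1.0, 1.0], size=(n_probes, A.shape[0]))
    # average of v^T A v over Rademacher probes approximates tr(A)
    return float(np.mean([v @ A @ v for v in probes]))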
@jax.pmap
def tr_ntk_batch_p(state, batch):
# Hutchinson's method for estimating trace of NTK
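    # with J = d(outputs)/d(params), tr(NTK) = tr(J J^T) = E[||J^T v||^2] for
    # Rademacher vectors v; f_vjp(v) below computes exactly J^T v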
rng = jax.random.PRNGKey(FLAGS.seed)
# redefine forward for JVP computation
def f(params):
return state.apply_fn(
{'params' : params, 'batch_stats': state.batch_stats},
batch['x'],
train=False,
)
_, f_vjp = jax.vjp(f, state.params)
def body_fn(_, carrier):
res, rng = carrier
_, rng = jax.random.split( rng )
v = jax.random.rademacher(
rng,
(batch['x'].shape[0], batch['y'].shape[-1]),
jnp.float32,
)
j_p = ravel_pytree(f_vjp(v))[0]
tr_ntk= jnp.sum(jnp.square(j_p)) / batch['x'].shape[0]
res += tr_ntk / FLAGS.iter_max
return res, rng
    res, rng = jax.lax.fori_loop(0, FLAGS.iter_max, body_fn, (0., rng))
return res
def tr_ntk_batch(state, batch):
tr_ntk = tr_ntk_batch_p(state, batch)
tr_ntk = np.mean(jax.device_get(tr_ntk))
return tr_ntk
def tr_ntk_dataset(state, dataset):
tr_ntk_total = 0.
n_total = 0
for batch in tqdm(dataset):
batch_shape = batch['x'].shape
n = batch_shape[0] * batch_shape[1]
tr_ntk = tr_ntk_batch(state, batch)
tr_ntk_total += tr_ntk * n
n_total += n
tr_ntk_total /= n_total
return tr_ntk_total
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""K8s custom resource handler for new warehouse orders."""
import os
import sys
import traceback
import logging
from typing import Dict, Optional
from robcoewmtypes.helper import create_robcoewmtype_str, get_sample_cr
from robcoewmtypes.warehouseorder import (
WarehouseOrder, ConfirmWarehouseTask, WarehouseOrderCRDSpec)
from k8scrhandler.k8scrhandler import K8sCRHandler, k8s_cr_callback
from .robot import WarehouseOrderStateRestore
_LOGGER = logging.getLogger(__name__)
WAREHOUSEORDER_TYPE = create_robcoewmtype_str(WarehouseOrder('lgnum', 'who'))
class OrderController(K8sCRHandler):
"""Handle K8s custom resources."""
def __init__(self) -> None:
"""Constructor."""
self.init_robot_fromenv()
# Last successfully processed spec of warehouse order
self.processed_order_spec = {}
template_cr = get_sample_cr('warehouseorder')
labels = {}
labels['cloudrobotics.com/robot-name'] = self.robco_robot_name
super().__init__(
'sap.com',
'v1',
'warehouseorders',
'default',
template_cr,
labels
)
@k8s_cr_callback
def _callback(self, name: str, labels: Dict, operation: str, custom_res: Dict) -> None:
"""Process custom resource operation."""
_LOGGER.debug('Handling %s on %s', operation, name)
# Run all registered callback functions
try:
# Check if warehouse order has to be processed in callback.
process_cr = self._warehouse_order_precheck(name, custom_res)
            # If the pre-check was successful, iterate over all callbacks
if process_cr:
for callback in self.callbacks[operation].values():
callback(WAREHOUSEORDER_TYPE, custom_res['spec']['data'])
except Exception: # pylint: disable=broad-except
_LOGGER.error(
'Error while processing custom resource %s', name)
exc_info = sys.exc_info()
_LOGGER.error(
'%s/%s: Error in callback - Exception: "%s" / "%s" - TRACEBACK: %s', self.group,
self.plural, exc_info[0], exc_info[1], traceback.format_exception(*exc_info))
else:
if operation == 'DELETED':
# Cleanup when CR was deleted
self.processed_order_spec.pop(name, None)
elif process_cr:
# When CR was processed successfully, save its spec
self.processed_order_spec[name] = custom_res['spec']
_LOGGER.debug('Successfully processed custom resource %s', name)
def _warehouse_order_precheck(self, name: str, custom_res: Dict) -> bool:
"""Check if warehouse order has to be processed in callback."""
# Skip warehouse orders with specs already processed before
if self.processed_order_spec.get(name) == custom_res['spec']:
_LOGGER.debug('Spec for "%s" already processed before - skip', name)
return False
cr_status = custom_res.get('status') if isinstance(custom_res.get('status'), dict) else {}
status_data = cr_status.get('data', {})
process_status = custom_res['spec'].get('process_status', {})
order_status = custom_res['spec'].get('order_status')
# Skip warehouse order which is not RUNNING
if order_status != WarehouseOrderCRDSpec.STATE_RUNNING:
_LOGGER.debug(
'Skip "%s" because warehouse order is not %s but in order_status "%s"', name,
WarehouseOrderCRDSpec.STATE_RUNNING, order_status)
return False
# Skip warehouse order if process status from order manager is not
# equal to status of the warehouse order
if status_data != process_status:
_LOGGER.info(
'Skip "%s" because order manager process status is not equal to warehouse order '
'status', name)
return False
# Check if a warehouse task is already confirmed
if status_data:
for wht in custom_res['spec']['data']['warehousetasks']:
for conf in status_data:
if wht['tanum'] == conf['tanum'] and wht['lgnum'] == conf['lgnum']:
if (conf['confirmationnumber'] == ConfirmWarehouseTask.FIRST_CONF
and conf['confirmationtype'] == ConfirmWarehouseTask.CONF_SUCCESS
and wht['vlpla'] != ''):
_LOGGER.error(
'Skip "%s" because warehouse task "%s" already got first '
'confirmation but includes a source bin', name, wht['tanum'])
return False
if conf['confirmationnumber'] == ConfirmWarehouseTask.SECOND_CONF:
_LOGGER.error(
'Skip "%s" because warehouse task "%s" already got second '
'confirmation', name, wht['tanum'])
return False
return True
def init_robot_fromenv(self) -> None:
"""Initialize EWM Robot from environment variables."""
# Read environment variables
envvar = {}
envvar['ROBCO_ROBOT_NAME'] = os.environ.get('ROBCO_ROBOT_NAME')
# Check if complete
for var, val in envvar.items():
if val is None:
raise ValueError('Environment variable "{}" is not set'.format(var))
# Robot identifier
self.robco_robot_name = envvar['ROBCO_ROBOT_NAME']
def confirm_wht(self, dtype: str, wht: Dict, clear_progress: bool = False) -> bool:
"""Notify order manager about current status of who + tasks."""
name = '{lgnum}.{who}'.format(lgnum=wht['lgnum'], who=wht['who'])
# Get current status from custom resource of the warehouse order
custom_res = self.get_cr(name)
status = custom_res.get('status') if isinstance(custom_res.get('status'), dict) else {}
# Append current wht confirmation to status
if not status.get('data'):
status['data'] = []
# Check if confirmation was already sent
for conf in status['data']:
if conf == wht:
_LOGGER.error('Confirmation already sent. Not doing anything.')
return True
status['data'].append(wht)
# Clear robot progress
if clear_progress is True:
status['robot'] = {'mission': '', 'statemachine': '', 'tanum': ''}
success = self.update_cr_status(name, status)
return success
def send_wht_progress_update(self, wht: Dict, mission: str, statemachine: str) -> bool:
"""Save the progress the robot made on processing the warehouse task."""
name = '{lgnum}.{who}'.format(lgnum=wht['lgnum'], who=wht['who'])
tanum = wht['tanum']
# Create robot subtree in status
status = {'robot': {'mission': mission, 'statemachine': statemachine, 'tanum': tanum}}
success = self.update_cr_status(name, status)
return success
def get_who_in_process(self) -> Optional[WarehouseOrderStateRestore]:
"""
        Get the warehouse order which is in process, plus the active mission and statemachine state.
Assume that there is only one warehouse order in process.
If there are multiple, get the first one.
"""
# Get all warehouse order CRs
cr_resp = self.list_all_cr()
# Return the first warehouse order in process
for custom_res in cr_resp['items']:
warehouseorder = custom_res.get('spec', {}).get('data', {})
tanum = custom_res.get('status', {}).get('robot', {}).get('tanum')
mission = custom_res.get('status', {}).get('robot', {}).get('mission')
statemachine = custom_res.get('status', {}).get('robot', {}).get('statemachine')
if warehouseorder and mission and statemachine and tanum:
# Determine type of warehouse order
who_type = statemachine[:statemachine.rfind('_')]
# MoveHU warehouse orders are always assigned to the robot and do not have sub
# warehouse orders
if who_type == 'MoveHU':
# Only warehouse orders assigned to the robot
if warehouseorder.get('rsrc') != str(self.robco_robot_name).upper():
continue
who_restore = WarehouseOrderStateRestore(
warehouseorder=warehouseorder, mission=mission, statemachine=statemachine,
tanum=tanum, subwarehouseorder=None)
# In PickPackPass scenario there might be a sub warehouse order
elif who_type == 'PickPackPass':
# If robot is working on a sub warehouse order, there is a topwhoid
if warehouseorder.get('topwhoid') != '0000000000':
topwarehouseorder = None
# Find top warehouse order
for cr_top in cr_resp['items']:
who_top = cr_top.get('spec', {}).get('data', {})
if (who_top.get('who') == warehouseorder.get('topwhoid')
and who_top.get('lgnum') == warehouseorder.get('lgnum')
and who_top.get('rsrc') == str(self.robco_robot_name).upper()):
topwarehouseorder = who_top
# Continue only if top warehouse order found
if topwarehouseorder:
who_restore = WarehouseOrderStateRestore(
warehouseorder=topwarehouseorder, mission=mission, tanum=tanum,
statemachine=statemachine, subwarehouseorder=warehouseorder)
else:
return
else:
who_restore = WarehouseOrderStateRestore(
warehouseorder=warehouseorder, mission=mission, tanum=tanum,
statemachine=statemachine, subwarehouseorder=None)
return who_restore
|
"""
https://docs.python.org/3/library
"""
import math
from math import sqrt
# import modulesexternal.car as car
# from modulesexternal import car # better way to do it
from modulesexternal.car import info
# from modulesexternal.fibo import *
from modulesexternal import fibo
class ModulesDemo():
def builtin_modules(self):
print(math.sqrt(100))
print(sqrt(100))
def car_description(self):
make = "bmw"
model = "550i"
# car.info(make, model)
info(make, model)
m = ModulesDemo()
m.builtin_modules()
m.car_description()
print(fibo.__name__)
print(fibo.fib(1000))
print(fibo.fib2(1000))
import sys
print(dir(fibo))
print(dir())
import builtins
print(dir(builtins))
|
"""
activations.py
Each of the supported activation functions.
"""
import torch
from utils.dictionary import D_CLAMPED_ABS, D_CLAMPED_LINEAR, D_COS, D_EXP, D_EXP_ABS, D_GAUSS, D_HAT, D_SIGMOID, \
D_SIN, D_TANH
def clamped_abs_activation(x):
return min(1.0, torch.abs(x) / 3)
def clamped_linear_activation(x):
return max(-1.0, min(1.0, x / 3))
def cos_activation(x):
return torch.cos(x * 3.14)
def exponential_activation(x):
return 1 - 1 / (2 * torch.exp(torch.abs(3 * x)))
def exponential_abs_activation(x):
return torch.abs(x) ** (1 / torch.abs(x))
def gauss_activation(x):
return torch.exp(-x ** 2)
def hat_activation(x):
return max(0.0, 1 - torch.abs(x / 3))
def sigmoid_activation(x):
return torch.sigmoid(2 * x)
def sin_activation(x):
return torch.sin(x * 3.14)
def tanh_activation(x):
return torch.tanh(x)
str_to_activation = {
D_CLAMPED_ABS: clamped_abs_activation,
D_CLAMPED_LINEAR: clamped_linear_activation,
D_COS: cos_activation,
D_EXP: exponential_activation,
D_EXP_ABS: exponential_abs_activation,
D_GAUSS: gauss_activation,
D_HAT: hat_activation,
D_SIGMOID: sigmoid_activation,
D_SIN: sin_activation,
D_TANH: tanh_activation,
}
|
from MiddleKit.Run.MiddleObject import MiddleObject
def assertBazIsObjRef(bar):
bazAttr = getattr(bar, '_baz')
assert isinstance(bazAttr, MiddleObject), (
'bazAttr=%r, type(bazAttr)=%r' % (bazAttr, type(bazAttr)))
def test(store):
foos = store.fetchObjectsOfClass('Foo')
assert len(foos) == 2
foo1 = foos[0]
foo2 = foos[1]
bar = foo1.bar()
baz = foo1.bar().baz()
assert baz.x() == 5 # just to make sure we got what we expected
assertBazIsObjRef(bar)
# Now here's what we're really testing for:
#
# When we ask foo2 for bar(), it's baz attribute
# should still be a Python pointer, NOT a longint
# (e.g., unpacked obj ref)
#
# This was not the case earlier, because store.fetchObject()
# was _always_ calling store.fetchObjectsOfClass() instead of
# checking the in-memory object cache first.
bar = foo2.bar()
assertBazIsObjRef(bar)
if 0:
bazAttr = getattr(bar, '_baz')
assert isinstance(bazAttr, MiddleObject), (
'bazAttr=%r, type(bazAttr)=%r' % (bazAttr, type(bazAttr)))
|
#!/usr/bin/env python
#coding:utf-8
#Author:WuYa
import os
def data_dir(data='data', fileName=None):
    '''Return the path of a file inside the given data directory.'''
    return os.path.join(os.path.dirname(os.path.dirname(__file__)), data, fileName)
|
# Generated by Django 2.2.6 on 2021-08-21 16:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0023_auto_20210821_1635'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='views',
),
migrations.AddField(
model_name='post',
name='views',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='post_views', to='posts.Ip', verbose_name='просмотры'),
),
]
|
'''
GetQueueAttributes Command
'''
import time
from .base_command import BaseRSMQCommand
from .exceptions import QueueDoesNotExist
class GetQueueAttributesCommand(BaseRSMQCommand):
'''
Get Queue Attributes from existing queue
'''
PARAMS = {'qname': {'required': True,
'value': None},
'quiet': {'required': False,
'value': False}
}
def exec_command(self):
''' Exec Command '''
secs, usecs = self.client.time()
now = secs * 1000 + int(usecs / 1000)
queue_base = self.queue_base
queue_key = self.queue_key
tx = self.client.pipeline(transaction=True)
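        # hmget: queue configuration and counters; zcard: total number of messages;
        # zcount(now, +inf): messages whose visibility time is still in the future,
        # i.e. currently hidden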
tx.hmget(queue_key, "vt", "delay", "maxsize", "totalrecv", "totalsent", "created",
"modified")
tx.zcard(queue_base)
tx.zcount(queue_base, now, "+inf")
results = tx.execute()
if not results or results[0][0] is None:
raise QueueDoesNotExist(self.get_qname)
stats = results[0]
return {
"vt": float(stats[0]),
"delay": float(stats[1]),
"maxsize": int(stats[2]),
"totalrecv": int(stats[3] or 0),
"totalsent": int(stats[4] or 0),
"created": int(stats[5]),
"modified": int(stats[6]),
"msgs": results[1],
"hiddenmsgs": results[2]
}
|
"""Array algorithms"""
from .find_peak import *
from .two_sum import *
|
#DataClass.py
#-*- coding: utf-8 -*-
import numpy as np
from sklearn.datasets import fetch_mldata
class LabeledSet:
def __init__(self,x,y,input_dim,output_dim):
self.x = x
self.y = y
self.input_dim = input_dim
self.output_dim = output_dim
    #Return the dimension of the input space
    def getInputDimension(self):
        return self.input_dim
    #Return the dimension of the output space
    def getOutputDimension(self):
        return self.output_dim
    #Return the number of examples in the set
    def size(self):
        return len(self.x)
    #Return the value of x_i
    def getX(self,i):
        return self.x[i]
    #Return the value of y_i
    def getY(self,i):
        return self.y[i]
def createGaussianDataset(positive_center_1,positive_center_2,positive_sigma,negative_center_1,negative_center_2,negative_sigma,nb_points):
pos = True
first = True
while nb_points>0:
if pos:
a = np.random.multivariate_normal([positive_center_1,positive_center_2],[[positive_sigma,0],[0,positive_sigma]])
if first:
x=a
first = False
y = np.array([1])
else:
x = np.vstack((x,a))
y = np.vstack((y,np.array([1])))
pos = False
else:
b = np.random.multivariate_normal([negative_center_1,negative_center_2],[[negative_sigma,0],[0,negative_sigma]])
x = np.vstack((x,b))
y = np.vstack((y,np.array([-1])))
pos = True
nb_points -= 1
return LabeledSet(x,y,2,1)
def getMnistDualDataset():
mnist=fetch_mldata('MNIST original')
    #Build the input vectors
mnist_6=mnist.data[mnist.target==6]
nb_6=len(mnist_6)
mnist_8=mnist.data[mnist.target==8]
nb_8=len(mnist_8)
mnist_6_8=np.vstack((mnist_6,mnist_8))
print "%d 6s and %d 8s" % (nb_6,nb_8)
    #Build the target vectors
target_6_8=np.array([[1]])
for i in range(nb_6-1):
target_6_8=np.vstack((target_6_8,[1]))
for i in range(nb_8):
target_6_8=np.vstack((target_6_8,[-1]))
print "%d/%d vecteurs d'apprentissage" % (len(target_6_8),len(mnist_6_8))
randomvec=np.random.rand(len(target_6_8))
randomvec=randomvec>0.8
target_6_8=target_6_8[randomvec]
mnist_6_8=mnist_6_8[randomvec]
print "%d/%d vecteurs d'apprentissage apres echantillonage" % (len(target_6_8),len(mnist_6_8))
randomvec=np.random.rand(len(target_6_8))
randomvec=randomvec>0.5
train_data=mnist_6_8[randomvec]
train_label=target_6_8[randomvec]
test_data=mnist_6_8[np.logical_not(randomvec)]
test_label=target_6_8[np.logical_not(randomvec)]
print "%d training examples and %d testing examples " % (len(train_data),len(test_data))
return (train_data,train_label,test_data,test_label)
|