"""
baemo.entity
~~~~~~~~~~~~~~~~~~~~~~~~
This module defines the Entity and Entities interfaces. Entities is a cache of
Entity groups that allows for retrieval by name. Entity is a metaclass that
creates Model and Collection classes.
"""
from collections import OrderedDict
from .delimited import DelimitedDict
from .projection import Projection
from .references import References
from .sort import Sort
from .exceptions import EntityNotSet
class EntityMeta(object):
pass
class Entities(object):
cache = {}
@classmethod
def set(cls, name, entity):
Entities.cache[name] = entity
@classmethod
def get(cls, name):
if name not in Entities.cache:
raise EntityNotSet(name)
else:
return Entities.cache[name]
class Entity(type):
def __new__(cls, name, model_options=None, collection_options=None):
from .model import Model
from .collection import Collection
entity_definition = {}
entity_config = [{
"type": "model",
"bases": [Model, EntityMeta],
"options": model_options
}, {
"type": "collection",
"bases": [Collection, EntityMeta],
"options": collection_options
}]
for member_config in entity_config:
# pre handle configuration options
if member_config["options"] is not None:
# base classes
if "bases" in member_config["options"]:
bases = member_config["options"]["bases"]
del member_config["options"]["bases"]
if type(bases) is not list:
bases = [bases]
member_config["bases"] = bases + member_config["bases"]
# create entity member class
entity_definition[member_config["type"]] = type(
"{}{}".format(name, member_config["type"].title()),
tuple(member_config["bases"]),
dict()
)
            # For each base class, in reverse order, cache and merge dict-type
            # attributes. NOTE: an attribute keeps the type it had when it was
            # first encountered while scanning the base classes. This matters
            # because attributes passed as plain dicts in the model/collection
            # options at entity creation time must be cast to the type the
            # attribute needs to function correctly (Sort for sorts,
            # References for references, etc.).
bases_attribute_cache = {}
for base in reversed(member_config["bases"]):
# for each attribute in base class
for key in dir(base):
attr = getattr(base, key)
# determine correct type by checking type on base model
if isinstance(attr, (dict, OrderedDict, DelimitedDict)):
if key not in bases_attribute_cache:
bases_attribute_cache[key] = attr.__class__()
bases_attribute_cache[key] = DelimitedDict._merge(
attr,
bases_attribute_cache[key]
)
# add merged base attribute to options
if bases_attribute_cache:
if member_config["options"] is None:
member_config["options"] = {}
for key, value in bases_attribute_cache.items():
# overwrite
if key not in member_config["options"]:
member_config["options"][key] = value
# merge
# if key not in member_config["options"]:
# member_config["options"][key] = value.__class__()
#
# # overwrite values inherited with values in options
# member_config["options"][key] = DelimitedDict._merge(
# value,
# member_config["options"][key]
# )
# cast back to correct type
# member_config["options"][key] = value.__class__(member_config["options"][key])
            # if there are no options, there is nothing left to set; continue
            if member_config["options"] is None:
                continue
# add options attributes to entity member class
for key, value in member_config["options"].items():
                # check against the type of the attribute on the Model or Collection
                # base so that values passed only as options are cast to the expected type
if hasattr(member_config["bases"][-2], key):
attr = getattr(member_config["bases"][-2], key)
if isinstance(attr, (dict, OrderedDict, DelimitedDict)):
if not isinstance(value, attr.__class__):
value = attr.__class__(value)
# set attribute on entity member class
setattr(entity_definition[member_config["type"]], key, value)
# connect model and collection via entity definition
entity_definition["model"].__entity__ = entity_definition
entity_definition["collection"].__entity__ = entity_definition
# set entity member in Entities cache
Entities.set(name, entity_definition)
return entity_definition["model"], entity_definition["collection"]
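# A minimal usage sketch (commented out; the option names below are
# hypothetical and depend on what the Model/Collection bases define):
#
#     UserModel, UserCollection = Entity("User", model_options={"schema": {}})
#     entity = Entities.get("User")   # {"model": UserModel, "collection": UserCollection}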
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from TorchSUL import Model as M
import config
from . import hrnet
class Head(M.Model):
def initialize(self, head_layernum, head_chn):
self.layers = nn.ModuleList()
for i in range(head_layernum):
self.layers.append(M.ConvLayer(3, head_chn, activation=M.PARAM_PRELU, batch_norm=True, usebias=False))
def forward(self, x):
for l in self.layers:
x = l(x)
return x
class DepthToSpace(M.Model):
def initialize(self, block_size):
self.block_size = block_size
def forward(self, x):
bsize, chn, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        assert chn % (self.block_size ** 2) == 0, 'DepthToSpace: channel count must be divisible by block_size**2'
x = x.view(bsize, -1, self.block_size, self.block_size, h, w)
x = x.permute(0,1,4,2,5,3)
x = x.reshape(bsize, -1, h*self.block_size, w*self.block_size)
return x
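# A minimal sanity-check sketch. My assumption: this rearrangement is the same
# one torch.nn.functional.pixel_shuffle performs, mapping (B, C*r*r, H, W) to
# (B, C, H*r, W*r); it also assumes TorchSUL models accept their initialize()
# arguments at construction, as done elsewhere in this file.
def _check_depth_to_space(block_size=2):
    x = torch.randn(1, 2 * block_size ** 2, 4, 4)
    out = DepthToSpace(block_size)(x)
    assert torch.equal(out, F.pixel_shuffle(x, block_size))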
class UpSample(M.Model):
def initialize(self, upsample_layers, upsample_chn):
self.prevlayers = nn.ModuleList()
#self.uplayer = M.DeConvLayer(3, upsample_chn, stride=2, activation=M.PARAM_PRELU, batch_norm=True, usebias=False)
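        # a 3x3 conv to 4x channels followed by depth-to-space (pixel shuffle)
        # is used instead of the transposed conv above; a common choice to
        # avoid the checkerboard artifacts deconvolutions can produce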
self.uplayer = M.ConvLayer(3, upsample_chn*4, activation=M.PARAM_PRELU, usebias=False)
self.d2s = DepthToSpace(2)
self.postlayers = nn.ModuleList()
for i in range(upsample_layers):
self.prevlayers.append(M.ConvLayer(3, upsample_chn, activation=M.PARAM_PRELU, batch_norm=True, usebias=False))
for i in range(upsample_layers):
self.postlayers.append(M.ConvLayer(3, upsample_chn, activation=M.PARAM_PRELU, batch_norm=True, usebias=False))
def forward(self, x):
for p in self.prevlayers:
x = p(x)
x = self.uplayer(x)
x = self.d2s(x)
# print('UPUP', x.shape)
for p in self.postlayers:
x = p(x)
return x
class HR3DNet(M.Model):
def initialize(self, head_layernum, head_chn, upsample_layers, upsample_chn):
self.backbone = hrnet.Body()
self.upsample = UpSample(upsample_layers, upsample_chn)
self.head = Head(head_layernum, head_chn)
self.head2 = Head(head_layernum, head_chn)
self.head3 = Head(head_layernum, head_chn)
self.head4 = Head(head_layernum, head_chn)
self.c1 = M.ConvLayer(1, config.num_pts)
self.c2 = M.ConvLayer(1, config.num_pts)
self.c3 = M.ConvLayer(1, 1)
self.c4 = M.ConvLayer(1, config.num_pts-1)
def build_forward(self, x, *args, **kwargs):
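        # Build-time pass: runs the network once to materialize layer shapes,
        # then re-initializes the final 1x1 conv weights with a small std.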
feat = self.backbone(x)
feat = self.upsample(feat)
feat1 = self.head(feat)
feat2 = self.head2(feat)
feat3 = self.head3(feat)
feat4 = self.head4(feat)
outs = self.c1(feat1)
idout = self.c2(feat2)
depout = self.c3(feat3)
depallout = self.c4(feat4)
nn.init.normal_(self.c1.conv.weight, std=0.001)
nn.init.normal_(self.c2.conv.weight, std=0.001)
nn.init.normal_(self.c3.conv.weight, std=0.001)
nn.init.normal_(self.c4.conv.weight, std=0.001)
        print('normal init for last conv')
return outs, idout, depout, depallout
def forward(self, x, density_only=False):
feat = self.backbone(x)
feat = self.upsample(feat)
h1 = self.head(feat)
h2 = self.head2(feat)
h3 = self.head3(feat)
h4 = self.head4(feat)
# results = self.density_branch(feat)
# idout = self.id_branch(feat)
outs = self.c1(h1)
idout = self.c2(h2)
depout = self.c3(h3)
depallout = self.c4(h4)
result = torch.cat([outs, idout, depout, depallout], dim=1)
# return outs, idout, depout, depallout
return [result,]
|
from typing import List
import os
import pathlib
import csv
import io
import logging
_logger = logging.getLogger(__name__)
class DatasetDeployer(object):
"""Common deploy operations for persisting files to a local folder.
"""
def __init__(self, key="filename.csv", body="a random data", output_dir="."):
self.key = key
self.body = body
self.output_dir = output_dir
def _persist_to_local(self):
"""Persists specific data onto an s3 bucket.
This method assumes versioned is handled on the bucket itself.
"""
_logger.info(f"persisting {self.key} {self.output_dir}")
with open(os.path.join(self.output_dir, self.key), "wb") as f:
            # hack to allow the local writer to take either bytes or a string;
            # note this assumes that all strings should be encoded as UTF-8
f.write(self.body.encode("UTF-8") if isinstance(self.body, str) else self.body)
def persist(self):
self._persist_to_local()
def upload_csv(key_name: str, csv: str, output_dir: str):
blob = {
"key": f"{key_name}.csv",
"body": csv,
"output_dir": output_dir,
}
obj = DatasetDeployer(**blob)
obj.persist()
_logger.info(f"Generated csv for {key_name}")
def flatten_dict(data: dict, level_separator: str = ".") -> dict:
"""Flattens a nested dictionary, separating nested keys by separator.
Args:
data: data to flatten
level_separator: separator to use when combining keys from nested dictionary.
"""
flattened = {}
for key, value in data.items():
if not isinstance(value, dict):
flattened[key] = value
continue
        value = flatten_dict(value, level_separator)
new_data = {
f"{key}{level_separator}{nested_key}": nested_value
for nested_key, nested_value in value.items()
}
flattened.update(new_data)
return flattened
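# Example: flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
# returns {"a": 1, "b.c": 2, "b.d.e": 3}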
def write_nested_csv(data: List[dict], key: str, output_dir: str):
"""Writes list of data as a nested csv.
Args:
data: list of data to write.
key: Stem of file to write
output_dir: Output directory to write to.
"""
if not data:
raise ValueError("Cannot upload a 0 length list.")
header = flatten_dict(data[0]).keys()
output_path = pathlib.Path(output_dir) / f"{key}.csv"
_logger.info(f"Writing {key} to {output_path}")
    with output_path.open("w", newline="") as csvfile:
writer = csv.DictWriter(csvfile, header)
writer.writeheader()
for row in data:
flattened_row = flatten_dict(row)
# if a nested key is optional (i.e. {a: Optional[dict]}) and there is no
# value for a, (i.e. {a: None}), don't write a, as it's not in the header.
flattened_row = {k: v for k, v in flattened_row.items() if k in header}
writer.writerow(flattened_row)
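# A minimal usage sketch (hypothetical data):
#
#     write_nested_csv(
#         [{"county": "X", "metrics": {"cases": 10, "deaths": 0}}],
#         key="summary",
#         output_dir="/tmp",
#     )
#
# which writes /tmp/summary.csv with the header: county,metrics.cases,metrics.deaths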
def upload_json(key_name, json: str, output_dir: str):
DatasetDeployer(f"{key_name}.json", json, output_dir).persist()
def deploy_shape_files(
output_dir: str, key: str, shp_bytes: io.BytesIO, shx_bytes: io.BytesIO, dbf_bytes: io.BytesIO,
):
"""Deploys shape files to specified output dir.
Args:
output_dir: Output directory to save shapefiles to.
key: stem of filename to save shapefiles to.
        shp_bytes: In-memory contents of the .shp file.
        shx_bytes: In-memory contents of the .shx file.
        dbf_bytes: In-memory contents of the .dbf file.
"""
DatasetDeployer(key=f"{key}.shp", body=shp_bytes.getvalue(), output_dir=output_dir).persist()
DatasetDeployer(key=f"{key}.shx", body=shx_bytes.getvalue(), output_dir=output_dir).persist()
DatasetDeployer(key=f"{key}.dbf", body=dbf_bytes.getvalue(), output_dir=output_dir).persist()
|
# -*- coding: utf-8 -*-
import json
import csv
import glob
import os
import sys
# 3rd party
import pymongo
from pprint import pprint
#%%#######################################################################
## Establish the mongo connection (run only once mongod.exe is running,
## e.g. from C:\Program Files\MongoDB\Server\3.2\bin)
##########################################################################
conn=pymongo.MongoClient()
db = conn.mydb
conn.database_names()
collection = db.PbP_Full
#%%#######################################################################
## Import of JSONs into mongo; not necessary if the mongo database has already been generated locally
##########################################################################
if __name__ == '__main__':
os.chdir(os.path.dirname(sys.argv[0]))
list_jsons = glob.glob('.\\data\\PlaybyPlay\\*.json')
for x in list_jsons:
        with open(x) as data_file:
singleLoad = json.load(data_file)
singlePbP = singleLoad['_playbyplay']['resultSets']['PlayByPlay']
for i in singlePbP:
collection.insert_one(i)
#pprint to identify where the game events are actually present.
#%%#######################################################################
## Outputting to CSV directly from mongo. Pretty sleek yo.
##########################################################################
cursor = collection.find({"$or":[ {"PLAYER1_TEAM_ABBREVIATION":"SAS"}, {"PLAYER2_TEAM_ABBREVIATION":"SAS"}, {"PLAYER3_TEAM_ABBREVIATION":"SAS"}]})
def pbp_mongo_to_csv(cursor):
with open('pbp_SAS.csv', 'a', newline='') as outfile:
fields = ['SCORE', 'PERSON3TYPE', 'PLAYER3_NAME', 'PLAYER3_TEAM_CITY', 'VISITORDESCRIPTION', 'PLAYER1_ID', 'PERSON1TYPE', 'PERIOD', 'PLAYER1_TEAM_NICKNAME', 'HOMEDESCRIPTION', 'PLAYER1_TEAM_ID', '_id', 'WCTIMESTRING', 'PLAYER2_TEAM_NICKNAME', 'SCOREMARGIN', 'PLAYER2_NAME', 'PCTIMESTRING', 'PLAYER3_TEAM_NICKNAME', 'PLAYER1_TEAM_CITY', 'PLAYER2_ID', 'EVENTMSGTYPE', 'GAME_ID', 'PERSON2TYPE', 'EVENTNUM', 'PLAYER1_NAME', 'PLAYER3_ID', 'PLAYER3_TEAM_ABBREVIATION', 'PLAYER2_TEAM_ABBREVIATION', 'EVENTMSGACTIONTYPE', 'PLAYER3_TEAM_ID', 'PLAYER2_TEAM_CITY', 'NEUTRALDESCRIPTION', 'PLAYER2_TEAM_ID', 'PLAYER1_TEAM_ABBREVIATION']
writer = csv.DictWriter(outfile, fieldnames=fields)
writer.writeheader()
for x in cursor:
writer.writerow(x)
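# Run the export for the cursor built above, e.g.:
#
#     pbp_mongo_to_csv(cursor)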
def array_front9(nums):
    # Return True if one of the first 3 elements in the list is a 9.
    for i in range(min(3, len(nums))):
        if nums[i] == 9:
            return True
    return False
|
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutronclient.v2_0 import client as clientv20
from oslo.config import cfg
from ceilometer.openstack.common import log
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
LOG = log.getLogger(__name__)
def logged(func):
@functools.wraps(func)
def with_logging(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
LOG.exception(e)
raise
return with_logging
class Client(object):
"""A client which gets information via python-neutronclient."""
def __init__(self):
conf = cfg.CONF.service_credentials
params = {
'insecure': conf.insecure,
'ca_cert': conf.os_cacert,
'username': conf.os_username,
'password': conf.os_password,
'auth_url': conf.os_auth_url,
'region_name': conf.os_region_name,
'endpoint_type': conf.os_endpoint_type
}
if conf.os_tenant_id:
params['tenant_id'] = conf.os_tenant_id
else:
params['tenant_name'] = conf.os_tenant_name
self.client = clientv20.Client(**params)
@logged
def network_get_all(self):
"""Returns all networks."""
resp = self.client.list_networks()
return resp.get('networks')
@logged
def port_get_all(self):
resp = self.client.list_ports()
return resp.get('ports')
@logged
def vip_get_all(self):
resp = self.client.list_vips()
return resp.get('vips')
@logged
def pool_get_all(self):
resp = self.client.list_pools()
return resp.get('pools')
@logged
def member_get_all(self):
resp = self.client.list_members()
return resp.get('members')
@logged
def health_monitor_get_all(self):
resp = self.client.list_health_monitors()
return resp.get('health_monitors')
@logged
def pool_stats(self, pool):
return self.client.retrieve_pool_stats(pool)
@logged
def vpn_get_all(self):
resp = self.client.list_vpnservices()
return resp.get('vpnservices')
@logged
def ipsec_site_connections_get_all(self):
resp = self.client.list_ipsec_site_connections()
return resp.get('ipsec_site_connections')
@logged
def firewall_get_all(self):
resp = self.client.list_firewalls()
return resp.get('firewalls')
@logged
def fw_policy_get_all(self):
resp = self.client.list_firewall_policies()
return resp.get('firewall_policies')
|
import configparser
import logging
import os
import json

logger = logging.getLogger(__name__)
def getConfig(file):
config = configparser.ConfigParser()
config.read(file)
if len(config.sections()) == 0:
raise Exception('Could not parse configuration file. Aborting.')
return config
class Config:
CONFIG_FILE = "../bidder.conf"
config = getConfig(os.path.join(os.path.abspath(os.path.dirname(__file__)), CONFIG_FILE))
@classmethod
def getParam(cls, section, attr):
return cls.config[section][attr]
@classmethod
    def getMockedCampaigns(cls):
        # configparser values are strings, so use getboolean() rather than
        # truth-testing the raw string (which would treat "false" as true)
        mock = cls.config.getboolean('campaignresponse', 'mock')
        params_file = cls.getParam('campaignresponse', 'mockjsonfile')
        params_filepath = os.path.join(os.path.abspath(os.path.dirname(__file__)), "../" + params_file)
        if not mock:
            return None
        with open(params_filepath, 'r') as paramsfile:
            # Read the file's data
            try:
                data = json.load(paramsfile)
            except json.JSONDecodeError as error:
                logger.error('Did not get valid json in {0}: {1}. Giving up.'.format(params_filepath, error))
                return None
            except Exception as error:
                logger.error('Error encountered in loading paramsfile to json: {0}'.format(error))
                return None
        return data
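# A hypothetical bidder.conf sketch matching the lookups above:
#
#     [campaignresponse]
#     mock = true
#     mockjsonfile = mock_campaigns.json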
|
# Create hourly land and full storm catalog for the Contiguous United States (CONUS).
# Full storm catalog includes storm precipitation and area over the sea, while the land storm catalog only includes
# storms over the land of CONUS.
# author: Yuan Liu
# 2021/12/21
import numpy as np
from time import time
import pandas as pd
import os
import mpu
from geographiclib.geodesic import Geodesic
import xarray as xr
def make_folder(save_path):
"""
Function to create a new folder at specified path.
:param save_path: The path of the new folder.
:return:
"""
try:
os.mkdir(save_path)
    except OSError:
        # the folder already exists
        pass
def build_storm_catalog(year, low_threshold, track_storms, mtpr_xarray, mer_xarray,
mvimd_xarray, dwdt_xarray, conus_boundary, save_loc):
"""
Compute first-stage storm attributes based on tracking results.
:param year: The year of storm tracking.
:param low_threshold: The low threshold used in storm identification, default 0.03 mm/hour.
:param track_storms: Storm tracking result array with dim (time, lon, lat).
:param mtpr_xarray: Preprocessed precipitation nc file read by xarray package.
:param mer_xarray: Preprocessed evaporation nc file read by xarray package.
:param mvimd_xarray: Preprocessed divergence nc file read by xarray package.
:param dwdt_xarray: Preprocessed time derivative of precipitable water nc file read by xarray package
:param conus_boundary: Boundary array of the CONUS, where the grid is 1 if it is inside the CONUS.
:param save_loc: Location to save storm catalog data.
:return:
"""
# record start time
ts = time()
# create a dataframe to store all storms
full_storm_catalog = pd.DataFrame()
# create a dataframe to store only the land part of the storms
land_storm_catalog = pd.DataFrame()
# extract the data array
mer_array = mer_xarray['mer'].data
mvimd_array = mvimd_xarray['mvimd'].data
# dw/dt array data need to be shortened to have consistent time step
if year == 1979:
# note: For ERA5 data in 1979, precipitation, evaporation, and divergence start from T07,
# but dw/dt starts from T00, so dwdt should be cut to start from T07 to be consistent
dwdt_array = dwdt_xarray['dwdt'].data[7:-1]
else:
# drop the last element to be consistent with other variables.
dwdt_array = dwdt_xarray['dwdt'].data[:-1]
    # extract the timestamp and convert to string type
timestamp = mtpr_xarray['time'].data.astype("datetime64[h]").astype("str")
# extract lon and lat coordinates
lat_data = mtpr_xarray['latitude'].data
lon_data = mtpr_xarray['longitude'].data
# compute projected area of each pixel (km^2)
lon_2, lat_2 = np.meshgrid(lon_data, lat_data)
grid_cell_degree = 0.25
pixel_area = np.cos(lat_2 * np.pi / 180) * 111 * 111 * grid_cell_degree * grid_cell_degree
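    # e.g. at 40N a 0.25-degree pixel covers about
    # cos(40 deg) * 111 * 111 * 0.25 * 0.25 ~ 590 km^2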
# extract precipitation data and filter it by the low threshold
prcp_array = mtpr_xarray['mtpr'].data
filtered_array = np.where(prcp_array < low_threshold, 0, prcp_array)
# quantification
# quantify storm sizes (sqkm)
sizes = get_size_prj(storms=track_storms, grid_cell_degree=0.25, lat_data=lat_data, lon_data=lon_data)
# quantify storm avg intensity (mm/hour)
averages = get_average(storms=track_storms, precip=filtered_array)
# quantify storm max intensity (mm/hour)
max_intensity = get_max_intensity(storms=track_storms, precip=filtered_array)
# quantify storm central location (degree)
central_loc = get_central_loc_degree(storms=track_storms, precip=filtered_array,
lat_data=lat_data, lon_data=lon_data)
# find individual storm ids
unique_labels = np.unique(track_storms)
print("Total storm number: {0}".format(unique_labels.shape[0]))
# create a folder to save spatial pattern of each storm record
single_record_save_loc = os.path.join(save_loc, "single_record")
make_folder(single_record_save_loc)
# skip 0 because 0 is background
for storm_label in np.arange(1, unique_labels.max() + 1):
# find time period of the current storm
storm_binary = sizes[:, storm_label] != 0
# extract the storm period from tracking results
storm_tracks = track_storms[storm_binary]
# assign 1 to the current storm area, 0 otherwise
selected_storm = np.where(storm_tracks == storm_label, storm_label, 0)
        # extract the precipitation distribution, mer distribution, mvimd distribution, and dwdt distribution
precip_distribution = np.where(selected_storm == storm_label, filtered_array[storm_binary], 0)
mer_distribution = np.where(selected_storm == storm_label, mer_array[storm_binary], 0)
mvimd_distribution = np.where(selected_storm == storm_label, mvimd_array[storm_binary], 0)
dwdt_distribution = np.where(selected_storm == storm_label, dwdt_array[storm_binary], 0)
# compute the storm duration (hour)
duration_storm = selected_storm.shape[0]
duration_storm_list = [duration_storm] * duration_storm
# compute the duration by day
rounded_days = round(duration_storm / 24)
rounded_days_list = [rounded_days] * duration_storm
# extract timestamp sequence
time_stamp_storm = timestamp[storm_binary]
time_stamp_storm = pd.DatetimeIndex(time_stamp_storm)
# initialize lists to save storm centroids
lon_storm = []
lat_storm = []
# initialize the list to save move distance storm centroid per hour (km)
distance_list = []
# initialize the list to save storm bearing (degree)
bearing_list = []
# extract storm centroids array([lon, lat])
centroid_coord_pair = central_loc[storm_binary][:, storm_label]
for i in range(centroid_coord_pair.shape[0]):
lon_storm.append(centroid_coord_pair[i][0])
lat_storm.append(centroid_coord_pair[i][1])
if i == 0:
# the distance and bearing are 0 for the first time step
distance = 0
bearing = 0
distance_list.append(distance)
bearing_list.append(bearing)
else:
# obtain the centroids for current and previous time steps
lat1 = centroid_coord_pair[i-1][1]
lon1 = centroid_coord_pair[i-1][0]
lat2 = centroid_coord_pair[i][1]
lon2 = centroid_coord_pair[i][0]
                # compute the distance between two centroids (km)
                # https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
distance = mpu.haversine_distance((lat1, lon1), (lat2, lon2))
distance_list.append(distance)
# compute the bearing between two centroids (degree)
# https://stackoverflow.com/questions/54873868/python-calculate-bearing-between-two-lat-long
bearing = Geodesic.WGS84.Inverse(lat1, lon1, lat2, lon2)['azi1'] # return the bearing in degrees
bearing_list.append(bearing)
# compute avg intensity (mm/hour)
avg_intensity_storm = averages[storm_binary, storm_label]
# compute max intensity (mm/hour)
max_intensity_storm = max_intensity[storm_binary, storm_label]
# compute projected area (km^2)
prj_area_storm = sizes[storm_binary, storm_label]
# generate storm ID
id_storm = str(year) + str(storm_label).zfill(5)
id_storm_list = [id_storm] * duration_storm
# create lists to save distributions of storm moisture variables
list_mask = [selected_storm[i] for i in np.arange(duration_storm)] # the mask corresponds to storm labels
# precipitation distribution
list_precip_distribution = [precip_distribution[i] for i in np.arange(duration_storm)]
# evaporation distribution
list_mer_distribution = [mer_distribution[i] for i in np.arange(duration_storm)]
# divergence distribution
list_mvimd_distribution = [mvimd_distribution[i] for i in np.arange(duration_storm)]
# time derivative distribution
list_dwdt_distribution = [dwdt_distribution[i] for i in np.arange(duration_storm)]
# initialize lists to save area-weighted average moisture variables
land_avg_mtpr_list = []
land_avg_mer_list = []
land_avg_dwdt_list = []
land_avg_mvimd_list = []
land_avg_residual_list = []
# initialize lists to save only the land part of the moisture distribution of the storm
land_mtpr_distribution_list = []
land_mer_distribution_list = []
land_mvimd_distribution_list = []
land_dwdt_distribution_list = []
land_residual_distribution_list = []
# initialize the list to save storm land boundary
storm_extent_list = []
# initialize the list to save storm area over the land
land_area_list = []
# compute the land extent properties of the storm
for time_index in np.arange(duration_storm):
# extract the storm extent
storm_extent = list_precip_distribution[time_index] != 0
# extract the storm event over the land
non_nan_loc = (conus_boundary == 1) & (storm_extent)
# compute the land area (sqkm)
non_nan_extent_area = pixel_area[non_nan_loc].sum()
            # append the land extent and area to the lists
storm_extent_list.append(non_nan_loc)
land_area_list.append(non_nan_extent_area)
# get the moisture variable distributions over the land part of the storm
for time_index in np.arange(duration_storm):
land_mtpr_distribution = np.where(storm_extent_list[time_index], list_precip_distribution[time_index], 0)
land_mer_distribution = np.where(storm_extent_list[time_index], list_mer_distribution[time_index], 0)
land_mvimd_distribution = np.where(storm_extent_list[time_index], list_mvimd_distribution[time_index], 0)
land_dwdt_distribution = np.where(storm_extent_list[time_index], list_dwdt_distribution[time_index], 0)
# compute the residual distribution
land_residual_distribution = land_mtpr_distribution + land_mer_distribution + land_mvimd_distribution + land_dwdt_distribution
# append the distribution to the list
land_mtpr_distribution_list.append(land_mtpr_distribution)
land_mer_distribution_list.append(land_mer_distribution)
land_mvimd_distribution_list.append(land_mvimd_distribution)
land_dwdt_distribution_list.append(land_dwdt_distribution)
land_residual_distribution_list.append(land_residual_distribution)
# compute the area-weighted averaged moisture variables
# if land area is zero, then the average is zero
if land_area_list[time_index] == 0:
land_avg_mtpr = 0
land_avg_mer = 0
land_avg_mvimd = 0
land_avg_dwdt = 0
land_avg_residual = 0
else:
# compute the area-weighted averages
land_avg_mtpr = ((land_mtpr_distribution * pixel_area).sum()) / land_area_list[time_index]
land_avg_mer = ((land_mer_distribution * pixel_area).sum()) / land_area_list[time_index]
land_avg_mvimd = ((land_mvimd_distribution * pixel_area).sum()) / land_area_list[time_index]
land_avg_dwdt = ((land_dwdt_distribution * pixel_area).sum()) / land_area_list[time_index]
land_avg_residual = land_avg_mtpr + land_avg_mer + land_avg_mvimd + land_avg_dwdt
# append the averages to the lists
land_avg_mtpr_list.append(land_avg_mtpr)
land_avg_mer_list.append(land_avg_mer)
land_avg_dwdt_list.append(land_avg_dwdt)
land_avg_mvimd_list.append(land_avg_mvimd)
land_avg_residual_list.append(land_avg_residual)
# save storm season and month, which are determined by the first timestamp of the storm
start_month = time_stamp_storm[0].month
if start_month in [12, 1, 2]:
season_storm = "win"
season_id = 4
elif start_month in [3, 4, 5]:
season_storm = 'spr'
season_id = 1
elif start_month in [6, 7, 8]:
season_storm = 'sum'
season_id = 2
else:
season_storm = 'fal'
season_id = 3
season_id_list = [season_id] * duration_storm
season_storm_list = [season_storm] * duration_storm
month_storm = time_stamp_storm[0].month
        month_storm_list = [month_storm] * duration_storm
# create a dataframe to save all the information for the storm
storm_record = pd.DataFrame()
storm_record['ID'] = id_storm_list
storm_record['Projected_area(sqkm)'] = prj_area_storm
storm_record['Timestamp'] = time_stamp_storm
storm_record['Avg_intensity(mm/h)'] = avg_intensity_storm
storm_record['Max_intensity(mm/h)'] = max_intensity_storm
storm_record['Duration(hour)'] = duration_storm_list
storm_record['DurationDays(day)'] = rounded_days_list
storm_record['Central_lon(degree)'] = lon_storm
storm_record['Central_lat(degree)'] = lat_storm
storm_record['Season'] = season_storm_list
storm_record['Season_id'] = season_id_list
        storm_record['Month'] = month_storm_list
storm_record['Distance(km)'] = distance_list
storm_record['Bearing(degree)'] = bearing_list
storm_record['Land_area(sqkm)'] = land_area_list
storm_record['Land_area_avg_mtpr(mm/h)'] = land_avg_mtpr_list
storm_record['Land_area_avg_mer(mm/h)'] = land_avg_mer_list
storm_record['Land_area_avg_dwdt(mm/h)'] = land_avg_dwdt_list
storm_record['Land_area_avg_mvimd(mm/h)'] = land_avg_mvimd_list
storm_record['Land_area_avg_residual(mm/h)'] = land_avg_residual_list
# deep copy the storm record
land_storm_record = storm_record.copy(deep=True)
# adjust the storm duration based on its presence on land
record_length = land_storm_record.shape[0]
start_index = 0
for i in range(record_length):
if land_storm_record.loc[i, 'Land_area(sqkm)'] == 0:
land_storm_record.drop(i, axis=0, inplace=True)
start_index = start_index + 1
else:
break
# if the storm has no time on land, remove the record
if land_storm_record.shape[0] == 0:
# print('The storm is empty.')
continue
end_index = record_length
for i in range(record_length):
if land_storm_record.loc[record_length - 1 - i, 'Land_area(sqkm)'] == 0:
# print('Remove sea storm record')
land_storm_record.drop(record_length - 1 - i, axis=0, inplace=True)
end_index = end_index - 1
else:
break
# if the storm has no time on land, remove the record
if land_storm_record.shape[0] == 0:
# print('The storm is empty.')
continue
# update the storm duration
storm_duration = land_storm_record.shape[0]
storm_day = round(storm_duration / 24)
storm_duration_list = [storm_duration] * storm_duration
storm_day_list = [storm_day] * storm_duration
land_storm_record['Duration(hour)'] = storm_duration_list
land_storm_record['DurationDays(day)'] = storm_day_list
# transform the list of moisture distributions to array
mask_grid_array = np.array(list_mask[start_index:end_index])
land_mtpr_array = np.array(land_mtpr_distribution_list[start_index:end_index])
land_mer_array = np.array(land_mer_distribution_list[start_index:end_index])
land_mvimd_array = np.array(land_mvimd_distribution_list[start_index:end_index])
land_dwdt_array = np.array(land_dwdt_distribution_list[start_index:end_index])
land_residual_array = np.array(land_residual_distribution_list[start_index:end_index])
# save the array to a netcdf file
da = xr.Dataset(
data_vars={
"mask": (('time', "lat", "lon"), mask_grid_array),
"land_mtpr": (('time', "lat", "lon"), land_mtpr_array),
"land_mer": (('time', "lat", "lon"), land_mer_array),
"land_mvimd": (('time', "lat", "lon"), land_mvimd_array),
"land_dwdt": (('time', "lat", "lon"), land_dwdt_array),
"land_residual": (('time', "lat", "lon"), land_residual_array),
},
coords={
"lon": lon_data,
"lat": lat_data,
"time": time_stamp_storm[start_index:end_index]
},
attrs=dict(
description="Single record of storm " + id_storm,
units="mm/hour")
)
da.to_netcdf(os.path.join(single_record_save_loc, id_storm + ".nc"),
encoding={"mask": {"dtype": "int16", 'zlib': True},
"land_mtpr": {"dtype": "f4", 'zlib': True},
"land_mer": {"dtype": "f4", 'zlib': True},
"land_mvimd": {"dtype": "f4", 'zlib': True},
"land_dwdt": {"dtype": "f4", 'zlib': True},
"land_residual": {"dtype": "f4", 'zlib': True},
})
# append the record to the catalog
        land_storm_catalog = pd.concat([land_storm_catalog, land_storm_record], ignore_index=True)
        full_storm_catalog = pd.concat([full_storm_catalog, storm_record], ignore_index=True)
# save the storm catalog
land_storm_catalog.to_pickle(os.path.join(save_loc, "land_storm_catalog_" + str(year) + ".pkl"))
full_storm_catalog.to_pickle(os.path.join(save_loc, "full_storm_catalog_" + str(year) + ".pkl"))
print("Storm catalog in {0} finished, time spent: {1} s".format(year, time() - ts))
def get_duration(storms: np.ndarray, time_interval: float) -> np.ndarray:
"""Computes the duration (in the time unit of time_interval) of each storm across all time slices given.
:param storms: the tracked storms returned by the tracking algorithm, given as an array of dimensions
Time x Rows x Cols.
:param time_interval: the period between temporal 'snapshots', given as a float.
:return: An array of length equal to the number of tracked storms + 1, where the value at [x] corresponds to
the duration of the storm x. The index 0 (referring to the background) is always 0 and provided for ease of
indexing.
"""
# find the number of time slices in the data
lifetime = storms.shape[0]
ls = []
for time_index in range(lifetime):
# compute the labels that appear in that time slice
curr_labels = np.unique(storms[time_index])
ls.append(curr_labels)
# Convert list of different size into a numpy array
storm_array = np.zeros([len(ls), len(max(ls, key=lambda x: len(x)))])
for i, j in enumerate(ls):
storm_array[i][0:len(j)] = j
storm_array = np.array(storm_array, dtype=np.int32)
unique, counts = np.unique(storm_array, return_counts=True)
counts[0] = 0
result = counts * time_interval
return result
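# Example: for labels {0, 1} at t=0 and {0, 1, 2} at t=1 with time_interval=1.0,
# get_duration returns array([0., 2., 1.]) (index 0 is the background).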
def get_size_prj(storms: np.ndarray, grid_cell_degree: float, lat_data: np.ndarray, lon_data: np.ndarray) -> np.ndarray:
"""
Compute the size of each storm with unit of km^2.
:param storms: the tracked storms returned by the tracking algorithm, given as an array of dimensions
Time x Rows x Cols.
:param grid_cell_degree: 0.25 degree for ERA5 storms.
:param lat_data: latitude coordinate array.
:param lon_data: longitude coordinate array.
:return: a lifetime x total_storms array where the value found at [y][x] corresponds to the size of the storm at t=y,
storm=x. Except in the case of index 0, which is always 0 for any t.
"""
# compute the projected area for each pixel
lon_2, lat_2 = np.meshgrid(lon_data, lat_data)
pixel_area = np.cos(lat_2 * np.pi / 180) * 111 * 111 * grid_cell_degree * grid_cell_degree
# find the number of time slices in the data
lifetime = storms.shape[0]
# and the number of storms
total_storms = len(np.unique(storms))
# initialize an array with dimensions number of time slices by number of storms
result = np.zeros((lifetime, total_storms))
for time_index in range(lifetime):
# find the unique labels
labels = np.unique(storms[time_index])
# for each label that appears in this time slice (that's not the background)
for label in labels:
if label:
# add up its coverage area over the pixel_area_matrix
storm_size = np.sum(np.where(storms[time_index] == label, pixel_area, 0))
# and place it at that correct location in the array to return
result[time_index][label] = storm_size
return result
def get_average(storms: np.ndarray, precip: np.ndarray) -> np.ndarray:
"""
Computes the average intensity of each storm across all time slices given.
:param storms: the tracked storms returned by the tracking algorithm, given as an array of dimensions
Time x Rows x Cols.
:param precip: the precipitation data corresponding to the tracked storms, with the same dimensions as
tracked_storms.
:return: a lifetime x total_storms array where the value found at [y][x] corresponds to the mean intensity of the
storm at t=y, storm=x. Except in the case of index 0, which is always 0 for any t.
"""
# find the number of time slices in the data
lifetime = storms.shape[0]
# and the number of storms
total_storms = len(np.unique(storms))
# initialize an array with dimensions number of time slices by number of storms
result = np.zeros((lifetime, total_storms))
for time_index in range(lifetime):
# find the unique labels
labels = np.unique(storms[time_index])
# for each label that appears in this time slice (that's not the background)
for label in labels:
if label:
# find the precipitation where it appears in the current time slice
storm_precip = np.where(storms[time_index] == label, precip[time_index], 0)
# sum the precipitation
storm_precip_sum = np.sum(storm_precip)
# find the number of grid cells belonging to the storm
storm_size = np.sum(np.where(storms[time_index] == label, 1, 0))
# find the storm's average precipitation in this time slice
storm_avg = storm_precip_sum / storm_size
# and store it in the appropriate place in our result array
result[time_index][label] = storm_avg
return result
def get_max_intensity(storms: np.ndarray, precip: np.ndarray) -> np.ndarray:
"""
    Computes the maximum intensity of each storm across all time slices given.
:param storms: the tracked storms returned by the tracking algorithm, given as an array of dimensions
Time x Rows x Cols.
:param precip: the precipitation data corresponding to the tracked storms, with the same dimensions as
tracked_storms.
    :return: a lifetime x total_storms array where the value found at [y][x] corresponds to the max intensity of the
    storm at t=y, storm=x. Except in the case of index 0, which is always 0 for any t.
"""
# find the number of time slices in the data
lifetime = storms.shape[0]
# and the number of storms
total_storms = len(np.unique(storms))
# initialize an array with dimensions number of time slices by number of storms
result = np.zeros((lifetime, total_storms))
for time_index in range(lifetime):
# find the unique labels
labels = np.unique(storms[time_index])
# for each label that appears in this time slice (that's not the background)
for label in labels:
if label:
# find the precipitation where it appears in the current time slice
storm_precip = np.where(storms[time_index] == label, precip[time_index], 0)
# get the maximum precipitation
storm_precip_max = np.max(storm_precip)
# find the number of grid cells belonging to the storm
# storm_size = np.sum(np.where(storms[time_index] == label, 1, 0))
# find the storm's average precipitation in this time slice
# storm_avg = storm_precip_sum / storm_size
# and store it in the appropriate place in our result array
result[time_index][label] = storm_precip_max
return result
def get_central_loc_degree(storms: np.ndarray, precip: np.ndarray, lat_data: np.ndarray, lon_data: np.ndarray) \
-> np.ndarray:
"""
Compute the precipitation intensity weighted centroid of the storm with unit of degree.
:param storms: the tracked storms returned by the tracking algorithm, given as an array of dimensions
Time x Rows x Cols.
:param precip: the precipitation data corresponding to the tracked storms data, with the same dimensions as
tracked_storms.
    :param lat_data: latitude coordinate array (1-D, length = number of rows).
    :param lon_data: longitude coordinate array (1-D, length = number of cols).
:return: a lifetime x total_storms array where the value found at [y][x] corresponds to the central location of the
storm at t=y, storm=x. Except in the case of index 0, which is always 0 for any t.
"""
# create mesh grid of lat and lon data
lon_array, lat_array = np.meshgrid(lon_data, lat_data)
lifetime = storms.shape[0]
total_storms = len(np.unique(storms))
# initialize an array to store our result, but of type object to allow us to store an array in each cell
result = np.zeros((lifetime, total_storms)).astype(object)
    # create an array to hold each central location as we calculate it
central_location = np.empty(2)
for time_index in range(lifetime):
# find the unique labels
labels = np.unique(storms[time_index])
for label in labels:
# if the storm exists in this time slice
if label:
# find the sum of the precipitation values belonging to the storm
sum_precipitation = np.sum(np.where(storms[time_index] == label, precip[time_index], 0))
# and its intensity weighted centroid
x_avg = np.sum(np.where(storms[time_index] == label, ((lon_array * precip[time_index]) /
sum_precipitation), 0))
y_avg = np.sum(np.where(storms[time_index] == label, ((lat_array * precip[time_index]) /
sum_precipitation), 0))
# get the corresponding lat and lon data
central_location[0] = x_avg
central_location[1] = y_avg
# and we place it in the appropriate spot in the array
result[time_index][label] = central_location
                # allocate a fresh array: result stores a reference, so reusing the
                # same array would overwrite centroids saved at earlier steps
                central_location = np.zeros(2)
return result
if __name__ == "__main__":
# define save location
save_loc = "hourly_catalog"
make_folder(save_loc)
# load tracking data
tracked_storm_loc = "storm_tracking_results/tracking_array.npy"
track_storms = np.load(tracked_storm_loc, allow_pickle=True)
# load precipitation data
raw_data_folder = "era5_data"
    mtpr_xarray = xr.open_dataset(os.path.join(raw_data_folder, "ERA5_hourly_mtpr_processed_2019_2019_1_2.nc"))
    # load evaporation data
    mer_xarray = xr.open_dataset(os.path.join(raw_data_folder, "ERA5_hourly_mer_processed_2019_2019_1_2.nc"))
    # load divergence data
    mvimd_xarray = xr.open_dataset(os.path.join(raw_data_folder, "ERA5_hourly_mvimd_processed_2019_2019_1_2.nc"))
    # load time derivative data
    dwdt_xarray = xr.open_dataset(os.path.join(raw_data_folder, "ERA5_hourly_dwdt_processed_2019_2019_1_2.nc"))
# set parameters
year = 2019
low_threshold = 0.03
conus_boundary = np.load("boundary_files/conus_boundary.npy")
build_storm_catalog(year, low_threshold, track_storms, mtpr_xarray, mer_xarray,
mvimd_xarray, dwdt_xarray, conus_boundary, save_loc)
|
import InvenTree.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('company', '0033_auto_20210410_1528'),
]
operations = [
migrations.CreateModel(
name='ManufacturerPart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('MPN', models.CharField(help_text='Manufacturer Part Number', max_length=100, null=True, verbose_name='MPN')),
('link', InvenTree.fields.InvenTreeURLField(blank=True, help_text='URL for external manufacturer part link', null=True, verbose_name='Link')),
('description', models.CharField(blank=True, help_text='Manufacturer part description', max_length=250, null=True, verbose_name='Description')),
('manufacturer', models.ForeignKey(help_text='Select manufacturer', limit_choices_to={'is_manufacturer': True}, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='manufactured_parts', to='company.Company', verbose_name='Manufacturer')),
('part', models.ForeignKey(help_text='Select part', limit_choices_to={'purchaseable': True}, on_delete=django.db.models.deletion.CASCADE, related_name='manufacturer_parts', to='part.Part', verbose_name='Base Part')),
],
options={
'unique_together': {('part', 'manufacturer', 'MPN')},
},
),
]
|
import boto3
import os
import subprocess
import configparser
import pymailer
import tarfile
from hdfs.ext.kerberos import KerberosClient
from logger_setting import *
from datetime import datetime, date, timedelta
py_logger=logging.getLogger('py_logger')
try:
#Your certificate location
os.environ["REQUESTS_CA_BUNDLE"]="/etc/security/rootCA.pem"
config_path = '/edgenode/s3/download/s3_config.conf'
configuration_file = configparser.ConfigParser()
config = configuration_file.read_file(open(config_path))
access_key_id = configuration_file.get('config', 'aws_access_key_id')
secret_access_key = configuration_file.get('config', 'aws_secret_access_key')
s3_folder_name = configuration_file.get('config', 's3_folder_name')
bucket_name = configuration_file.get('config', 'bucket_name')
hive_script_name = configuration_file.get('config', 'hive_script_name')
hdfs_url = configuration_file.get('config', 'hdfs_url')
source_directory = configuration_file.get('config', 'source_directory')
log_file_path = configuration_file.get('config', 'log_file_path')
hive_db_a = configuration_file.get('config', 'hive_db_a')
hive_db_b = configuration_file.get('config', 'hive_db_b')
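    # A hypothetical s3_config.conf sketch matching the lookups above:
    #
    #     [config]
    #     aws_access_key_id = ...
    #     aws_secret_access_key = ...
    #     s3_folder_name = incoming/
    #     bucket_name = my-bucket
    #     hive_script_name = /edgenode/scripts/list_files.sh
    #     hdfs_url = https://namenode:50470
    #     source_directory = /data/landing/
    #     log_file_path = /edgenode/s3/download/logs/s3.log
    #     hive_db_a = db_a
    #     hive_db_b = db_b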
session = boto3.Session(
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
)
py_logger.info("hive execution started")
    # check_output returns bytes on Python 3; decode before splitting
    file_list = subprocess.check_output([hive_script_name, hive_db_a, hive_db_b]).decode('utf-8')
    file_list_arr = file_list.split(' ')
py_logger.info("hive execution completed")
client = KerberosClient(hdfs_url)
s3 = session.client('s3',use_ssl=False, verify=False)
counter = 0
for file_path in file_list_arr:
file_path = source_directory + file_path
file_name = os.path.basename(file_path)
key_name = s3_folder_name + file_name
with client.write(file_path) as f:
s3.download_fileobj(bucket_name, key_name, f)
counter = counter + 1
py_logger.info("File: " + file_path + " downloaded from s3 bucket")
py_logger.info("S3 script execution completed. No.of Files downloaded: " + str(counter))
    #On the 30th of each month, compress log files last modified before today
today = date.today()
current_day = datetime.now().strftime('%d')
    log_directory = os.path.dirname(log_file_path) + '/'
tarFileName = log_directory + today.strftime("%d-%m-%Y") + '.tar.gz'
if current_day == "30":
# writing files to a compressed file
with tarfile.open(tarFileName, "w:gz") as tar:
# writing each file one by one
for folderName, subfolders, filenames in os.walk(log_directory):
for filename in filenames:
# create complete filepath of file in directory
filePath = os.path.join(folderName, filename)
to_date = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')
modTimesinceEpoc = os.path.getmtime(filePath)
modificationTime = datetime.fromtimestamp(modTimesinceEpoc).strftime('%Y-%m-%d')
if (modificationTime <= to_date):
# add the file to tar.gz
tar.add(filePath)
# remove the file from directory
os.remove(filePath)
except Exception as e:
py_logger.error(e)
pymailer.send_mail(str(e))
|
# Generated by Django 3.0.7 on 2020-07-27 20:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('forum', '0008_auto_20200718_1621'),
]
operations = [
migrations.AlterModelOptions(
name='threadlike',
options={'verbose_name': 'мнение', 'verbose_name_plural': 'мнения'},
),
]
|
# Parts or the whole documentation of this module
# are copied from the respective module:
# libcloud/compute/drivers/dreamhost.py
# see also:
# https://github.com/apache/libcloud/tree/trunk/libcloud/compute/drivers/dreamhost.py
#
# Apache Libcloud is licensed under the Apache 2.0 license.
# For more information, please see LICENSE and NOTICE file or:
# http://www.apache.org/licenses/LICENSE-2.0
from javaimpl.compute.ComputeContextImpl import ComputeContextImpl
from javaimpl.compute.utils import none_check, wrap_listing, wrap_exception, jlist_str_to_pylist
from javaimpl.compute.utils import jlist_obj_to_pylist, get_property, get_property_list
from javaimpl.compute.utils import jmap_to_pymap, jlist_map_to_pylist_map
from org.askalon.jlibcloud.compute.driverSpecific.dreamhost import DreamhostNodeTemplateImpl
from javaimpl.compute.base.NodeImpl import NodeImpl
from org.askalon.jlibcloud.compute.driverSpecific.dreamhost import DreamhostComputeContext
class DreamhostComputeContextImpl(ComputeContextImpl, DreamhostComputeContext):
def __init__(self, builder):
        ComputeContextImpl.__init__(self, builder)
def createNode(self, node_temp):
"""Create a new Dreamhost node
@inherits: :class:`NodeDriver.create_node`
:keyword ex_movedata: Copy all your existing users to this new PS
:type ex_movedata: ``str``
"""
try:
kwargs = self._eval_template(node_temp)
kwargs = self._parse_dreamhost_template(node_temp, kwargs)
return wrap_listing(self.conn.create_node(**kwargs), NodeImpl)
        except Exception as ex:
raise wrap_exception(ex)
def deployNode(self, node_temp):
try:
kwargs = self._eval_template(node_temp)
kwargs = self._eval_deploy_template(node_temp, kwargs)
kwargs = self._parse_dreamhost_template(node_temp, kwargs)
return wrap_listing(self.conn.deploy_node(**kwargs), NodeImpl)
        except Exception as ex:
raise wrap_exception(ex)
def _parse_dreamhost_template(self, node_temp, kwargs):
ex_movedata = node_temp.getExMovedata()
        kwargs = get_property(self, ex_movedata, 'ex_movedata',
                              kwargs, lambda x: x)
return kwargs
def getTemplateBuilder(self):
return DreamhostNodeTemplateImpl.newBuilder()
|
# -*- coding: utf-8 -*-
pytest_plugins = [
u'ckanext.cloudstorage.tests.ckan_setup',
u'ckanext.cloudstorage.tests.fixtures',
]
|
"""
The :mod:`pure_sklearn.tree` module implements a variety of tree models.
"""
from ._classes import (
DecisionTreeClassifierPure,
ExtraTreeClassifierPure,
DecisionTreeRegressorPure,
ExtraTreeRegressorPure,
)
__all__ = [
"DecisionTreeClassifierPure",
"ExtraTreeClassifierPure",
"DecisionTreeRegressorPure",
"ExtraTreeRegressorPure",
]
|
import config
def clean_image(image):
    # Force pixels brighter than the watermark threshold to pure white.
    image[image > config.WATERMARK_THRESHOLD_LOW] = 255
    return image
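# A minimal usage sketch (assumes config.WATERMARK_THRESHOLD_LOW is an integer
# threshold applied to a uint8 grayscale image):
#
#     import numpy as np
#     img = np.array([[10, 200], [250, 30]], dtype=np.uint8)
#     clean_image(img)   # pixels above the threshold become 255, in place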
|
from unittest import TestCase
from neo.VM.InteropService import StackItem, Array, Map
from neo.VM.ExecutionEngine import ExecutionEngine
from neo.VM.ExecutionEngine import ExecutionContext
from neo.VM.Script import Script
from neo.SmartContract.Iterable import KeysWrapper, ValuesWrapper
from neo.SmartContract.Iterable.Wrapper import ArrayWrapper, MapWrapper
from neo.SmartContract.Iterable.ConcatenatedEnumerator import ConcatenatedEnumerator
from neo.SmartContract.StateMachine import StateMachine
class InteropSerializeDeserializeTestCase(TestCase):
def setUp(self):
self.engine = ExecutionEngine()
self.econtext = ExecutionContext(Script(self.engine.Crypto, b''), 0)
self.engine.InvocationStack.PushT(self.econtext)
self.service = StateMachine(None, None)
def test_iter_array(self):
my_array = Array([StackItem.New(12),
StackItem.New(b'Hello World'),
StackItem.New(True),
Array([StackItem.New(113442), StackItem.New(2), StackItem.New(3)])
])
self.econtext.EvaluationStack.PushT(my_array)
self.engine.InvocationStack.PushT(self.econtext)
self.service.Enumerator_Create(self.engine)
iterable = self.econtext.EvaluationStack.Peek(0).GetInterface()
self.assertIsInstance(iterable, ArrayWrapper)
keys = []
values = []
while iterable.Next():
currentKey = iterable.Key()
keys.append(currentKey.GetBigInteger())
values.append(iterable.Value())
self.assertEqual(keys, [0, 1, 2, 3])
self.assertEqual(values, my_array.GetArray())
def test_iter_map(self):
my_map = Map(
{
StackItem.New('a'): StackItem.New(1),
StackItem.New('b'): StackItem.New(3),
StackItem.New('d'): StackItem.New(432)
}
)
self.econtext.EvaluationStack.PushT(my_map)
self.engine.InvocationStack.PushT(self.econtext)
self.service.Iterator_Create(self.engine)
iterable = self.econtext.EvaluationStack.Peek(0).GetInterface()
self.assertIsInstance(iterable, MapWrapper)
keys = []
values = []
while iterable.Next():
keys.append(iterable.Key())
values.append(iterable.Value())
self.assertEqual(keys, [StackItem.New('a'), StackItem.New('b'), StackItem.New('d')])
self.assertEqual(keys, my_map.Keys)
self.assertEqual(values, [StackItem.New(1), StackItem.New(3), StackItem.New(432)])
self.assertEqual(values, my_map.Values)
def test_iter_array_keys(self):
my_array = Array([StackItem.New(12),
StackItem.New(b'Hello World'),
StackItem.New(True),
Array([StackItem.New(113442), StackItem.New(2), StackItem.New(3)])
])
self.econtext.EvaluationStack.PushT(my_array)
self.engine.InvocationStack.PushT(self.econtext)
self.service.Enumerator_Create(self.engine)
create_iterkeys = self.service.Iterator_Keys(self.engine)
self.assertEqual(create_iterkeys, True)
iterkeys = self.econtext.EvaluationStack.Peek(0).GetInterface()
self.assertIsInstance(iterkeys, KeysWrapper)
keys = []
while iterkeys.Next():
keys.append(iterkeys.Value().GetBigInteger())
self.assertEqual(keys, [0, 1, 2, 3])
def test_iter_array_values(self):
my_array = Array([StackItem.New(12),
StackItem.New(b'Hello World'),
StackItem.New(True),
Array([StackItem.New(113442), StackItem.New(2), StackItem.New(3)])
])
self.econtext.EvaluationStack.PushT(my_array)
self.engine.InvocationStack.PushT(self.econtext)
self.service.Enumerator_Create(self.engine)
create_itervalues = self.service.Iterator_Values(self.engine)
self.assertEqual(create_itervalues, True)
itervals = self.econtext.EvaluationStack.Peek(0).GetInterface()
self.assertIsInstance(itervals, ValuesWrapper)
values = []
while itervals.Next():
values.append(itervals.Value())
self.assertEqual(values, my_array.GetArray())
def test_iter_concat(self):
my_array = Array([StackItem.New(12),
StackItem.New(b'Hello World'),
StackItem.New(True),
Array([StackItem.New(113442), StackItem.New(2), StackItem.New(3)])
])
my_array2 = Array([StackItem.New(b'a'), StackItem.New(b'b'), StackItem.New(4), StackItem.New(100)])
self.econtext.EvaluationStack.PushT(my_array2)
self.engine.InvocationStack.PushT(self.econtext)
self.service.Enumerator_Create(self.engine)
self.econtext.EvaluationStack.PushT(my_array)
self.service.Enumerator_Create(self.engine)
result = self.service.Enumerator_Concat(self.engine)
self.assertEqual(result, True)
concatted_enum = self.econtext.EvaluationStack.Peek().GetInterface()
self.assertIsInstance(concatted_enum, ConcatenatedEnumerator)
values = []
count = 0
while concatted_enum.Next():
count += 1
values.append(concatted_enum.Value())
self.assertEqual(count, 8)
self.assertEqual(values, my_array.GetArray() + my_array2.GetArray())
def test_iter_array_bad(self):
my_item = StackItem.New(12)
self.econtext.EvaluationStack.PushT(my_item)
self.engine.InvocationStack.PushT(self.econtext)
result = self.service.Enumerator_Create(self.engine)
self.assertEqual(result, False)
self.assertEqual(self.econtext.EvaluationStack.Count, 0)
def test_iter_map_bad(self):
my_item = StackItem.New(12)
self.econtext.EvaluationStack.PushT(my_item)
self.engine.InvocationStack.PushT(self.econtext)
result = self.service.Iterator_Create(self.engine)
self.assertEqual(result, False)
self.assertEqual(self.econtext.EvaluationStack.Count, 0)
def test_iter_array_key_bad(self):
my_item = StackItem.New(12)
self.econtext.EvaluationStack.PushT(my_item)
self.engine.InvocationStack.PushT(self.econtext)
result = self.service.Iterator_Key(self.engine)
self.assertEqual(result, False)
self.assertEqual(self.econtext.EvaluationStack.Count, 0)
def test_iter_array_values_bad(self):
my_item = StackItem.New(12)
self.econtext.EvaluationStack.PushT(my_item)
self.engine.InvocationStack.PushT(self.econtext)
result = self.service.Iterator_Values(self.engine)
self.assertEqual(result, False)
self.assertEqual(self.econtext.EvaluationStack.Count, 0)
def test_iter_array_keys_bad(self):
my_item = StackItem.New(12)
self.econtext.EvaluationStack.PushT(my_item)
self.engine.InvocationStack.PushT(self.econtext)
result = self.service.Iterator_Keys(self.engine)
self.assertEqual(result, False)
self.assertEqual(self.econtext.EvaluationStack.Count, 0)
|
# ----------------------------------------------------------------------
# |
# | __main__.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2020-07-16 17:02:19
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2020-21
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Generates HTTP-based code from provided information"""
import importlib
import itertools
import os
import re
import sys
import textwrap
import yaml
from collections import namedtuple, OrderedDict
import six
import CommonEnvironment
from CommonEnvironment.CallOnExit import CallOnExit
from CommonEnvironment import CommandLine
from CommonEnvironmentEx.CompilerImpl.GeneratorPluginFrameworkImpl import GeneratorFactory
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from .Plugin import Plugin
# ----------------------------------------------------------------------
# | Load the input parsers
InputParserInfo = namedtuple(
"InputParserInfo",
[
"Mod",
"DeserializeFunc",
],
)
INPUT_PARSERS = OrderedDict()
for parser, file_extensions in [
("Json", [".json"]),
("Xml", [".xml"]),
("Yaml", [".yaml", ".yml"]),
]:
generated_filename = os.path.join(
_script_dir,
"SimpleSchema",
"GeneratedCode",
"Python{}".format(parser),
"Python{parser}_Python{parser}Serialization.py".format(
parser=parser,
),
)
assert os.path.isfile(generated_filename), generated_filename
dirname, basename = os.path.split(generated_filename)
basename = os.path.splitext(basename)[0]
sys.path.insert(0, dirname)
with CallOnExit(lambda: sys.path.pop(0)):
mod = importlib.import_module(basename)
assert mod
deserialize_func = getattr(mod, "Deserialize")
assert deserialize_func
INPUT_PARSERS[tuple(file_extensions)] = InputParserInfo(mod, deserialize_func)
# ----------------------------------------------------------------------
PLUGINS = GeneratorFactory.CreatePluginMap("DEVELOPMENT_ENVIRONMENT_HTTP_GENERATOR_PLUGINS", os.path.join(_script_dir, "Plugins"), sys.stdout)
_PluginTypeInfo = CommandLine.EnumTypeInfo(list(six.iterkeys(PLUGINS)))
# ----------------------------------------------------------------------
def _GetOptionalMetadata(*args, **kwargs):
return __GetOptionalMetadata(*args, **kwargs)
def _CreateContext(*args, **kwargs):
return __CreateContext(*args, **kwargs)
def _Invoke(*args, **kwargs):
return __Invoke(*args, **kwargs)
CodeGenerator = GeneratorFactory.CodeGeneratorFactory(
PLUGINS,
"HttpGenerator",
__doc__.replace("\n", ""),
r".+({})".format(
"|".join(itertools.chain(*INPUT_PARSERS.keys()))
),
_GetOptionalMetadata,
_CreateContext,
_Invoke,
requires_output_name=False,
)
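# Note: given the parsers registered above, the input-match expression resolves
# to r".+(.json|.xml|.yaml|.yml)". The extensions are not re-escaped, so the
# leading dots match any character; in practice this is harmless for filenames.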
# ----------------------------------------------------------------------
@CommandLine.EntryPoint(
plugin=CommandLine.EntryPoint.Parameter("Name of plugin used for generation"),
output_dir=CommandLine.EntryPoint.Parameter("Output directory used during generation; the way in which this value impacts generated output varies from plugin to plugin"),
input=CommandLine.EntryPoint.Parameter("Input filename or a directory containing input files"),
content_type_include=CommandLine.EntryPoint.Parameter("Http content type to include in generation"),
content_type_exclude=CommandLine.EntryPoint.Parameter("Http content type to exclude from generation"),
verb_include=CommandLine.EntryPoint.Parameter("Http verb to include in generation"),
verb_exclude=CommandLine.EntryPoint.Parameter("Http verb to exclude from generation"),
output_data_filename_prefix=CommandLine.EntryPoint.Parameter(
"Prefix used by the code generation implementation; provide this value to generated content from multiple plugins in the same output directory",
),
plugin_arg=CommandLine.EntryPoint.Parameter("Argument passed directly to the plugin"),
force=CommandLine.EntryPoint.Parameter("Force generation"),
verbose=CommandLine.EntryPoint.Parameter("Produce verbose output during generation"),
)
@CommandLine.Constraints(
plugin=_PluginTypeInfo,
output_dir=CommandLine.DirectoryTypeInfo(
ensure_exists=False,
),
input=CommandLine.FilenameTypeInfo(
match_any=True,
arity="*",
),
content_type_include=CommandLine.StringTypeInfo(
arity="*",
),
content_type_exclude=CommandLine.StringTypeInfo(
arity="*",
),
verb_include=CommandLine.StringTypeInfo(
arity="*",
),
verb_exclude=CommandLine.StringTypeInfo(
arity="*",
),
output_data_filename_prefix=CommandLine.StringTypeInfo(
arity="?",
),
plugin_arg=CommandLine.DictTypeInfo(
require_exact_match=False,
arity="*",
),
output_stream=None,
)
def Generate(
plugin,
output_dir,
input,
content_type_include=None,
content_type_exclude=None,
verb_include=None,
verb_exclude=None,
output_data_filename_prefix=None,
plugin_arg=None,
force=False,
output_stream=sys.stdout,
verbose=False,
):
"""Generates HTTP content using the given plugin"""
return GeneratorFactory.CommandLineGenerate(
CodeGenerator,
input,
output_stream,
verbose,
force=force,
plugin_name=plugin,
output_dir=output_dir,
content_type_includes=content_type_include,
content_type_excludes=content_type_exclude,
verb_includes=verb_include,
verb_excludes=verb_exclude,
plugin_settings=plugin_arg,
output_data_filename_prefix=output_data_filename_prefix,
)
# ----------------------------------------------------------------------
@CommandLine.EntryPoint(
output_dir=CommandLine.EntryPoint.Parameter("Output directory previously generated"),
)
@CommandLine.Constraints(
output_dir=CommandLine.DirectoryTypeInfo(),
output_stream=None,
)
def Clean(
output_dir,
output_stream=sys.stdout,
):
"""Cleans content previously generated"""
return GeneratorFactory.CommandLineClean(output_dir, output_stream)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def CommandLineSuffix():
return textwrap.dedent(
"""\
Where <plugin> can be one of the following:
{}
""",
).format(
"\n".join([" - {0:<30} {1}".format("{}:".format(pi.Plugin.Name), pi.Plugin.Description) for pi in six.itervalues(PLUGINS)])
)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def __GetOptionalMetadata():
return [
("content_type_includes", []),
("content_type_excludes", []),
("verb_includes", []),
("verb_excludes", []),
("output_data_filename_prefix", None),
]
# ----------------------------------------------------------------------
def __CreateContext(context, plugin):
# Read all the endpoint info
roots = OrderedDict()
for input_filename in context["inputs"]:
ext = os.path.splitext(input_filename)[1]
for extensions, input_parser_info in six.iteritems(INPUT_PARSERS):
if ext not in extensions:
continue
try:
root = input_parser_info.DeserializeFunc(
input_filename,
always_include_optional=True,
)
roots[input_filename] = root
break
except Exception as ex:
# Augment the exception with stack information
args = list(ex.args)
args[0] = textwrap.dedent(
"""\
{}
{}
[{}]
""",
).format(
args[0],
input_filename,
" > ".join(getattr(ex, "stack", [])),
)
ex.args = tuple(args)
raise ex from None
# Validate the endpoint info
endpoint_stack = []
# ----------------------------------------------------------------------
def Validate(input_filename, endpoint):
nonlocal endpoint_stack
endpoint_stack.append(endpoint)
with CallOnExit(endpoint_stack.pop):
try:
# Ensure that all parameters in the uri are defined in variables and vice versa
uri_variables = set()
for match in Plugin.URI_PARAMETER_REGEX.finditer(endpoint.uri):
name = match.group("name")
if name in uri_variables:
raise Exception("The uri variable '{}' has already been defined".format(name))
uri_variables.add(name)
for variable in endpoint.variables:
if variable.name not in uri_variables:
raise Exception("The uri variable '{}' was not found in the uri '{}'".format(variable.name, endpoint.uri))
uri_variables.remove(variable.name)
if uri_variables:
raise Exception("The uri variables {} were not defined".format(", ".join(["'{}'".format(variable) for variable in uri_variables])))
# Ensure that the uri variables don't overlap with parent variables
all_variables = set()
for e in endpoint_stack:
for variable in e.variables:
if variable.name in all_variables:
raise Exception("The variable '{}' has already been defined".format(variable.name))
all_variables.add(variable.name)
# Handle content that is mutually exclusive
for method in endpoint.methods:
if method.default_request and method.requests:
raise Exception("'default_request' and 'requests' are mutually exclusive and cannot both be provided ({})".format(method.verb))
for response in method.responses:
if response.default_content and response.contents:
raise Exception("'default_content' and 'contents' are mutually exclusive and cannot both be provided ({}, {})".format(method.verb, response.code))
# Validate the children
for child in endpoint.children:
Validate(input_filename, child)
except Exception as ex:
# Augment the exception with stack information
args = list(ex.args)
args[0] = textwrap.dedent(
"""\
{}
{}
[{}]
""",
).format(
args[0],
input_filename,
"".join([e.uri for e in endpoint_stack]),
)
ex.args = tuple(args)
raise ex from None
# ----------------------------------------------------------------------
for input_filename, root in six.iteritems(roots):
for endpoint in root.endpoints:
Validate(input_filename, endpoint)
# Filter the content
if (
context["content_type_includes"]
or context["content_type_excludes"]
or context["verb_includes"]
or context["verb_excludes"]
):
if context["content_type_excludes"]:
content_type_excludes_regexes = [re.compile(value) for value in context["content_type_excludes"]]
content_type_exclude_func = lambda rar: any(regex for regex in content_type_excludes_regexes if regex.match(rar.content_type))
else:
content_type_exclude_func = lambda rar: False
if context["content_type_includes"]:
content_type_include_regexes = [re.compile(value) for value in context["content_type_includes"]]
content_type_include_func = lambda rar: any(regex for regex in content_type_include_regexes if regex.match(rar.content_type))
else:
content_type_include_func = lambda rar: True
verb_includes = set([value.upper() for value in context["verb_includes"]])
verb_excludes = set([value.upper() for value in context["verb_excludes"]])
# ----------------------------------------------------------------------
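        # Filter mutates the deserialized endpoint tree in place. Deletions use
        # index-based while loops (rather than for loops) so that removing an
        # item does not skip the element that slides into its slot.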
def Filter(endpoint):
method_index = 0
while method_index < len(endpoint.methods):
method = endpoint.methods[method_index]
# Process the requests
request_index = 0
while request_index < len(method.requests):
if (
                        content_type_exclude_func(method.requests[request_index].content_type)
                        or not content_type_include_func(method.requests[request_index].content_type)
):
del method.requests[request_index]
else:
request_index += 1
# Process the responses
response_index = 0
while response_index < len(method.responses):
response = method.responses[response_index]
content_index = 0
while content_index < len(response.contents):
if (
content_type_exclude_func(response.contents[content_index].content_type)
or not content_type_include_func(response.contents[content_index].content_type)
):
                            del response.contents[content_index]
else:
content_index += 1
if not response.default_content and not response.contents:
del method.responses[response_index]
else:
response_index += 1
if (
not method.default_request
and not method.requests
and not method.responses
):
del endpoint.methods[method_index]
else:
method_index += 1
child_index = 0
while child_index < len(endpoint.children):
child = endpoint.children[child_index]
Filter(child)
if not child.methods and not child.children:
del endpoint.children[child_index]
else:
child_index += 1
# ----------------------------------------------------------------------
for input_filename, root in list(six.iteritems(roots)):
endpoint_index = 0
while endpoint_index < len(root.endpoints):
endpoint = root.endpoints[endpoint_index]
Filter(endpoint)
            if not endpoint.methods and not endpoint.children:
del root.endpoints[endpoint_index]
else:
endpoint_index += 1
if not root.endpoints:
del roots[input_filename]
# Here we have all the endpoints and need to assign them to the context.
# However, the context will be compared with previous context to see if
# a new generation is required. To make this work as expected, we need to
# compare the data within the endpoints and not the endpoints themselves.
context["persisted_roots"] = yaml.dump(roots)
return context
# ----------------------------------------------------------------------
def __Invoke(
code_generator,
invoke_reason,
context,
status_stream,
verbose_stream,
verbose,
plugin,
):
roots = yaml.load(
context["persisted_roots"],
Loader=yaml.FullLoader,
)
# ----------------------------------------------------------------------
def Postprocess(endpoint, uris):
uris.append(endpoint.uri)
with CallOnExit(uris.pop):
endpoint.full_uri = ''.join(uris).replace("//", "/")
endpoint.unique_name = endpoint.full_uri.replace("/", ".").replace("{", "__").replace("}", "__")
for child in endpoint.children:
Postprocess(child, uris)
# ----------------------------------------------------------------------
for root in six.itervalues(roots):
for endpoint in root.endpoints:
Postprocess(endpoint, [])
return plugin.Generate(
code_generator,
invoke_reason,
context["output_dir"],
roots,
status_stream,
verbose_stream,
verbose,
**context["plugin_settings"],
)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
if __name__ == "__main__":
try:
sys.exit(
CommandLine.Main()
)
except KeyboardInterrupt:
pass
|
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
import pickle
import pkg_resources
knowledge_base_path = pkg_resources.resource_filename(__name__, 'KB.p')
class Helppy:
def __init__(self, kb=None):
self.default_repo = 'https://github.com/vvaezian/Data-Science-Fundamentals'
self.kb = kb
self.load_KB()
class Topic:
def __init__(self, title=None, body='', header_size=None):
self.title = title
self.body = body
self.header_size = header_size
self.is_sub_topic = False
self.sub_topics = []
def add_to_body(self, line):
self.body += line
def load_KB(self):
'''load the pre-built knowledge-base'''
self.kb = pickle.load(open(knowledge_base_path, 'rb'))
def save_KB(self, kb_name='KB.p'):
'''Save the KB to the current directory'''
pickle.dump(self.kb, open(kb_name, 'wb'))
def refresh_KB(self, new_repo=None, replace_default_repo=True, extensions=['.md']):
'''reload the topics of the knowledge-base in case there have been some changes.
Can also add a new repository to the knowledge-base by passing the new repo url
and setting 'replace_default_repo=False'.
'''
repository = new_repo if new_repo else self.default_repo
searchTerms_and_links = self.get_searchTerms_and_links(repository, extensions=extensions)
kb = {} if replace_default_repo else self.kb
for item in tqdm(searchTerms_and_links):
search_terms, page_url = item
topics, page_body = self.process_page(self.get_raw_url(page_url))
if topics != [] or page_body != '':
kb[(page_url, search_terms)] = (topics, page_body.lower())
self.kb = kb
def find(self, header_keyword=None, page_path_keyword=None, page_body_keyword=None, results_cap=3):
'''Search the knowledge-base by providing a keyword that appears in the header of the section,
or search by providing a keyword that appears in the page.
In either case, you can optionally provide a keyword for page name (page_path_keyword) to limit the search to those pages.
'''
        if (header_keyword is None and page_body_keyword is None) or (header_keyword and page_body_keyword):
            print("Exactly one of the 'header_keyword' or 'page_body_keyword' arguments must be provided.")
            return
counter = 0
if header_keyword:
for (url, page_path_keywords), (topics, page_body) in self.kb.items():
if not page_path_keyword or page_path_keyword.lower() in page_path_keywords: # filter the results by page_path_keyword
for topic in topics:
if header_keyword.lower() in topic.title.lower():
counter += 1
# print the topic
print(url + '#' + topic.title.replace(':', '').replace(' ','-') + '\n' + '#' * topic.header_size + ' ' + topic.title + '\n' + topic.body)
# print the sub-topics of the current topic (if any)
for sub_topic in topic.sub_topics:
print(url + '#' + sub_topic.title.replace(':', '').replace(' ','-') + '\n' + '#' * sub_topic.header_size + ' ' + sub_topic.title + '\n' + sub_topic.body)
if results_cap != 0 and counter >= results_cap:
print("\nBy default at most three results are shown. You can change it by passing a different number to the 'results_cap' parameter (pass 0 for no cap).")
break
else: # page_body_keyword is provided instead of header_keyword
for (url, page_path_keywords), (topics, page_body) in self.kb.items():
if not page_path_keyword or page_path_keyword.lower() in page_path_keywords: # filter the results by page_path_keyword
if page_body_keyword.lower() in page_body:
print(url)
def get_searchTerms_and_links(self, url, extensions, all_page_links=None):
'''recursively search through the repository and find links to pages that have the provided extensions, ignoring the README.md files'''
if all_page_links is None:
all_page_links = []
res = requests.get(url)
soup = BeautifulSoup(res.text, features="html.parser")
rows = soup.findAll('div', {'role':'rowheader'})
links = [ row.select('a')[0]['href'] for row in rows ]
dir_links = [ 'https://github.com' + link for link in links if '/tree/' in link ]
page_links = []
for link in links:
for extension in extensions:
if link.endswith(extension) and not link.lower().endswith('readme.md'):
                    sections_split = link.split('/')
                    # links have this format: /[user]/[repo-name]/blob/master/[directory]/[file]
                    # use slicing rather than rstrip() to drop the extension,
                    # since rstrip strips a character set, not a suffix
                    page_path = '/'.join(sections_split[5:]).lower()
                    page_path = page_path[:-len(extension)].replace('-', ' ').replace('%20', ' ')
                    sections = sections_split[2].lower().replace('-', ' ') + '/' + page_path
page_links.append( (sections, 'https://github.com' + link) )
all_page_links.extend(page_links)
for url in dir_links:
self.get_searchTerms_and_links(url, extensions, all_page_links)
return all_page_links
@staticmethod
def get_raw_url(url):
sections = url.replace('/blob/', '/').split('/')
return 'https://raw.githubusercontent.com/' + '/'.join(sections[3:])
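        # e.g. https://github.com/user/repo/blob/master/dir/file.md
        #   -> https://raw.githubusercontent.com/user/repo/master/dir/file.md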
def process_page(self, url):
res = requests.get(url).text
res_lines = res.split('\n')
topics = []
any_header_detected_sofar = False
code_section = False
for line in res_lines:
if line.startswith('```'): # it is start or end of a code section
# toggle the code_section flag
code_section = not code_section
if line.startswith('#') and not code_section: # new header
if any_header_detected_sofar:
# add the previous topic to topics
topics.append(topic)
                # create a new topic
topic = self.Topic()
# set the title properties of the topic
topic.header_size = len(line) - len(line.lstrip('#'))
topic.title = line.strip('#').strip(' ')
any_header_detected_sofar = True
continue
# create the body of the topic, line by line
if any_header_detected_sofar:
topic.add_to_body(line.strip(' ') + '\n')
# adding the last section
if any_header_detected_sofar:
topics.append(topic)
# handling the parent-child relationships
if len(topics) > 1:
prev_topic = topics[0]
for cur_topic in topics[1:]:
                if cur_topic.header_size > prev_topic.header_size:  # more '#'s means a deeper heading
                    prev_topic.sub_topics.append(cur_topic)
                    cur_topic.is_sub_topic = True  # currently this feature is not used.
                    # since it was a sub-topic we don't change the prev_topic variable
else:
prev_topic = cur_topic
return topics, res
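# A minimal usage sketch (assumes the bundled KB.p knowledge base is present):
#   helper = Helppy()
#   helper.find(header_keyword='normalization')
#   helper.find(page_body_keyword='regression', page_path_keyword='statistics')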
|
import unittest
from spiral import spiral
class TestSpiralMethods(unittest.TestCase):
def test_spiral(self):
funcs = [spiral]
for func in funcs:
self.assertEqual(func(3), [[1,2,3], [8,9,4], [7,6,5]])
if __name__ == '__main__':
unittest.main()
|
import cv2
import numpy as np
import time
# Creating a VideoCapture object
cap = cv2.VideoCapture(0)
# Give some time for the camera to warm-up!
time.sleep(3)
background=0
for i in range(30):
avl,background = cap.read()
if avl==True:
# Laterally invert the image / flip the image.
background = np.flip(background,axis=1)
while cap.isOpened():
# Capturing the live frame
avl, img = cap.read()
    if avl == True:
        # Laterally invert the image / flip the image
        img = np.flip(img, axis=1)
# converting from BGR to HSV color space
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
# Range for lower red
lower_red = np.array([0,120,70])
upper_red = np.array([10,255,255])
mask1 = cv2.inRange(hsv, lower_red, upper_red)
        # Range for upper red
lower_red = np.array([170,120,70])
upper_red = np.array([180,255,255])
mask2 = cv2.inRange(hsv,lower_red,upper_red)
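        # Red hue wraps around the ends of OpenCV's 0-179 hue axis, so the two
        # masks (0-10 and 170-180) are combined to capture all red shades.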
# Generating the final mask to detect red color
mask1 = mask1+mask2
        mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))
        mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3,3),np.uint8))
#creating an inverted mask to segment out the cloth from the frame
mask2 = cv2.bitwise_not(mask1)
#Segmenting the cloth out of the frame using bitwise and with the inverted mask
res1 = cv2.bitwise_and(img,img,mask=mask2)
# creating image showing static background frame pixels only for the masked region
res2 = cv2.bitwise_and(background, background, mask = mask1)
#Generating the final output
final_output = cv2.addWeighted(res1,1,res2,1,0)
imshow("invcloak",final_output)
else :
break
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
cap.release()
|
cash = float(input("Quanto de dinheiro você te na carteira?"))
dol = cash * 5.46
eur = cash * 6.33
print(" Com o valor total da sua carteira {:.2f} é possivél comprar USA {:.2f} e EUR {:.2f}".format(cash, dol, eur))
|
from robodkdriver import RoboDK, RobotSerial
import struct
import numpy as np
motor_reductions = np.array([[1/48., 0, 0, 0, 0, 0],
[0, 1/48., 1/48., 0, 0, 0],
[0, 0, 1/48., 0, 0, 0],
[0, 0, 0, 1/24., -1/28.8, 1/24.],
[0, 0, 0, 0, 1/28.8, -1/48.],
[0, 0, 0, 0, 0, 1/24.]])
motor_sprs = np.array([200, 200, 200, 200, 200, 200]).T
radians_per_step = (360. / motor_sprs) * motor_reductions.T
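# Each motor has 200 steps/rev, so a full step is 1.8 degrees at the motor
# shaft; radians_per_step (despite the name, in degrees) maps motor steps to
# joint angles through the gear-reduction matrix above.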
def generate_step_instructions(delta_theta, dps = [9, 9, 9, 9, 9, 9]):
assert((np.sign(delta_theta) == np.sign(dps)).all())
delta_theta = np.array(delta_theta)
dps = np.array(dps)
instructions = []
for _ in range(6):
min_travel_time = np.inf
        for i in range(6):
            if delta_theta[i] != 0:
                # time for joint i to finish at its commanded speed (deg / deg-per-sec)
                travel_time = delta_theta[i] / dps[i]
                if travel_time < min_travel_time:
                    min_travel_time = travel_time
            else:
                dps[i] = 0
if min_travel_time == np.inf:
break
steps_per_second = np.linalg.solve(radians_per_step, dps)
        # Compute which joint completes first, then re-calculate motor speeds
steps_to_take = min_travel_time * steps_per_second
steps_to_take = steps_to_take.astype(int)
sub_delta_theta = np.dot(steps_to_take, radians_per_step.T)
delta_theta = delta_theta - sub_delta_theta
instructions.append((steps_to_take.tolist(), steps_per_second.tolist()))
return instructions
print(*generate_step_instructions([90, 90, 90, -90, 90, 90], [1, 1, 1, -3, 1, 1]),sep="\n")
class MerlinRobot(RobotSerial):
def run_command(self, cmd, args):
RoboDK.update_status('working')
if cmd == 'CONNECT':
connected = self._connect(port=args[0], baud_rate=int(args[1]))
if not connected:
return 'connection_problems'
return 'ready'
if cmd == 'DISCONNECT' or cmd == 'STOP':
disconnected = self._disconnect()
if disconnected:
return 'disconnected'
return 'ready'
if cmd == 'MOVJ':
self._send_message('M')
for i in range(6):
self._serial.write(bytearray(struct.pack("f", float(args[i]))))
if self._get_message('DONE'):
return 'ready'
return 'error'
if cmd == "MOVL":
pass # Use the move group planner to figure out joint trajectory
if cmd == 'CJNT':
self._send_message("C")
joints = self._get_message()
RoboDK.update_status(joints)
return 'ready'
if __name__ == '__main__':
RoboDK(MerlinRobot()).run_driver()
|
from __future__ import print_function
from distutils.spawn import find_executable
def cmd_exists(name):
"""Check whether `name` is an executable on PATH."""
return find_executable(name) is not None
def attr(obj, *path, **kwargs):
"""Safely get a nested attribute from an dictionary"""
default = kwargs.get('default')
if obj is None:
return default
res = obj
for p in path:
if p not in res:
return default
res = res[p]
return res
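# Examples:
#   attr({'a': {'b': 1}}, 'a', 'b')            # -> 1
#   attr({'a': {}}, 'a', 'b', default='n/a')   # -> 'n/a'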
|
import reapy
from reapy import reascript_api as RPR
from reapy.core import ReapyObject
from reapy.tools import Program
class Marker(ReapyObject):
_class_name = "Marker"
def __init__(
self, parent_project=None, index=None, parent_project_id=None
):
if parent_project_id is None:
message = (
"One of `parent_project` or `parent_project_id` must be "
"specified."
)
assert parent_project is not None, message
parent_project_id = parent_project.id
self.project_id = parent_project_id
self.index = index
def _get_enum_index(self):
"""
Return marker index as needed by RPR.EnumProjectMarkers2.
"""
code = """
index = [
i for i, m in enumerate(project.markers)
if m.index == marker.index
][0]
"""
index = Program(code, "index").run(
marker=self, project=reapy.Project(self.project_id)
)[0]
return index
@property
def _kwargs(self):
return {
"index": self.index, "parent_project_id": self.project_id
}
def delete(self):
"""
Delete marker.
"""
RPR.DeleteProjectMarker(self.project_id, self.index, False)
@property
def position(self):
"""
Return marker position.
Returns
-------
position : float
Marker position in seconds.
"""
code = """
index = marker._get_enum_index()
position = RPR.EnumProjectMarkers2(
marker.project_id, index, 0, 0, 0, 0, 0
)[4]
"""
position = Program(code, "position").run(marker=self)[0]
return position
@position.setter
def position(self, position):
"""
Set marker position.
Parameters
----------
position : float
Marker position in seconds.
"""
RPR.SetProjectMarker2(
self.project_id, self.index, False, position, 0, ""
)
|
# Centralized location for extracting data information from a THREAD_ID
# These functions can be shared between the different visualization apps
# MINT API ingestion
from __future__ import print_function
import time
import solutioncatalog
from solutioncatalog.rest import ApiException
from pprint import pprint
from solutioncatalog.download import download_files
from pathlib import Path
from osgeo import gdal
import pandas as pd
# GET Data from MINT API using scenario_id, subgoal_id and THREAD_ID
api_instance = solutioncatalog.ResultsApi(solutioncatalog.ApiClient())
def get_MINT_data(scenario_id, subgoal_id, thread_id):
try:
# Get the results of a thread
api_response = api_instance.results_scenario_id_subgoal_id_thread_id_get(scenario_id, subgoal_id, thread_id)
resultsdf = pd.DataFrame(columns=['model', 'output','url'])
results = api_response['thread']['results']
for model in results.keys():
for runs in results[model]:
for output in runs['has_output']:
resultsdf = resultsdf.append({'model': model, 'output': output, 'url': runs['has_output'][output]['url']}, ignore_index=True)
return resultsdf
# except:
# return {'Result':'No data found in Solutions Catalog for this thread'}
except ApiException as e:
print("Exception when calling ResultsApi->results_scenario_id_subgoal_id_thread_id_get: %s\n" % e)
# Get geospatial extent info from a geotiff
def get_geotiff_details(geotiff):
details ={
"center": [0,0],
"bounds": [(0,0),(0,0)]
}
data = gdal.Open(geotiff, gdal.GA_ReadOnly)
geoTransform = data.GetGeoTransform()
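    # GDAL geotransform layout: (origin_x, pixel_width, row_rotation,
    #                            origin_y, col_rotation, pixel_height)
    # pixel_height is negative for north-up images, hence miny below.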
if geoTransform is not None:
minx = geoTransform[0]
maxy = geoTransform[3]
maxx = minx + geoTransform[1] * data.RasterXSize
miny = maxy + geoTransform[5] * data.RasterYSize
details['center'] = [(miny + maxy)/2, (minx + maxx)/2]
details['bounds'] = [(miny, minx), (maxy, maxx)]
return details
|
import sys
import sc2
from sc2 import Difficulty, Race
from sc2.player import Bot, Computer
from __init__ import run_ladder_game
# Load bot
from SunshineBot import SunshineBot
bot = Bot(Race.Terran, SunshineBot())
# Start game
if __name__ == "__main__":
if "--LadderServer" in sys.argv:
# Ladder game started by LadderManager
print("Starting ladder game...")
run_ladder_game(bot)
else:
# Local game
print("Starting local game...")
sc2.run_game(
sc2.maps.get("Abyssal Reef LE"), [bot, Computer(Race.Protoss, Difficulty.VeryHard)], realtime=False
)
|
"""Base classes for actions. Actions do something with the payload produced from either
message processors or formatters."""
from typing import Any
from homebot.models import Context
from homebot.utils import AutoStrMixin, LogMixin
from homebot.validator import TypeGuardMeta
class Action(AutoStrMixin, LogMixin, metaclass=TypeGuardMeta):
"""Action base class. Provides the interface to respect."""
async def __call__(self, ctx: Context, payload: Any) -> None:
"""Performs the action."""
raise NotImplementedError() # pragma: no cover
class Console(Action):
"""Simply logs the payload to the console."""
async def __call__(self, ctx: Context, payload: Any) -> None:
"""Performs the action: Simply print the payload to the console via print."""
print("Context:", ctx, "\nPayload:", payload) # pragma: no cover
|
"""
This module defines data types in Taichi:
- primitive: int, float, etc.
- compound: matrix, vector, struct.
- template: for reference types.
- ndarray: for arbitrary arrays.
- quantized: for quantized types, see "https://yuanming.taichi.graphics/publication/2021-quantaichi/quantaichi.pdf"
"""
from taichi.types import quantized_types as quant
from taichi.types.annotations import *
from taichi.types.compound_types import *
from taichi.types.ndarray_type import *
from taichi.types.primitive_types import *
from taichi.types.utils import *
|
import torch
from torch import nn
import torch.nn.functional as F
import models
from torch.utils.data import DataLoader
import os.path as osp
from tqdm import tqdm
from torch.autograd import Variable
import numpy as np
from utils.logger import AverageMeter as meter
from data_loader import Visda_Dataset, Office_Dataset, Home_Dataset, Visda18_Dataset
from utils.loss import FocalLoss
from models.component import Discriminator
class ModelTrainer():
def __init__(self, args, data, step=0, label_flag=None, v=None, logger=None):
self.args = args
self.batch_size = args.batch_size
self.data_workers = 6
self.step = step
self.data = data
self.label_flag = label_flag
self.num_class = data.num_class
self.num_task = args.batch_size
self.num_to_select = 0
self.model = models.create(args.arch, args)
self.model = nn.DataParallel(self.model).cuda()
#GNN
self.gnnModel = models.create('gnn', args)
self.gnnModel = nn.DataParallel(self.gnnModel).cuda()
self.meter = meter(args.num_class)
self.v = v
# CE for node classification
if args.loss == 'focal':
self.criterionCE = FocalLoss().cuda()
elif args.loss == 'nll':
self.criterionCE = nn.NLLLoss(reduction='mean').cuda()
# BCE for edge
self.criterion = nn.BCELoss(reduction='mean').cuda()
self.global_step = 0
self.logger = logger
self.val_acc = 0
self.threshold = args.threshold
if self.args.discriminator:
self.discriminator = Discriminator(self.args.in_features)
self.discriminator = nn.DataParallel(self.discriminator).cuda()
def get_dataloader(self, dataset, training=False):
if self.args.visualization:
data_loader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.data_workers,
shuffle=training, pin_memory=True, drop_last=True)
return data_loader
data_loader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.data_workers,
shuffle=training, pin_memory=True, drop_last=training)
return data_loader
def adjust_lr(self, epoch, step_size):
lr = self.args.lr / (2 ** (epoch // step_size))
for g in self.optimizer.param_groups:
g['lr'] = lr * g.get('lr_mult', 1)
if epoch % step_size == 0:
print("Epoch {}, current lr {}".format(epoch, lr))
def label2edge(self, targets):
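        # targets equal to num_class mark unlabeled target samples; an edge is 1
        # when two nodes share a label, and edges touching an unlabeled target
        # node are masked out of the supervised (source) edge set.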
batch_size, num_sample = targets.size()
target_node_mask = torch.eq(targets, self.num_class).type(torch.bool).cuda()
source_node_mask = ~target_node_mask & ~torch.eq(targets, self.num_class - 1).type(torch.bool)
label_i = targets.unsqueeze(-1).repeat(1, 1, num_sample)
label_j = label_i.transpose(1, 2)
edge = torch.eq(label_i, label_j).float().cuda()
target_edge_mask = (torch.eq(label_i, self.num_class) + torch.eq(label_j, self.num_class)).type(torch.bool).cuda()
source_edge_mask = ~target_edge_mask
init_edge = edge*source_edge_mask.float()
return init_edge, target_edge_mask, source_edge_mask, target_node_mask, source_node_mask
def transform_shape(self, tensor):
batch_size, num_class, other_dim = tensor.shape
tensor = tensor.view(1, batch_size * num_class, other_dim)
return tensor
def train(self, step, epochs=70, step_size=55):
args = self.args
train_loader = self.get_dataloader(self.data, training=True)
# initialize model
# change the learning rate
if args.arch == 'res':
if args.dataset == 'visda' or args.dataset == 'office' or args.dataset == 'visda18':
param_groups = [
{'params': self.model.module.CNN.parameters(), 'lr_mult': 0.01},
{'params': self.gnnModel.parameters(), 'lr_mult': 0.1},
]
if self.args.discriminator:
param_groups.append({'params': self.discriminator.parameters(), 'lr_mult': 0.1})
else:
param_groups = [
{'params': self.model.module.CNN.parameters(), 'lr_mult': 0.05},
{'params': self.gnnModel.parameters(), 'lr_mult': 0.8},
]
if self.args.discriminator:
param_groups.append({'params': self.discriminator.parameters(), 'lr_mult': 0.8})
args.in_features = 2048
elif args.arch == 'vgg':
param_groups = [
{'params': self.model.module.extractor.parameters(), 'lr_mult': 1},
{'params': self.gnnModel.parameters(), 'lr_mult': 1},
]
args.in_features = 4096
self.optimizer = torch.optim.Adam(params=param_groups,
lr=args.lr,
weight_decay=args.weight_decay)
self.model.train()
self.gnnModel.train()
self.meter.reset()
for epoch in range(epochs):
self.adjust_lr(epoch, step_size)
with tqdm(total=len(train_loader)) as pbar:
for i, inputs in enumerate(train_loader):
images = Variable(inputs[0], requires_grad=False).cuda()
targets = Variable(inputs[1]).cuda()
targets_DT = targets[:, args.num_class - 1:].reshape(-1)
if self.args.discriminator:
domain_label = Variable(inputs[3].float()).cuda()
targets = self.transform_shape(targets.unsqueeze(-1)).squeeze(-1)
init_edge, target_edge_mask, source_edge_mask, target_node_mask, source_node_mask = self.label2edge(targets)
# extract backbone features
features = self.model(images)
features = self.transform_shape(features)
# feed into graph networks
edge_logits, node_logits = self.gnnModel(init_node_feat=features, init_edge_feat=init_edge,
target_mask=target_edge_mask)
# compute edge loss
full_edge_loss = [self.criterion(edge_logit.masked_select(source_edge_mask), init_edge.masked_select(source_edge_mask)) for edge_logit in edge_logits]
norm_node_logits = F.softmax(node_logits[-1], dim=-1)
if args.loss == 'nll':
source_node_loss = self.criterionCE(torch.log(norm_node_logits[source_node_mask, :] + 1e-5),
targets.masked_select(source_node_mask))
elif args.loss == 'focal':
source_node_loss = self.criterionCE(norm_node_logits[source_node_mask, :],
targets.masked_select(source_node_mask))
edge_loss = 0
for l in range(args.num_layers - 1):
edge_loss += full_edge_loss[l] * 0.5
edge_loss += full_edge_loss[-1] * 1
loss = 1 * edge_loss + args.node_loss* source_node_loss
if self.args.discriminator:
unk_label_mask = torch.eq(targets, args.num_class-1).squeeze()
domain_pred = self.discriminator(features)
temp = domain_pred.view(-1)[~unk_label_mask]
domain_loss = self.criterion(temp, domain_label.view(-1)[~unk_label_mask]) #(targets.size(1) / temp.size(0)) *
loss = loss + args.adv_coeff * domain_loss
node_pred = norm_node_logits[source_node_mask, :].detach().cpu().max(1)[1]
node_prec = node_pred.eq(targets.masked_select(source_node_mask).detach().cpu()).double().mean()
# Only for debugging
target_labels = Variable(inputs[2]).cuda()
target_labels = self.transform_shape(target_labels.unsqueeze(-1)).view(-1)
if target_node_mask.any():
target_pred = norm_node_logits[target_node_mask, :].max(1)[1]
# <unlabeled> data mask
pseudo_label_mask = ~torch.eq(targets_DT, args.num_class).detach().cpu()
# remove unk for calculation
unk_label_mask = torch.eq(target_labels[~pseudo_label_mask], args.num_class - 1).detach().cpu()
# only predict on <unlabeled> data
target_prec = target_pred.eq(target_labels[~pseudo_label_mask]).double().data.cpu()
# update prec calculation on <unlabeled> data
self.meter.update(target_labels[~pseudo_label_mask].detach().cpu().view(-1).numpy(),
target_prec.numpy())
# For pseudo_labeled data, remove unk data for prec calculation
pseudo_unk_mask = torch.eq(target_labels[pseudo_label_mask], args.num_class - 1).detach().cpu()
pseudo_prec = torch.eq(target_labels[pseudo_label_mask], targets_DT[pseudo_label_mask]).double()
if True in pseudo_unk_mask:
self.meter.update(target_labels[pseudo_label_mask].detach().cpu().masked_select(~pseudo_unk_mask).view(-1).numpy(),
pseudo_prec.detach().cpu().masked_select(~pseudo_unk_mask).view(-1).numpy())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.logger.global_step += 1
if self.args.discriminator:
self.logger.log_scalar('train/domain_loss', domain_loss, self.logger.global_step)
self.logger.log_scalar('train/node_prec', node_prec, self.logger.global_step)
self.logger.log_scalar('train/edge_loss', edge_loss, self.logger.global_step)
self.logger.log_scalar('train/OS_star', self.meter.avg[:-1].mean(), self.logger.global_step)
self.logger.log_scalar('train/OS', self.meter.avg.mean(), self.logger.global_step)
pbar.update()
if i > 150:
break
if (epoch + 1) % args.log_epoch == 0:
print('---- Start Epoch {} Training --------'.format(epoch))
for k in range(args.num_class - 1):
print('Target {} Precision: {:.3f}'.format(args.class_name[k], self.meter.avg[k]))
print('Step: {} | {}; Epoch: {}\t'
'Training Loss {:.3f}\t'
'Training Prec {:.3%}\t'
'Target Prec {:.3%}\t'
.format(self.logger.global_step, len(train_loader), epoch, loss.data.cpu().numpy(),
node_prec.data.cpu().numpy(), self.meter.avg[:-1].mean()))
self.meter.reset()
# save model
states = {'model': self.model.state_dict(),
'graph': self.gnnModel.state_dict(),
'iteration': self.logger.global_step,
'val_acc': node_prec,
'optimizer': self.optimizer.state_dict()}
torch.save(states, osp.join(args.checkpoints_dir, '{}_step_{}.pth.tar'.format(args.experiment, step)))
self.meter.reset()
def estimate_label(self):
args = self.args
print('label estimation...')
if args.dataset == 'visda':
test_data = Visda_Dataset(root=args.data_dir, partition='test', label_flag=self.label_flag, target_ratio=self.step * args.EF / 100)
elif args.dataset == 'office':
test_data = Office_Dataset(root=args.data_dir, partition='test', label_flag=self.label_flag,
source=args.source_name, target=args.target_name, target_ratio=self.step * args.EF / 100)
elif args.dataset == 'home':
test_data = Home_Dataset(root=args.data_dir, partition='test', label_flag=self.label_flag, source=args.source_name,
target=args.target_name, target_ratio=self.step * args.EF / 100)
elif args.dataset == 'visda18':
test_data = Visda18_Dataset(root=args.data_dir, partition='test', label_flag=self.label_flag,
target_ratio=self.step * args.EF / 100)
self.meter.reset()
# append labels and scores for target samples
pred_labels = []
pred_scores = []
real_labels = []
target_loader = self.get_dataloader(test_data, training=False)
self.model.eval()
self.gnnModel.eval()
with tqdm(total=len(target_loader)) as pbar:
for i, (images, targets, target_labels, _, split) in enumerate(target_loader):
images = Variable(images, requires_grad=False).cuda()
targets = Variable(targets).cuda()
targets = self.transform_shape(targets.unsqueeze(-1)).squeeze(-1)
init_edge, target_edge_mask, source_edge_mask, target_node_mask, source_node_mask = self.label2edge(targets)
# extract backbone features
features = self.model(images)
features = self.transform_shape(features)
torch.cuda.empty_cache()
# feed into graph networks
edge_logits, node_logits = self.gnnModel(init_node_feat=features, init_edge_feat=init_edge,
target_mask=target_edge_mask)
del features
norm_node_logits = F.softmax(node_logits[-1], dim=-1)
target_score, target_pred = norm_node_logits[target_node_mask, :].max(1)
# only for debugging
target_labels = Variable(target_labels).cuda()
target_labels = self.transform_shape(target_labels.unsqueeze(-1)).view(-1)
pred = target_pred.detach().cpu()
target_prec = pred.eq(target_labels.detach().cpu()).double()
self.meter.update(
target_labels.detach().cpu().view(-1).data.cpu().numpy(),
target_prec.numpy())
pred_labels.append(target_pred.cpu().detach())
pred_scores.append(target_score.cpu().detach())
real_labels.append(target_labels.cpu().detach())
if i % self.args.log_step == 0:
print('Step: {} | {}; \t'
'OS Prec {:.3%}\t'
.format(i, len(target_loader),
self.meter.avg.mean()))
pbar.update()
pred_labels = torch.cat(pred_labels)
pred_scores = torch.cat(pred_scores)
real_labels = torch.cat(real_labels)
self.model.train()
self.gnnModel.train()
self.num_to_select = int(len(target_loader) * self.args.batch_size * (self.args.num_class - 1) * self.args.EF / 100)
return pred_labels.data.cpu().numpy(), pred_scores.data.cpu().numpy(), real_labels.data.cpu().numpy()
def select_top_data(self, pred_score):
# remark samples if needs pseudo labels based on classification confidence
if self.v is None:
self.v = np.zeros(len(pred_score))
unselected_idx = np.where(self.v == 0)[0]
if len(unselected_idx) < self.num_to_select:
self.num_to_select = len(unselected_idx)
index = np.argsort(-pred_score[unselected_idx])
index_orig = unselected_idx[index]
num_pos = int(self.num_to_select * self.threshold)
num_neg = self.num_to_select - num_pos
for i in range(num_pos):
self.v[index_orig[i]] = 1
        for i in range(num_neg):
            # index from the end; -(i + 1) avoids index_orig[-0] aliasing index_orig[0]
            self.v[index_orig[-(i + 1)]] = -1
return self.v
def generate_new_train_data(self, sel_idx, pred_y, real_label):
# create the new dataset merged with pseudo labels
assert len(sel_idx) == len(pred_y)
new_label_flag = []
pos_correct, pos_total, neg_correct, neg_total = 0, 0, 0, 0
for i, flag in enumerate(sel_idx):
if i >= len(real_label):
break
if flag > 0:
new_label_flag.append(pred_y[i])
pos_total += 1
if real_label[i] == pred_y[i]:
pos_correct += 1
elif flag < 0:
# assign the <unk> pseudo label
new_label_flag.append(self.args.num_class - 1)
pred_y[i] = self.args.num_class - 1
neg_total += 1
if real_label[i] == self.args.num_class - 1:
neg_correct += 1
else:
new_label_flag.append(self.args.num_class)
self.meter.reset()
self.meter.update(real_label, (pred_y == real_label).astype(int))
for k in range(self.args.num_class):
print('Target {} Precision: {:.3f}'.format(self.args.class_name[k], self.meter.avg[k]))
for k in range(self.num_class):
self.logger.log_scalar('test/' + self.args.class_name[k], self.meter.avg[k], self.step)
self.logger.log_scalar('test/ALL', self.meter.sum.sum() / self.meter.count.sum(), self.step)
self.logger.log_scalar('test/OS_star', self.meter.avg[:-1].mean(), self.step)
self.logger.log_scalar('test/OS', self.meter.avg.mean(), self.step)
print('Node predictions: OS accuracy = {:0.4f}, OS* accuracy = {:0.4f}'.format(self.meter.avg.mean(), self.meter.avg[:-1].mean()))
correct = pos_correct + neg_correct
total = pos_total + neg_total
acc = correct / total
pos_acc = pos_correct / pos_total
neg_acc = neg_correct / neg_total
new_label_flag = torch.tensor(new_label_flag)
# update source data
if self.args.dataset == 'visda':
new_data = Visda_Dataset(root=self.args.data_dir, partition='train', label_flag=new_label_flag,
target_ratio=(self.step + 1) * self.args.EF / 100)
elif self.args.dataset == 'office':
new_data = Office_Dataset(root=self.args.data_dir, partition='train', label_flag=new_label_flag,
source=self.args.source_name, target=self.args.target_name,
target_ratio=(self.step + 1) * self.args.EF / 100)
elif self.args.dataset == 'home':
new_data = Home_Dataset(root=self.args.data_dir, partition='train', label_flag=new_label_flag,
source=self.args.source_name, target=self.args.target_name,
target_ratio=(self.step + 1) * self.args.EF / 100)
elif self.args.dataset == 'visda18':
new_data = Visda18_Dataset(root=self.args.data_dir, partition='train', label_flag=new_label_flag,
target_ratio=(self.step + 1) * self.args.EF / 100)
print('selected pseudo-labeled data: {} of {} is correct, accuracy: {:0.4f}'.format(correct, total, acc))
print('positive data: {} of {} is correct, accuracy: {:0.4f}'.format(pos_correct, pos_total, pos_acc))
print('negative data: {} of {} is correct, accuracy: {:0.4f}'.format(neg_correct, neg_total, neg_acc))
return new_label_flag, new_data
def one_hot_encode(self, num_classes, class_idx):
return torch.eye(num_classes, dtype=torch.long)[class_idx]
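    # e.g. one_hot_encode(3, torch.tensor([0, 2])) -> tensor([[1, 0, 0], [0, 0, 1]])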
def load_model_weight(self, path):
print('loading weight')
state = torch.load(path)
self.model.load_state_dict(state['model'])
self.gnnModel.load_state_dict(state['graph'])
def label2edge_gt(self, targets):
        '''
        create the initial edge map and edge mask for unlabeled targets
        '''
batch_size, num_sample = targets.size()
target_node_mask = torch.eq(targets, self.num_class).type(torch.bool).cuda()
source_node_mask = ~target_node_mask & ~torch.eq(targets, self.num_class - 1).type(torch.bool)
label_i = targets.unsqueeze(-1).repeat(1, 1, num_sample)
label_j = label_i.transpose(1, 2)
edge = torch.eq(label_i, label_j).float().cuda()
target_edge_mask = (torch.eq(label_i, self.num_class) + torch.eq(label_j, self.num_class)).type(
torch.bool).cuda()
source_edge_mask = ~target_edge_mask
# unlabeled flag
return (edge*source_edge_mask.float())
def extract_feature(self):
print('Feature extracting...')
self.meter.reset()
# append labels and scores for target samples
vgg_features_target = []
node_features_target = []
labels = []
overall_split = []
target_loader = self.get_dataloader(self.data, training=False)
self.model.eval()
self.gnnModel.eval()
num_correct = 0
skip_flag = self.args.visualization
with tqdm(total=len(target_loader)) as pbar:
for i, (images, targets, target_labels, _, split) in enumerate(target_loader):
# for debugging
# if i > 100:
# break
images = Variable(images, requires_grad=False).cuda()
targets = Variable(targets).cuda()
# only for debugging
# target_labels = Variable(target_labels).cuda()
targets = self.transform_shape(targets.unsqueeze(-1)).squeeze(-1)
target_labels = self.transform_shape(target_labels.unsqueeze(-1)).squeeze(-1).cuda()
init_edge, target_edge_mask, source_edge_mask, target_node_mask, source_node_mask = self.label2edge(targets)
# gt_edge = self.label2edge_gt(target_labels)
# extract backbone features
features = self.model(images)
features = self.transform_shape(features)
# feed into graph networks
edge_logits, node_feat = self.gnnModel(init_node_feat=features, init_edge_feat=init_edge,
target_mask=target_edge_mask)
vgg_features_target.append(features.data.cpu())
#####heat map only
# temp = np.array(edge_logits[0].data.cpu()) * 4
# ax = sns.heatmap(temp.squeeze(), vmax=1)#
# cbar = ax.collections[0].colorbar
# # here set the labelsize by 20
# cbar.ax.tick_params(labelsize=17)
# plt.savefig('heat/' + str(i) + '.png')
# plt.close()
###########
node_features_target.append(node_feat[-1].data.cpu())
labels.append(target_labels.data.cpu())
overall_split.append(split)
if skip_flag and i > 50:
break
pbar.update()
return vgg_features_target, node_features_target, labels, overall_split
|
# Generated by Django 3.2.5 on 2021-07-22 08:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alumni', '0002_auto_20210721_2315'),
]
operations = [
migrations.AlterModelOptions(
name='studentsheet',
options={'verbose_name': '学生表单', 'verbose_name_plural': '导入学生表单'},
),
migrations.AddField(
model_name='studentsheet',
name='hasBeenProceeded',
field=models.BooleanField(default=False),
),
]
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Handle cancel and help intents."""
from botbuilder.core import BotTelemetryClient, NullTelemetryClient
from botbuilder.dialogs import (
ComponentDialog,
DialogContext,
DialogTurnResult,
DialogTurnStatus,
)
from botbuilder.schema import ActivityTypes
class CancelAndHelpDialog(ComponentDialog):
"""Implementation of handling cancel and help."""
def __init__(
self,
dialog_id: str,
telemetry_client: BotTelemetryClient = NullTelemetryClient(),
):
super(CancelAndHelpDialog, self).__init__(dialog_id)
self.telemetry_client = telemetry_client
async def on_begin_dialog(
self, inner_dc: DialogContext, options: object
) -> DialogTurnResult:
result = await self.interrupt(inner_dc)
if result is not None:
return result
return await super(CancelAndHelpDialog, self).on_begin_dialog(inner_dc, options)
async def on_continue_dialog(self, inner_dc: DialogContext) -> DialogTurnResult:
result = await self.interrupt(inner_dc)
if result is not None:
return result
return await super(CancelAndHelpDialog, self).on_continue_dialog(inner_dc)
async def interrupt(self, inner_dc: DialogContext) -> DialogTurnResult:
"""Detect interruptions."""
if inner_dc.context.activity.type == ActivityTypes.message:
text = inner_dc.context.activity.text.lower()
if text in ("help", "?"):
await inner_dc.context.send_activity("Show Help...")
return DialogTurnResult(DialogTurnStatus.Waiting)
if text in ("cancel", "quit"):
await inner_dc.context.send_activity("Cancelling")
return await inner_dc.cancel_all_dialogs()
return None
|
import unittest
from lexer.token import Token, TokenType, TokenError
class TestToken(unittest.TestCase):
def test_identifier_token_creation(self):
token_inputs = (TokenType.IDENTIFIER, 'ID', '[a-zA-Z_][a-zA-Z0-9_]*')
token = Token(*token_inputs)
self._test_token_creation(token, token_inputs)
def test_keyword_token_creation(self):
token_inputs = (TokenType.KEYWORD, 'FUNCTION', 'function')
token = Token(*token_inputs)
self._test_token_creation(token, token_inputs)
def test_separator_token_creation(self):
token_inputs = (TokenType.SEPARATOR, 'LPAREN', '(')
token = Token(*token_inputs)
self._test_token_creation(token, token_inputs)
def test_operator_token_creation(self):
token_inputs = (TokenType.OPERATOR, 'PLUS', '+')
token = Token(*token_inputs)
self._test_token_creation(token, token_inputs)
def test_string_literal_token_creation(self):
token_inputs = (TokenType.LITERAL, 'STRING', '[a-zA-Z_][a-zA-Z0-9_]*')
token = Token(*token_inputs)
self._test_token_creation(token, token_inputs)
def test_numeric_literal_token_creation(self):
        token_inputs = (TokenType.LITERAL, 'NUMBER', r'\d+')
token = Token(*token_inputs)
self._test_token_creation(token, token_inputs)
def test_token_creation_with_invalid_token_type(self):
with self.assertRaises(TokenError):
Token(123, 'ID', 'something')
def test_token_creation_with_empty_token_type(self):
with self.assertRaises(TokenError):
Token(None, 'ID', 'something')
def test_token_creation_with_empty_name(self):
with self.assertRaises(TokenError):
Token(TokenType.LITERAL, None, 'something')
def test_token_creation_with_empty_value(self):
with self.assertRaises(TokenError):
Token(TokenType.LITERAL, 'ID', None)
def _test_token_creation(self, token, token_inputs):
(token_type, name, value) = token_inputs
self.assertEqual(token.token_type, token_type)
self.assertEqual(token.name, name)
self.assertEqual(token.value, value)
self.assertEqual(str(token), f'{name}:{token_type.name}="{value}"')
|
"""
pyProm: Copyright 2018.
This software is distributed under a license that is described in
the LICENSE file that accompanies it.
"""
from pyprom.lib.containers.multipoint import MultiPoint
from pyprom.lib.locations.saddle import Saddle
def generate_MultiPoint(x, y, xSpan, ySpan,
datamap, elevation,
excludeBGPC=[]):
"""
Generate a rectangular MultiPoint, with the ability to exclude
points.
:param x: upper x coordinate
:param y: upper y coordinate
:param xSpan: span on x axis
:param ySpan: span on y axis
:param datamap: :class:`Datamap`
:param elevation: elevation
:param excludeBGPC: [BaseGridPointContainer, BaseGridPointContainer...]
points to remove from MultiPoint
:return: :class:`MultiPoint`
"""
mpBlock = []
for xx in range(x, x + xSpan):
for yy in range(y, y + ySpan):
            # collect every point in the rectangle; excluded points (our
            # islands) are removed from the MultiPoint afterwards
            mpBlock.append((xx, yy))
mp = MultiPoint(mpBlock, elevation, datamap)
for excluded in excludeBGPC:
for point in excluded.points:
mp.points.remove(point.to_tuple())
return mp
def generate_multipoint_saddle(x, y, xSpan, ySpan,
datamap, elevation,
islands=[],
perimeterHighNeighborhoods=1):
"""
Generate a rectangular MultiPoint Saddle, with the ability to exclude
points (islands). and generate highPerimeterNeighborhoods on the Perimeter.
:param x: upper x coordinate
:param y: upper y coordinate
:param xSpan: span on x axis
:param ySpan: span on y axis
:param datamap: :class:`Datamap`
:param elevation: elevation
:param islands: [BaseGridPointContainer, BaseGridPointContainer...]
islands to remove from multipoint. islands will be elevation of mp +1
DO NOT MAKE AN ISLAND MORE THAN 2 POINTS WIDE. This function is
not designed to be smart in any way.
:param perimeterHighNeighborhoods: number of perimeter highPerimeterNeighborhoods to make up.
:return: :class:`MultiPoint`
"""
mp = generate_MultiPoint(x, y, xSpan, ySpan,
datamap, elevation,
excludeBGPC=islands)
saddle = Saddle(x, y, elevation)
saddle.multipoint = mp
islandGPCs = []
for island in islands:
islandGridPoints = []
for islandPoint in island:
islandGridPoints.append((islandPoint.x,
islandPoint.y,
elevation + 1))
islandGPCs.append(islandGridPoints)
highPerimeter = []
# Dumb highPerimeterNeighborhood generator. One point along y axis. Since
# this is for testing, make sure not to set `perimeterHighNeighborhoods`
# to more than the ySpan + 2. Again, this is dumb.
for highPerimeterNeighborhoodIdx in range(perimeterHighNeighborhoods):
hs = (x - 1, y - 1 + highPerimeterNeighborhoodIdx, elevation + 1)
highPerimeter.append([hs])
saddle.highPerimeterNeighborhoods = islandGPCs + highPerimeter
return saddle
|
from __future__ import unicode_literals
from ..elements.elementbase import LogicElement, Attribute
from ..filter import MoyaFilter, MoyaFilterExpression
class Filter(LogicElement):
"""
Define a [i]filter[/i], which may be used in expressions.
Here's an example of a filter:
[code xml]
<filter name="repeat">
<return-str>${str:value * count}</return-str>
</filter>
[/code]
And here is how you might use it in an expression:
[code xml]
<echo>${"beetlejuice "|repeat(count=3)}</echo>
[/code]
"""
class Help:
synopsis = "define a filter"
name = Attribute("Filter name", required=True)
value = Attribute("Value name", default="value", required=False)
expression = Attribute("Expression", type="function", required=False, default=None)
missing = Attribute(
"Allow missing values?", type="boolean", default=False, required=False
)
def lib_finalize(self, context):
validator = None
for signature in self.children("signature"):
validator = signature.get_validator(context)
expression = self.expression(context)
value_name = self.value(context)
allow_missing = self.missing(context)
if expression is not None:
_filter = MoyaFilterExpression(
expression, value_name, allow_missing=allow_missing
)
else:
_filter = MoyaFilter(
self.lib,
self.libid,
value_name,
allow_missing=allow_missing,
validator=validator,
)
self.lib.register_filter(self.name(context), _filter)
|
#
# C O N S T A N T S
#
# Various constants
# This is just the text that will be placed on the visible treatment plan
SPRAYER_NAME = ""
# Names for the attributes
# Some of these are a bit redundant and are there for backwards compatibility
NAME_AREA = "area"
NAME_TYPE = "type"
NAME_LOCATION = "location"
NAME_CENTER = "center"
NAME_CONTOUR = "contour"
NAME_SHAPE_INDEX = "shape_index"
NAME_RATIO = "lw_ratio"
NAME_IMAGE = "image"
NAME_REASON = "reason"
NAME_SIZE_RATIO = "size_ratio"
NAME_BLUE = "blue"
NAME_DISTANCE = "distance"
NAME_DISTANCE_NORMALIZED = "normalized_distance"
NAME_NAME = "name"
NAME_NUMBER = "number"
NAME_HEIGHT = "height"
NAME_NEIGHBOR_COUNT = "neighbors"
NAME_HUE = "hue"
NAME_HUE_MEAN = "hue_mean"
NAME_HUE_STDDEV = "hue_stddev"
NAME_SATURATION = "saturation_mean"
NAME_SATURATION_STDEV = "saturation_stddev"
NAME_I_YIQ = "in_phase"
NAME_I_MEAN = "in_phase_mean"
NAME_I_STDDEV = "in_phase_stddev"
NAME_Q_MEAN = "quadrature_mean"
NAME_Q_STDDEV = "quadrature_stddev"
NAME_BLUE_DIFFERENCE = "cb_mean"
NAME_BLUE_DIFFERENCE_MEAN = "cb_mean"
NAME_BLUE_DIFFERENCE_STDEV = "cb_stddev"
NAME_RED_DIFFERENCE_MEAN = "cr_mean"
NAME_RED_DIFFERENCE_STDEV = "cr_stddev"
NAME_COMPACTNESS = "compactness"
NAME_ELONGATION = "elongation"
NAME_ECCENTRICITY = "eccentricity"
NAME_ROUNDNESS = "roundness"
NAME_CONVEXITY = "convexity"
NAME_SOLIDITY = "solidity"
# Temporary
NAME_CROP_SCORE = "score"
# A shortcut used for the command line
NAME_ALL = "all"
NAME_NONE = "none"
names = [NAME_AREA, NAME_TYPE, NAME_LOCATION, NAME_CENTER, NAME_CONTOUR, NAME_SHAPE_INDEX, NAME_RATIO, NAME_IMAGE, NAME_REASON, NAME_HUE_MEAN, NAME_SATURATION]
# REASONS why things were scored the way they were
REASON_UNKNOWN = 0
REASON_AT_EDGE = 1
REASON_SIZE_RATIO = 2
REASON_LOGISTIC_REGRESSION = 3
REASON_KNN = 4
REASON_DECISION_TREE = 5
REASON_RANDOM_FOREST = 6
REASON_GRADIENT = 7
REASONS = ["Unknown", "At Edge", "Size", "Logistic", "KNN", "Decision Tree", "Random Forest", "Gradient"]
# TYPES of vegetation
TYPE_DESIRED = 0
TYPE_UNDESIRED = 1
TYPE_UNTREATED = 2
TYPE_IGNORED = 3
TYPE_UNKNOWN = 4
TYPES = ["Desired", "Undesired", "Untreated", "Ignored", "Unknown"]
# Items contained in performance csv for analysis
PERF_ANGLES = "angles"
PERF_ACQUIRE = "acquire"
PERF_CLASSIFY = "classify"
PERF_CONTOURS = "contours"
PERF_INDEX = "index"
PERF_LW_RATIO = "LWRatio"
PERF_OVERLAP = "overlap"
PERF_REGRESSION = "regression"
PERF_SHAPES = "shapes"
PERF_TREATMENT = "treatment"
PERF_COLORS = "colors"
PERF_HSI = "hsi"
PERF_HSV = "hsv"
PERF_YIQ = "yiq"
PERF_YCC = "ycc"
PERF_MEAN = "mean"
PERF_STDDEV = "stddev"
PERF_COMPACTNESS= "compactness"
PERF_TITLE_ACTIVITY = "activity"
PERF_TITLE_MILLISECONDS = "milliseconds"
COLOR_TREATMENT_GRID = (0,255,0)
COLOR_TREATMENT_WEED = (0,0,255)
SIZE_TREATMENT_LINE = 4
# Properties
PROPERTY_PIXELS_PER_MM = "PIXELS-PER-MM"
|
#import numpy as np
import neworder as no
print(no.__version__)
no.verbose()
class Test(no.Model):
    def __init__(self, timeline):
super().__init__(timeline, no.MonteCarlo.deterministic_identical_stream)
m = no.Model(no.NoTimeline(), no.MonteCarlo.deterministic_identical_stream)
t = no.NoTimeline()
print(t)
print(m.timeline) # base class
print(m.mc)
print(m.mc.ustream(10))
#m = Test()
no.run(m)
|
from rest_framework.serializers import ModelSerializer
from watch.models import Score
class ScoreSerializer(ModelSerializer):
class Meta:
model = Score
fields = "__all__"
|
import logging
import os,site,importlib, inspect
from flare.config import conf
from flare.views.view import View
sitepackagespath = site.getsitepackages()[0]
def generateView( view:View, moduleName, actionName, name = None, data = () ):
instView = view()
instView.params = {
"moduleName":moduleName,
"actionName":actionName,
"data":data
}
if name:
instView.name = name
else:
instView.name = moduleName + actionName
return instView
def addView(view: View, name=None):
    '''
    Add a View and make it available.
    '''
    logging.debug("addView: %r, %r", view, name)
    instView = view()
    if not name:
        name = instView.name
    conf["views_registered"].update({name: instView})
def updateDefaultView(name):
    conf["views_default"] = name
def removeView(name, targetView=None):
    del conf["views_registered"][name]
    if not targetView:
        targetView = conf["views_default"]
    conf["views_state"].updateState("activeView", targetView)  # switch to the default View
def registerViews(path):
    '''
    Add all Views found in a folder.
    '''
    rootModule = path.replace(sitepackagespath, "").replace("/", ".")[1:]
    for viewFile in os.listdir(path):
        logging.debug("found view %r", viewFile)
        if viewFile == "__init__.py" or not viewFile.endswith(".py"):
            continue
        viewFile = viewFile[:-3]
        if viewFile in conf["views_blacklist"]:
            continue
        try:
            _import = importlib.import_module("%s.%s" % (rootModule, viewFile))
            for _name in dir(_import):
                if _name.startswith("_"):
                    continue
                _symbol = getattr(_import, _name)
                if inspect.isclass(_symbol) and issubclass(_symbol, View) and _name != View.__name__:
                    addView(_symbol)
        except Exception as err:
            logging.error("Unable to import View from '%s'", viewFile)
            logging.exception(err)
            raise
|
import pytest
@pytest.mark.asyncio
async def test_video_info(client):
video = await client.video("fLAcgHX160k")
assert video.title == "The Advent of Omegaα"
|
from napari.plugins.io import read_data_with_plugins
def read_image(imagefile):
data, _ = read_data_with_plugins(imagefile)
return data
data = read_image("/Users/cjw/Desktop/RGB11/MeOH-QS-002.tif")
data2 = read_image("Data/test.nd2")
print(data[0][0].shape, data[0][0].mean(axis=(1,2)))
print(data2[0][0].shape, data2[0][0].mean(axis=(1,2)))
|
"""
Note: Implemented with old implementation of remove_outliers.
"""
# packages
import sys
import os
import numpy as np
sys.path.insert(0, os.path.abspath("../src/ssh-kd"))
from plugin.seg import remove_outliers, helper_object_points
from utils.data_prep import read_file, get_outliers
# hiding the warnings
import warnings
warnings.filterwarnings('ignore')
# Clustering
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
def plot_g(x, y, xlim, ylim, filename, c=False, centroid=False):
    """ Plot the points using matplotlib.
        params: x = x points
                y = y points
                xlim = (x_max, x_min) x-range limits for the plot
                ylim = (y_max, y_min) y-range limits for the plot
                filename = output path for the saved figure
                c = colors if available else False
                centroid = centre point (lidar position)
        return: the plot is saved to filename
    """
    fig = plt.figure(figsize=(20, 20), dpi=80, facecolor='w', edgecolor='k')
ax = fig.gca()
if c:
plt.scatter(x, y, s=4, c=c)
else:
plt.scatter(x, y, s=4)
if centroid:
plt.scatter(0, 0, s=400, c="red")
plt.grid()
plt.xlim(xlim[0], xlim[1])
plt.ylim(ylim[0], ylim[1])
# plt.show()
plt.savefig(filename)
def _cluster(X_):
    """ Cluster using DBSCAN.
        params: X_ = list of (x, y) points
        return: labels
    """
clustering = DBSCAN(eps=1, min_samples=8).fit(X_)
labels = clustering.labels_
return labels
def format_data(df):
""" format data for cluster input.
params: df = dataframe with transform _x,_y
return: numpy array of x,y point
"""
return np.array(list(zip(np.array(df["_x"]),np.array(df["_y"]))))
def normalize(df):
""" Normalize the point using min_max normalizer.
params: df = dataframe with transform _x,_y
return: df with normalized _x,_y points
"""
df['_x'] = (df['_x']-df['_x'].min())/(df['_x'].max()-df['_x'].min())
df['_y'] = (df['_y']-df['_y'].min())/(df['_y'].max()-df['_y'].min())
return df
def project(df, d, view):
    """ Project with perspective projection.
        formula: x' = x*d/z
                 y' = y*d/z
        params: df = dataframe with x, y, z points
                d = distance of the perspective projection
                view = w.r.t. X or w.r.t. Z
        return: df with transformed _x, _y points
    """
    v = "X" if view == "X" else "Z"
    z = "X" if view == "Z" else "Z"
    df['_x'] = (df[z] / df[v]) * d
    df['_y'] = (df['Y'] / df[v]) * d
    return df
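# Worked example (illustrative numbers): with view="Z" the divisor column is Z,
# so a point (X=10, Y=4, Z=2) projected with d=5 gives
#   _x = (X/Z)*d = (10/2)*5 = 25.0
#   _y = (Y/Z)*d = (4/2)*5  = 10.0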
def get_data(object_type, object, num_scenes):
    """ Load the data from the dataset and apply ground removal and outlier removal.
        params: object_type = train or test
                object = name of the object
                num_scenes = number of scenes to read
        return: dataframe of the data
    """
dataset_dir = "../src/dataset/train"
# Create the outliers
get_outliers(dataset_dir)
# Creating the path for the object
object_path = "../{}/{}/in.csv".format("src/dataset/{}".format(object_type), object)
# read in the object of choice
dataframes = read_file(object_path, num_scenes)
# remove the outliers
no_outliers = remove_outliers(dataframes, num_scenes, '../src/ssh-kd/data/outliers.pkl')
# get the object points
return no_outliers
if __name__=="__main__":
for dir_ in os.listdir('../src/dataset/train):
# Experimenting with train data'
no_outliers = get_data("train",dir_, num_scenes=50)
for each_scn in range(len(no_outliers)):
# Get Each scene and work on it, scene selected 6
scene_df = helper_object_points(no_outliers[each_scn], 4)
# Apply projection
proj_df = project(scene_df, 5,view="Z")
# Apply normalization
proj_df = normalize(proj_df)
# Plot the transformation
            # create the output directory if it does not already exist
            os.makedirs('../src/projection/' + dir_ + '/', exist_ok=True)
plot_g(7*proj_df['_x'],3*proj_df['_y'],(-35,35),(-15,15),'../src/projection/'+dir_+'/'+str(each_scn)+'.png')
|
# -*- coding: utf-8 -*-
# ==========================================================================
# Copyright (C) since 2020 All rights reserved.
#
# filename : hello_world.py
# author : chendian / okcd00@qq.com
# date : 2020-05-16
# desc :
# ==========================================================================
print("hello world.")
|
import elbus_async
import asyncio
async def main():
name = 'test.client.python.async_sender'
bus = elbus_async.client.Client('/tmp/elbus.sock', name)
await bus.connect()
# send a regular message
result = await bus.send('test.client.python.async',
elbus_async.client.Frame('hello'))
print(hex(await result.wait_completed()))
# send a broadcast message
result = await bus.send(
'test.*',
elbus_async.client.Frame('hello everyone',
tp=elbus_async.client.OP_BROADCAST))
print(hex(await result.wait_completed()))
# publish to a topic with zero QoS (no confirmation required)
await bus.send(
'test/topic',
elbus_async.client.Frame('something',
tp=elbus_async.client.OP_PUBLISH,
qos=0))
asyncio.run(main())
|
# weight/forms.py
# Jake Malley
# 03/02/15
"""
Defines the forms used in the weight blueprint.
"""
# Imports
from flask_wtf import Form
from wtforms import DecimalField
from wtforms.validators import DataRequired, NumberRange
class AddWeightForm(Form):
"""
Form for users to add weight.
"""
# Decimal field for the weight.
    weight = DecimalField('weight', validators=[DataRequired(), NumberRange(min=0, max=200)])
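# A minimal usage sketch (the route, helper, and template names below are
# hypothetical, for illustration only):
# @weight.route('/add', methods=['GET', 'POST'])
# def add_weight():
#     form = AddWeightForm()
#     if form.validate_on_submit():
#         save_weight(current_user, form.weight.data)  # hypothetical helper
#         return redirect(url_for('weight.history'))
#     return render_template('weight/add.html', form=form)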
|
a, b, c = map(int, input().split())  # assumed input: three integers on one line
print(min(a, b, c))
|
import mock
from datetime import timedelta
from tornado import gen
from tornado.testing import gen_test
from microproxy.test.utils import ProxyAsyncTestCase
from microproxy.protocol.http2 import Connection
from microproxy.context import HttpRequest, HttpResponse, HttpHeaders
class TestConnection(ProxyAsyncTestCase):
def setUp(self):
super(TestConnection, self).setUp()
self.asyncSetUp()
self.request = None
self.response = None
self.settings = None
self.window_updates = None
self.priority_updates = None
self.push = None
self.reset = None
@gen_test
def asyncSetUp(self):
self.client_stream, self.server_stream = yield self.create_iostream_pair()
self.addCleanup(self.client_stream.close)
self.addCleanup(self.server_stream.close)
def on_request(self, stream_id, request, priority_updated):
self.request = (stream_id, request)
def on_response(self, stream_id, response):
self.response = (stream_id, response)
def on_settings(self, settings):
self.settings = settings
def on_window_updates(self, stream_id, delta):
self.window_updates = (stream_id, delta)
def on_priority_updates(self, stream_id, depends_on,
weight, exclusive):
self.priority_updates = dict(
stream_id=stream_id, depends_on=depends_on,
weight=weight, exclusive=exclusive)
def on_push(self, pushed_stream_id, parent_stream_id, request):
self.push = dict(
pushed_stream_id=pushed_stream_id,
parent_stream_id=parent_stream_id,
request=request)
def on_reset(self, stream_id, error_code):
self.reset = (stream_id, error_code)
@gen_test
def test_on_request(self):
client_conn = Connection(self.client_stream, client_side=True)
client_conn.initiate_connection()
client_conn.send_request(
client_conn.get_next_available_stream_id(),
HttpRequest(headers=[
(":method", "GET"),
(":path", "/"),
("aaa", "bbb")]))
server_conn = Connection(
self.server_stream, client_side=False, on_request=self.on_request,
on_settings=self.on_settings)
server_conn.initiate_connection()
yield server_conn.read_bytes()
self.assertIsNotNone(self.request)
_, request = self.request
self.assertEqual(request.headers,
HttpHeaders([
(":method", "GET"),
(":path", "/"),
("aaa", "bbb")]))
self.assertEqual(request.method, "GET")
self.assertEqual(request.path, "/")
self.assertEqual(request.version, "HTTP/2")
@gen_test
def test_on_response(self):
client_conn = Connection(
self.client_stream, client_side=True, on_response=self.on_response,
on_unhandled=mock.Mock())
client_conn.initiate_connection()
client_conn.send_request(
client_conn.get_next_available_stream_id(),
HttpRequest(headers=[
(":method", "GET"),
(":path", "/"),
("aaa", "bbb")]))
server_conn = Connection(
self.server_stream, client_side=False,
on_request=self.on_request, on_unhandled=mock.Mock())
server_conn.initiate_connection()
yield server_conn.read_bytes()
server_conn.send_response(
self.request[0],
HttpResponse(
headers=[(":status", "200"),
("aaa", "bbb")],
body=b"ccc"))
yield client_conn.read_bytes()
self.assertIsNotNone(self.response)
_, response = self.response
self.assertEqual(response.headers,
HttpHeaders([
(":status", "200"),
("aaa", "bbb")]))
self.assertEqual(response.code, "200")
self.assertEqual(response.version, "HTTP/2")
@gen_test
def test_on_settings(self):
client_conn = Connection(
self.client_stream, client_side=True, on_unhandled=mock.Mock())
client_conn.initiate_connection()
server_conn = Connection(
self.server_stream, client_side=False, on_settings=self.on_settings)
server_conn.initiate_connection()
yield server_conn.read_bytes()
        # NOTE: h2 initiate_connection will send default settings
self.assertIsNotNone(self.settings)
self.settings = None
client_conn.send_update_settings({
4: 11111, 5: 22222})
yield server_conn.read_bytes()
self.assertIsNotNone(self.settings)
        new_settings = {id: cs.new_value for (id, cs) in self.settings.items()}
self.assertEqual(new_settings, {4: 11111, 5: 22222})
@gen_test
def test_on_window_updates(self):
client_conn = Connection(
self.client_stream, client_side=True, on_unhandled=mock.Mock())
client_conn.initiate_connection()
client_conn.send_window_updates(
0, 100)
server_conn = Connection(
self.server_stream, client_side=False, on_settings=self.on_settings,
on_window_updates=self.on_window_updates)
server_conn.initiate_connection()
yield server_conn.read_bytes()
self.assertIsNotNone(self.window_updates)
self.assertEqual(self.window_updates, (0, 100))
@gen_test
def test_on_priority_updates(self):
client_conn = Connection(
self.client_stream, client_side=True, on_unhandled=mock.Mock())
client_conn.initiate_connection()
stream_id = client_conn.get_next_available_stream_id()
client_conn.send_request(
stream_id,
HttpRequest(headers=[
(":method", "GET"),
(":path", "/"),
("aaa", "bbb")]))
client_conn.send_priority_updates(
stream_id, 0, 10, False)
server_conn = Connection(
self.server_stream, client_side=False,
on_priority_updates=self.on_priority_updates,
on_unhandled=mock.Mock())
server_conn.initiate_connection()
yield server_conn.read_bytes()
self.assertIsNotNone(self.priority_updates)
self.assertEqual(
self.priority_updates,
dict(stream_id=stream_id, depends_on=0, weight=10, exclusive=False))
@gen_test
def test_on_pushed_stream(self):
client_conn = Connection(
self.client_stream, client_side=True, on_push=self.on_push,
on_unhandled=mock.Mock())
client_conn.initiate_connection()
client_conn.send_request(
client_conn.get_next_available_stream_id(),
HttpRequest(headers=[
(":method", "GET"),
(":path", "/")]))
server_conn = Connection(
self.server_stream, client_side=False, on_request=self.on_request,
on_unhandled=mock.Mock())
server_conn.initiate_connection()
yield server_conn.read_bytes()
stream_id, _ = self.request
server_conn.send_pushed_stream(
stream_id,
2,
HttpRequest(headers=[
(":method", "GET"),
(":path", "/resource")]))
yield client_conn.read_bytes()
self.assertIsNotNone(self.push)
self.assertEqual(self.push["parent_stream_id"], 1)
self.assertEqual(self.push["pushed_stream_id"], 2)
self.assertEqual(
self.push["request"].headers,
HttpHeaders([
(":method", "GET"),
(":path", "/resource")]))
@gen_test
def test_on_reset(self):
client_conn = Connection(
self.client_stream, client_side=True, on_reset=self.on_reset,
on_unhandled=mock.Mock())
client_conn.initiate_connection()
client_conn.send_request(
client_conn.get_next_available_stream_id(),
HttpRequest(headers=[
(":method", "GET"),
(":path", "/")]))
server_conn = Connection(
self.server_stream, client_side=False, on_request=self.on_request,
on_unhandled=mock.Mock())
        server_conn.initiate_connection()
        yield server_conn.read_bytes()
stream_id, _ = self.request
server_conn.send_reset(stream_id, 2)
yield client_conn.read_bytes()
self.assertIsNotNone(self.reset)
self.assertEqual(self.reset, (stream_id, 2))
@gen_test
def test_on_terminate(self):
client_conn = Connection(
self.client_stream, client_side=True, on_unhandled=mock.Mock())
client_conn.initiate_connection()
on_terminate = mock.Mock()
server_conn = Connection(
self.server_stream, client_side=False, on_terminate=on_terminate,
on_unhandled=mock.Mock())
server_conn.initiate_connection()
yield server_conn.read_bytes()
client_conn.send_terminate()
yield server_conn.read_bytes()
on_terminate.assert_called_with(None, 0, 0)
@gen_test
def test_on_post_request(self):
client_conn = Connection(self.client_stream, client_side=True)
client_conn.initiate_connection()
client_conn.send_request(
client_conn.get_next_available_stream_id(),
HttpRequest(headers=[
(":method", "POST"),
(":path", "/"),
("aaa", "bbb")], body=b"aaaa"))
server_conn = Connection(
self.server_stream, client_side=False, on_request=self.on_request,
on_settings=self.on_settings)
server_conn.initiate_connection()
yield server_conn.read_bytes()
self.assertIsNotNone(self.request)
_, request = self.request
self.assertEqual(request.headers,
HttpHeaders([
(":method", "POST"),
(":path", "/"),
("aaa", "bbb")]))
self.assertEqual(request.method, "POST")
self.assertEqual(request.path, "/")
self.assertEqual(request.version, "HTTP/2")
self.assertEqual(request.body, b"aaaa")
@gen_test
def test_readonly(self):
client_conn = Connection(self.client_stream, client_side=True, readonly=True)
client_conn.initiate_connection()
client_conn.send_request(
client_conn.get_next_available_stream_id(),
HttpRequest(headers=[
(":method", "GET"),
(":path", "/"),
("aaa", "bbb")]))
with self.assertRaises(gen.TimeoutError):
yield gen.with_timeout(
timedelta(milliseconds=100),
self.server_stream.read_bytes(1))
def tearDown(self):
self.client_stream.close()
self.server_stream.close()
|
import pandas as __pd
import datetime as __dt
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
__first_part_url = "market/"
def smf(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik sistem marjinal fiyatını (SMF) ve sistem yönünü vermektedir.
Parametreler
----------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
------
Sistem Marjinal Fiyatı, Sistem Yönü
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "smp" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["smpList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"price": "SMF", "smpDirection": "Sistem Yönü"},
inplace=True)
df = df[["Tarih", "Saat", "SMF", "Sistem Yönü"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için dengeleme güç piyasası YAL/YAT talimat miktar bilgilerini vermektedir.
Parametreler
----------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
------
YAL/YAT Talimat Miktarları (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "bpm-order-summary" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["bpmorderSummaryList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"net": "Net", "upRegulationZeroCoded": "YAL (0)",
"upRegulationOneCoded": "YAL (1)", "upRegulationTwoCoded": "YAL (2)",
"downRegulationZeroCoded": "YAT (0)", "downRegulationOneCoded": "YAT (1)",
"downRegulationTwoCoded": "YAT (2)", "upRegulationDelivered": "Teslim Edilen YAL",
"downRegulationDelivered": "Teslim Edilen YAT", "direction": "Sistem Yönü"},
inplace=True)
df["Sistem Yönü"] = df["Sistem Yönü"].map(
{"IN_BALANCE": "Dengede", "ENERGY_SURPLUS": "Enerji Fazlası", "ENERGY_DEFICIT": "Enerji Açığı"})
df = df[
["Tarih", "Saat", "Net", "YAL (0)", "YAL (1)", "YAL (2)", "Teslim Edilen YAL", "YAT (0)", "YAT (1)",
"YAT (2)", "Teslim Edilen YAT", "Sistem Yönü"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
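# A minimal usage sketch (dates are illustrative):
#   df_smf = smf("2021-01-01", "2021-01-07")    # hourly SMP and system direction
#   df_vol = hacim("2021-01-01", "2021-01-07")  # hourly YAL/YAT instruction volumes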
|
# coding: utf8
import requests
import json
if __name__ == "__main__":
    # Build the texts to match as a dict:
    # {"text_1": [text_a1, text_a2, ...], "text_2": [text_b1, text_b2, ...]}
text = {
"text_1": ["这道题太难了", "这道题太难了", "这道题太难了"],
"text_2": ["这道题是上一年的考题", "这道题不简单", "这道题很有意思"]
}
    # Use simnet_bow as the matching method and send a POST request
url = "http://127.0.0.1:8866/predict/text/simnet_bow"
r = requests.post(url=url, data=text)
    # Print the matching results
print(json.dumps(r.json(), indent=4, ensure_ascii=False))
|
import pandas as pd
import numpy as np
import math
import random
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.layers.recurrent import LSTM
#% matplotlib inline
import matplotlib.pyplot as plt
random.seed(0)
# random noise coefficient
random_factor = 0.05
# steps per cycle
steps_per_cycle = 80
# number of cycles to generate
number_of_cycles = 50
# input sequence length
length_of_sequences = 100
# dimensionality of the input and output values
in_out_neurons = 1
# number of hidden neurons
hidden_neurons = 300
np_ary = np.arange(steps_per_cycle * number_of_cycles + 1)
data = np.load(input("Enter the stock-price data (.npy) file: "))
arg_data = data[1:] / data[:len(data)-1] - 1
df = pd.DataFrame(arg_data, columns=["stock"])
#df = pd.DataFrame(np_ary, columns=["x"])
#pi_t = 2 * math.pi / steps_per_cycle
#df["sin(x)"] = df.x.apply(lambda t: math.sin(t * pi_t + random.uniform(-1.0, +1.0) * random_factor))
df[["stock"]].head(steps_per_cycle * 2).plot()
################################################################
def Input_Ans_Extract(data, input_num=100):
    InputList, AnsList = [], []
    for i in range(len(data) - input_num):
        InputData = data.iloc[i:i + input_num].values
        AnsData = data.iloc[i + input_num].values
InputList.append(InputData)
AnsList.append(AnsData)
InputList_np = np.array(InputList)
AnsList_np = np.array(AnsList)
return InputList_np, AnsList_np
def Data_Split(df, test_size=0.1, input_num=100):
train_size = round(len(df) * (1 - test_size))
train_size = int(train_size)
Input_train, Ans_train = Input_Ans_Extract(df.iloc[0:train_size], input_num)
Input_test, Ans_test = Input_Ans_Extract(df.iloc[train_size:], input_num)
return (Input_train, Ans_train), (Input_test, Ans_test)
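# Shape sketch (illustrative): with input_num=100, Input_Ans_Extract turns a
# single-column series of length N into N-100 sliding windows; window i uses
# rows i..i+99 as input and row i+100 as the answer, so Input has shape
# (N-100, 100, 1) and Ans has shape (N-100, 1).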
(Input_train, Ans_train), (Input_test, Ans_test) = Data_Split(df[["stock"]], input_num = length_of_sequences)
################################################################
model = Sequential()
model.add(LSTM(hidden_neurons, batch_input_shape=(None, length_of_sequences, in_out_neurons), return_sequences=False))
model.add(Dense(in_out_neurons))
model.add(Activation("linear"))
model.compile(loss="mean_squared_error", optimizer="rmsprop")
model.fit(Input_train, Ans_train, batch_size=60, epochs=3, validation_split=0.05)
################################################################
predicted = model.predict(Input_test)
################################################################
dataf = pd.DataFrame(predicted[:200])
dataf.columns = ["predict"]
dataf.plot()
dataf["answer"] = Ans_test[:200]
dataf.plot()
plt.show()
|
from numpy import inf, nan
from sklearn.decomposition import TruncatedSVD as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class _TruncatedSVDImpl:
def __init__(self, **hyperparams):
self._hyperparams = hyperparams
self._wrapped_model = Op(**self._hyperparams)
def fit(self, X, y=None):
if y is not None:
self._wrapped_model.fit(X, y)
else:
self._wrapped_model.fit(X)
return self
def transform(self, X):
return self._wrapped_model.transform(X)
_hyperparams_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "inherited docstring for TruncatedSVD Dimensionality reduction using truncated SVD (aka LSA).",
"allOf": [
{
"type": "object",
"required": ["n_components", "algorithm", "n_iter", "random_state", "tol"],
"relevantToOptimizer": ["n_components", "algorithm", "n_iter", "tol"],
"additionalProperties": False,
"properties": {
"n_components": {
"type": "integer",
"minimumForOptimizer": 2,
"maximumForOptimizer": 256,
"distribution": "uniform",
"default": 2,
"description": "Desired dimensionality of output data",
},
"algorithm": {
"enum": ["arpack", "randomized"],
"default": "randomized",
"description": "SVD solver to use",
},
"n_iter": {
"type": "integer",
"minimumForOptimizer": 5,
"maximumForOptimizer": 1000,
"distribution": "uniform",
"default": 5,
"description": "Number of iterations for randomized SVD solver",
},
"random_state": {
"anyOf": [
{"type": "integer"},
{"laleType": "numpy.random.RandomState"},
{"enum": [None]},
],
"default": None,
"description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
},
"tol": {
"type": "number",
"minimumForOptimizer": 1e-08,
"maximumForOptimizer": 0.01,
"distribution": "loguniform",
"default": 0.0,
"description": "Tolerance for ARPACK",
},
},
}
],
}
_input_fit_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Fit LSI model on training data X.",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "Training data.",
},
"y": {},
},
}
_input_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Perform dimensionality reduction on X.",
"type": "object",
"required": ["X"],
"properties": {
"X": {
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
"description": "New data.",
}
},
}
_output_transform_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Reduced version of X",
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Combined schema for expected data and hyperparameters.",
"documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.TruncatedSVD#sklearn-decomposition-truncatedsvd",
"import_from": "sklearn.decomposition",
"type": "object",
"tags": {"pre": [], "op": ["transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_transform": _input_transform_schema,
"output_transform": _output_transform_schema,
},
}
TruncatedSVD = make_operator(_TruncatedSVDImpl, _combined_schemas)
set_docstrings(TruncatedSVD)
|
config = dict(
    SUPPORTED_LANGUAGES=["de", "en", "fr", "it", "es", "ar", "tr", "el", "ru", "uk", "pl"],
    RECORDING_DURATION=8.0,  # not used
    SPEAKING_ANSWER_TIMEOUT=2,
    # PRINTED_LANGUAGES=["en", "de", "el", "tr", "ar"],
    SERIAL_PORT="/dev/ttyACM0",
    API_ADRESS="http://localhost:3030"
)
|
import html
with open('./app/app.py', 'r') as f:
source = f.read()
with open('./app/files/source.html', 'w') as f:
f.write('<html><body><code><pre>' + html.escape(source) + '</pre></code></body></html>')
|
from django.contrib.auth.mixins import (UserPassesTestMixin,
LoginRequiredMixin, PermissionRequiredMixin)
from django.http import HttpResponse
from django.shortcuts import redirect
import re
from design import models as design_model
class PageableMixin(object):
def get_context_data(self, **kwargs):
context = super(PageableMixin, self).get_context_data(**kwargs)
paginator = context['paginator']
context['page_all_count'] = paginator.count
page_numbers_range = 5 # Display only 5 page numbers
max_index = paginator.page_range[-1]
page = self.request.GET.get('page')
current_page = int(page) if page else 1
start_index = int((current_page - 1) / page_numbers_range) * page_numbers_range
end_index = start_index + page_numbers_range
if end_index >= max_index:
end_index = max_index
page_range = paginator.page_range[start_index:end_index]
context['page_range'] = page_range
return context
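    # Worked example (illustrative): with page_numbers_range=5 and
    # current_page=7, start_index = int(6/5)*5 = 5 and end_index = 10, so
    # page_range becomes paginator.page_range[5:10], i.e. page numbers 6-10.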
class DataSearchFormMixin(object):
def get_context_data(self, **kwargs):
context = super(DataSearchFormMixin, self).get_context_data(**kwargs)
context['data_search_form'] = self.data_search_form(
q=self.request.GET.get('q') if self.request.GET.get('q') else ''
)
if self.request.GET.get('q'):
context['q'] = self.request.GET.get('q')
return context
class UserIsStaffMixin(UserPassesTestMixin):
def test_func(self):
return self.request.user.is_staff
def dispatch(self, request, *args, **kwargs):
if not self.request.user.is_staff:
return redirect('design:home')
user_test_result = self.get_test_func()()
if not user_test_result:
            return HttpResponse('Access denied: only Wemix administrators may access this page.')
return super(UserPassesTestMixin, self).dispatch(request, *args, **kwargs)
class DeliveryMixin(object):
    def check(self, request, serializer):
        # count must always be returned
delivery = 0
count = 1
price_d = 0
try:
size = serializer.data['size']
paper = serializer.data['paper']
side = serializer.data['side']
deal = serializer.data['deal'].replace(',', '')
amount = int(serializer.data['amount'])
item = request.META['HTTP_REFERER'].split('/?item=')[-1]
        except (AttributeError, KeyError):
size = serializer['size']
paper = serializer['paper']
side = serializer['side']
deal = serializer['deal'].replace(',', '')
amount = int(serializer['amount'])
item = serializer['kind']
        try:
            i = int(re.findall(r'\d+', deal)[-1])
        except IndexError:
            i = None
del_model = design_model.DeliveryPrice.objects.filter(kind=item)
if del_model:
if 'flyer' in item:
count = i * amount
if 'A' in size:
price_d = del_model.get(size__icontains='A').sell
delivery = round(float(price_d)* i * amount * 1.1)
elif 'B' in size:
price_d = del_model.get(size__icontains='B').sell
delivery = round(float(price_d) * i * amount * 1.1)
elif 'card' in item:
price_d = del_model.get(kind='card').sell
            i = int(re.findall(r'\d+', deal)[-1]) * amount
if i <= 10500:
delivery = round(float(price_d)*1.1)
elif i <= 26000:
count = 2
delivery = round(float(price_d)*count*1.1)
else:
count = 3
delivery = round(float(price_d)*count*1.1)
return delivery, count, price_d
|
#!/usr/bin/env python
"""
Created by howie.hu at 2021/1/6.
"""
import ruia
BANNER = f"""
✨ Write less, run faster({ruia.__version__}).
__________ .__ .__ .__ .__
\______ \__ __|__|____ _____| |__ ____ | | | |
| _/ | \ \__ \ / ___/ | \_/ __ \| | | |
| | \ | / |/ __ \_ \___ \| Y \ ___/| |_| |__
|____|_ /____/|__(____ / /____ >___| /\___ >____/____/
\/ \/ \/ \/ \/
Available Objects :
response : ruia.Response
request : ruia.Request
Available Functions :
attr_field : Extract attribute elements by using css selector or xpath
text_field : Extract text elements by using css selector or xpath
fetch : Fetch a URL or ruia.Request
"""
# spider : ruia.Spider
# r_help() : Tips for use
|
def estimate_home_value(size_in_sqft, number_of_bedrooms):
    # Assume all homes are worth at least $50,000
    value = 50000
    # Adjust the value estimate based on the size of the house
    value += size_in_sqft * 92
    # Adjust the value estimate based on the number of bedrooms
    value += number_of_bedrooms * 10000
    return value
# Estimate the value of our house:
# - 5 bedrooms
# - 3800 sqft
# Actual value: $450,000
value = estimate_home_value(3800, 5)
print("Estimated value:")
print(value)
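# Sanity check of the arithmetic above: 50000 + 3800*92 + 5*10000
# = 50000 + 349600 + 50000 = 449600, close to the $450,000 actual value.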
|
D = int(input())
print("Christmas"+" Eve"*(25-D))
|
# Number of palindromes
#
# Call a number a palindrome if it stays the same when its digits are written in reverse order. Write a program that, given a number K, prints how many natural palindromes do not exceed K.
#
# ## Input format
#
# A single number K (1 ≤ K ≤ 100000) is given.
#
# ## Output format
#
# Print the number of natural palindromes that do not exceed K.
#
# ## Examples
#
# ```bash
# Test 1
# Input:
# 1
# Program output:
# 1
#
# Test 2
# Input:
# 100
# Program output:
# 18
#
# Test 3
# Input:
# 10
# Program output:
# 9
# ```
#
# ## Solution
k = int(input())
i = 1
ii = 0
while i <= k:
a = i
n = 0
while a > 0:
z = a % 10
a //= 10
n *= 10
n += z
if n == i:
ii += 1
i += 1
print(ii)
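# An equivalent string-based sketch of the same check:
#   sum(1 for i in range(1, k + 1) if str(i) == str(i)[::-1])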
|
import numpy as np
from collections import defaultdict
def pairwise_view(target_station, next_station, mismatch='error'):
    if target_station is None or next_station is None:
        raise ValueError("The data is empty.")
    if target_station.shape != next_station.shape:
        return None  # ValueError("Paired station mismatched")
    return ViewDefinition(y=target_station, x=next_station)
def multipair_view(target_station, stations):
"""
Args:
target_station:
stations:
Returns:
"""
assert all(target_station.shape == n_station.shape for n_station in stations)
dt = np.hstack(stations)
return ViewDefinition(y=target_station, x=dt)
class View(object):
def __init__(self):
self.X = None
self.y = None
self.label = None
self.view_metadata = defaultdict()
    def make_view(self, target_station, k_stations):
        raise NotImplementedError
    def to_json(self):
        raise NotImplementedError
    @classmethod
    def from_json(cls, json_file):
        raise NotImplementedError
class ViewDefinition:
"""
View definition format.
"""
def __init__(self, name=None, label=None, x=None, y=None):
self.name = name
self.label = label
if isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
self.x = x
self.y = y
class ViewFactory:
@staticmethod
def get_view(view_type):
if view_type == 'PairwiseView':
return PairwiseView()
class PairwiseView(View):
def __init__(self, variable=None):
self.__name__ = "PairwiseView"
super(PairwiseView, self).__init__()
self.variable = variable
def make_view(self, target_station, k_stations):
len_series = target_station.shape[0]
# Check dimension mismatch.
if not all([len_series == value.shape[0] for value in k_stations.values()]):
raise ValueError("Dimension mismatch b/n target station and one of the k stations")
tuples_list = [target_station] + list(k_stations.values())
dt = np.hstack(tuples_list)
vw = ViewDefinition(name=self.__name__, label=k_stations.keys(),
x=dt[:, 1:], y=dt[:, 0:1])
return vw
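    # Shape sketch (illustrative): for a target station of shape (n, 1) and k
    # neighbour stations each of shape (n, 1), dt above is (n, k+1); y is its
    # first column and x the remaining k columns.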
def to_json(self):
view_config = {"variable": self.variable}
return view_config
    @classmethod
    def from_json(cls, json_file):
variable = json_file["variable"]
pwv = PairwiseView(variable=variable)
return pwv
|
from Jumpscale import j
try:
    from mongoengine import connect
except ImportError:
    j.builders.runtimes.python3.pip_package_install("mongoengine")
    from mongoengine import connect
JSConfigClient = j.baseclasses.object_config
class MongoEngineClient(JSConfigClient):
_SCHEMATEXT = """
@url = jumpscale.MongoEngine.client
name** = "default" (S)
host = "localhost" (S)
port = 27017 (ipport)
username = "" (S)
password_ = "" (S)
alias = "" (S)
db = "" (S)
authentication_source = "" (S)
authentication_mechanism = "" (S)
ssl = False (B)
replicaset = "" (S)
"""
def _init(self, **kwargs):
kwargs = {}
connect(**kwargs)
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import List
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
from pants_test.backend.jvm.tasks.jvm_compile.zinc.zinc_compile_integration_base import (
BaseZincCompileIntegrationTest,
)
class ZincCompileIntegration(BaseCompileIT, BaseZincCompileIntegrationTest):
_EXTRA_TASK_ARGS: List[str] = []
|
"""Cirlces URLs"""
# Django
from django.urls import path
from django.urls.conf import include
# Django REST framework
from rest_framework.routers import DefaultRouter
# views
from .views import circles as circle_views
from .views import memberships as membership_views
router = DefaultRouter()
router.register(r'circles', circle_views.CircleViewSet, basename='circles')
router.register(
    r'circles/(?P<slug_name>[-a-zA-Z0-9_]+)/members', membership_views.MembershipViewSet, basename='memberships'
)
urlpatterns = [
    path('', include(router.urls))
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2020/11/14 10:51 PM
@Author : Caroline
@File : 单机estimator实现
@Description :
"""
import tensorflow as tf
from tensorflow import feature_column
# from tensorflow.keras import layers  # TF 1.15 version
# from tensorflow.python.feature_column import feature_column_v2 as fc
# tf_single.enable_eager_execution()
columns = [
'id',
'click',
'hour',
'C1',
'banner_pos',
'site_id',
'site_domain',
'site_category',
'app_id',
'app_domain',
'app_category',
'device_id',
'device_ip',
'device_model',
'device_type',
'device_conn_type',
'C14',
'C15',
'C16',
'C17',
'C18',
'C19',
'C20',
'C21',
]
def fc_column(feature_name, hash_bucket_size, dtype=tf.string):
f = feature_column.categorical_column_with_hash_bucket(feature_name, hash_bucket_size=hash_bucket_size, dtype=dtype)
f1 = feature_column.embedding_column(f, 10)
return f1
def fc_transform(feature_name, hash_bucket_size, dtype=tf.string):
f = feature_column.categorical_column_with_hash_bucket(feature_name, hash_bucket_size=hash_bucket_size, dtype=dtype)
f1 = feature_column.embedding_column(f, 4)
feature_layer = tf.keras.layers.DenseFeatures([f1])
# feature_layer = tf.keras.layers.DenseFeatures([fc_column(feature_name, hash_bucket_size, dtype)])
return feature_layer
feature_columns = [fc_column('device_ip', 100), fc_column('C1', 100, dtype=tf.int64)]
### TensorFlow has three levels of API:
# Low level API: tf.reduce_sum, tf.matmul
# Mid level API: layers, tf.keras.layers, e.g. Dense, Concatenate, or a custom Keras layer
#   greatly boosts productivity: no need to hand-write low-level ops
# High level API: Estimator, which wraps Session and Graph
#   session: the current TensorFlow session used to run ops
#   TensorFlow freezes the graph before computation starts
#   Advantages: without a session, you can focus on the model logic
#   Disadvantages: you lose some control over the model; Hooks run extra logic at fixed step counts
# Use the low-level API for loss, AUC, etc., and the mid-level API for the concrete network structure
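# Illustrative one-liners for the three levels above (tensor names are
# examples, not defined in this file):
#   low:  z = tf.reduce_sum(tf.matmul(a, b))                   # raw ops
#   mid:  h = tf.keras.layers.Dense(4, activation='relu')(x)   # layers
#   high: est = tf.estimator.Estimator(model_fn=model_fn)      # estimators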
# returns a function
def input_fn(file_path):
dataset = tf.data.experimental.make_csv_dataset(file_path,
batch_size=8,
column_names=columns,
label_name='click',
na_value="?",
num_epochs=1)
    dataset = dataset.shuffle(500)  # shuffle the dataset; a larger buffer shuffles more thoroughly
    return dataset.make_one_shot_iterator().get_next()  # returns one batch at a time
# canned estimator
tf.logging.set_verbosity(tf.logging.INFO)  # log levels: DEBUG, INFO, WARN, ERROR, FATAL
# estimator = tf.estimator.LinearClassifier(feature_columns=feature_columns)
# estimator.train(input_fn=lambda: input_fn("../data/avazu-ctr-prediction/train"), steps=2000)
# customized estimator (user-defined Estimator):
# model_fn: the model body, defining the three phases of the model
#   features: dict of tensors
#   labels: tensor
#   mode: three modes, i.e. the three phases of model training
#   params: parameters passed into the model, for decoupling
# model_dir: where training output is stored, including event and checkpoint data
#   Event: consumed by TensorBoard to plot the model's metrics
#   Checkpoint: TensorFlow's data structure holding all of the model's data
#     the forward pass has params
#     the backward pass has gradients, used while the model computes
#     SavedModel: keeps only params, used for online serving
#   each run checks model_dir for existing checkpoints and loads them if present
#     load succeeds only when the model structure matches
#     otherwise the load fails
# config: use RunConfig to declare distribution settings, checkpoint logic, etc.
# params: parameters passed straight through to model_fn
def model_fn(features, labels, mode, params):
    # fetch the global_step; irrelevant on a single machine, but distributed runs
    # need it for special logic, e.g. logging
    global_step = tf.train.get_global_step()
    device_ip = fc_transform('device_ip', 100)(features)
    C1 = fc_transform('C1', 100, dtype=tf.int32)(features)
    # C14 = fc_transform('C1', 100, dtype=tf.int32)(features)
    # define the model structure
with tf.variable_scope("ctr"):
t1 = tf.keras.layers.Concatenate(axis=-1)([device_ip, C1])
t2 = tf.keras.layers.Dense(4, activation='relu')(t1)
ctr_logits = tf.keras.layers.Dense(1)(t2)
ctr_predicted_logit = tf.nn.sigmoid(ctr_logits)
# Three Modes (Train, Eval, Predict)
    # Predict Spec: the EstimatorSpec shared by the 3 modes; fill in whatever needs returning
    # predict is called without labels and does not need them
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions={'ctr': ctr_predicted_logit})
    # Eval Spec: define some evaluation metrics
    # the estimator automatically merges summaries and shows them in TensorBoard
    with tf.name_scope('loss'):
        # cross entropy between the logits and the labels
loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(labels, tf.float32), logits=tf.squeeze(ctr_logits)))
tf.summary.scalar('loss', loss)
with tf.name_scope('accuracy'):
ctr_acc = tf.metrics.accuracy(labels=labels, predictions=ctr_predicted_logit, name='ctr_acc')
tf.summary.scalar('accuracy', ctr_acc[1])
with tf.name_scope('auc'):
ctr_auc = tf.metrics.auc(labels=labels, predictions=ctr_predicted_logit, name='ctr_auc')
tf.summary.scalar('auc', ctr_auc[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops={'accuracy/accuracy': ctr_acc},
                                          evaluation_hooks=None)  # metrics passed as eval_metric_ops also land in TensorBoard
    # Train Spec: involves the model update ops
if params['optimizer'] == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer()
else:
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(loss, global_step=global_step)
# Create a hook to print acc, loss & global step every 100 iter.
train_hook_list = []
train_tensors_log = {'ctr_auc': ctr_auc[1], 'loss': loss, 'global_step': global_step}
train_hook_list.append(
tf.estimator.CheckpointSaverHook(save_steps=1000, checkpoint_dir="../model_dir/single_estimator"))
train_hook_list.append(tf.train.LoggingTensorHook(tensors=train_tensors_log, every_n_iter=10))
    # train_op performs the actual training
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op,
                                          training_hooks=[])  # put anything that must run mid-training into training_hooks
### ESSM model structure
def model_fn_essm(features, labels, mode, params):
    # fetch the global_step; irrelevant on a single machine, but distributed runs
    # need it for special logic, e.g. logging
    global_step = tf.train.get_global_step()
    device_ip = fc_transform('device_ip', 100)(features)
    C1 = fc_transform('C1', 100, dtype=tf.int32)(features)
    # C14 = fc_transform('C1', 100, dtype=tf.int32)(features)
    # define the model structure
with tf.variable_scope("ctr"):
t1 = tf.keras.layers.Concatenate(axis=-1)([device_ip, C1])
t2 = tf.keras.layers.Dense(4, activation='relu')(t1)
ctr_logits = tf.keras.layers.Dense(1)(t2)
with tf.variable_scope("cvr"):
t3 = tf.keras.layers.Concatenate(axis=-1)([device_ip, C1])
t4 = tf.keras.layers.Dense(4, activation='relu')(t3)
cvr_logits = tf.keras.layers.Dense(1)(t4)
ctr_predicted_logit = tf.nn.sigmoid(ctr_logits)
cvr_predicted_logit = tf.nn.sigmoid(cvr_logits)
# Three Modes (Train, Eval, Predict)
    # Predict Spec: the EstimatorSpec shared by the 3 modes; fill in whatever needs returning
    # predict is called without labels and does not need them
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions={'cvr': cvr_predicted_logit})
    # Eval Spec: define some evaluation metrics
    # the estimator automatically merges summaries and shows them in TensorBoard
    with tf.name_scope('loss'):
        # labels is a tensor; compute the cross entropy between the logits and the labels
ctr_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(labels[:, 0], tf.float32), logits=tf.squeeze(ctr_logits)))
        # the predicted ctcvr = ctr * cvr, but the ctr loss is computed on logits rather than on predictions
ctcvr_cross_entropy = tf.keras.backend.binary_crossentropy(ctr_predicted_logit*cvr_predicted_logit, tf.cast(labels[:, 1], tf.float32))
        loss = 1.0 * ctr_cross_entropy + 1.0 * ctcvr_cross_entropy  # weighted sum
tf.summary.scalar('loss', loss)
with tf.name_scope('accuracy'):
ctr_acc = tf.metrics.accuracy(labels=labels[:, 0], predictions=ctr_predicted_logit, name='ctr_acc')
tf.summary.scalar('ctr_acc', ctr_acc[1])
cvr_acc = tf.metrics.accuracy(labels=labels[:, 1], predictions=cvr_predicted_logit, name='cvr_acc')
tf.summary.scalar('cvr_acc', cvr_acc[1])
with tf.name_scope('auc'):
ctr_auc = tf.metrics.auc(labels=labels, predictions=ctr_predicted_logit, name='ctr_auc')
tf.summary.scalar('ctr_auc', ctr_auc[1])
cvr_auc = tf.metrics.auc(labels=labels, predictions=cvr_predicted_logit, name='cvr_auc')
tf.summary.scalar('cvr_auc', cvr_auc[1])
if mode == tf.estimator.ModeKeys.EVAL:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops={'accuracy/ctr_acc': ctr_acc,
'accuracy/cvr_acc': cvr_acc},
                                          evaluation_hooks=None)  # metrics passed as eval_metric_ops also land in TensorBoard
    # Train Spec: involves the model update ops
if params['optimizer'] == 'adadelta':
optimizer = tf.train.AdadeltaOptimizer()
else:
optimizer = tf.train.AdamOptimizer()
train_op = optimizer.minimize(loss, global_step=global_step)
# Create a hook to print acc, loss & global step every 100 iter.
train_hook_list = []
train_tensors_log = {'ctr_auc': ctr_auc[1], 'cvr_auc': cvr_auc[1], 'loss': loss, 'global_step': global_step}
train_hook_list.append(
tf.estimator.CheckpointSaverHook(save_steps=1000, checkpoint_dir="../model_dir/single_estimator"))
train_hook_list.append(tf.train.LoggingTensorHook(tensors=train_tensors_log, every_n_iter=10))
    # train_op performs the actual training
if mode == tf.estimator.ModeKeys.TRAIN:
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op,
                                          training_hooks=[])  # put anything that must run mid-training into training_hooks
# Checkpoints
# Three points worth noting:
# 1. checkpoint structure;
# 2. reloading from the latest checkpoint;
# 3. checkpoints must share the same network structure
# RunConfig: runtime parameters
# model_dir
# save_summary_steps: how often the summary writer records summaries for the Estimator
# save_checkpoints_secs: checkpoint every N seconds
# save_checkpoints_steps: checkpoint every N steps (only one of the two takes effect)
# keep_checkpoint_max: keep the most recent N checkpoints
# keep_checkpoint_every_n_hours: additionally keep one checkpoint every N hours
# log_step_count_steps: log every N steps
# train_distribute: distribution-related
# eval_distribute: distribution-related
checkpointing_config = tf.estimator.RunConfig(
    save_checkpoints_secs=5,  # Save a checkpoint every 5 seconds.
    keep_checkpoint_max=5,  # Retain the 5 most recent checkpoints.
)
estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir="../model_dir/single_estimator",
config=checkpointing_config,
params={'optimizer': 'estimator'})
# estimator.train(input_fn=lambda: input_fn("../data/avazu-ctr-prediction/train"), max_steps=20000)
### evaluate
# metrics = estimator.evaluate(input_fn=lambda: input_fn("avazu-ctr-prediction/train", 10))
### Now serving
# when does a servable model get produced?
# many metrics are unreliable during training, so evaluation is run; the model is exported during the evaluation phase
feature_spec = tf.feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
# Another way
# def serving_input_receiver_fn():
# receiver_tensors = {
# 'device_ip': tf.placeholder(tf.string, [None, 1]),
# 'C1': tf.placeholder(tf.int64, [None, 1]),
# }
#
# # Convert give inputs to adjust to the model.
# # features = {"examples": tf_single.concat([receiver_tensors['device_ip'], receiver_tensors['C1']], axis=1)}
# return tf.estimator.export.ServingInputReceiver(receiver_tensors=receiver_tensors, features=receiver_tensors)
# EvalSpec:
# the eval and test datasets are not the same
# throttle_secs: minimum interval since the previous evaluation, in seconds
# exporters: produces a model exporter
best_exporter = tf.estimator.BestExporter(serving_input_receiver_fn=serving_input_receiver_fn, exports_to_keep=1)
exporters = [best_exporter]  # store the exporter built via serving_input_receiver_fn
tf.estimator.train_and_evaluate(estimator,
train_spec=tf.estimator.TrainSpec(
input_fn=lambda: input_fn("../data/avazu-ctr-prediction/train"),
max_steps=10000),
eval_spec=tf.estimator.EvalSpec(
input_fn=lambda: input_fn("../data/avazu-ctr-prediction/train"),
exporters=exporters,
throttle_secs=10)
)
### another way to set up serving: export the model into a fixed directory
export_dir = estimator.export_savedmodel('../model_dir/single_estimator', serving_input_receiver_fn)
# inspect the model with the commands below; they print the declared inputs and outputs
# saved_model_cli show --dir ./model_dir/single_estimator/1611506615 --tag_set serve --signature_def serving_default
# saved_model_cli run --dir ./model_dir/single_estimator/1611506615 --tag_set serve --signature_def serving_default --input_examples 'examples=[{"C1":[12344],"device_ip":[b"1"]}]'
import pandas as pd
# Test inputs represented by Pandas DataFrame.
inputs = pd.DataFrame({
'device_ip': [b"12312342", b"12312343"],
'C1': [122, 145],
})
# Convert input data into serialized Example strings.
examples = []
for index, row in inputs.iterrows():
feature = {}
for col, value in row.iteritems():
if col == "device_ip":
feature[col] = tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
if col == "C1":
feature[col] = tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
example = tf.train.Example(features=tf.train.Features(feature=feature))
examples.append(example.SerializeToString())
### online serving
# load the model into memory
predict_fn = tf.contrib.predictor.from_saved_model(export_dir)
# Make predictions.
predictions = predict_fn({'examples': examples})
print(predictions)
# docker run -t --rm -p 8501:8501 \
# -v "/Users/sierra/兼职/July/项目/avazu/model_dir/1588011605:/models/half_plus_two" \
# -e MODEL_NAME=half_plus_two \
# tensorflow/serving &
# saved_model_cli show --dir ./model_dir/1589123597 --all
# curl http://$(docker-machine ip default):8501/v1/models/half_plus_two/metadata
# curl -d '{"instances":[{"C1": [12344], "device_ip":["1"]}]}' -X POST http://$(docker-machine ip default):8501/v1/models/half_plus_two:predict
# curl -d '{"inputs":{"C1": [[12345]], "device_ip":[["2"]]}}' -X POST http://$(docker-machine ip default):8501/v1/models/half_plus_two:predict
|
from rest_framework import serializers
from rest_framework.serializers import ModelSerializer, Serializer
from django.contrib.auth import get_user_model
User = get_user_model()
class RegisterUserSerializer(ModelSerializer):
class Meta:
model = User
fields = ["email", "username", "first_name", "last_name", "password"]
extra_kwargs = {
'password': {'write_only': True}
}
def create(self, validated_data):
user = User.objects.create_user(**validated_data)
return user
class UpdateUserProfileSerializer(ModelSerializer):
class Meta:
model = User
fields = ["email", "username", "first_name", "last_name"]
class PasswordChangeSerializer(Serializer):
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
|
import os
from app import create_app
config_name = os.getenv('APP_SETTINGS')
app = create_app(config_name=config_name or "development")  # fall back to "development" if APP_SETTINGS is unset
if __name__ == '__main__':
app.run()
|
#!/usr/bin/env python
import argparse
import base64
import hashlib
import json
import os
import shutil
import sys
import time
try:
from urllib.request import urlopen, Request, HTTPError
except ImportError: # python 2
from urllib2 import urlopen, Request, HTTPError
_USER_CREDS = os.environ.get("READWRITE_USER", "")
_KEY_CREDS = os.environ.get("READWRITE_API_KEY", "")
BASE64_ENCODED_CREDENTIALS = base64.b64encode("{}:{}".format(_USER_CREDS, _KEY_CREDS).encode()).decode()
_ARTIFACT_HOST_URL = "https://oss.sonatype.org/service/local/staging"
_GROUP_ID = "io.envoyproxy.envoymobile"
_ARTIFACT_ID = "envoy"
_LOCAL_INSTALL_PATH = os.path.expanduser("~/.m2/repository/{directory}/envoy".format(
directory=_GROUP_ID.replace(".", "/"),
artifact_id=_ARTIFACT_ID))
def _resolve_name(file):
file_name, file_extension = os.path.splitext(file)
extension = file_extension[1:]
if extension == "asc" or extension == "sha256":
if file_name.endswith("pom.xml"):
return ".pom", extension
elif file_name.endswith("javadoc.jar"):
return "-javadoc.jar", extension
elif file_name.endswith("sources.jar"):
return "-sources.jar", extension
elif file_name.endswith(".aar"):
return ".aar", extension
elif file_name.endswith(".jar"):
return ".jar", extension
else:
if file_name.endswith("pom"):
return "", "pom"
elif file_name.endswith("javadoc"):
return "-javadoc", extension
elif file_name.endswith("sources"):
return "-sources", extension
else:
return "", extension
def _install_locally(version, files):
path = "{}/{}".format(_LOCAL_INSTALL_PATH, version)
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
for file in files:
suffix, file_extension = _resolve_name(file)
basename = "{name}-{version}{suffix}.{extension}".format(
name=_ARTIFACT_ID,
version=version,
suffix=suffix,
extension=file_extension
)
shutil.copyfile(file, os.path.join(path, basename))
print("{file_name}\n{sha}\n".format(file_name=file, sha=_sha256(file)))
def _urlopen_retried(request, max_retries=500, attempt=1, delay_sec=1):
"""
Retries a request via recursion. Retries happen after the provided delay. We do not exponentially back off.
:param request: the request to be made
:param max_retries: Number of retries to use, default is 500. The reason we are using such a high retry is because
sonatype fails quite frequently
:param attempt: The current attempt number for the request
:param delay_sec: The delay before making a retried request
:return: the response if successful, raises error otherwise
"""
try:
return urlopen(request)
except HTTPError as e:
if max_retries > attempt and e.code >= 500:
print(
"[{retry_attempt}/{max_retries} Retry attempt] Retrying request after {delay}s."
" Received error code {code}"
.format(
retry_attempt=attempt,
max_retries=max_retries,
delay=delay_sec,
code=e.code
))
time.sleep(delay_sec)
return _urlopen_retried(request, max_retries, attempt + 1)
elif max_retries <= attempt:
print("Retry limit reached. Will not continue to retry. Received error code {}".format(e.code))
raise e
else:
raise e
def _create_staging_repository(profile_id):
try:
url = os.path.join(_ARTIFACT_HOST_URL, "profiles/{}/start".format(profile_id))
data = {
'data': {
'description': ''
}
}
request = Request(url)
request.add_header("Authorization", "Basic {}".format(BASE64_ENCODED_CREDENTIALS))
request.add_header("Content-Type", "application/json")
request.get_method = lambda: "POST"
        request.data = json.dumps(data).encode()  # Request.add_data is Python 2 only; assigning .data works on both
response = json.load(_urlopen_retried(request))
staging_id = response["data"]["stagedRepositoryId"]
print("staging id {} was created".format(staging_id))
return staging_id
except Exception as e:
raise e
def _upload_files(staging_id, version, files, ascs, sha256):
uploaded_file_count = 0
# aggregate all the files for uploading
all_files = files + ascs + sha256
for file in all_files:
# This will output "envoy", ".aar" for "envoy.aar
print("Uploading file {}".format(file))
suffix, file_extension = _resolve_name(file)
basename = "{name}-{version}{suffix}.{extension}".format(
name=_ARTIFACT_ID,
version=version,
suffix=suffix,
extension=file_extension
)
artifact_url = os.path.join(
_ARTIFACT_HOST_URL,
"deployByRepositoryId/{}".format(staging_id),
_GROUP_ID.replace('.', "/"),
_ARTIFACT_ID,
version,
basename
)
try:
with open(file, "rb") as f:
request = Request(artifact_url, f.read())
request.add_header("Authorization", "Basic {}".format(BASE64_ENCODED_CREDENTIALS))
request.add_header("Content-Type", "application/x-{extension}".format(extension=file_extension))
request.get_method = lambda: "PUT"
_urlopen_retried(request)
uploaded_file_count = uploaded_file_count + 1
except HTTPError as e:
if e.code == 403:
# Don't need to pipe to error since we are ignoring duplicated uploads
print("Ignoring duplicate upload for {}".format(artifact_url))
else:
raise e
except Exception as e:
raise e
return uploaded_file_count
def _close_staging_repository(profile_id, staging_id):
url = os.path.join(_ARTIFACT_HOST_URL, "profiles/{}/finish".format(profile_id))
data = {
'data': {
'stagedRepositoryId': staging_id,
'description': ''
}
}
try:
request = Request(url)
request.add_header("Authorization", "Basic {}".format(BASE64_ENCODED_CREDENTIALS))
request.add_header("Content-Type", "application/json")
        request.data = json.dumps(data).encode()  # Request.add_data is Python 2 only; assigning .data works on both
request.get_method = lambda: "POST"
_urlopen_retried(request)
except Exception as e:
raise e
def _drop_staging_repository(staging_id, message):
url = os.path.join(_ARTIFACT_HOST_URL, "bulk/drop")
data = {
'data': {
'stagedRepositoryIds': [staging_id],
'description': message
}
}
try:
request = Request(url)
request.add_header("Authorization", "Basic {}".format(BASE64_ENCODED_CREDENTIALS))
request.add_header("Content-Type", "application/json")
        request.data = json.dumps(data).encode()  # Request.add_data is Python 2 only; assigning .data works on both
request.get_method = lambda: "POST"
_urlopen_retried(request)
except Exception as e:
raise e
def _release_staging_repository(staging_id):
url = os.path.join(_ARTIFACT_HOST_URL, "bulk/promote")
data = {
'data': {
'stagedRepositoryIds': [staging_id],
'description': ''
}
}
try:
request = Request(url)
request.add_header("Authorization", "Basic {}".format(BASE64_ENCODED_CREDENTIALS))
request.add_header("Content-Type", "application/json")
        request.data = json.dumps(data).encode()  # Request.add_data is Python 2 only; assigning .data works on both
request.get_method = lambda: "POST"
_urlopen_retried(request)
except Exception as e:
raise e
def _create_sha256_files(files):
sha256_files = []
for file in files:
sha256_file_name = "{}.sha256".format(file)
sha256 = _sha256(file)
        with open(sha256_file_name, 'w+') as sha256_file:
            sha256_file.write(sha256)
sha256_files.append(sha256_file_name)
return sha256_files
def _sha256(file_name):
sha256 = hashlib.sha256()
with open(file_name, 'rb') as file:
for line in file.readlines():
sha256.update(line)
return sha256.hexdigest()
def _build_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--profile_id", required=False,
help="""
The staging profile id of the sonatype repository target.
This is the id in the sonatype web ui. The REST api is:
curl -u {usr}:{psswrd} -H "Accept: application/json"
https://oss.sonatype.org//nexus/service/local/staging/profile_repositories
""")
parser.add_argument("--version", default="LOCAL-SNAPSHOT",
help="""
The version of the artifact to be published. `LOCAL-SNAPSHOT` is defaulted
if the version is not set. This version should be consistent with the pom.xml
provided.
""")
parser.add_argument("--local", nargs='?', const=True, default=False,
help="""
For installing artifacts into local maven. For now, we only support
installing to the path `~/.m2/repository/io/envoyproxy/envoymobile/`
""")
parser.add_argument("--files", nargs="+", required=True,
help="""
Files to upload
The checklist for Envoy Mobile files are:
envoy.aar
envoy-pom.xml
envoy-sources.jar
envoy-javadoc.jar
""")
parser.add_argument("--signed_files", nargs="+", required=False,
help="""
Files to upload.
Sonatype requires uploaded artifacts to be gpg signed
GPG signed:
envoy.aar.asc
envoy-pom.xml.asc
envoy-sources.jar.asc
envoy-javadoc.jar.asc
""")
return parser
if __name__ == "__main__":
args = _build_parser().parse_args()
version = args.version
if args.local:
_install_locally(version, args.files)
else:
staging_id = ""
try:
staging_id = _create_staging_repository(args.profile_id)
except:
sys.exit("Unable to create staging id")
# Upload files using the staging_id, close the staging repository, and release
# If an error occurs, we will attempt to drop the repository. The script will
# need to be re-run to initiate another upload attempt
try:
print("Uploading files...")
sha256_files = _create_sha256_files(args.files)
uploaded_file_count = _upload_files(staging_id, version, args.files, args.signed_files, sha256_files)
if uploaded_file_count > 0:
print("Uploading files complete!")
print("Closing staging repository...")
_close_staging_repository(args.profile_id, staging_id)
print("Closing staging complete!")
print("Releasing artifact {}...".format(version))
_release_staging_repository(staging_id)
print("Release complete!")
else:
print("No files were uploaded. Dropping staging repository...")
            _drop_staging_repository(staging_id, "dropping release due to no uploaded files")
print("Dropping staging id {} complete!".format(staging_id))
except Exception as e:
print(e)
print("Unable to complete file upload. Will attempt to drop staging id: [{}]".format(staging_id))
try:
            _drop_staging_repository(staging_id, "dropping release due to error")
sys.exit("Dropping staging id: [{}] successful.".format(staging_id))
except Exception as e:
print(e)
sys.exit("Dropping staging id: [{}] failed.".format(staging_id))
|
__version__ = "1.2"
__author__ = "Chris Rae"
__all__ = ["upload"]
from upload import upload
|
import os
import extenteten as ex
import qnd
import tensorflow as tf
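# Register the qnd command-line flags and build the autoregressive
# character-level language model function.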
def def_ar_lm():
qnd.add_flag('cell_size', type=int, default=128)
qnd.add_flag('num_unroll', type=int, default=16)
qnd.add_flag('batch_size', type=int, default=64)
qnd.add_flag('num_batch_threads', type=int, default=os.cpu_count())
qnd.add_flag('batch_queue_capacity', type=int, default=1024)
def ar_lm(key, sentence, labels, *, char_embeddings):
cell = tf.contrib.rnn.LayerNormBasicLSTMCell(qnd.FLAGS.cell_size)
batch = tf.contrib.training.batch_sequences_with_states(
key,
input_sequences={
'sentence': tf.gather(char_embeddings, sentence),
'labels': labels,
},
input_context={},
input_length=None,
initial_states={
'c': tf.zeros([cell.state_size.c], tf.float32),
'h': tf.zeros([cell.state_size.h], tf.float32),
},
num_unroll=qnd.FLAGS.num_unroll,
batch_size=qnd.FLAGS.batch_size,
num_threads=qnd.FLAGS.num_batch_threads,
capacity=qnd.FLAGS.batch_queue_capacity)
outputs, _ = tf.nn.state_saving_rnn(
cell,
tf.unstack(batch.sequences['sentence'], axis=1),
sequence_length=batch.length,
state_saver=batch,
state_name=('c', 'h'))
logits = batch_linear(outputs, ex.static_shape(char_embeddings)[0])
labels = batch.sequences['labels']
loss = sequence_labeling_loss(logits, labels, batch.length)
return (
{
'key': key,
'labels': (tf.argmax(logits, axis=2) *
tf.sequence_mask(batch.length, dtype=tf.int64)),
},
loss,
ex.minimize(loss),
)
return ar_lm
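# Apply one shared linear projection to every timestep of a rank-3
# [batch, time, features] tensor.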
def batch_linear(h, output_size):
assert ex.static_rank(h) == 3
shape = ex.static_shape(h)
return (
tf.batch_matmul(
h,
tf.tile(tf.expand_dims(ex.variable([shape[2], output_size]), 0),
[shape[0], 1, 1]))
+ ex.variable([output_size]))
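# Per-token cross-entropy; when sequence_length is given, padded positions
# are masked out of the average.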
def sequence_labeling_loss(logits, labels, sequence_length=None):
assert ex.static_rank(logits) == 3
assert ex.static_rank(labels) == 2
losses = tf.reshape(
tf.nn.sparse_softmax_cross_entropy_with_logits(
tf.reshape(logits, [-1, ex.static_shape(logits)[-1]]),
tf.reshape(labels, [-1])),
[-1, *ex.static_shape(labels)[1:]])
    if sequence_length is None:
return tf.reduce_mean(losses)
mask = tf.sequence_mask(sequence_length, dtype=losses.dtype)
return tf.reduce_sum(losses * mask) / tf.reduce_sum(mask)
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
A helper module to create a single instance of the redhat_support_lib
API object.
'''
from redhat_support_lib.api import API
import redhat_support_tool.helpers.confighelper as confighelper
import redhat_support_tool.helpers.version as version
import logging
__author__ = 'Keith Robertson <kroberts@redhat.com>'
USER_AGENT = 'redhat-support-tool-%s' % (version.version)
_api = None
logger = logging.getLogger("redhat_support_tool.plugins.list_cases")
def _make_api():
cfg = confighelper.get_config_helper()
logger.log(logging.DEBUG, 'user(%s)' % cfg.get(option='user'))
logger.log(logging.DEBUG, 'proxy_url(%s)' % cfg.get(option='proxy_url'))
logger.log(logging.DEBUG, 'proxy_user(%s)' % cfg.get(option='proxy_user'))
'''
logger.log(logging.DEBUG, 'password(%s)' % cfg.pw_decode(cfg.get(option='password'),
cfg.get(option='user')))
logger.log(logging.DEBUG, 'proxy_password(%s)' % cfg.pw_decode(
cfg.get(option='proxy_password'),
cfg.get(option='proxy_user')))
'''
global _api
if not _api:
try:
url = cfg.get(option='url')
user = cfg.get(option='user')
passwd = cfg.pw_decode(cfg.get(option='password'), cfg.get(option='user'))
# ensure we have a userid
            if not user:
                user = cfg.prompt_for_user()
            # ensure we have a password
            if not passwd:
                passwd = cfg.prompt_for_password()
            no_verify_ssl = bool(cfg.get(option='no_verify_ssl'))
ssl_ca = cfg.get(option='ssl_ca')
if url:
                # use the (possibly prompted) credentials instead of re-reading the config
                _api = API(username=user,
                           password=passwd,
                           url=url,
                           proxy_url=cfg.get(option='proxy_url'),
                           proxy_user=cfg.get(option='proxy_user'),
                           proxy_pass=cfg.pw_decode(cfg.get(option='proxy_password'),
                                                    cfg.get(option='proxy_user')),
                           userAgent=USER_AGENT,
                           no_verify_ssl=no_verify_ssl,
                           ssl_ca=ssl_ca)
else:
                _api = API(username=user,
                           password=passwd,
                           proxy_url=cfg.get(option='proxy_url'),
                           proxy_user=cfg.get(option='proxy_user'),
                           proxy_pass=cfg.pw_decode(cfg.get(option='proxy_password'),
                                                    cfg.get(option='proxy_user')),
                           userAgent=USER_AGENT,
                           no_verify_ssl=no_verify_ssl,
                           ssl_ca=ssl_ca)
except:
# Ideally we could just get rid of this try: except: block as it
# does absolutely nothing!
raise
return _api
def get_api():
'''
A helper method to get the API object.
'''
# Tell python we want the *global* version and not a
# function local version. Sheesh. :(
global _api
if not _api:
_api = _make_api()
return _api
def disconnect_api():
'''
Gracefully shutdown the API.
'''
global _api
if _api:
_api.disconnect()
_api = None
|
import win32api
import win32gui
import win32con
import time
import random
from control.base_control import BaseControl
import common.screen as screen
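# Directions used when drag-scanning the map (see scranDragMap).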
RIGHT = 0
DOWN = 1
LEFT = 2
class ReplyMapCommon(BaseControl):
    _scranDirection = 0  # 0: right, 1: down, 2: left
    _nextScranDirection = 0
    _isScranMap = False
    _need2FireBoss = False
    _isCenerBoss = True
    team1BattleMaxCount = 5
    team2BattleMaxCount = 0
    _useGameSelfAuto = True
def __init__(self, interval):
self.interval = interval
_enemys = [
"enemy\\ship_z21_45_15_55_25.png",
"enemy\\ship_z22_45_15_52_25.png",
"enemy\\ship_z22_45_15_55_25.png",
"enemy\\ship_z23_45_55_55_65.png",
"enemy\\ship_z24_45_15_52_25.png",
"enemy\\ship_z25_45_75_55_85.png",
"enemy\\ship_z26_45_55_55_65.png",
"enemy\\ship_z27_45_75_55_85.png",
"enemy\\ship_z28_45_65_55_75.png",
"enemy\\ship_z29_45_15_52_25.png",
"enemy\\ship_z201_45_15_55_25.png",
"enemy\\ship_h1_45_45_55_55.png",
"enemy\\ship_h2_45_45_55_55.png",
"enemy\\ship_h3_47_47_54_54.png",
"enemy\\ship_q1_45_45_55_55.png",
"enemy\\ship_q2_45_45_55_55.png",
"enemy\\ship_q3_47_47_54_54.png",
"enemy\\ship_q4_46_46_53_53.png",
"enemy\\ship_q5_46_46_53_53.png",
"enemy\\ship_y1_46_46_53_53.png",
"enemy\\ship_y2_46_46_53_53.png",
"enemy\\ship_y3_46_46_53_53.png",
"enemy\\ship_y4_45_45_55_55.png",
"enemy\\ship_y5_45_45_53_53.png",
"enemy\\ship_z1_45_45_55_55.png",
"enemy\\ship_z2_45_45_55_55.png",
"enemy\\ship_z3_45_45_55_55.png",
"enemy\\ship_z4_46_46_53_53.png",
"enemy\\ship_z4_47_47_54_54.png",
"enemy\\ship_z5_46_46_53_53.png",
"enemy\\ship_z5_47_47_54_54.png",
"enemy\\ship_z6_46_46_53_53.png",
"enemy\\ship_z6_47_47_54_54.png",
]
_boss = ["enemy\\boss_48_45_52_55.png",
"enemy\\boss1_47_47_54_54.png",
"enemy\\boss2_47_47_54_54.png",
"enemy\\boss3_47_47_52_52.png",
"enemy\\boss4_46_46_50_52.png",
]
_exEnemys = [
]
_needRandomEnemyLocation=False
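    # Return the match locations of the first enemy template found in the game window.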
def getEnemyLocation(self):
imgs = self._exEnemys + self._enemys
if self._needRandomEnemyLocation:
random.shuffle(imgs)
        for img in imgs:
            xylist = screen.matchResImgInWindow(
                self.getHandle(), img, 0.7)
            if len(xylist) > 0:
                print("getEnemyLocation", img)
                return xylist
return []
def getBossLocation(self):
imgs = self._boss
random.shuffle(imgs)
        for img in imgs:
            xylist = screen.matchResImgInWindow(
                self.getHandle(), img, 0.7)
            if len(xylist) > 0:
                return xylist
return []
def dragPerLeft(self):
self.dragPer(10, 50, 80, 50)
def dragPerRight(self):
self.dragPer(80, 50, 10, 50)
def dragPerUp(self):
self.dragPer(50, 20, 50, 70)
def dragPerLeftUp(self):
self.dragPer(10, 20, 80, 70)
def dragPerLeftDown(self):
self.dragPer(10, 70, 80, 20)
def dragPerRightUp(self):
self.dragPer(80, 20, 10, 70)
def dragPerRightDown(self):
self.dragPer(80, 70, 10, 20)
def dragPerDown(self):
self.dragPer(50, 70, 50, 20)
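    # When not mid-scan, drag until the screen stops changing, then reset the scan state.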
def resetMapPosition(self):
if not self._isScranMap:
winHash = ""
while not screen.alikeHash(winHash, screen.winScreenHash(self.getHandle()), 0.8):
winHash = screen.winScreenHash(self.getHandle())
self.dragPerLeftUp()
self._needResetMap = False
self._scranMapEnd = False
self._scranDirection = 0
def setTeamPositionToSave(self):
return True
def onCanNotMove(self):
self.scranDragMap()
    def scranDragMap(self):  # scan the whole map by dragging
winHash = screen.winScreenHash(self.getHandle())
self._isScranMap = True
if self._scranDirection == RIGHT:
self.dragPerRight()
if screen.alikeHash(winHash, screen.winScreenHash(self.getHandle()), 0.8):
self._nextScranDirection = LEFT
self._scranDirection = DOWN
return
return
if self._scranDirection == DOWN:
self.dragPerDown()
            # switch between left and right for the next horizontal pass
if screen.alikeHash(winHash, screen.winScreenHash(self.getHandle()), 0.8):
                self._isScranMap = False  # finished scanning the whole map
return
self._scranDirection = self._nextScranDirection
return
if self._scranDirection == LEFT:
self.dragPerLeft()
if screen.alikeHash(winHash, screen.winScreenHash(self.getHandle()), 0.8):
                self._nextScranDirection = RIGHT  # hit the left edge; go right after moving down
self._scranDirection = DOWN
return
return
    _findEnemysMode = 0  # 0: click, 1: drag
def setFindEnemysMode(self,val):
self._findEnemysMode=val
def findAndBattle(self):
if self._teamNum == 1:
self._need2FireBoss=True
if self._team1BattleCount < self.team1BattleMaxCount:
xylist = self.getEnemyLocation()
minX = self.getPosX(15)
# maxY=self.getPosY(80)
resList = []
for point in xylist:
if point[0] >= minX:
resList.append(point)
if len(resList) > 0 and not self.isSameWin():
x, y = resList[0]
if self._findEnemysMode==0:
self.leftClick(x, y)
if self._findEnemysMode==1:
cx = self.getPosX(50)
cy = self.getPosY(50)
                        self.drag(x, y, cx, cy)  # dragging is not 1:1; it moves roughly half the distance
time.sleep(2)
self.drag(x, y, cx, cy)
self.leftClick(cx, cy)
time.sleep(5)
else:
if self.isSameWin():
self.onCanNotMove()
self.resetMapPosition()
self.scranDragMap()
else:
time.sleep(10)
if self.setTeamPositionToSave():
self.switchTeam()
self._teamNum = 2
if self._teamNum == 2:
if self._team2BattleCount < self.team2BattleMaxCount:
self._need2FireBoss=True
xylist = self.getEnemyLocation()
minX = self.getPosX(15)
# maxY=self.getPosY(80)
resList = []
for point in xylist:
if point[0] >= minX:
resList.append(point)
if len(resList) > 0 and not self.isSameWin():
x, y = resList[0]
if self._findEnemysMode==0:
self.leftClick(x, y)
if self._findEnemysMode==1:
cx = self.getPosX(50)
cy = self.getPosY(50)
                        self.drag(x, y, cx, cy)  # dragging is not 1:1; it moves roughly half the distance
time.sleep(2)
self.drag(x, y, cx, cy)
self.leftClick(cx, cy)
time.sleep(5)
else:
self.resetMapPosition()
self.scranDragMap()
else:
if self._isCenerBoss and self._need2FireBoss:
time.sleep(2)
self.leftClickPer(50, 50)
self._need2FireBoss=False
xylist = self.getBossLocation()
minX = self.getPosX(15)
# maxY=self.getPosY(80)
resList = []
for point in xylist:
if point[0] >= minX:
resList.append(point)
            if len(resList) > 0:  # and not self.isSameWin()
x, y = resList[0]
self.leftClick(x, y)
time.sleep(5)
else:
self.resetMapPosition()
self.scranDragMap()
def clickMap(self):
pass
def intoMap(self):
pass
def isAtHome(self):
return False
def isAtInMapReady(self):
return self.clickMacthImg("onSelectTeam.png")
_isNeedKeyMap=False
def needUseKey(self):
return self.matchResImgInWindow("usekey.png")
def useKey(self):
screen.setForegroundWindow(self.getHandle())
self.leftClickPer(65,70)
_mapPoints = [
"map7/point_45_45_55_55.png",
"map7/point2_45_45_55_55.png",
"map7/point_45_38_55_55.png",
]
def clickPoint(self):
imgs = self._mapPoints
        for img in imgs:
            xylist = screen.matchResImgInWindow(
                self.getHandle(), img, 0.8)
            if len(xylist) > 0:
                x, y = xylist[0]
                self.leftClick(x, y)
                time.sleep(10)
                if self.onGetItems():  # guard against mis-clicks
                    self.battleContinue()
                    time.sleep(4)
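    # Main loop: bring the game window to the foreground and step through the
    # home -> team select -> in-map -> battle states until stopped.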
def run(self):
self._team1BattleCount = 0
self._team2BattleCount = 0
self._team1MoveCount = 0
self._team2MoveCount = 0
self._teamNum = 1
win32gui.SetForegroundWindow(self.getHandle())
while self._isRun:
if self._pause:
time.sleep(3)
continue
            # bottom-menu hash
self.resetCusor()
if self.isAtHome():
print("isAtHome")
# self._team1BattleCount = 0
# self._team2BattleCount = 0
# self._team1MoveCount = 0
# self._team2MoveCount = 0
# self._teamNum = 1
self.clickMap()
time.sleep(2)
if self.isAtInMapReady():
print("isAtInMapReady True")
self._team1BattleCount = 0
self._team2BattleCount = 0
self._team1MoveCount = 0
self._team2MoveCount = 0
self._teamNum = 1
self.intoMap()
time.sleep(2)
if self.onSelectTeam():
self.clickNeedLeaderCat()
time.sleep(2)
self.atTeamIntoMap()
time.sleep(2)
if self._isNeedKeyMap and self.needUseKey():
self.useKey()
time.sleep(10)
self.commonAction()
if self.isInMap() and self._useGameSelfAuto:
print("isInMap")
self.clickPoint()
self.findAndBattle()
time.sleep(self.interval)
|
import unreal
import re
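# For each selected Material asset, print the dimensions of every texture it
# uses and the summed width/height totals.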
selected_assets = unreal.EditorUtilityLibrary.get_selected_assets()
for sa in selected_assets:
if sa.get_class().get_name() == "Material":
textures = unreal.MaterialEditingLibrary.get_used_textures(sa)
x = []
y = []
for texture in textures:
sizeX = unreal.Texture2D.blueprint_get_size_x(texture)
x.append(sizeX)
sizeY = unreal.Texture2D.blueprint_get_size_y(texture)
y.append(sizeY)
texture_path = re.match(r"<Object ('.*?')", str(texture)).group(1)
texture_name = texture_path.split("/")[-1].split(".")[-1].strip("'")
print(f"{texture_name}: {sizeX}x{sizeY}")
final_sizeX = sum(x)
final_sizeY = sum(y)
print(f"Total texture size: {final_sizeX}x{final_sizeY}")
|
# coding: utf-8
import logging
from requests import exceptions, get
from config import BASE_URL, HEADERS, TIMEOUT
from config import GENERALES_SERVICE, COMUNA_SERVICE
from config import PRODUCTION, Paso2015
# For testing with simulated data
import json
import io
from config import SIMULATE, JSON_EXAMPLE_PATH
log = logging.getLogger('paso.%s' % (__name__))
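# Fetch JSON from the API, or read a local example file when simulating.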
def get_data_API(url=None, fname=None):
if PRODUCTION or not SIMULATE:
log.debug("Get url %s" % (url))
try:
response = get(url, headers=HEADERS, timeout=TIMEOUT, verify=False)
        except exceptions.RequestException as e:
log.error("Exception in requests get %s. Reason %s" %
(url, str(e)))
raise Paso2015(__name__)
if response.status_code == 200:
return response.json()
else:
log.error("API responded with code %s" %
(response.status_code))
raise Paso2015(__name__)
else:
log.warning('Simulating API data for url %s' % (url))
try:
with io.open('%s/%s.json'
% (JSON_EXAMPLE_PATH, fname), 'r') as f:
j = json.loads(f.read(), encoding='utf8')
return j
        except IOError:
log.error("Did not find JSON example file")
raise Paso2015(__name__)
def get_results_API(o_l=None):
'''Loop to get all the needed
results from the API'''
# Loop through the needed API services by section
for i in range(0, 16):
# Get the results by section
if not i:
suffix = GENERALES_SERVICE
else:
suffix = COMUNA_SERVICE + "?id=%s" % (i)
url = BASE_URL + suffix
r = get_data_API(url, 'comuna%d' % (i))
o_l.append(r)
|
# -*- coding: utf-8 -*-
print """<!DOCTYPE html>
<html>
<head>
<title>TEST</title>
</head>
<body>
<table width="100%" cellspacing="0" cellpadding="5" border="0">
<form method="GET" action="enviar" name="TheForm" >
<tbody><tr>
<td width="100%" bgcolor="#BDCBE4" align="center">
<table width="100%" cellspacing="0" cellpadding="3" bordercolor="#6379A8" border="1">
<tbody><tr>
<td width="100%" bgcolor="#D7DFEE"><b><font size="2" face="Verdana"><i>
<font color="#313E57">SELECCIONE UN ARTISTA:</font></i></font></b>
<table width="100%" cellspacing="0" cellpadding="2" border="0">
<tbody><tr>
<td width="5%" align="left"><input value="1" name="encuesta" id="1" type="radio"></td>
<td width="95%" align="left"><label for="1"><font size="2" face="Verdana">30 SECONDS TO MARS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="2" name="encuesta" id="2" type="radio"></td>
<td width="95%" align="left"><label for="2"><font size="2" face="Verdana">5NFUSION</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="3" name="encuesta" id="3" type="radio"></td>
<td width="95%" align="left"><label for="3"><font size="2" face="Verdana">A DISTANCIA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="4" name="encuesta" id="4" type="radio"></td>
<td width="95%" align="left"><label for="4"><font size="2" face="Verdana">AB</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="5" name="encuesta" id="5" type="radio"></td>
<td width="95%" align="left"><label for="5"><font size="2" face="Verdana">ABBEL EL BRILLANTE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="6" name="encuesta" id="6" type="radio"></td>
<td width="95%" align="left"><label for="6"><font size="2" face="Verdana">ADELE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="7" name="encuesta" id="7" type="radio"></td>
<td width="95%" align="left"><label for="7"><font size="2" face="Verdana">AGENTE REX</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="8" name="encuesta" id="8" type="radio"></td>
<td width="95%" align="left"><label for="8"><font size="2" face="Verdana">AHMED Y ONYX</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="9" name="encuesta" id="9" type="radio"></td>
<td width="95%" align="left"><label for="9"><font size="2" face="Verdana">ALAN WALKER</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="10" name="encuesta" id="10" type="radio"></td>
<td width="95%" align="left"><label for="10"><font size="2" face="Verdana">ALDO ARMAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="11" name="encuesta" id="11" type="radio"></td>
<td width="95%" align="left"><label for="11"><font size="2" face="Verdana">ALDREY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="12" name="encuesta" id="12" type="radio"></td>
<td width="95%" align="left"><label for="12"><font size="2" face="Verdana">ALEJANDRO SANZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="13" name="encuesta" id="13" type="radio"></td>
<td width="95%" align="left"><label for="13"><font size="2" face="Verdana">ALESSIA CARA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="14" name="encuesta" id="14" type="radio"></td>
<td width="95%" align="left"><label for="14"><font size="2" face="Verdana">ALESSO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="15" name="encuesta" id="15" type="radio"></td>
<td width="95%" align="left"><label for="15"><font size="2" face="Verdana">ALEXIS Y FIDO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="16" name="encuesta" id="16" type="radio"></td>
<td width="95%" align="left"><label for="16"><font size="2" face="Verdana">ALFONSO TERAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="17" name="encuesta" id="17" type="radio"></td>
<td width="95%" align="left"><label for="17"><font size="2" face="Verdana">ALICIA KEYS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="18" name="encuesta" id="18" type="radio"></td>
<td width="95%" align="left"><label for="18"><font size="2" face="Verdana">ALKILADOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="19" name="encuesta" id="19" type="radio"></td>
<td width="95%" align="left"><label for="19"><font size="2" face="Verdana">ALLIAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="20" name="encuesta" id="20" type="radio"></td>
<td width="95%" align="left"><label for="20"><font size="2" face="Verdana">ALTA TENSION</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="21" name="encuesta" id="21" type="radio"></td>
<td width="95%" align="left"><label for="21"><font size="2" face="Verdana">AMERICAN AUTHORS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="22" name="encuesta" id="22" type="radio"></td>
<td width="95%" align="left"><label for="22"><font size="2" face="Verdana">AMIGOS INVISIBLES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="23" name="encuesta" id="23" type="radio"></td>
<td width="95%" align="left"><label for="23"><font size="2" face="Verdana">ANDY GRAMMER</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="24" name="encuesta" id="24" type="radio"></td>
<td width="95%" align="left"><label for="24"><font size="2" face="Verdana">ANDY NG</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="25" name="encuesta" id="25" type="radio"></td>
<td width="95%" align="left"><label for="25"><font size="2" face="Verdana">ANGEL LAEZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="26" name="encuesta" id="26" type="radio"></td>
<td width="95%" align="left"><label for="26"><font size="2" face="Verdana">ANGEL RAFAEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="27" name="encuesta" id="27" type="radio"></td>
<td width="95%" align="left"><label for="27"><font size="2" face="Verdana">ANITA MORILLO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="28" name="encuesta" id="28" type="radio"></td>
<td width="95%" align="left"><label for="28"><font size="2" face="Verdana">ANNE MARIE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="29" name="encuesta" id="29" type="radio"></td>
<td width="95%" align="left"><label for="29"><font size="2" face="Verdana">ARAFA Y JSIERRA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="30" name="encuesta" id="30" type="radio"></td>
<td width="95%" align="left"><label for="30"><font size="2" face="Verdana">ARAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="31" name="encuesta" id="31" type="radio"></td>
<td width="95%" align="left"><label for="31"><font size="2" face="Verdana">ARIANA GRANDE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="32" name="encuesta" id="32" type="radio"></td>
<td width="95%" align="left"><label for="32"><font size="2" face="Verdana">ARMANDO MARTINEZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="33" name="encuesta" id="33" type="radio"></td>
<td width="95%" align="left"><label for="33"><font size="2" face="Verdana">ARVELAIZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="34" name="encuesta" id="34" type="radio"></td>
<td width="95%" align="left"><label for="34"><font size="2" face="Verdana">AYAMAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="35" name="encuesta" id="35" type="radio"></td>
<td width="95%" align="left"><label for="35"><font size="2" face="Verdana">BABY RASTA Y GRINGO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="36" name="encuesta" id="36" type="radio"></td>
<td width="95%" align="left"><label for="36"><font size="2" face="Verdana">BACANOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="37" name="encuesta" id="37" type="radio"></td>
<td width="95%" align="left"><label for="37"><font size="2" face="Verdana">BALMORE Y ROMING</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="38" name="encuesta" id="38" type="radio"></td>
<td width="95%" align="left"><label for="38"><font size="2" face="Verdana">BE AND BEAT</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="39" name="encuesta" id="39" type="radio"></td>
<td width="95%" align="left"><label for="39"><font size="2" face="Verdana">BELANOVA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="40" name="encuesta" id="40" type="radio"></td>
<td width="95%" align="left"><label for="40"><font size="2" face="Verdana">BENAVIDES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="41" name="encuesta" id="41" type="radio"></td>
<td width="95%" align="left"><label for="41"><font size="2" face="Verdana">BITOQUEAO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="42" name="encuesta" id="42" type="radio"></td>
<td width="95%" align="left"><label for="42"><font size="2" face="Verdana">BONI Y KELLY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="43" name="encuesta" id="43" type="radio"></td>
<td width="95%" align="left"><label for="43"><font size="2" face="Verdana">BRUNO MARS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="44" name="encuesta" id="44" type="radio"></td>
<td width="95%" align="left"><label for="44"><font size="2" face="Verdana">BUDU</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="45" name="encuesta" id="45" type="radio"></td>
<td width="95%" align="left"><label for="45"><font size="2" face="Verdana">CAIBO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="46" name="encuesta" id="46" type="radio"></td>
<td width="95%" align="left"><label for="46"><font size="2" face="Verdana">CALI Y EL DANDEE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="47" name="encuesta" id="47" type="radio"></td>
<td width="95%" align="left"><label for="47"><font size="2" face="Verdana">CALLE CIEGA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="48" name="encuesta" id="48" type="radio"></td>
<td width="95%" align="left"><label for="48"><font size="2" face="Verdana">CALVIN HARRIS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="49" name="encuesta" id="49" type="radio"></td>
<td width="95%" align="left"><label for="49"><font size="2" face="Verdana">CANDY 66</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="50" name="encuesta" id="50" type="radio"></td>
<td width="95%" align="left"><label for="50"><font size="2" face="Verdana">CARAMELOS DE CIANURO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="51" name="encuesta" id="51" type="radio"></td>
<td width="95%" align="left"><label for="51"><font size="2" face="Verdana">CARLOS BAUTE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="52" name="encuesta" id="52" type="radio"></td>
<td width="95%" align="left"><label for="52"><font size="2" face="Verdana">CARLOS VIVES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="53" name="encuesta" id="53" type="radio"></td>
<td width="95%" align="left"><label for="53"><font size="2" face="Verdana">CASEROLOOPS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="54" name="encuesta" id="54" type="radio"></td>
<td width="95%" align="left"><label for="54"><font size="2" face="Verdana">CBK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="55" name="encuesta" id="55" type="radio"></td>
<td width="95%" align="left"><label for="55"><font size="2" face="Verdana">CHARLI XCX</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="56" name="encuesta" id="56" type="radio"></td>
<td width="95%" align="left"><label for="56"><font size="2" face="Verdana">CHARLIE PUTH</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="57" name="encuesta" id="57" type="radio"></td>
<td width="95%" align="left"><label for="57"><font size="2" face="Verdana">CHAYANNE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="58" name="encuesta" id="58" type="radio"></td>
<td width="95%" align="left"><label for="58"><font size="2" face="Verdana">CHICHO SUNSHINE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="59" name="encuesta" id="59" type="radio"></td>
<td width="95%" align="left"><label for="59"><font size="2" face="Verdana">CHINO Y NACHO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="60" name="encuesta" id="60" type="radio"></td>
<td width="95%" align="left"><label for="60"><font size="2" face="Verdana">CHOCQUIBTOWN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="61" name="encuesta" id="61" type="radio"></td>
<td width="95%" align="left"><label for="61"><font size="2" face="Verdana">CHRIS CORNELL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="62" name="encuesta" id="62" type="radio"></td>
<td width="95%" align="left"><label for="62"><font size="2" face="Verdana">CHRISTIAN DANIEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="63" name="encuesta" id="63" type="radio"></td>
<td width="95%" align="left"><label for="63"><font size="2" face="Verdana">CHUCHUGUAZA STYLE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="64" name="encuesta" id="64" type="radio"></td>
<td width="95%" align="left"><label for="64"><font size="2" face="Verdana">CLEAN BANDIT </font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="65" name="encuesta" id="65" type="radio"></td>
<td width="95%" align="left"><label for="65"><font size="2" face="Verdana">CLIO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="66" name="encuesta" id="66" type="radio"></td>
<td width="95%" align="left"><label for="66"><font size="2" face="Verdana">CNCO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="67" name="encuesta" id="67" type="radio"></td>
<td width="95%" align="left"><label for="67"><font size="2" face="Verdana">COLDPLAY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="68" name="encuesta" id="68" type="radio"></td>
<td width="95%" align="left"><label for="68"><font size="2" face="Verdana">CORINA SMITH</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="69" name="encuesta" id="69" type="radio"></td>
<td width="95%" align="left"><label for="69"><font size="2" face="Verdana">CRUZ TENEPE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="70" name="encuesta" id="70" type="radio"></td>
<td width="95%" align="left"><label for="70"><font size="2" face="Verdana">D TOTAL ZULIANIDAD</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="71" name="encuesta" id="71" type="radio"></td>
<td width="95%" align="left"><label for="71"><font size="2" face="Verdana">D VEGA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="72" name="encuesta" id="72" type="radio"></td>
<td width="95%" align="left"><label for="72"><font size="2" face="Verdana">DAIQUIRI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="73" name="encuesta" id="73" type="radio"></td>
<td width="95%" align="left"><label for="73"><font size="2" face="Verdana">DANI BARON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="74" name="encuesta" id="74" type="radio"></td>
<td width="95%" align="left"><label for="74"><font size="2" face="Verdana">DANIEL ELBITTAR</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="75" name="encuesta" id="75" type="radio"></td>
<td width="95%" align="left"><label for="75"><font size="2" face="Verdana">DANIEL HUEN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="76" name="encuesta" id="76" type="radio"></td>
<td width="95%" align="left"><label for="76"><font size="2" face="Verdana">DAVID GUETTA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="77" name="encuesta" id="77" type="radio"></td>
<td width="95%" align="left"><label for="77"><font size="2" face="Verdana">DAYA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="78" name="encuesta" id="78" type="radio"></td>
<td width="95%" align="left"><label for="78"><font size="2" face="Verdana">DEBORAH ANDRADE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="79" name="encuesta" id="79" type="radio"></td>
<td width="95%" align="left"><label for="79"><font size="2" face="Verdana">DEFTONES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="80" name="encuesta" id="80" type="radio"></td>
<td width="95%" align="left"><label for="80"><font size="2" face="Verdana">DEMI LOVATO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="81" name="encuesta" id="81" type="radio"></td>
<td width="95%" align="left"><label for="81"><font size="2" face="Verdana">DERECK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="82" name="encuesta" id="82" type="radio"></td>
<td width="95%" align="left"><label for="82"><font size="2" face="Verdana">DESAKATAOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="83" name="encuesta" id="83" type="radio"></td>
<td width="95%" align="left"><label for="83"><font size="2" face="Verdana">DESORDEN PUBLICO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="84" name="encuesta" id="84" type="radio"></td>
<td width="95%" align="left"><label for="84"><font size="2" face="Verdana">DIMENSION LATINA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="85" name="encuesta" id="85" type="radio"></td>
<td width="95%" align="left"><label for="85"><font size="2" face="Verdana">DJ SNAKE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="86" name="encuesta" id="86" type="radio"></td>
<td width="95%" align="left"><label for="86"><font size="2" face="Verdana">DNCE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="87" name="encuesta" id="87" type="radio"></td>
<td width="95%" align="left"><label for="87"><font size="2" face="Verdana">DRAKE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="88" name="encuesta" id="88" type="radio"></td>
<td width="95%" align="left"><label for="88"><font size="2" face="Verdana">DUA LIPA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="89" name="encuesta" id="89" type="radio"></td>
<td width="95%" align="left"><label for="89"><font size="2" face="Verdana">DUO ANONIMO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="90" name="encuesta" id="90" type="radio"></td>
<td width="95%" align="left"><label for="90"><font size="2" face="Verdana">EA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="91" name="encuesta" id="91" type="radio"></td>
<td width="95%" align="left"><label for="91"><font size="2" face="Verdana">ED SHEERAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="92" name="encuesta" id="92" type="radio"></td>
<td width="95%" align="left"><label for="92"><font size="2" face="Verdana">EDDY LOVER</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="93" name="encuesta" id="93" type="radio"></td>
<td width="95%" align="left"><label for="93"><font size="2" face="Verdana">EJ LA MELODIA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="94" name="encuesta" id="94" type="radio"></td>
<td width="95%" align="left"><label for="94"><font size="2" face="Verdana">EL DUKE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="269" name="encuesta" id="269" type="radio"></td>
<td width="95%" align="left"><label for="269"><font size="2" face="Verdana">EL POLLO BRITO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="95" name="encuesta" id="95" type="radio"></td>
<td width="95%" align="left"><label for="95"><font size="2" face="Verdana">EL POTRO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="96" name="encuesta" id="96" type="radio"></td>
<td width="95%" align="left"><label for="96"><font size="2" face="Verdana">EL TREN GAITERO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="97" name="encuesta" id="97" type="radio"></td>
<td width="95%" align="left"><label for="97"><font size="2" face="Verdana">EL XAVI MUSICAL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="98" name="encuesta" id="98" type="radio"></td>
<td width="95%" align="left"><label for="98"><font size="2" face="Verdana">ELIU RAMOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="99" name="encuesta" id="99" type="radio"></td>
<td width="95%" align="left"><label for="99"><font size="2" face="Verdana">ELLIE GOULDING</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="100" name="encuesta" id="100" type="radio"></td>
<td width="95%" align="left"><label for="100"><font size="2" face="Verdana">EMIG LA VOZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="101" name="encuesta" id="101" type="radio"></td>
<td width="95%" align="left"><label for="101"><font size="2" face="Verdana">ENIO Y SUNDIN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="102" name="encuesta" id="102" type="radio"></td>
<td width="95%" align="left"><label for="102"><font size="2" face="Verdana">ENRIQUE IGLESIAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="103" name="encuesta" id="103" type="radio"></td>
<td width="95%" align="left"><label for="103"><font size="2" face="Verdana">ESTEPHY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="104" name="encuesta" id="104" type="radio"></td>
<td width="95%" align="left"><label for="104"><font size="2" face="Verdana">FABIO ADAMI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="105" name="encuesta" id="105" type="radio"></td>
<td width="95%" align="left"><label for="105"><font size="2" face="Verdana">FARRUKO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="106" name="encuesta" id="106" type="radio"></td>
<td width="95%" align="left"><label for="106"><font size="2" face="Verdana">FEID</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="107" name="encuesta" id="107" type="radio"></td>
<td width="95%" align="left"><label for="107"><font size="2" face="Verdana">FELIPE SANTOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="108" name="encuesta" id="108" type="radio"></td>
<td width="95%" align="left"><label for="108"><font size="2" face="Verdana">FERGIE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="109" name="encuesta" id="109" type="radio"></td>
<td width="95%" align="left"><label for="109"><font size="2" face="Verdana">FIFTH HARMONY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="110" name="encuesta" id="110" type="radio"></td>
<td width="95%" align="left"><label for="110"><font size="2" face="Verdana">FLO RIDA Y ROBIN THI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="111" name="encuesta" id="111" type="radio"></td>
<td width="95%" align="left"><label for="111"><font size="2" face="Verdana">FONSECA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="112" name="encuesta" id="112" type="radio"></td>
<td width="95%" align="left"><label for="112"><font size="2" face="Verdana">FOO FIGHTERS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="113" name="encuesta" id="113" type="radio"></td>
<td width="95%" align="left"><label for="113"><font size="2" face="Verdana">FRANCISCO LEON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="114" name="encuesta" id="114" type="radio"></td>
<td width="95%" align="left"><label for="114"><font size="2" face="Verdana">FRANCO DE VITA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="115" name="encuesta" id="115" type="radio"></td>
<td width="95%" align="left"><label for="115"><font size="2" face="Verdana">FRANCO T</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="116" name="encuesta" id="116" type="radio"></td>
<td width="95%" align="left"><label for="116"><font size="2" face="Verdana">FRANGEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="117" name="encuesta" id="117" type="radio"></td>
<td width="95%" align="left"><label for="117"><font size="2" face="Verdana">FRANGEL RAMOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="118" name="encuesta" id="118" type="radio"></td>
<td width="95%" align="left"><label for="118"><font size="2" face="Verdana">FRANK QUINTERO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="119" name="encuesta" id="119" type="radio"></td>
<td width="95%" align="left"><label for="119"><font size="2" face="Verdana">FUEGO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="120" name="encuesta" id="120" type="radio"></td>
<td width="95%" align="left"><label for="120"><font size="2" face="Verdana">GABRIEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="121" name="encuesta" id="121" type="radio"></td>
<td width="95%" align="left"><label for="121"><font size="2" face="Verdana">GABRIEL PARISI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="122" name="encuesta" id="122" type="radio"></td>
<td width="95%" align="left"><label for="122"><font size="2" face="Verdana">GABY NOYA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="123" name="encuesta" id="123" type="radio"></td>
<td width="95%" align="left"><label for="123"><font size="2" face="Verdana">GALANTIS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="124" name="encuesta" id="124" type="radio"></td>
<td width="95%" align="left"><label for="124"><font size="2" face="Verdana">GENTE DE ZONA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="125" name="encuesta" id="125" type="radio"></td>
<td width="95%" align="left"><label for="125"><font size="2" face="Verdana">GLORIA TREVI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="126" name="encuesta" id="126" type="radio"></td>
<td width="95%" align="left"><label for="126"><font size="2" face="Verdana">GONZO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="127" name="encuesta" id="127" type="radio"></td>
<td width="95%" align="left"><label for="127"><font size="2" face="Verdana">GRAN COQUIVACOA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="128" name="encuesta" id="128" type="radio"></td>
<td width="95%" align="left"><label for="128"><font size="2" face="Verdana">GUACO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="129" name="encuesta" id="129" type="radio"></td>
<td width="95%" align="left"><label for="129"><font size="2" face="Verdana">GUSTAVO ELIS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="130" name="encuesta" id="130" type="radio"></td>
<td width="95%" align="left"><label for="130"><font size="2" face="Verdana">GWEN STEPHANI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="131" name="encuesta" id="131" type="radio"></td>
<td width="95%" align="left"><label for="131"><font size="2" face="Verdana">HECTOR</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="132" name="encuesta" id="132" type="radio"></td>
<td width="95%" align="left"><label for="132"><font size="2" face="Verdana">IGGY AZALEA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="133" name="encuesta" id="133" type="radio"></td>
<td width="95%" align="left"><label for="133"><font size="2" face="Verdana">IGNACIO RONDON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="134" name="encuesta" id="134" type="radio"></td>
<td width="95%" align="left"><label for="134"><font size="2" face="Verdana">ILEGALES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="135" name="encuesta" id="135" type="radio"></td>
<td width="95%" align="left"><label for="135"><font size="2" face="Verdana">IRVING MANUEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="136" name="encuesta" id="136" type="radio"></td>
<td width="95%" align="left"><label for="136"><font size="2" face="Verdana">IZAAK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="137" name="encuesta" id="137" type="radio"></td>
<td width="95%" align="left"><label for="137"><font size="2" face="Verdana">J ALVAREZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="138" name="encuesta" id="138" type="radio"></td>
<td width="95%" align="left"><label for="138"><font size="2" face="Verdana">JASON DERULO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="139" name="encuesta" id="139" type="radio"></td>
<td width="95%" align="left"><label for="139"><font size="2" face="Verdana">JAVO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="140" name="encuesta" id="140" type="radio"></td>
<td width="95%" align="left"><label for="140"><font size="2" face="Verdana">JAY SANTOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="141" name="encuesta" id="141" type="radio"></td>
<td width="95%" align="left"><label for="141"><font size="2" face="Verdana">JEANCE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="142" name="encuesta" id="142" type="radio"></td>
<td width="95%" align="left"><label for="142"><font size="2" face="Verdana">JELOZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="143" name="encuesta" id="143" type="radio"></td>
<td width="95%" align="left"><label for="143"><font size="2" face="Verdana">JENNIFER LOPEZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="144" name="encuesta" id="144" type="radio"></td>
<td width="95%" align="left"><label for="144"><font size="2" face="Verdana">JEREMIAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="145" name="encuesta" id="145" type="radio"></td>
<td width="95%" align="left"><label for="145"><font size="2" face="Verdana">JERRY RIVERA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="146" name="encuesta" id="146" type="radio"></td>
<td width="95%" align="left"><label for="146"><font size="2" face="Verdana">JESSICA QUIJADA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="147" name="encuesta" id="147" type="radio"></td>
<td width="95%" align="left"><label for="147"><font size="2" face="Verdana">JESUS CHINO MIRANDA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="148" name="encuesta" id="148" type="radio"></td>
<td width="95%" align="left"><label for="148"><font size="2" face="Verdana">JESUS Y YORKY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="149" name="encuesta" id="149" type="radio"></td>
<td width="95%" align="left"><label for="149"><font size="2" face="Verdana">JHEY SOSA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="150" name="encuesta" id="150" type="radio"></td>
<td width="95%" align="left"><label for="150"><font size="2" face="Verdana">JOEY MONTANA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="151" name="encuesta" id="151" type="radio"></td>
<td width="95%" align="left"><label for="151"><font size="2" face="Verdana">JONAS BLUE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="152" name="encuesta" id="152" type="radio"></td>
<td width="95%" align="left"><label for="152"><font size="2" face="Verdana">JONATHAN MOLY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="153" name="encuesta" id="153" type="radio"></td>
<td width="95%" align="left"><label for="153"><font size="2" face="Verdana">JORGE CELEDON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="154" name="encuesta" id="154" type="radio"></td>
<td width="95%" align="left"><label for="154"><font size="2" face="Verdana">JORGE LUIS CHACIN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="155" name="encuesta" id="155" type="radio"></td>
<td width="95%" align="left"><label for="155"><font size="2" face="Verdana">JORGE TYLKI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="156" name="encuesta" id="156" type="radio"></td>
<td width="95%" align="left"><label for="156"><font size="2" face="Verdana">JOSE ARCANGEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="157" name="encuesta" id="157" type="radio"></td>
<td width="95%" align="left"><label for="157"><font size="2" face="Verdana">JUAMPI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="158" name="encuesta" id="158" type="radio"></td>
<td width="95%" align="left"><label for="158"><font size="2" face="Verdana">JUAN CARLOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="159" name="encuesta" id="159" type="radio"></td>
<td width="95%" align="left"><label for="159"><font size="2" face="Verdana">JUAN CARLOS SALAZAR</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="160" name="encuesta" id="160" type="radio"></td>
<td width="95%" align="left"><label for="160"><font size="2" face="Verdana">JUAN GABRIEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="161" name="encuesta" id="161" type="radio"></td>
<td width="95%" align="left"><label for="161"><font size="2" face="Verdana">JUAN MIGUEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="162" name="encuesta" id="162" type="radio"></td>
<td width="95%" align="left"><label for="162"><font size="2" face="Verdana">JULIETA VENEGAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="163" name="encuesta" id="163" type="radio"></td>
<td width="95%" align="left"><label for="163"><font size="2" face="Verdana">JUNIOR</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="164" name="encuesta" id="164" type="radio"></td>
<td width="95%" align="left"><label for="164"><font size="2" face="Verdana">JUSTIN BIEBER</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="165" name="encuesta" id="165" type="radio"></td>
<td width="95%" align="left"><label for="165"><font size="2" face="Verdana">JUSTIN TIMBERLAKE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="166" name="encuesta" id="166" type="radio"></td>
<td width="95%" align="left"><label for="166"><font size="2" face="Verdana">K5</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="167" name="encuesta" id="167" type="radio"></td>
<td width="95%" align="left"><label for="167"><font size="2" face="Verdana">KAMAL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="168" name="encuesta" id="168" type="radio"></td>
<td width="95%" align="left"><label for="168"><font size="2" face="Verdana">KANYE WEST</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="169" name="encuesta" id="169" type="radio"></td>
<td width="95%" align="left"><label for="169"><font size="2" face="Verdana">KAREN MARTELLO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="170" name="encuesta" id="170" type="radio"></td>
<td width="95%" align="left"><label for="170"><font size="2" face="Verdana">KATY PERRY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="171" name="encuesta" id="171" type="radio"></td>
<td width="95%" align="left"><label for="171"><font size="2" face="Verdana">KENT JONES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="172" name="encuesta" id="172" type="radio"></td>
<td width="95%" align="left"><label for="172"><font size="2" face="Verdana">KRIS FERRER</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="173" name="encuesta" id="173" type="radio"></td>
<td width="95%" align="left"><label for="173"><font size="2" face="Verdana">KUMBA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="174" name="encuesta" id="174" type="radio"></td>
<td width="95%" align="left"><label for="174"><font size="2" face="Verdana">KUNGS VS COOKIN ON 3</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="175" name="encuesta" id="175" type="radio"></td>
<td width="95%" align="left"><label for="175"><font size="2" face="Verdana">KYGO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="176" name="encuesta" id="176" type="radio"></td>
<td width="95%" align="left"><label for="176"><font size="2" face="Verdana">LA KLAVE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="177" name="encuesta" id="177" type="radio"></td>
<td width="95%" align="left"><label for="177"><font size="2" face="Verdana">LA MARCHA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="178" name="encuesta" id="178" type="radio"></td>
<td width="95%" align="left"><label for="178"><font size="2" face="Verdana">LA MATERIALISTA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="179" name="encuesta" id="179" type="radio"></td>
<td width="95%" align="left"><label for="179"><font size="2" face="Verdana">LA PAGANA TRINIDAD</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="180" name="encuesta" id="180" type="radio"></td>
<td width="95%" align="left"><label for="180"><font size="2" face="Verdana">LADY GAGA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="181" name="encuesta" id="181" type="radio"></td>
<td width="95%" align="left"><label for="181"><font size="2" face="Verdana">LASSO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="182" name="encuesta" id="182" type="radio"></td>
<td width="95%" align="left"><label for="182"><font size="2" face="Verdana">LENIN MACIAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="183" name="encuesta" id="183" type="radio"></td>
<td width="95%" align="left"><label for="183"><font size="2" face="Verdana">LENNY TAVAREZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="184" name="encuesta" id="184" type="radio"></td>
<td width="95%" align="left"><label for="184"><font size="2" face="Verdana">LESS Y CHRISS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="185" name="encuesta" id="185" type="radio"></td>
<td width="95%" align="left"><label for="185"><font size="2" face="Verdana">LIL WAYNE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="186" name="encuesta" id="186" type="radio"></td>
<td width="95%" align="left"><label for="186"><font size="2" face="Verdana">LION</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="187" name="encuesta" id="187" type="radio"></td>
<td width="95%" align="left"><label for="187"><font size="2" face="Verdana">LORDE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="188" name="encuesta" id="188" type="radio"></td>
<td width="95%" align="left"><label for="188"><font size="2" face="Verdana">LOS ADOLESCENTES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="189" name="encuesta" id="189" type="radio"></td>
<td width="95%" align="left"><label for="189"><font size="2" face="Verdana">LOS AVIADORES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="190" name="encuesta" id="190" type="radio"></td>
<td width="95%" align="left"><label for="190"><font size="2" face="Verdana">LOS CADILLACS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="191" name="encuesta" id="191" type="radio"></td>
<td width="95%" align="left"><label for="191"><font size="2" face="Verdana">LOS DALTONICOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="192" name="encuesta" id="192" type="radio"></td>
<td width="95%" align="left"><label for="192"><font size="2" face="Verdana">LOS HABIBI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="193" name="encuesta" id="193" type="radio"></td>
<td width="95%" align="left"><label for="193"><font size="2" face="Verdana">LOS MESONEROS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="194" name="encuesta" id="194" type="radio"></td>
<td width="95%" align="left"><label for="194"><font size="2" face="Verdana">LOS MUCHACHOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="195" name="encuesta" id="195" type="radio"></td>
<td width="95%" align="left"><label for="195"><font size="2" face="Verdana">LOS NENE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="196" name="encuesta" id="196" type="radio"></td>
<td width="95%" align="left"><label for="196"><font size="2" face="Verdana">LOS PARANOIAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="197" name="encuesta" id="197" type="radio"></td>
<td width="95%" align="left"><label for="197"><font size="2" face="Verdana">LOS PELAOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="198" name="encuesta" id="198" type="radio"></td>
<td width="95%" align="left"><label for="198"><font size="2" face="Verdana">LOS3</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="199" name="encuesta" id="199" type="radio"></td>
<td width="95%" align="left"><label for="199"><font size="2" face="Verdana">LOSH</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="200" name="encuesta" id="200" type="radio"></td>
<td width="95%" align="left"><label for="200"><font size="2" face="Verdana">LUAR</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="201" name="encuesta" id="201" type="radio"></td>
<td width="95%" align="left"><label for="201"><font size="2" face="Verdana">LUIS IRAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="202" name="encuesta" id="202" type="radio"></td>
<td width="95%" align="left"><label for="202"><font size="2" face="Verdana">LUKAS GRAHAM</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="203" name="encuesta" id="203" type="radio"></td>
<td width="95%" align="left"><label for="203"><font size="2" face="Verdana">LUNY TUNES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="204" name="encuesta" id="204" type="radio"></td>
<td width="95%" align="left"><label for="204"><font size="2" face="Verdana">LYU Y FRANJO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="205" name="encuesta" id="205" type="radio"></td>
<td width="95%" align="left"><label for="205"><font size="2" face="Verdana">MACKLEMORE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="206" name="encuesta" id="206" type="radio"></td>
<td width="95%" align="left"><label for="206"><font size="2" face="Verdana">MAELO RUIZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="207" name="encuesta" id="207" type="radio"></td>
<td width="95%" align="left"><label for="207"><font size="2" face="Verdana">MAGIC</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="208" name="encuesta" id="208" type="radio"></td>
<td width="95%" align="left"><label for="208"><font size="2" face="Verdana">MAJARETE SOUND MACHINE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="209" name="encuesta" id="209" type="radio"></td>
<td width="95%" align="left"><label for="209"><font size="2" face="Verdana">MAJOR LAZER</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="210" name="encuesta" id="210" type="radio"></td>
<td width="95%" align="left"><label for="210"><font size="2" face="Verdana">MALEH Y SAMY HAWK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="211" name="encuesta" id="211" type="radio"></td>
<td width="95%" align="left"><label for="211"><font size="2" face="Verdana">MALUMA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="212" name="encuesta" id="212" type="radio"></td>
<td width="95%" align="left"><label for="212"><font size="2" face="Verdana">MANA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="213" name="encuesta" id="213" type="radio"></td>
<td width="95%" align="left"><label for="213"><font size="2" face="Verdana">MANUEL GUERRA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="214" name="encuesta" id="214" type="radio"></td>
<td width="95%" align="left"><label for="214"><font size="2" face="Verdana">MANUEL LARRAD</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="215" name="encuesta" id="215" type="radio"></td>
<td width="95%" align="left"><label for="215"><font size="2" face="Verdana">MANUEL PETIT</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="216" name="encuesta" id="216" type="radio"></td>
<td width="95%" align="left"><label for="216"><font size="2" face="Verdana">MANUEL ZABALA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="217" name="encuesta" id="217" type="radio"></td>
<td width="95%" align="left"><label for="217"><font size="2" face="Verdana">MARC ANTHONY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="218" name="encuesta" id="218" type="radio"></td>
<td width="95%" align="left"><label for="218"><font size="2" face="Verdana">MARIA LAURA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="219" name="encuesta" id="219" type="radio"></td>
<td width="95%" align="left"><label for="219"><font size="2" face="Verdana">MARIANA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="220" name="encuesta" id="220" type="radio"></td>
<td width="95%" align="left"><label for="220"><font size="2" face="Verdana">MARIANA VEGA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="221" name="encuesta" id="221" type="radio"></td>
<td width="95%" align="left"><label for="221"><font size="2" face="Verdana">MARIELLE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="222" name="encuesta" id="222" type="radio"></td>
<td width="95%" align="left"><label for="222"><font size="2" face="Verdana">MARLO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="223" name="encuesta" id="223" type="radio"></td>
<td width="95%" align="left"><label for="223"><font size="2" face="Verdana">MAROON 5</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="224" name="encuesta" id="224" type="radio"></td>
<td width="95%" align="left"><label for="224"><font size="2" face="Verdana">MARTIN GARRIX</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="225" name="encuesta" id="225" type="radio"></td>
<td width="95%" align="left"><label for="225"><font size="2" face="Verdana">MATEO CARVAJAL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="226" name="encuesta" id="226" type="radio"></td>
<td width="95%" align="left"><label for="226"><font size="2" face="Verdana">MATT SIMONS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="227" name="encuesta" id="227" type="radio"></td>
<td width="95%" align="left"><label for="227"><font size="2" face="Verdana">MAURO Y JOTA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="228" name="encuesta" id="228" type="radio"></td>
<td width="95%" align="left"><label for="228"><font size="2" face="Verdana">MAX PIZZOLANTE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="229" name="encuesta" id="229" type="radio"></td>
<td width="95%" align="left"><label for="229"><font size="2" face="Verdana">MEGHAN TRAINOR</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="230" name="encuesta" id="230" type="radio"></td>
<td width="95%" align="left"><label for="230"><font size="2" face="Verdana">MELODIA PERFECTA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="231" name="encuesta" id="231" type="radio"></td>
<td width="95%" align="left"><label for="231"><font size="2" face="Verdana">METALLICA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="232" name="encuesta" id="232" type="radio"></td>
<td width="95%" align="left"><label for="232"><font size="2" face="Verdana">MIA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="233" name="encuesta" id="233" type="radio"></td>
<td width="95%" align="left"><label for="233"><font size="2" face="Verdana">MICKEY TAVERAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="234" name="encuesta" id="234" type="radio"></td>
<td width="95%" align="left"><label for="234"><font size="2" face="Verdana">MIKE BAHIA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="235" name="encuesta" id="235" type="radio"></td>
<td width="95%" align="left"><label for="235"><font size="2" face="Verdana">MIKE POSNER</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="236" name="encuesta" id="236" type="radio"></td>
<td width="95%" align="left"><label for="236"><font size="2" face="Verdana">MILKY CHANCE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="237" name="encuesta" id="237" type="radio"></td>
<td width="95%" align="left"><label for="237"><font size="2" face="Verdana">MIRELLA CESA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="238" name="encuesta" id="238" type="radio"></td>
<td width="95%" align="left"><label for="238"><font size="2" face="Verdana">MOZART LA PARA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="239" name="encuesta" id="239" type="radio"></td>
<td width="95%" align="left"><label for="239"><font size="2" face="Verdana">NACHO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="240" name="encuesta" id="240" type="radio"></td>
<td width="95%" align="left"><label for="240"><font size="2" face="Verdana">NELSON ARRIETA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="241" name="encuesta" id="241" type="radio"></td>
<td width="95%" align="left"><label for="241"><font size="2" face="Verdana">NICK JONAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="242" name="encuesta" id="242" type="radio"></td>
<td width="95%" align="left"><label for="242"><font size="2" face="Verdana">NICKI MINAJ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="243" name="encuesta" id="243" type="radio"></td>
<td width="95%" align="left"><label for="243"><font size="2" face="Verdana">NICKY JAM</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="244" name="encuesta" id="244" type="radio"></td>
<td width="95%" align="left"><label for="244"><font size="2" face="Verdana">NICO Y VINZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="245" name="encuesta" id="245" type="radio"></td>
<td width="95%" align="left"><label for="245"><font size="2" face="Verdana">OFFSPRING</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="246" name="encuesta" id="246" type="radio"></td>
<td width="95%" align="left"><label for="246"><font size="2" face="Verdana">OKILLS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="247" name="encuesta" id="247" type="radio"></td>
<td width="95%" align="left"><label for="247"><font size="2" face="Verdana">OLGA TAÐON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="248" name="encuesta" id="248" type="radio"></td>
<td width="95%" align="left"><label for="248"><font size="2" face="Verdana">OMAR ACEDO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="249" name="encuesta" id="249" type="radio"></td>
<td width="95%" align="left"><label for="249"><font size="2" face="Verdana">OMAR ENRIQUE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="250" name="encuesta" id="250" type="radio"></td>
<td width="95%" align="left"><label for="250"><font size="2" face="Verdana">OMAR KOONZE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="251" name="encuesta" id="251" type="radio"></td>
<td width="95%" align="left"><label for="251"><font size="2" face="Verdana">OMI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="252" name="encuesta" id="252" type="radio"></td>
<td width="95%" align="left"><label for="252"><font size="2" face="Verdana">ONE DIRECTION</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="253" name="encuesta" id="253" type="radio"></td>
<td width="95%" align="left"><label for="253"><font size="2" face="Verdana">ONE REPUBLIC</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="254" name="encuesta" id="254" type="radio"></td>
<td width="95%" align="left"><label for="254"><font size="2" face="Verdana">OSCARCITO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="255" name="encuesta" id="255" type="radio"></td>
<td width="95%" align="left"><label for="255"><font size="2" face="Verdana">PARAMORE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="256" name="encuesta" id="256" type="radio"></td>
<td width="95%" align="left"><label for="256"><font size="2" face="Verdana">PASABORDO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="257" name="encuesta" id="257" type="radio"></td>
<td width="95%" align="left"><label for="257"><font size="2" face="Verdana">PEDRO ALONSO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="258" name="encuesta" id="258" type="radio"></td>
<td width="95%" align="left"><label for="258"><font size="2" face="Verdana">PEDRO LEAL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="259" name="encuesta" id="259" type="radio"></td>
<td width="95%" align="left"><label for="259"><font size="2" face="Verdana">PHARRELL WILLIAMS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="260" name="encuesta" id="260" type="radio"></td>
<td width="95%" align="left"><label for="260"><font size="2" face="Verdana">PINK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="261" name="encuesta" id="261" type="radio"></td>
<td width="95%" align="left"><label for="261"><font size="2" face="Verdana">PITBULL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="262" name="encuesta" id="262" type="radio"></td>
<td width="95%" align="left"><label for="262"><font size="2" face="Verdana">PIWAITI PUCHO Y TUCUTU</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="263" name="encuesta" id="263" type="radio"></td>
<td width="95%" align="left"><label for="263"><font size="2" face="Verdana">PORFI BALOA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="264" name="encuesta" id="264" type="radio"></td>
<td width="95%" align="left"><label for="264"><font size="2" face="Verdana">PRIMERA FILA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="265" name="encuesta" id="265" type="radio"></td>
<td width="95%" align="left"><label for="265"><font size="2" face="Verdana">PRINCE ROYCE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="266" name="encuesta" id="266" type="radio"></td>
<td width="95%" align="left"><label for="266"><font size="2" face="Verdana">PSY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="267" name="encuesta" id="267" type="radio"></td>
<td width="95%" align="left"><label for="267"><font size="2" face="Verdana">RABANES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="268" name="encuesta" id="268" type="radio"></td>
<td width="95%" align="left"><label for="268"><font size="2" face="Verdana">RADIOHEAD</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="270" name="encuesta" id="270" type="radio"></td>
<td width="95%" align="left"><label for="270"><font size="2" face="Verdana">RAUL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="271" name="encuesta" id="271" type="radio"></td>
<td width="95%" align="left"><label for="271"><font size="2" face="Verdana">RAUL EL LEON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="272" name="encuesta" id="272" type="radio"></td>
<td width="95%" align="left"><label for="272"><font size="2" face="Verdana">RAY RICHARDSON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="273" name="encuesta" id="273" type="radio"></td>
<td width="95%" align="left"><label for="273"><font size="2" face="Verdana">RED HOT CHILI PEPPERS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="274" name="encuesta" id="274" type="radio"></td>
<td width="95%" align="left"><label for="274"><font size="2" face="Verdana">RENNY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="275" name="encuesta" id="275" type="radio"></td>
<td width="95%" align="left"><label for="275"><font size="2" face="Verdana">REYKON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="276" name="encuesta" id="276" type="radio"></td>
<td width="95%" align="left"><label for="276"><font size="2" face="Verdana">RICARDO ARJONA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="277" name="encuesta" id="277" type="radio"></td>
<td width="95%" align="left"><label for="277"><font size="2" face="Verdana">RICARDO LOPEZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="278" name="encuesta" id="278" type="radio"></td>
<td width="95%" align="left"><label for="278"><font size="2" face="Verdana">RICKY MARTIN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="279" name="encuesta" id="279" type="radio"></td>
<td width="95%" align="left"><label for="279"><font size="2" face="Verdana">RIGU</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="280" name="encuesta" id="280" type="radio"></td>
<td width="95%" align="left"><label for="280"><font size="2" face="Verdana">RIHANNA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="281" name="encuesta" id="281" type="radio"></td>
<td width="95%" align="left"><label for="281"><font size="2" face="Verdana">RIXTON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="282" name="encuesta" id="282" type="radio"></td>
<td width="95%" align="left"><label for="282"><font size="2" face="Verdana">RM</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="283" name="encuesta" id="283" type="radio"></td>
<td width="95%" align="left"><label for="283"><font size="2" face="Verdana">ROBERT VOGU</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="284" name="encuesta" id="284" type="radio"></td>
<td width="95%" align="left"><label for="284"><font size="2" face="Verdana">ROBIN SCHULZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="285" name="encuesta" id="285" type="radio"></td>
<td width="95%" align="left"><label for="285"><font size="2" face="Verdana">ROGERS SKY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="286" name="encuesta" id="286" type="radio"></td>
<td width="95%" align="left"><label for="286"><font size="2" face="Verdana">ROMAN EL RO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="287" name="encuesta" id="287" type="radio"></td>
<td width="95%" align="left"><label for="287"><font size="2" face="Verdana">ROMEO SANTOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="288" name="encuesta" id="288" type="radio"></td>
<td width="95%" align="left"><label for="288"><font size="2" face="Verdana">ROMINA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="289" name="encuesta" id="289" type="radio"></td>
<td width="95%" align="left"><label for="289"><font size="2" face="Verdana">ROMINA PALMISANO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="290" name="encuesta" id="290" type="radio"></td>
<td width="95%" align="left"><label for="290"><font size="2" face="Verdana">RONALD BORJAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="291" name="encuesta" id="291" type="radio"></td>
<td width="95%" align="left"><label for="291"><font size="2" face="Verdana">RUBBY PEREZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="292" name="encuesta" id="292" type="radio"></td>
<td width="95%" align="left"><label for="292"><font size="2" face="Verdana">RUMMY OLIVO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="293" name="encuesta" id="293" type="radio"></td>
<td width="95%" align="left"><label for="293"><font size="2" face="Verdana">SAMIR BAZZI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="294" name="encuesta" id="294" type="radio"></td>
<td width="95%" align="left"><label for="294"><font size="2" face="Verdana">SAMY HAWK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="295" name="encuesta" id="295" type="radio"></td>
<td width="95%" align="left"><label for="295"><font size="2" face="Verdana">SANLUIS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="296" name="encuesta" id="296" type="radio"></td>
<td width="95%" align="left"><label for="296"><font size="2" face="Verdana">SARA GOMEZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="297" name="encuesta" id="297" type="radio"></td>
<td width="95%" align="left"><label for="297"><font size="2" face="Verdana">SCARLETT LINARES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="307" name="encuesta" id="307" type="radio"></td>
<td width="95%" align="left"><label for="307"><font size="2" face="Verdana">SEAN PAUL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="298" name="encuesta" id="298" type="radio"></td>
<td width="95%" align="left"><label for="298"><font size="2" face="Verdana">SEBASTIAN YATRA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="299" name="encuesta" id="299" type="radio"></td>
<td width="95%" align="left"><label for="299"><font size="2" face="Verdana">SELENA GOMEZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="300" name="encuesta" id="300" type="radio"></td>
<td width="95%" align="left"><label for="300"><font size="2" face="Verdana">SELENIA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="301" name="encuesta" id="301" type="radio"></td>
<td width="95%" align="left"><label for="301"><font size="2" face="Verdana">SERGIOANDRE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="302" name="encuesta" id="302" type="radio"></td>
<td width="95%" align="left"><label for="302"><font size="2" face="Verdana">SHAKIRA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="303" name="encuesta" id="303" type="radio"></td>
<td width="95%" align="left"><label for="303"><font size="2" face="Verdana">SHARLENE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="304" name="encuesta" id="304" type="radio"></td>
<td width="95%" align="left"><label for="304"><font size="2" face="Verdana">SHAWN HOOK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="305" name="encuesta" id="305" type="radio"></td>
<td width="95%" align="left"><label for="305"><font size="2" face="Verdana">SHAWN MENDES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="306" name="encuesta" id="306" type="radio"></td>
<td width="95%" align="left"><label for="306"><font size="2" face="Verdana">SIA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="308" name="encuesta" id="308" type="radio"></td>
<td width="95%" align="left"><label for="308"><font size="2" face="Verdana">SIETE BONCHONES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="309" name="encuesta" id="309" type="radio"></td>
<td width="95%" align="left"><label for="309"><font size="2" face="Verdana">SIGALA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="310" name="encuesta" id="310" type="radio"></td>
<td width="95%" align="left"><label for="310"><font size="2" face="Verdana">SIGILOSO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="311" name="encuesta" id="311" type="radio"></td>
<td width="95%" align="left"><label for="311"><font size="2" face="Verdana">SILVER Y DOUGLAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="312" name="encuesta" id="312" type="radio"></td>
<td width="95%" align="left"><label for="312"><font size="2" face="Verdana">SILVESTRE DANGOND</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="313" name="encuesta" id="313" type="radio"></td>
<td width="95%" align="left"><label for="313"><font size="2" face="Verdana">SIMPLE PLAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="314" name="encuesta" id="314" type="radio"></td>
<td width="95%" align="left"><label for="314"><font size="2" face="Verdana">SIN BANDERA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="315" name="encuesta" id="315" type="radio"></td>
<td width="95%" align="left"><label for="315"><font size="2" face="Verdana">SIXTO REIN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="316" name="encuesta" id="316" type="radio"></td>
<td width="95%" align="left"><label for="316"><font size="2" face="Verdana">SNAKEHIPS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="317" name="encuesta" id="317" type="radio"></td>
<td width="95%" align="left"><label for="317"><font size="2" face="Verdana">SOCIOO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="318" name="encuesta" id="318" type="radio"></td>
<td width="95%" align="left"><label for="318"><font size="2" face="Verdana">STEFAN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="319" name="encuesta" id="319" type="radio"></td>
<td width="95%" align="left"><label for="319"><font size="2" face="Verdana">SUM 41</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="320" name="encuesta" id="320" type="radio"></td>
<td width="95%" align="left"><label for="320"><font size="2" face="Verdana">TAYLOR SWIFT</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="321" name="encuesta" id="321" type="radio"></td>
<td width="95%" align="left"><label for="321"><font size="2" face="Verdana">TECUPAE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="322" name="encuesta" id="322" type="radio"></td>
<td width="95%" align="left"><label for="322"><font size="2" face="Verdana">TEO GALINDEZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="323" name="encuesta" id="323" type="radio"></td>
<td width="95%" align="left"><label for="323"><font size="2" face="Verdana">THALIA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="324" name="encuesta" id="324" type="radio"></td>
<td width="95%" align="left"><label for="324"><font size="2" face="Verdana">THE CHAINSMOKERS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="325" name="encuesta" id="325" type="radio"></td>
<td width="95%" align="left"><label for="325"><font size="2" face="Verdana">THE FRAY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="326" name="encuesta" id="326" type="radio"></td>
<td width="95%" align="left"><label for="326"><font size="2" face="Verdana">THE WEEKND</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="327" name="encuesta" id="327" type="radio"></td>
<td width="95%" align="left"><label for="327"><font size="2" face="Verdana">TICO Y JAVI</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="328" name="encuesta" id="328" type="radio"></td>
<td width="95%" align="left"><label for="328"><font size="2" face="Verdana">TIMBALAND</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="329" name="encuesta" id="329" type="radio"></td>
<td width="95%" align="left"><label for="329"><font size="2" face="Verdana">TIMEFLIES</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="330" name="encuesta" id="330" type="radio"></td>
<td width="95%" align="left"><label for="330"><font size="2" face="Verdana">TOMAS THE LATIN BOY</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="331" name="encuesta" id="331" type="radio"></td>
<td width="95%" align="left"><label for="331"><font size="2" face="Verdana">TOMATES FRITOS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="332" name="encuesta" id="332" type="radio"></td>
<td width="95%" align="left"><label for="332"><font size="2" face="Verdana">TOMMY DRACO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="333" name="encuesta" id="333" type="radio"></td>
<td width="95%" align="left"><label for="333"><font size="2" face="Verdana">TOVE LO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="334" name="encuesta" id="334" type="radio"></td>
<td width="95%" align="left"><label for="334"><font size="2" face="Verdana">TREO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="335" name="encuesta" id="335" type="radio"></td>
<td width="95%" align="left"><label for="335"><font size="2" face="Verdana">TRINA MEDINA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="336" name="encuesta" id="336" type="radio"></td>
<td width="95%" align="left"><label for="336"><font size="2" face="Verdana">TRIPLAND</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="337" name="encuesta" id="337" type="radio"></td>
<td width="95%" align="left"><label for="337"><font size="2" face="Verdana">TWENTY ONE PILOTS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="338" name="encuesta" id="338" type="radio"></td>
<td width="95%" align="left"><label for="338"><font size="2" face="Verdana">VARGAS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="339" name="encuesta" id="339" type="radio"></td>
<td width="95%" align="left"><label for="339"><font size="2" face="Verdana">VICTOR CARRUYO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="340" name="encuesta" id="340" type="radio"></td>
<td width="95%" align="left"><label for="340"><font size="2" face="Verdana">VICTOR DAVID</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="341" name="encuesta" id="341" type="radio"></td>
<td width="95%" align="left"><label for="341"><font size="2" face="Verdana">VICTOR DRIJA</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="342" name="encuesta" id="342" type="radio"></td>
<td width="95%" align="left"><label for="342"><font size="2" face="Verdana">VICTOR MANUELLE</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="343" name="encuesta" id="343" type="radio"></td>
<td width="95%" align="left"><label for="343"><font size="2" face="Verdana">VICTOR MUÐOZ </font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="344" name="encuesta" id="344" type="radio"></td>
<td width="95%" align="left"><label for="344"><font size="2" face="Verdana">VINILOVERSUS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="345" name="encuesta" id="345" type="radio"></td>
<td width="95%" align="left"><label for="345"><font size="2" face="Verdana">VLADI RAMIREZ</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="346" name="encuesta" id="346" type="radio"></td>
<td width="95%" align="left"><label for="346"><font size="2" face="Verdana">VOCAL SONG</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="347" name="encuesta" id="347" type="radio"></td>
<td width="95%" align="left"><label for="347"><font size="2" face="Verdana">WISIN</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="348" name="encuesta" id="348" type="radio"></td>
<td width="95%" align="left"><label for="348"><font size="2" face="Verdana">X AMBASSADORS</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="349" name="encuesta" id="349" type="radio"></td>
<td width="95%" align="left"><label for="349"><font size="2" face="Verdana">YANDEL</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="350" name="encuesta" id="350" type="radio"></td>
<td width="95%" align="left"><label for="350"><font size="2" face="Verdana">YESY MILANO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="351" name="encuesta" id="351" type="radio"></td>
<td width="95%" align="left"><label for="351"><font size="2" face="Verdana">YORDANO</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="352" name="encuesta" id="352" type="radio"></td>
<td width="95%" align="left"><label for="352"><font size="2" face="Verdana">ZARA LARSSON</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="353" name="encuesta" id="353" type="radio"></td>
<td width="95%" align="left"><label for="353"><font size="2" face="Verdana">ZAYN MALIK</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="354" name="encuesta" id="354" type="radio"></td>
<td width="95%" align="left"><label for="354"><font size="2" face="Verdana">ZEDD</font></label></td>
</tr>
<tr>
<td width="5%" align="left"><input value="355" name="encuesta" id="355" type="radio"></td>
<td width="95%" align="left"><label for="355"><font size="2" face="Verdana">ZOE</font></label></td>
</tr>
</tbody></table>
</td>
</tr>
</tbody></table>
<br>
<table width="400" cellspacing="0" cellpadding="3" bordercolor="#6379A8" border="1">
<tbody><tr>
<td width="400" bgcolor="#D7DFEE">
<table width="100%" cellspacing="0" cellpadding="2" border="0">
<tbody><tr>
<td width="100%" align="center"><input src="boton-enviar.gif" type="submit" name="" width="250" type="image" height="22" border="0"></td>
</tr>
</tbody></table>
</td>
</tr>
</tbody>
</table>
</form>
</td>
</tr>
</tbody></table>
</body>
</html>"""
|
import torch
from torch.utils.data import Dataset
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
class imdbTrainDataset(Dataset):
    def __init__(self, vocab_size=20000, maxlen=250):
        # keras returns ((x_train, y_train), (x_test, y_test))
        train, _ = imdb.load_data(num_words=vocab_size, maxlen=maxlen)
        # pad to the requested length (the original hard-coded 250 here,
        # ignoring the maxlen parameter)
        self.data = torch.from_numpy(pad_sequences(train[0], maxlen=maxlen))
        self.labels = torch.from_numpy(train[1].flatten())
    def __len__(self):
        return len(self.data)
    def __getitem__(self, item):
        return self.data[item], self.labels[item]
class imdbTestDataset(Dataset):
    def __init__(self, vocab_size=20000, maxlen=250):
        _, test = imdb.load_data(num_words=vocab_size, maxlen=maxlen)
        self.data = torch.from_numpy(pad_sequences(test[0], maxlen=maxlen))
        self.labels = torch.from_numpy(test[1].flatten())
    def __len__(self):
        return len(self.data)
    def __getitem__(self, item):
        return self.data[item], self.labels[item]
# train_data = imdbTrainDataset()
# test_data = imdbTestDataset()
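# A hedged usage sketch (not from the original file; the batch size is an
# illustrative assumption): wiring the datasets into DataLoaders.
# from torch.utils.data import DataLoader
# train_loader = DataLoader(imdbTrainDataset(), batch_size=64, shuffle=True)
# test_loader = DataLoader(imdbTestDataset(), batch_size=64)
# for sequences, labels in train_loader:
#     pass  # sequences: LongTensor of shape [batch, maxlen]; labels: 0/1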
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""makeTextFile.py -- create text file
"""
import os
ls = os.linesep
fname = ''
# get filename
while True:
fname = raw_input('Input filename: ')
if os.path.exists(fname):
print "ERROR: '%s' already exists" % fname
else:
break
# get file content (text) lines
allContent = []
print "\nEnter lines ('.' by itself quit).\n"
# loop until user terminates input
while True:
entry = raw_input('> ')
if entry == '.':
break
else:
allContent.append(entry)
# write lines to file with proper line-endling
fobj = open(fname, 'w')
fobj.writelines(['%s%s' % (x, ls) for x in allContent])
fobj.close()
print 'DONE!'
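# Example session (illustrative):
#   Input filename: notes.txt
#   Enter lines ('.' by itself to quit).
#   > hello
#   > world
#   > .
#   DONE!
# notes.txt then contains the two entered lines.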
|
from little_notes.ext.auth import login_manager
from little_notes.ext.db.models import User
@login_manager.user_loader
def load_user(user_id: str):
    # Flask-Login passes the id stored in the session as a string; cast it
    # back to the integer primary key before querying.
    return User.query.get(int(user_id))
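# Context sketch (assumed, not shown in this file): Flask-Login calls
# load_user with the id it stored in the session and treats a None return
# value as an anonymous user. The manager is typically bound to the app
# elsewhere with login_manager.init_app(app).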
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# Copyright (c) 2008-2021 pyglet contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Event dispatch framework.
All objects that produce events in pyglet implement :py:class:`~pyglet.event.EventDispatcher`,
providing a consistent interface for registering and manipulating event
handlers. A commonly used event dispatcher is `pyglet.window.Window`.
Event types
===========
For each event dispatcher there is a set of events that it dispatches; these
correspond with the type of event handlers you can attach. Event types are
identified by their name, for example, ``on_resize``.
If you are creating a new class which implements
:py:class:`~pyglet.event.EventDispatcher`, or want to add new events
to an existing dispatcher, you must call `EventDispatcher.register_event_type`
for each event type::
class MyDispatcher(pyglet.event.EventDispatcher):
# ...
MyDispatcher.register_event_type('on_resize')
Attaching event handlers
========================
An event handler is simply a function or method that is called when a system
or program event happens. There are several ways to add a handler for an event.
When the dispatcher object is available as a global variable, it is convenient
to use the `event` decorator::
@window.event
def on_resize(width, height):
# ...
Here `window` is a variable containing an instance of `pyglet.window.Window`,
which inherits from the `EventDispatcher` class. This decorator assumes that
the function is named after the event. To use the decorator with a function of
another name, pass the name of the event as an argument to the decorator::
    @window.event('on_resize')
    def my_resize_handler(width, height):
        # ...
The most universal way to add an event handler is to call the `push_handlers`
method on the dispatcher object::
window.push_handlers(on_resize)
window.push_handlers(on_resize=my_handler)
window.push_handlers(on_resize=obj.my_handler)
window.push_handlers(obj)
This method accepts both positional and keyword arguments. For keyword
arguments, the name of the argument is used as the name of the event.
Otherwise, the name of the passed function or method is used as the event name.
If an object is passed as a positional argument, all its methods that match
the names of registered events are added as handlers. For example::
class MyDispatcher(pyglet.event.EventDispatcher):
# ...
MyDispatcher.register_event_type('on_resize')
MyDispatcher.register_event_type('on_keypress')
class Listener(object):
def on_resize(self, w, h):
# ...
def on_keypress(self, key):
# ...
def other_method(self):
# ...
dispatcher = MyDispatcher()
listener = Listener()
dispatcher.push_handlers(listener)
In this example both `listener.on_resize` and `listener.on_keypress` are
registered as handlers for the respective events, but `listener.other_method`
is not affected, because it doesn't correspond to a registered event type.
Finally, yet another option is to subclass the dispatcher and override the event
handler methods::
class MyDispatcher(pyglet.event.EventDispatcher):
def on_resize(self, width, height):
# ...
If both a parent class and the child class have a handler for the same event,
only the child's version of the method is invoked. If both event handlers are
needed, the child's handler must explicitly call the parent's handler::
    class ParentDispatcher(pyglet.event.EventDispatcher):
        def on_resize(self, w, h):
            # ...
    class ChildDispatcher(ParentDispatcher):
        def on_resize(self, w, h):
            super().on_resize(w, h)
            # ...
Multiple handlers for an event
==============================
A single event can be handled by multiple handlers. The handlers are invoked in
the order opposite to the order of their registration. So, the handler
registered last will be the first to be invoked when the event is fired.
An event handler can return the value `pyglet.event.EVENT_HANDLED` to prevent
the subsequent handlers from running. Alternatively, if the handler returns
`pyglet.event.EVENT_UNHANDLED` or doesn't return an explicit value, the next
event handler will be called (if there is one).
Stopping event propagation is useful to prevent a single user action from
being handled by two unrelated systems. For instance, a game using the WASD
keys for movement should suppress movement while a chat window is open: a
keypress event should be handled either by the chat or by the character
movement system, but not both.
The order of execution of event handlers can be changed by assigning them a
priority. The default priority for all handlers is 0. If a handler needs to
run before other handlers even though it was added early, it can be assigned
priority 1. Conversely, a handler added late can be assigned priority -1 to
run late.
Priority can be assigned by passing the `priority` named parameter to the
`push_handlers` method::
window.push_handlers(on_resize, priority=-1)
It can also be specified by using the `@priority` decorator on handler
functions and methods::
@pyglet.event.priority(1)
def on_resize(w, h):
# ...
class Listener(object):
@pyglet.event.priority(-1)
def on_resize(self, w, h):
# ...
listener = Listener()
dispatcher.push_handlers(on_resize, listener)
Removing event handlers
=======================
In most cases it is not necessary to remove event handlers manually. When
the handler is an object method, the event dispatcher keeps only a weak
reference to it. This means that the dispatcher will not prevent the object
from being deleted when it goes out of scope; in that case the handler is
silently removed from the list of handlers.
.. note::
This means the following example will not work, because the pushed object
will fall out of scope and be collected::
dispatcher.push_handlers(MyHandlerClass())
Instead, you must make sure to keep a reference to the object before pushing
it. For example::
my_handler_instance = MyHandlerClass()
dispatcher.push_handlers(my_handler_instance)
When explicit removal of handlers is required, the method `remove_handlers`
can be used. Its arguments are the same as the arguments of `push_handlers`::
dispatcher.remove_handlers(on_resize)
dispatcher.remove_handlers(on_resize=my_handler)
dispatcher.remove_handlers(on_resize=obj.my_handler)
dispatcher.remove_handlers(obj)
When an object is passed as a positional parameter to `remove_handlers`, all its
methods are removed from the handlers, regardless of their names.
Dispatching events
==================
pyglet uses a single-threaded model for all application code. Normally event
handlers are invoked while running an event loop by calling::
    pyglet.app.run()
or::
    event_loop = pyglet.app.EventLoop()
    event_loop.run()
Application code can invoke events directly by calling the method
`dispatch_event` of `EventDispatcher`::
dispatcher.dispatch_event('on_resize', 640, 480)
The first argument of this method is the event name, which has to have been
previously registered using the `register_event_type` class method. The rest
of the arguments are passed to event handlers.
The handlers of an event fired by calling `dispatch_event` are called directly
from this method. If any of the handlers returns `EVENT_HANDLED`, then
`dispatch_event` also returns `EVENT_HANDLED`; otherwise (or if there were no
handlers for the given event) it returns `EVENT_UNHANDLED`.
"""
import inspect
from functools import partial
from weakref import WeakMethod
EVENT_HANDLED = True
EVENT_UNHANDLED = None
class EventException(Exception):
"""An exception raised when an event handler could not be attached.
"""
pass
def priority(prio=0):
"""A decorator to set priority on handler functions and handlers.
Default priority is 0. Handlers with higher priority are invoked first.
Recommended priority values are 1 and -1. In most cases more than 3 priority
classes are not required.
"""
def wrap(func):
func.__priority = prio
return func
return wrap
class EventDispatcher(object):
"""Generic event dispatcher interface.
See the module docstring for usage.
"""
    # This field will contain the queues of event handlers for every supported
    # event type. It is lazily initialized on the instance when the first event
    # handler is added. After that it contains a dictionary of lists, in which
# handlers are sorted according to their priority:
# {'on_event': [(priority1, handler1),
# (priority2, handler2)]}
# Handlers are invoked until any one of them returns EVENT_HANDLED
_handlers = None
@classmethod
def register_event_type(cls, name):
"""Registers an event type with the dispatcher.
Registering event types allows the dispatcher to validate event
handler names as they are attached, and to search attached objects for
suitable handlers.
:Parameters:
`name` : str
Name of the event to register.
"""
if not hasattr(cls, 'event_types'):
cls.event_types = []
cls.event_types.append(name)
def _get_names_from_handler(self, handler):
"""Yields event names handled by a handler function, method or object.
"""
if callable(handler) and hasattr(handler, '__name__'):
# Take the name of a function or a method.
yield handler.__name__
else:
# Iterate through all the methods of an object and yield those that
# match registered events.
for name in dir(handler):
if (name in self.event_types and
callable(getattr(handler, name))):
yield name
def _finalize_weak_method(self, name, weak_method):
"""Called to remove dead WeakMethods from handlers."""
handlers = self._handlers[name]
i = 0
# This is not the most efficient way of removing several elements from
# an array, but in almost all cases only one element has to be removed.
while i < len(handlers):
if handlers[i][1] is weak_method:
del handlers[i]
else:
i += 1
def _remove_handler_from_queue(self, handlers_queue, handler):
"""Remove all instances of a handler from a queue for a single event.
If `handler` is an object, then all the methods bound to this object
will be removed from the queue.
"""
i = 0
# This is not the most efficient way of removing several elements from
# an array, but in almost all cases only one element has to be removed.
while i < len(handlers_queue):
_, registered_handler = handlers_queue[i]
if isinstance(registered_handler, WeakMethod):
# Wrapped in WeakMethod in `push_handler`.
registered_handler = registered_handler()
if (registered_handler is handler or
getattr(registered_handler, '__self__', None) is handler):
del handlers_queue[i]
else:
i += 1
def push_handler(self, name, handler, priority=None):
"""Adds a single event handler.
If the `handler` parameter is callable, it will be registered directly.
Otherwise it's expected to be an object having a method with a name
matching the name of the event.
        If the `priority` parameter is not None, it is used as the priority.
Otherwise, the value specified by the @priority decorator is used. If
neither is specified the default value of 0 is used.
"""
if not hasattr(self.__class__, 'event_types'):
self.__class__.event_types = []
if name not in self.event_types:
raise EventException('Unknown event "{}"'.format(name))
if not callable(handler):
            # If handler is not callable, search it for a method with
# a name matching the name of the event.
if hasattr(handler, name):
method = getattr(handler, name)
if not callable(method):
raise EventException(
'Field {} on "{}" is not callable'.format(
name, repr(handler)))
handler = method
else:
raise EventException(
'"{}" is not callable and doesn\'t have '
'a method "{}"'.format(repr(handler), name))
# Determine priority
if priority is None:
priority = getattr(handler, '__priority', 0)
# A hack for the case when handler is a MagicMock.
if type(priority) not in (int, float):
priority = int(priority)
# Wrap methods in weak references.
if inspect.ismethod(handler):
handler = WeakMethod(handler, partial(
self._finalize_weak_method, name))
# Create handler queues if necessary.
if self._handlers is None:
self._handlers = {}
self.push_handlers(self)
if name not in self._handlers:
self._handlers[name] = []
handlers = self._handlers[name]
# Finding the place to insert the new handler. All the previous handlers
# have to have strictly higher priority.
#
# A binary search would theoretically be faster, but a) there's
# usually just a handful of handlers, b) we are going to shift
# the elements in the list anyway, which will take O(n), c) we are
# doing this only during handler registration, and we are more
        # concerned with the efficiency of dispatching events.
i = 0
while i < len(handlers) and handlers[i][0] > priority:
i += 1
handlers.insert(i, (priority, handler))
def push_handlers(self, *args, priority=None, **kwargs):
"""Adds new handlers to registered events.
Multiple positional and keyword arguments can be provided.
For a keyword argument, the name of the event is taken from the name
of the argument. If the argument is callable, it is used
as a handler directly. If the argument is an object, it is searched for
a method with the name matching the name of the event/argument.
When a callable named object (usually a function or a method) is passed
as a positional argument, its name is used as the event name. When
an object is passed as a positional argument, it is scanned for methods
with names that match the names of registered events. These methods are
added as handlers for the respective events.
        An optional `priority` argument can be used to override the priority
        of all the added handlers. The default priority is 0, and handlers
        with higher priority will be invoked first. The priority specified in
        the call takes precedence over the priority specified in the
        @priority decorator.
EventException is raised if the event name is not registered.
"""
if not hasattr(self.__class__, 'event_types'):
self.__class__.event_types = []
for handler in args:
for name in self._get_names_from_handler(handler):
self.push_handler(name, handler, priority=priority)
for name, handler in kwargs.items():
            self.push_handler(name, handler, priority=priority)
def remove_handler(self, name_or_handler=None, handler=None, name=None):
"""Removes a single event handler.
        Can be called in one of the following ways::
dispatcher.remove_handler(my_handler)
dispatcher.remove_handler(handler=my_handler)
dispatcher.remove_handler("event_name", my_handler)
dispatcher.remove_handler(name="event_name", handler=my_handler)
If the event name is specified, only the queue of handlers for that
event is scanned, and the handler is removed from it. Otherwise all
handler queues are scanned and the handler is removed from all of them.
If the handler is an object, then all the registered handlers that are
bound to this object are removed. Unlike `push_handler`, the method
names in the class are not taken into account.
No error is raised if the event handler is not set.
"""
if handler is None:
# Called with one positional argument (example #1)
assert name is None
assert name_or_handler is not None
handler = name_or_handler
elif name is not None:
# Called with keyword arguments for handler and name (example #4)
assert name_or_handler is None
else:
# Called with two positional arguments, or only with handler as
# a keyword argument (examples #2, #3)
name = name_or_handler
if name is not None:
if name in self._handlers:
self._remove_handler_from_queue(self._handlers[name], handler)
else:
for handlers_queue in self._handlers.values():
self._remove_handler_from_queue(handlers_queue, handler)
def remove_handlers(self, *args, **kwargs):
"""Removes event handlers from the event handlers queue.
See :py:meth:`~pyglet.event.EventDispatcher.push_handlers` for the
        accepted argument types. Handlers passed as positional arguments
        are removed from all events, regardless of their names.
No error is raised if any handler does not appear among
the registered handlers.
"""
for handler in args:
self.remove_handler(None, handler)
for name, handler in kwargs.items():
self.remove_handler(name, handler)
def dispatch_event(self, event_type, *args):
"""Dispatch a single event to the attached handlers.
        The event is propagated to all handlers from the top of the stack
until one returns `EVENT_HANDLED`. This method should be used only by
:py:class:`~pyglet.event.EventDispatcher` implementors; applications
should call the ``dispatch_events`` method.
Since pyglet 1.2, the method returns `EVENT_HANDLED` if an event
handler returned `EVENT_HANDLED` or `EVENT_UNHANDLED` if all events
returned `EVENT_UNHANDLED`. If no matching event handlers are in the
stack, ``False`` is returned.
:Parameters:
`event_type` : str
Name of the event.
`args` : sequence
Arguments to pass to the event handler.
:rtype: bool or None
:return: (Since pyglet 1.2) `EVENT_HANDLED` if an event handler
returned `EVENT_HANDLED`; `EVENT_UNHANDLED` if one or more event
handlers were invoked but returned only `EVENT_UNHANDLED`;
otherwise ``False``. In pyglet 1.1 and earlier, the return value
is always ``None``.
"""
if not hasattr(self.__class__, 'event_types'):
self.__class__.event_types = []
if event_type not in self.event_types:
raise EventException(
'Attempted to dispatch an event of unknown event type "{}". '
'Event types have to be registered by calling '
'DispatcherClass.register_event_type({})'.format(
event_type, repr(event_type)))
if self._handlers is None:
# Initialize the handlers with the object itself.
self._handlers = {}
self.push_handlers(self)
handlers_queue = self._handlers.get(event_type, ())
for _, handler in handlers_queue:
if isinstance(handler, WeakMethod):
handler = handler()
assert handler is not None
try:
if handler(*args):
return EVENT_HANDLED
except TypeError as exception:
self._raise_dispatch_exception(
event_type, args, handler, exception)
return EVENT_UNHANDLED
@staticmethod
def _raise_dispatch_exception(event_type, args, handler, exception):
# A common problem in applications is having the wrong number of
# arguments in an event handler. This is caught as a TypeError in
# dispatch_event but the error message is obfuscated.
#
# Here we check if there is indeed a mismatch in argument count,
# and construct a more useful exception message if so. If this method
# doesn't find a problem with the number of arguments, the error
# is re-raised as if we weren't here.
n_args = len(args)
# Inspect the handler
argspecs = inspect.getfullargspec(handler)
handler_args = argspecs.args
handler_varargs = argspecs.varargs
handler_defaults = argspecs.defaults
n_handler_args = len(handler_args)
# Remove "self" arg from handler if it's a bound method
if inspect.ismethod(handler) and handler.__self__:
n_handler_args -= 1
# Allow *args varargs to overspecify arguments
if handler_varargs:
n_handler_args = max(n_handler_args, n_args)
# Allow default values to overspecify arguments
if (n_handler_args > n_args and handler_defaults and
n_handler_args - len(handler_defaults) <= n_args):
n_handler_args = n_args
if n_handler_args != n_args:
if inspect.isfunction(handler) or inspect.ismethod(handler):
descr = f"'{handler.__name__}' at {handler.__code__.co_filename}:{handler.__code__.co_firstlineno}"
else:
descr = repr(handler)
raise TypeError("The '{0}' event was dispatched with {1} arguments, "
"but your handler {2} accepts only {3} arguments.".format(
event_type, len(args), descr, len(handler_args)))
else:
raise exception
def event(self, *args):
"""Function decorator for an event handler.
Usage::
win = window.Window()
@win.event
            def on_resize(width, height):
# ...
or::
@win.event('on_resize')
            def foo(width, height):
# ...
"""
if len(args) == 0: # @window.event()
def decorator(func):
name = func.__name__
self.push_handler(name, func)
return func
return decorator
elif inspect.isroutine(args[0]): # @window.event
func = args[0]
name = func.__name__
self.push_handler(name, func)
return args[0]
elif isinstance(args[0], str): # @window.event('on_resize')
name = args[0]
def decorator(func):
self.push_handler(name, func)
return func
return decorator
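# A minimal, self-contained usage sketch (not part of the original module):
# it exercises only the APIs defined above — register_event_type, the
# @priority decorator, push_handlers, and EVENT_HANDLED short-circuiting.
if __name__ == '__main__':
    class Clicker(EventDispatcher):
        pass
    Clicker.register_event_type('on_click')
    @priority(1)
    def first(x):
        print('first sees', x)  # runs first: priority 1 beats the default 0
    def second(x):
        print('second sees', x)
        return EVENT_HANDLED  # stops propagation: later handlers are skipped
    def never(x):
        print('never printed')
    clicker = Clicker()
    clicker.push_handlers(on_click=never)  # registered first, so it runs last
    clicker.push_handlers(on_click=second)
    clicker.push_handlers(on_click=first)
    clicker.dispatch_event('on_click', 42)
    # prints "first sees 42" then "second sees 42"; `never` is short-circuited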
|
import os
import re
import torch
import matplotlib.pyplot as plt
from src.loader import load_dataset
"""
模型效果测试类
"""
# 支持中文
plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
class EvalData:
def __init__(self, model, config, logger):
self.config = config
self.model = model
self.logger = logger
self.test_data = load_dataset(config, shuffle=False)
self.list_data = self.test_data.dataset.list_data
self.rel_schema = self.test_data.dataset.rel_schema
self.index_to_label = dict((y, x) for x, y in self.rel_schema.items())
def eval(self, epoch):
        self.logger.info('Evaluating model after epoch %d:' % epoch)
self.pre_dict = {'o1_acc': 0,
'rel_acc': 0,
'o2_acc': 0,
'full_match_acc': 0}
self.model.eval()
for index, batch_data in enumerate(self.test_data):
text_data = self.list_data[index * self.config['batch_size']: (index + 1) * self.config['batch_size']]
if torch.cuda.is_available():
batch_data = [d.cuda() for d in batch_data]
input_id = batch_data[0]
with torch.no_grad():
                rel_pred, bio_pred = self.model(input_id)  # no labels given; predict with the current model parameters
self.get_result(rel_pred, bio_pred, text_data)
self.show_result()
return self.pre_dict
def get_result(self, rel_pred, bio_pred, text_data):
rel_pred = torch.argmax(rel_pred, dim=-1)
bio_pred = torch.argmax(bio_pred, dim=-1)
        for rel_p, bio_p, info in zip(rel_pred, bio_pred, text_data):
            o1, o2, rel, sentence = info
            bio_p = bio_p.cpu().detach().tolist()
            pred_o1, pred_o2 = self.decode(bio_p, sentence)
            pred_rel = self.index_to_label[int(rel_p)]
self.pre_dict['o1_acc'] += int(pred_o1 == o1)
self.pre_dict['rel_acc'] += int(pred_rel == rel)
self.pre_dict['o2_acc'] += int(pred_o2 == o2)
if pred_o1 == o1 and pred_rel == rel and pred_o2 == o2:
self.pre_dict['full_match_acc'] += 1
def decode(self, pred_label, context):
pred_label = "".join([str(i) for i in pred_label])
pred_obj = self.seek_pattern('01*', pred_label, context)
pred_value = self.seek_pattern('23*', pred_label, context)
return pred_obj, pred_value
def seek_pattern(self, pattern, pred_label, context):
pred_obj = re.search(pattern, pred_label)
if pred_obj:
s, e = pred_obj.span()
pred_obj = context[s:e]
else:
pred_obj = ""
return pred_obj
def show_result(self):
for key, value in self.pre_dict.items():
self.logger.info("%s : %s " % (key, value / len(self.list_data)))
self.pre_dict[key] = value / len(self.list_data)
self.logger.info('--------------------')
return
def plot_and_save(self, epoch, o1_accs, rel_accs, losses, full_accs):
best_f1 = max(full_accs)
        pic_path = self.config['model_path']
        x = range(epoch)
        plt.figure()
plt.plot(x, o1_accs, label='o1_num')
plt.plot(x, rel_accs, label='rel_num')
plt.plot(x, losses, label='train loss')
plt.plot(x, full_accs, label='full_num')
plt.xlabel('epoch')
plt.ylabel('num')
        plt.title('Training curves, best full match=%f' % best_f1)
plt.legend()
plt.savefig(os.path.join(pic_path, "report-%s-%s-%s-%f.png" % (
self.config['model_type'],
self.config['learning_rate'],
self.config['rel_loss_ratio'],
best_f1)))
|
from typing import List, Any
from talipp.indicators.Indicator import Indicator
from talipp.indicators.EMA import EMA
class TEMA(Indicator):
"""
Triple Exponential Moving Average
Output: a list of floats
"""
def __init__(self, period: int, input_values: List[float] = None, input_indicator: Indicator = None):
super().__init__()
self.period = period
self.ema = EMA(period)
self.add_sub_indicator(self.ema)
self.ema_ema = EMA(period)
self.ema_ema_ema = EMA(period)
self.add_managed_sequence(self.ema_ema)
self.add_managed_sequence(self.ema_ema_ema)
self.initialize(input_values, input_indicator)
def _calculate_new_value(self) -> Any:
if not self.ema.has_output_value():
return None
self.ema_ema.add_input_value(self.ema[-1])
if not self.ema_ema.has_output_value():
return None
self.ema_ema_ema.add_input_value(self.ema_ema[-1])
if not self.ema_ema_ema.has_output_value():
return None
return 3.0 * self.ema[-1] - 3.0 * self.ema_ema[-1] + self.ema_ema_ema[-1]
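# A minimal usage sketch (assumes talipp's list-style indicator interface
# used above; the price values are made up):
if __name__ == '__main__':
    closes = [10.0, 10.5, 11.2, 10.8, 11.5, 12.0, 12.3, 12.1, 12.6, 13.0]
    tema = TEMA(period=3, input_values=closes)
    print(tema[-1])  # most recent TEMA value
    tema.add_input_value(13.4)  # incremental update with a new close
    print(tema[-1])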
|
#!/usr/bin/env python
import os, subprocess, signal
train_dir = os.path.dirname(os.path.realpath(__file__))
enclosed_dir = os.path.normpath(os.path.join(train_dir, '../'))
caffe_dir = os.path.abspath(os.path.join(train_dir, '../../../deps/simnets'))
subprocess.check_call('%s/generate_blob.py' % enclosed_dir, shell=True)
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
subprocess.check_call('%s/tools/extra/hyper_train.py %s/net.prototmp %s/train_plan.json --gpu all' % (caffe_dir, train_dir,train_dir), shell=True, cwd=enclosed_dir)
except subprocess.CalledProcessError:
    print('Error calling hyper_train script')
|
def default_newrepr(self):
return f'Instance of {self.__class__.__name__}, vars = {vars(self)}'
def betterrepr(newstr=None, newrepr=default_newrepr):
def wrapped(cls):
if newstr is not None:
cls.__str__ = newstr
cls.__repr__ = newrepr
return cls
return wrapped
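# A minimal usage sketch for the decorator above ('Point' is a made-up class):
if __name__ == '__main__':
    @betterrepr()
    class Point:
        def __init__(self, x, y):
            self.x = x
            self.y = y

    p = Point(1, 2)
    print(repr(p))  # Instance of Point, vars = {'x': 1, 'y': 2}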
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import os
from time import sleep
from random import randint
# lock categories
RWLOCK = 'rwlock' # exclusive lock
#TODO shared lock: READLOCK = 'readlock'
def lock(filename, lockcat, max_attempts=10, max_wait_ms=200):
for i in range(0, max_attempts):
success = attempt_lock(filename, lockcat)
if success:
assert is_locked(filename, lockcat)
assert os.path.isfile(pid_filename(filename, lockcat))
return
# wait before another attempt
wait_ms = randint(0, max_wait_ms)
wait_seconds = wait_ms / 1000.0
sleep(wait_seconds)
raise Exception("couldn't lock '%s' with lock category %s in %s attempts"
% (filename, lockcat, max_attempts))
def release(filename, lockcat):
if not is_locked(filename, lockcat):
return
# remove the pid file, then remove the lock directory
os.unlink(pid_filename(filename, lockcat))
os.rmdir(lockdir_name(filename, lockcat))
return
def attempt_lock(filename, lockcat):
'''Tries to lock the file.
    Returns True if successful, False if unsuccessful.
'''
try:
os.mkdir(lockdir_name(filename, lockcat))
except OSError:
# couldn't lock
return False
# locked successfully; put a file with my PID into the lock directory
pid = os.getpid()
with open(pid_filename(filename, lockcat), 'w') as f:
f.write('%d' % pid)
return True
def lockdir_name(filename, lockcat):
'''return the name of the lock directory for the file'''
return filename + '_' + lockcat
def pid_filename(filename, lockcat):
'''return the name of the lock pid file for the file'''
return os.path.join(lockdir_name(filename, lockcat), 'pid')
def is_locked(filename, lockcat):
return os.path.isdir(lockdir_name(filename, lockcat))
def pid_of_lock(filename, lockcat):
'''return the pid of the process that has the file locked'''
if not is_locked(filename, lockcat):
return None
with open(pid_filename(filename, lockcat), 'r') as f:
pid = int(f.read())
return pid
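# A minimal usage sketch (assumes a writable working directory; 'data.csv'
# is a placeholder name -- the file itself need not exist):
if __name__ == '__main__':
    target = 'data.csv'
    lock(target, RWLOCK)
    try:
        print('locked by pid %d' % pid_of_lock(target, RWLOCK))
        # ... exclusive access to data.csv goes here ...
    finally:
        release(target, RWLOCK)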
|
import classad
import htcondor
import os
import time
from PersonalCondor import PersonalCondor
from Utils import Utils
JOB_SUCCESS = 0
JOB_FAILURE = 1
class CondorJob(object):
def __init__(self, job_args):
self._job_args = job_args
def Submit(self, wait=True):
# Submit the job defined by submit_args
Utils.TLog("Running job with arguments: " + str(self._job_args))
schedd = htcondor.Schedd()
submit = htcondor.Submit(self._job_args)
try:
with schedd.transaction() as txn:
self._cluster_id = submit.queue(txn)
        except Exception as e:
            print("Job submission failed: {0}".format(e))
return JOB_FAILURE
Utils.TLog("Job running on cluster " + str(self._cluster_id))
# Wait until job has finished running?
        if wait:
self.WaitForFinish()
# If we got this far, we assume the job succeeded.
return JOB_SUCCESS
def WaitForFinish(self, timeout=240):
schedd = htcondor.Schedd()
for i in range(timeout):
ads = schedd.query("ClusterId == %d" % self._cluster_id, ["JobStatus"])
Utils.TLog("Ads = " + str(ads))
# When the job is complete, ads will be empty
if len(ads) == 0:
break
else:
status = ads[0]["JobStatus"]
if status == 5:
Utils.TLog("Job was placed on hold. Aborting.")
return JOB_FAILURE
time.sleep(1)
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from ml import model_evaluation
def best_estimator(estimator,
X_train,
y_train,
cv,
param_grid,
scoring='accuracy',
n_jobs=-1,
verbose=1):
"""
Hyperparameters optimization: finds optimal hyperparameters combination
using CV
"""
grid = GridSearchCV(estimator=estimator,
param_grid=param_grid,
scoring=scoring,
n_jobs=n_jobs,
cv=cv,
verbose=verbose)
grid.fit(X_train, y_train)
print('# Best estimator stats optimized for', scoring, ':')
print()
print('Best index:', grid.best_index_)
print()
print('CV scores for each search done:', grid.cv_results_['mean_test_score'])
print()
print("CV score for the best estimator found: %0.5f (std %0.5f)" % (grid.best_score_, grid.cv_results_['std_test_score'][grid.best_index_]))
print()
print('Best params found:', grid.best_params_)
return grid
def plot_grid_search(cv_results,
grid_param_1,
grid_param_2,
name_param_1,
name_param_2):
# Get Test Scores Mean and std for each grid search
scores_mean = cv_results['mean_test_score']
scores_mean = np.array(scores_mean).reshape(len(grid_param_2),len(grid_param_1))
scores_sd = cv_results['std_test_score']
scores_sd = np.array(scores_sd).reshape(len(grid_param_2),len(grid_param_1))
# Plot Grid search scores
_, ax = plt.subplots(1,1)
# Param1 is the X-axis, Param 2 is represented as a different curve (color line)
for idx, val in enumerate(grid_param_2):
ax.plot(grid_param_1, scores_mean[idx,:], '-o', label= name_param_2 + ': ' + str(val))
ax.set_title("Grid Search Scores", fontsize=20, fontweight='bold')
ax.set_xlabel(name_param_1, fontsize=16)
ax.set_ylabel('CV Average Score', fontsize=16)
ax.legend(loc="best", fontsize=15)
    ax.grid(True)
return plt
def nested_cv(estimator,
X,
y,
param_grid,
scoring='accuracy',
inner_cv=2,
outer_cv=5,
n_jobs=-1,
verbose=1):
"""
Returns average cross-validation accuracy. This gives us a good estimate of what
to expect if we tune the hyperparameters of the estimator and then use it on unseen data.
"""
grid = GridSearchCV(estimator=estimator,
param_grid=param_grid,
scoring=scoring,
cv=inner_cv,
verbose=verbose)
scores = cross_val_score(grid,
X,
y,
scoring=scoring,
cv=outer_cv)
return np.mean(scores), np.std(scores)
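# A minimal usage sketch (assumes scikit-learn's SVC and the iris dataset,
# which are not part of this module):
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.svm import SVC

    X, y = load_iris(return_X_y=True)
    grid_params = {'C': [0.1, 1, 10], 'gamma': ['scale', 'auto']}
    mean_acc, std_acc = nested_cv(SVC(), X, y, param_grid=grid_params)
    print('nested CV accuracy: %0.3f (+/- %0.3f)' % (mean_acc, std_acc))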
|
import datetime
from django.utils import timezone
from test_plus.test import TestCase
from .factories import AnnouncementFactory
class TestAnnouncementFeed(TestCase):
def test_ok(self):
announcement = AnnouncementFactory()
response = self.get("announcements:feed")
self.assertResponseContains(announcement.title, response, html=False)
self.assertResponseContains(announcement.description, response, html=False)
def test_limit_items(self):
"""A limited number of items is in the feed."""
AnnouncementFactory(
title="Not going to be there",
expires_at=timezone.now() - datetime.timedelta(days=1),
)
for i in range(5):
AnnouncementFactory()
response = self.get("announcements:feed")
assert "Not going to be there" not in response.content.decode()
|
from display.display import ClockDisplay
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import cv2
import random
import numpy as np
import traceback
import tensorflow as tf
from . import dataset_util
from ...file.file_operate import FilesOp
from ...file.parse_annotation import Anno_OP
from .data_provider import tf_noise_padd
import imagedt
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
class Record_Writer(ImageReader):
"""docstring for RecordWriter"""
def __init__(self):
super(Record_Writer, self).__init__()
self.error_images = []
def image_to_tfexample(self, image_data, image_format, height, width, class_id):
return tf.train.Example(features=tf.train.Features(
feature={'image/encoded': dataset_util.bytes_feature(image_data),
'image/format': dataset_util.bytes_feature(image_format),
'image/class/label': dataset_util.int64_feature(class_id),
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width), }))
    def convert_to_tfrecord(self, f_lines, save_dir, dataset_type='train'):
"""Converts a file to TFRecords."""
print('Generating TFRecords......' )
with tf.Session() as sess:
piece_count = 50000
num_pieces = int(len(f_lines) / piece_count + 1)
for num_piece in range(num_pieces):
start_id, end_id = num_piece*piece_count, min(len(f_lines), (num_piece+1)*piece_count)
                output_file = os.path.join(save_dir, dataset_type + str(num_piece+1).zfill(6) + '.tfrecord')
with tf.python_io.TFRecordWriter(output_file) as record_writer:
for index in range(start_id, end_id):
try:
img = cv2.imread(f_lines[index][0])
                            height, width, channel = img.shape
                            encoded_image = cv2.imencode('.png', img)[1].tostring()  # re-encoded as PNG regardless of the source extension
# encoded_image = tf.gfile.FastGFile(f_lines[index][0], 'rb').read()
except Exception as e:
traceback.print_exc(file=sys.stdout)
print("error image file {0}...".format(os.path.basename(f_lines[index][0])))
self.error_images.append(os.path.basename(f_lines[index][0]))
continue
# tf example format: NCHW
image_format = os.path.basename(f_lines[index][0]).split('.')[-1].encode()
example = self.image_to_tfexample(encoded_image, image_format, height, width, int(f_lines[index][1]))
record_writer.write(example.SerializeToString())
print("finished: ", index + 1, '/', len(f_lines), "; image height: {0}, width: {1}".format(height, width))
sys.stdout.write('\n')
sys.stdout.flush()
    def map_int(self, m_list):
        return np.array([int(float(x)) for x in m_list])
def create_tf_example(self, sess, jpg_path, anno_infos):
with tf.gfile.GFile(jpg_path, 'rb') as fid:
encoded_image = fid.read()
# get infos
        channel = 3  # fixed to 3 channels for now
height, width = self.read_image_dims(sess, encoded_image)
image_type = os.path.basename(jpg_path).split('.')[-1]
image_format = image_type.encode()
# convert infos
anno_infos = np.array(anno_infos)
xmins = self.map_int(anno_infos[:, 0]) / float(width) # List of normalized left x coordinates in bounding box (1 per box)
ymins = self.map_int(anno_infos[:, 1]) / float(height) # List of normalized top y coordinates in bounding box (1 per box)
xmaxs = self.map_int(anno_infos[:, 2]) / float(width) # List of normalized right x coordinates in bounding box # (1 per box)
ymaxs = self.map_int(anno_infos[:, 3]) / float(height) # List of normalized bottom y coordinates in bounding box # (1 per box)
classes = self.map_int(anno_infos[:, 4]) # List of integer class id of bounding box (1 per box)
classes_text = anno_infos[:, 5] # List of string class name of bounding box (1 per box)
assert len(xmins) == len(ymins) == len(xmaxs) == len(ymaxs) == len(classes) == len(classes_text)
filename = jpg_path.encode() # Filename of the image. Empty if image is not from file
tf_example = tf.train.Example(features=tf.train.Features(
feature={'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
                     'image/channels': dataset_util.int64_feature(channel),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_image),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes), }))
return tf_example
def write_cls_records(self, train_images_file, save_dir, test_ratio=0):
with open(train_images_file, 'r') as f:
all_files = [item.strip().split('\t') for item in f.readlines()]
# save images file in tfrecord dir
imagedt.file.write_csv(all_files, os.path.join(save_dir,'image_files.txt'))
# shuffle all the files
random.shuffle(all_files)
sample_count = len(all_files)
# read file, write to tfrecords
test_count = int(sample_count * test_ratio)
if test_count:
test_samples = all_files[-test_count:]
self.convert_to_tfrecord(test_samples, save_dir, dataset_type='validation_')
# save error images
imagedt.file.write_txt(self.error_images, os.path.join(save_dir,'test_error_images.txt'))
train_samples = all_files[:sample_count-test_count]
        self.convert_to_tfrecord(train_samples, save_dir, dataset_type='train_')
# save error images
imagedt.file.write_txt(self.error_images, os.path.join(save_dir,'train_error_images.txt'))
    def convert_anno_info(self, data_dir, det_cls_name='3488'):
data_pairs = FilesOp.get_jpg_xml_pairs(data_dir)
examples = {}
for index, data_pair in enumerate(data_pairs):
jpg_path, xml_path = data_pair
anno_infos = Anno_OP.parse_lab_xml(xml_path)
# add default class and class_name
anno_infos = [item+['3488']*2 for item in anno_infos]
examples[jpg_path] = anno_infos
print("Read image pairs: ", len(examples))
return examples
    def write_detect_records(self, data_dir, save_dir, record_name='training_detect.tfrecord'):
        examples = self.convert_anno_info(data_dir)
record_name = os.path.join(save_dir, record_name)
writer = tf.python_io.TFRecordWriter(record_name)
with tf.Session() as sess:
for index, key_path in enumerate(examples):
tf_example = self.create_tf_example(sess, key_path, examples[key_path])
writer.write(tf_example.SerializeToString())
print("write record files: {0}/{1} ".format(index+1, len(examples)))
writer.close()
print("All class: 3488, class_name 3488, records save dir {}".format(save_dir))
RecordWriter = Record_Writer()
from tensorflow.python.platform import gfile
def write_pbmodel_summary(tf_pbmodel, log_dir):
    with tf.Session() as sess:
        model_filename = tf_pbmodel
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
g_in = tf.import_graph_def(graph_def)
train_writer = tf.summary.FileWriter(log_dir)
train_writer.add_graph(sess.graph)
|
from __future__ import absolute_import, print_function
from sage.all import matrix
__all__ = ["dim_affine_hull"]
def dim_affine_hull(points):
"""Return dimension of affine hull of given collection of points."""
return matrix([p - points[0] for p in points[1:]]).rank()
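# Editor's sanity-check sketch (assumes Sage vectors as inputs):
#
#     from sage.all import vector
#     pts = [vector([0, 0]), vector([1, 0]), vector([0, 1])]
#     dim_affine_hull(pts)  # -> 2: three non-collinear points span a plane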
|
import os
import json
from datetime import datetime, date
import dataclasses
from typing import List, Optional, Callable, Sequence, Any
import click
@click.group()
@click.option(
"--verbose/--quiet",
default=None,
is_flag=True,
show_default=True,
help="Change default log level",
)
def main(verbose: Optional[bool]) -> None:
"""
Parse a google takeout!
"""
import logging
from . import log
if verbose is not None:
if verbose:
log.logger = log.setup(level=logging.DEBUG)
else:
log.logger = log.setup(level=logging.ERROR)
SHARED = [
click.option("--cache/--no-cache", default=False, show_default=True),
click.option(
"-a",
"--action",
type=click.Choice(["repl", "summary", "json"]),
default="repl",
help="What to do with the parsed result",
show_default=True,
),
]
# decorator to apply shared arguments to inspect/merge
def shared_options(func: Callable[..., None]) -> Callable[..., None]:
for decorator in SHARED:
func = decorator(func)
return func
def _serialize_default(obj: Any) -> Any:
if isinstance(obj, Exception):
return {"type": type(obj).__name__, "value": str(obj)}
elif dataclasses.is_dataclass(obj):
d = dataclasses.asdict(obj)
assert "type" not in d
d["type"] = type(obj).__name__
return d
elif isinstance(obj, datetime):
return str(obj)
elif isinstance(obj, date):
return str(obj)
raise TypeError(f"No known way to serialize {type(obj)} '{obj}'")
def _handle_action(res: List[Any], action: str) -> None:
if action == "repl":
import IPython # type: ignore[import]
click.echo(f"Interact with the export using {click.style('res', 'green')}")
IPython.embed()
elif action == "json":
click.echo(json.dumps(res, default=_serialize_default))
else:
from collections import Counter
from pprint import pformat
click.echo(pformat(Counter([type(t).__name__ for t in res])))
@main.command(short_help="parse a takeout directory")
@shared_options
@click.argument("TAKEOUT_DIR", type=click.Path(exists=True), required=True)
def parse(cache: bool, action: str, takeout_dir: str) -> None:
"""
    Parse a takeout directory
"""
from .common import Res
from .models import BaseEvent
from .path_dispatch import TakeoutParser
tp = TakeoutParser(takeout_dir, error_policy="drop")
    # note: actually no exceptions here since they're dropped
res: List[Res[BaseEvent]] = list(tp.parse(cache=cache))
_handle_action(res, action)
@main.command(short_help="merge multiple takeout directories")
@shared_options
@click.argument("TAKEOUT_DIR", type=click.Path(exists=True), nargs=-1, required=True)
def merge(cache: bool, action: str, takeout_dir: Sequence[str]) -> None:
"""
Parse and merge multiple takeout directories
"""
from .path_dispatch import TakeoutParser
from .merge import cached_merge_takeouts, merge_events
from .models import DEFAULT_MODEL_TYPE
res: List[DEFAULT_MODEL_TYPE] = []
if cache:
res = list(cached_merge_takeouts(list(takeout_dir)))
else:
res = list(merge_events(*iter([TakeoutParser(p).parse(cache=False) for p in takeout_dir]))) # type: ignore[arg-type]
_handle_action(res, action)
@main.group(
name="cache_dir", invoke_without_command=True, short_help="interact with cache dir"
)
@click.pass_context
def cache_dir(ctx: click.Context) -> None:
"""
Print location of cache dir
"""
from .cache import takeout_cache_path
if ctx.invoked_subcommand is None:
click.echo(str(takeout_cache_path.absolute()))
@cache_dir.command(name="clear")
def cache_dir_remove() -> None:
"""
Remove the cache directory
"""
import shutil
from .cache import takeout_cache_path
click.echo(str(takeout_cache_path))
click.echo("Contents:")
for f in takeout_cache_path.rglob("*"):
print(f"\t{str(f)}")
if click.confirm("Really remove this directory?"):
shutil.rmtree(str(takeout_cache_path))
@main.command(name="move", short_help="move new google takeouts")
@click.option(
"--from",
"from_",
required=True,
help="Google takeout zip file",
type=click.Path(exists=True, file_okay=True, dir_okay=False),
)
@click.option(
"--to-dir",
required=True,
type=click.Path(file_okay=False, dir_okay=True, exists=True),
help="Directory which contains your Takeout files",
)
@click.option(
"--extract/--no-extract",
required=False,
default=True,
help="Whether or not to extract the zipfile",
)
def move(from_: str, to_dir: str, extract: bool) -> None:
"""
Utility command to help move/extract takeouts into the correct location
"""
import time
import tempfile
import zipfile
ts = int(time.time())
target = f"{to_dir}/Takeout-{ts}"
if not extract:
target += ".zip"
_safe_shutil_mv(from_, target)
else:
assert from_.endswith("zip")
zf = zipfile.ZipFile(from_)
with tempfile.TemporaryDirectory() as td:
click.echo(f"Extracting {from_} to {td}")
zf.extractall(path=td)
top_level = [f for f in os.listdir(td) if not f.startswith(".")]
if len(top_level) == 1 and top_level[0].lower().startswith("takeout"):
from_ = os.path.join(td, top_level[0])
_safe_shutil_mv(from_, target)
else:
raise RuntimeError(
f"Expected top-level 'Takeout' folder in extracted folder, contents are {top_level}"
)
def _safe_shutil_mv(from_: str, to: str) -> None:
import shutil
click.echo(f"Moving {from_} to {to}")
assert os.path.exists(from_)
assert not os.path.exists(to)
shutil.move(from_, to)
if __name__ == "__main__":
main(prog_name="google_takeout_parser")
|
# See license.txt for license details.
# Copyright (c) 2020-2021, Chris Withers
import os
from setuptools import setup, find_packages
base_dir = os.path.dirname(__file__)
setup(
name='giterator',
version='0.2.0',
author='Chris Withers',
author_email='chris@withers.org',
license='MIT',
description=(
"Python tools for doing git things."
),
long_description=open('README.rst').read(),
url='https://github.com/simplistix/giterator',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.9',
],
packages=find_packages(exclude=["tests"]),
zip_safe=False,
include_package_data=True,
extras_require=dict(
test=[
'pytest',
'pytest-cov',
'sybil',
'testfixtures',
],
build=['furo', 'sphinx', 'setuptools-git', 'twine', 'wheel']
),
entry_points={
'console_scripts': ['giterator=giterator.cli:main'],
}
)
|
import tensorflow as tf
from MetReg.base.base_model import BaseModel
from tensorflow.keras import models, Model
from tensorflow.keras import layers
class DNNRegressor(Model):
def __init__(self,
activation='relu',):
super().__init__()
self.regressor = None
        self.dense1 = layers.Dense(16, activation=activation)
        self.dense2 = layers.Dense(8, activation=activation)
        self.dense3 = layers.Dense(1)  # linear output head for regression
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
x = self.dense3(x)
return x
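# A minimal usage sketch (random data, purely for shape-checking):
if __name__ == '__main__':
    import numpy as np
    X = np.random.rand(32, 10).astype('float32')
    y = np.random.rand(32, 1).astype('float32')
    model = DNNRegressor()
    model.compile(optimizer='adam', loss='mse')
    model.fit(X, y, epochs=2, batch_size=8, verbose=0)
    print(model.predict(X[:2]))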
|
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import dgl
def vis_graph(g, title="", save_name=None):
if isinstance(g, nx.Graph):
pass
elif isinstance(g, np.ndarray):
g = nx.DiGraph(g)
elif isinstance(g, dgl.DGLGraph):
g = g.to_networkx()
else:
        raise TypeError('unknown format of input graph')
g = nx.Graph(g)
g = nx.DiGraph(g)
g = nx.to_numpy_matrix(g)
np.fill_diagonal(g, 0.0)
g = nx.DiGraph(g)
# g.remove_edges_from(g.selfloop_edges())
g.remove_nodes_from(list(nx.isolates(g)))
nx.draw_networkx(g, arrows=False, with_labels=False,
node_color="#fbb034",
node_size=450,
width=4.5) # networkx draw()
plt.draw() # pyplot draw()
plt.title(title)
plt.axis('off')
if save_name is not None:
plt.savefig(f"{save_name}.png", dpi=1000, bbox_inches='tight')
else:
plt.show()
plt.close()
def plot(x=None, y=None):
if x is None:
plt.plot(y)
else:
plt.plot(x, y)
plt.show()
plt.close()
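# A minimal usage sketch (random adjacency matrix; assumes a networkx
# version that still provides to_numpy_matrix, as vis_graph does):
if __name__ == '__main__':
    adj = (np.random.rand(8, 8) > 0.7).astype(float)
    vis_graph(adj, title='random graph')
    plot(y=[1, 4, 9, 16])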
|
r"""
###############################################################################
:mod:`OpenPNM.Algorithms` -- Algorithms on Networks
###############################################################################
Contents
--------
This submodule contains algorithms for performing simulations on pore networks
Classes
-------
.. autoclass:: GenericAlgorithm
:members:
.. autoclass:: Drainage
:members:
.. autoclass:: InvasionPercolation
:members:
.. autoclass:: FickianDiffusion
:members:
.. autoclass:: StokesFlow
:members:
.. autoclass:: OhmicConduction
:members:
.. autoclass:: FourierConduction
:members:
"""
from .__GenericAlgorithm__ import GenericAlgorithm
from .__GenericLinearTransport__ import GenericLinearTransport
from .__FickianDiffusion__ import FickianDiffusion
from .__FourierConduction__ import FourierConduction
from .__OhmicConduction__ import OhmicConduction
from .__StokesFlow__ import StokesFlow
from .__OrdinaryPercolation__ import OrdinaryPercolation
from .__InvasionPercolation__ import InvasionPercolation
from .__Drainage__ import Drainage
|
import os
from pathlib import Path
from qtpy import QtWidgets, QtCore, QtGui
from pyqtgraph.parametertree.Parameter import ParameterItem
from pyqtgraph.parametertree.parameterTypes.basetypes import WidgetParameterItem
from pyqtgraph.parametertree import Parameter
class FileDirWidget(QtWidgets.QWidget):
"""
================ =========================
**Attributes** **Type**
*value_changed* instance of pyqt Signal
*path* string
================ =========================
See Also
--------
browse_path
"""
value_changed = QtCore.Signal(str)
def __init__(self, init_path='D:/Data', file_type=False):
super().__init__()
self.filetype = file_type
self.path = init_path
self.initUI()
self.base_path_browse_pb.clicked.connect(self.browse_path)
def browse_path(self):
"""
Browse the path attribute if exist.
See Also
--------
set_path
"""
if self.filetype is True:
folder_name = QtWidgets.QFileDialog.getOpenFileName(None, 'Choose File', os.path.split(self.path)[0])[0]
elif self.filetype is False:
folder_name = QtWidgets.QFileDialog.getExistingDirectory(None, 'Choose Folder', self.path)
elif self.filetype == "save":
            folder_name = QtWidgets.QFileDialog.getSaveFileName(None, 'Enter a Filename', os.path.split(self.path)[0])[0]
        if folder_name:  # execute only if the user didn't cancel the file selection
self.set_path(folder_name)
self.value_changed.emit(folder_name)
def set_path(self, path_file):
"""
Set the base path attribute with the given path_file.
=============== =========== ===========================
**Parameters** **Type** **Description**
*path_file* string the pathname of the file
=============== =========== ===========================
"""
if isinstance(path_file, Path):
path_file = str(path_file)
self.base_path_edit.setPlainText(path_file)
self.path = path_file
def get_value(self):
"""
Get the value of the base_path_edit attribute.
Returns
-------
string
the path name
"""
return self.base_path_edit.toPlainText()
def initUI(self):
"""
Init the User Interface.
"""
self.hor_layout = QtWidgets.QHBoxLayout()
self.base_path_edit = QtWidgets.QPlainTextEdit(self.path)
self.base_path_edit.setMaximumHeight(50)
self.base_path_browse_pb = QtWidgets.QPushButton()
self.base_path_browse_pb.setText("")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(":/icons/Icon_Library/Browse_Dir_Path.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.base_path_browse_pb.setIcon(icon3)
self.hor_layout.addWidget(self.base_path_edit)
verlayout = QtWidgets.QVBoxLayout()
verlayout.addWidget(self.base_path_browse_pb)
verlayout.addStretch()
self.hor_layout.addLayout(verlayout)
self.hor_layout.setSpacing(0)
self.setLayout(self.hor_layout)
class FileDirParameterItem(WidgetParameterItem):
def makeWidget(self):
"""
        Make an initialized file browser widget using the parameter options dictionary (the 'readonly' key).
Returns
-------
w : filebrowser
The initialized file browser.
See Also
--------
file_browser
"""
self.asSubItem = True
self.hideWidget = False
if 'filetype' in self.param.opts:
self.filetype = self.param.opts['filetype']
else:
self.filetype = True
self.w = FileDirWidget(self.param.value(), file_type=self.filetype)
# if 'tip' in self.param.opts:
# self.w.setToolTip(self.param.opts['tip'])
self.w.base_path_edit.setReadOnly(self.param.opts['readonly'])
self.w.value = self.w.get_value
self.w.setValue = self.w.set_path
self.w.sigChanged = self.w.value_changed
return self.w
class FileDirParameter(Parameter):
"""
Editable string; displayed as large text box in the tree.
See Also
--------
file_browserParameterItem
"""
itemClass = FileDirParameterItem
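# Editor's registration sketch (assumes pyqtgraph's parameter-type registry;
# the 'file_dir' type name is an arbitrary choice):
#
#     from pyqtgraph.parametertree import registerParameterType
#     registerParameterType('file_dir', FileDirParameter, override=True)
#     param = Parameter.create(name='data file', type='file_dir',
#                              value='D:/Data', filetype=True)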
|
from server import Server
import os
def install_apt_packages(server):
print("installing apt packages")
server.update_apt_packages()
server.install_apt_package("python3-pip")
packages = server.get_installed_apt_packages()
assert "python3-pip" in packages
def install_pip3_packages(server):
version = server.get_pip3_version()
assert version.startswith("20.")
assert version.endswith("3.8")
server.install_pip3_package("dataset")
server.install_pip3_package("bottle")
packages = server.get_installed_pip3_packages()
assert "dataset" in packages
assert "bottle" in packages
if __name__ == "__main__":
server = Server(host = "3.129.67.194", user="ubuntu", key_filename="/Users/greg/.ssh/lightsail-ohio-gsd.pem")
#install_apt_packages(server)
install_pip3_packages(server)
print("done.")
|
#from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
img_width = 150
img_height = 150
def load_model(MODEL_2):
    model = Sequential()
    model.add(Conv2D(32, (3, 3), input_shape=(img_width, img_height, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.load_weights(MODEL_2)
return model
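# A minimal usage sketch (assumes a weights file trained with this exact
# architecture; 'weights.h5' is a placeholder path):
#
#     model = load_model('weights.h5')
#     probs = model.predict(batch)  # batch shape: (N, 150, 150, 3)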
|
import random
import time
import itertools
from time import sleep
import numpy as np
from operator import itemgetter
import operator
from ..util import get_direction, get_random_direction, AStarGraph, AStarSearch, get_closest, get_closest_astar
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# cool_list structure
# {
#     'x_in': 1,
#     'y_in': 2,
#     'x_out': 3,
#     'y_out': 4
# }
class Webjocke_Custom(object):
def __init__(self):
###############################################################################
#
# Config Variables
#
###############################################################################
        self.max_diamonds = 3 # Larger is heavier on the CPU
self.min_path_cost = 50
#plt.ion()
#lt.show()
def get_next_step(self, start, end, enemies, portals):
from_ = start
to_ = end
#print ("Me: ", from_)
#print ("Goal: ", to_)
graph = AStarGraph(enemies)
best_result = None
best_cost = None
# Without teleporters
best_result, best_cost = AStarSearch(from_, to_, graph)
# With teleportes, one way
result, tempstep = AStarSearch(from_, portals[0], graph)
tempstep += AStarSearch(portals[1], to_, graph)[1]
if tempstep < best_cost:
best_cost = tempstep
best_result = result
print("Portal is shorter one way!")
#print(tempstep)
# With teleportes, other way
result, tempstep = AStarSearch(from_, portals[1], graph)
tempstep += AStarSearch(portals[0], to_, graph)[1]
if tempstep < best_cost:
best_cost = tempstep
best_result = result
print("Portal is shorter other way!")
#print(tempstep)
#print ("Best Route: ", result)
#print ("Steps to goal position: ", cost)
try:
return list(best_result[1]), best_result, graph
except:
return [4,4], [(4,4), (4,4)], graph
def create_full_list(self, combos, diamonds, my_pos, my_home):
#print("Lenght of combos: "+str(len(combos)))
#print("Diamonds: "+str(diamonds))
#print("========FUN STARTS?=======")
big_list = []
for path in combos:
temp = []
temp.append(my_pos)
for obj in path:
temp.append(diamonds[obj])
temp.append(my_home)
#print(temp)
big_list.append(temp)
return big_list
#print("========FUN ENDING?=======")
def get_shortest_path(self, big_list, enemies, portals):
graph = AStarGraph(enemies)
lowest_steps = None
best_path = None
for path in big_list:
steps = 0
for index, obj in enumerate(path):
if index != len(path)-1:
think_cost = 0
# Without teleporters
result, cost = AStarSearch(path[index], path[index+1], graph)
think_cost = cost
#print(think_cost)
# With teleportes, one way
tempstep = AStarSearch(path[index], portals[0], graph)[1]
tempstep += AStarSearch(portals[1], path[index+1], graph)[1]
if tempstep < think_cost:
think_cost = tempstep
print("Portal is shorter one way!")
#print(tempstep)
# With teleportes, other way
tempstep = AStarSearch(path[index], portals[1], graph)[1]
tempstep += AStarSearch(portals[0], path[index+1], graph)[1]
if tempstep < think_cost:
think_cost = tempstep
print("Portal is shorter other way!")
#print(tempstep)
#print("===============")
steps += think_cost
if best_path == None or steps < lowest_steps:
lowest_steps = steps
best_path = path
return best_path, lowest_steps
# Get distance between all the objects in the current order
def get_distance(self, cool_path):
distance = 0
        length = len(cool_path)-1
        for index, obj in enumerate(cool_path):
            if index == length:
break
else:
x_dist = abs(obj["x_out"] - cool_path[index+1]["x_in"])
y_dist = abs(obj["y_out"] - cool_path[index+1]["y_in"])
distance += x_dist+y_dist
return distance
def create_cool_list(self, combos, diamonds, teleporters, my_home, my_pos, length, my_dias):
new_list = []
for path in combos:
# Add two different, because of teleporters can go two ways
# 1
new_path = []
amount_of_tele = 0 # NEW
teleporter_number = 99#length-1
teleporter_number_2 = 98#length-2 # NEW
#print("==================================")
#print("Path: "+str(path))
#print("Diamonds: "+str(diamonds))
#print("Teleport Number 1: "+str(teleporter_number)) # NEW
#print("Teleport Number 2: "+str(teleporter_number_2)) # NEW
# add player_pos
new_path.append({"x_out":my_pos["x"], "x_in":my_pos["x"], "y_out":my_pos["y"], "y_in":my_pos["y"]})
# Add all the diamonds and check for the teleporter
for objecttt in path:
#print("Objectt = "+str(objecttt))
if objecttt == teleporter_number:
#print("Was Teleport 1")
amount_of_tele += 1
new_path.append({"x_out":teleporters[1]["position"]["x"], "x_in":teleporters[0]["position"]["x"], "y_out":teleporters[1]["position"]["y"], "y_in":teleporters[0]["position"]["y"]})
elif objecttt == teleporter_number_2: # NEW
#print("Was Teleport 2")
amount_of_tele += 1
new_path.append({"x_out":teleporters[0]["position"]["x"], "x_in":teleporters[1]["position"]["x"], "y_out":teleporters[0]["position"]["y"], "y_in":teleporters[1]["position"]["y"]}) # NEW
else:
#print("Was A Diamond")
new_path.append({"x_out":diamonds[objecttt]["x"], "x_in":diamonds[objecttt]["x"], "y_out":diamonds[objecttt]["y"], "y_in":diamonds[objecttt]["y"]})
if len(new_path)+1 == 5-my_dias+amount_of_tele:
break
# Add Home Path
new_path.append({"x_out":my_home["x"], "x_in":my_home["x"], "y_out":my_home["y"], "y_in":my_home["y"]})
new_list.append(new_path)
#print("==================================")
'''
# 2
new_path = []
teleporter_number = len(combos)
# add player_pos
new_path.append({"x_out":my_pos["x"], "x_in":my_pos["x"], "y_out":my_pos["y"], "y_in":my_pos["y"]})
# Add all the diamonds and check for the teleporter
for objecttt in path:
if objecttt == teleporter_number:
new_path.append({"x_out":teleporters[0]["position"]["x"], "x_in":teleporters[1]["position"]["x"], "y_out":teleporters[0]["position"]["y"], "y_in":teleporters[1]["position"]["y"]})
else:
new_path.append({"x_out":diamonds[objecttt]["x"], "x_in":diamonds[objecttt]["x"], "y_out":diamonds[objecttt]["y"], "y_in":diamonds[objecttt]["y"]})
if len(new_path) == 5:
break
# Add Home Path
new_path.append({"x_out":my_home["x"], "x_in":my_home["x"], "y_out":my_home["y"], "y_in":my_home["y"]})
new_list.append(new_path)
'''
return new_list
    def get_all_combos(self, nice_list, my_dias):
        length_of_comb = 5-my_dias
        if len(nice_list) < length_of_comb:
            length_of_comb = len(nice_list)
        return [list(x) for x in itertools.permutations(nice_list, length_of_comb)]
def get_random_direction(self, bots):
new_cords = ()
my_random = random.randint(1, 4)
if my_random == 1:
new_cords = (-1, 0)
elif my_random == 2:
new_cords = (1, 0)
elif my_random == 3:
new_cords = (0, -1)
elif my_random == 4:
new_cords = (0, 1)
return new_cords
def get_8_closest_diamonds(self, diamonds, my_pos, my_home, enemies, portals):
unsorted_diamonds = []
graph = AStarGraph(enemies)
for dia in diamonds:
# Without teleporters
cost = AStarSearch(my_home, dia, graph)[1]
# With teleportes, one way
tempstep = AStarSearch(my_pos, portals[0], graph)[1]
tempstep += AStarSearch(portals[1], dia, graph)[1]
if tempstep < cost:
cost = tempstep
print("Portal is shorter one way!")
#print(tempstep)
# With teleportes, other way
tempstep = AStarSearch(my_pos, portals[1], graph)[1]
tempstep += AStarSearch(portals[0], dia, graph)[1]
if tempstep < cost:
cost = tempstep
print("Portal is shorter other way!")
#print(tempstep)
unsorted_diamonds.append([dia, cost])
sorted_8_diamonds_with_costs = sorted(unsorted_diamonds, key=itemgetter(1))[:self.max_diamonds]
sorted_8_diamonds_without_cost = []
for dia in sorted_8_diamonds_with_costs:
sorted_8_diamonds_without_cost.append(dia[0])
return sorted_8_diamonds_without_cost
    def get_all_possible_paths(self, potential_diamonds, my_pos, my_home, my_diamonds):
        length_of_comb = 5-my_diamonds
        if len(potential_diamonds) < length_of_comb:
            length_of_comb = len(potential_diamonds)
        paths = [list(x) for x in itertools.permutations(potential_diamonds, length_of_comb)]
for path in paths:
path.insert(0, my_pos)
path.append(my_home)
return paths
def diamonds_weight_method(self, board, diamonds):
awesome_list = []
'''
List with all players and their paths and costs to all diamonds
[
["botname", {
# dia cost path
1: (4, [(3,5), (2,6), (2,6)]),
2: (4, [(3,5), (2,6), (2,6)])
}]
]
'''
all_players = board.bots
for player in all_players:
if player["diamonds"] == 5:
continue
player_pos = (player["position"]["x"], player["position"]["y"])
cool_list = []
cool_list.append(player["name"])
cool_list.append({})
for index, dia in enumerate(diamonds):
graph = AStarGraph(all_players+board.gameObjects)
best_result, best_cost = AStarSearch(player_pos, dia, graph)
cool_list[1][index] = (best_cost, best_result)
awesome_list.append(cool_list)
# List of all diamonds and their weight
diamonds_weight = {}
'''
{
# dia_index, weight
1: 37,
2: 37
}
'''
# Fill with all the diamonds
for index, dia in enumerate(diamonds):
diamonds_weight[index] = 0
# Modify the weight variable
for player_info in awesome_list:
multiplyer = -1
if player_info[0] == "webjocke":
multiplyer = 1
for index, dia in enumerate(diamonds_weight):
diamonds_weight[index] += multiplyer / (1+player_info[1][index][0])
return diamonds_weight
def other_players_closest(self, players, diamonds, enemies, portals):
cool_lst = []
for player in players:
#cool_lst.append(get_closest_astar(diamonds, player, enemies, portals)[0])
cool_lst.append(get_closest(diamonds, player))
#print(cool_lst)
return cool_lst
def next_move(self, board_bot, board):
# === Variables ===
diamonds = [] # [(x,y),(x,y),(x,y)]
for dia in board.diamonds:
diamonds.append((dia["x"], dia["y"]))
teleporters = [] # [(x,y),(x,y)]
for obj in board.gameObjects:
teleporters.append((obj["position"]["x"], obj["position"]["y"]))
my_diamonds = board_bot["diamonds"] # [{'x': 5, 'y': 6}, {'x': 5, 'y': 6}]
my_pos = (board_bot["position"]["x"], board_bot["position"]["y"]) # (x,y)
my_home = (board_bot["base"]["x"], board_bot["base"]["y"]) # (x,y)
max_diamonds = self.max_diamonds # larger is more heavy on the cpu
goal_position = None # {'x': 5, 'y': 6}
enemies = []
players = []
portals = []
for bad in board.bots:
if bad["name"] != "webjocke":
enemies.append((bad["position"]["x"], bad["position"]["y"]))
players.append((bad["position"]["x"], bad["position"]["y"]))
for bad in board.gameObjects:
enemies.append((bad["position"]["x"], bad["position"]["y"]))
portals.append((bad["position"]["x"], bad["position"]["y"]))
###############################################################################
#
# Diamond Weight Method
#
###############################################################################
#diamonds_weight = self.diamonds_weight_method(board, diamonds)
#sorted_list = sorted(diamonds_weight.items(), key=operator.itemgetter(1), reverse=True)
#goal_diamonds_index = sorted_list[0][0]
###############################################################################
#
# Get closest if not other players is closer - Method
#
###############################################################################
#not_to_go_to = self.other_players_closest(players, diamonds, enemies, portals)
#my_closest = get_closest_astar(diamonds, my_pos, enemies, portals)[0]
#while my_closest in not_to_go_to:
# diamonds.remove(my_closest)
# my_closest = get_closest_astar(diamonds, my_pos, enemies, portals)[0]
# #print("Someone else is closer then me...")
#if my_closest != None:
# goal_diamonds_index = my_closest
#else:
# goal_diamonds_index = my_home
###############################################################################
#
# First Diamond Method
#
###############################################################################
goal_diamonds_index = get_closest_astar(diamonds, my_pos, enemies, portals)[0]
###############################################################################
#
        # First, let's check if we've got a full bag and need to return home
#
###############################################################################
if my_diamonds == 5:
goal_position = my_home
#elif my_diamonds == 0:
# goal_position = get_closest_astar(diamonds, my_pos, enemies, portals)
else:
# If diamonds is to far away, go home instead
#if get_closest_astar([goal_diamonds_index], my_pos, enemies, portals)[1] > 10 and len(players) > 2 and len(diamonds) < 3:
# goal_position = my_home
# print("To Long Away, going home")
#else:
goal_position = goal_diamonds_index
#goal_position = get_closest_astar(diamonds, my_pos, enemies, portals)
next_step, result, graph = self.get_next_step((my_pos[0], my_pos[1]), (goal_position[0], goal_position[1]), enemies, portals)
###############################################################################
#
# Show the plot
#
###############################################################################
'''
list_to_show = []
for row in range(0,10):
col_list = []
for col in range(0, 10):
col_list.append((0,0,0))
list_to_show.append(col_list)
list_to_show[my_pos[1]][my_pos[0]] = (255,255,0)
list_to_show[my_home[1]][my_home[0]] = (255,255,255)
list_to_show[goal_position[1]][goal_position[0]] = (0,0,255)
for index, dia in enumerate(diamonds):
list_to_show[dia[1]][dia[0]] = (0,max(10, int(100+diamonds_weight[index]*155)),0)
for dia in enemies:
list_to_show[dia[1]][dia[0]] = (255,0,0)
'''
###############################################################################
#
# Teleporter check
#
###############################################################################
'''
list_to_show = []
for row in range(0,10):
col_list = []
for col in range(0, 10):
cord = (col, row)
cost = get_closest_astar([cord], my_pos, [], portals)[1]
col_list.append((0,255-cost*12,0))
list_to_show.append(col_list)
list_to_show[portals[0][1]][portals[0][0]] = (0,0,255)
list_to_show[portals[1][1]][portals[1][0]] = (0,0,255)
list_to_show[my_pos[1]][my_pos[0]] = (255,255,0)
plt.clf()
plt.imshow(np.array(list_to_show))
plt.xlim(-0.5,9.5)
plt.ylim(9.5,-0.5)
plt.draw()
plt.pause(0.001)
'''
return get_direction(my_pos[0], my_pos[1], next_step[0], next_step[1])
#else:
'''
elif my_diamonds >= 2:
# Sorting out the closest 8
potensial_diamonds = self.get_8_closest_diamonds(diamonds, my_pos, my_home, enemies, portals)
# Get all combos of the diamonds, and add my position and the home position
all_paths = self.get_all_possible_paths(potensial_diamonds, my_pos, my_home, my_diamonds)
# Get shortest paths of them
path_to_take, cost = self.get_shortest_path(all_paths, enemies, portals)
#print(path_to_take, cost)
goal_position = path_to_take[1]
#goal_position = get_closest_astar(diamonds, my_pos, enemies)
'''
###############################################################################
#
# Let's figure out what diamond to hunt
#
###############################################################################
###############################################################################
#
# Let's generate a list of all combinations of paths between diamonds
#
###############################################################################
# Create list with all combos of 5 diamonds (using <=19 dia)
#lengh_of_diamonds = len(diamonds) #+2 # +1 to have space for one teleporter <-------------------
#if lengh_of_diamonds > max_diamonds: <-------------------
# lengh_of_diamonds = max_diamonds <-------------------
#list_of_diamond_indexes = range(0, lengh_of_diamonds) <-------------------
#combos = self.get_all_combos(list_of_diamond_indexes, my_diamonds) <-------------------
#print("==== Creating Combos =====")
#print(combos)
#print("Amount of combos: "+str(len(combos)))
#print("Total Diamonds: "+str(len(diamonds)))
#print("Only using "+str(self.max_diamonds)+" Diamonds")
###############################################################################
#
        # Insert diamond coordinates into the list and add my_pos and home_pos for each path
#
###############################################################################
# Generates big list, with me pos, home pos and all the diamonds pos
#big_list = self.create_full_list(combos, diamonds, my_pos, my_home) <-------------------
#print("==== Creating Big List =====")
#print("Amount of combos: "+str(len(big_list)))
#print(big_list)
'''
elif my_diamonds == 0:
players_and_shortest = []
for player in players:
if(len(players) < len(diamonds)):
players_and_shortest.append(get_closest_astar(diamonds, player, enemies))
for dia in players_and_shortest:
if dia in diamonds:
diamonds.remove(dia)
goal_position = get_closest_astar(diamonds, my_pos, enemies)'''
###############################################################################
#
# Let's calculate the shortest path of them all
#
###############################################################################
#shortest_path, steps = self.get_shortest_path(big_list, enemies) <-------------------
#print("==== Getting the shortest path =====")
#print("Amount of Steps: "+str(steps))
#print(shortest_path)
###############################################################################
#
# Run to the first obj on the shortest path
#
###############################################################################
#goal_position = shortest_path[1] <-------------------
''' Skipping cool_list
# Create the cool list using create_cool_list()
cool_list = self.create_cool_list(combos, diamonds, teleporters, my_home, my_pos, lengh_of_diamonds, board_bot["diamonds"])
#print("==== cool_list =====")
#print(cool_list)
#print(len(cool_list))
#print("==== cool_list =====")
# Move towards the next object in the perfect path on the board
self.goal_position = {"x":best_path[1]["x_in"], "y":best_path[1]["y_in"]}
if best_path[1]["x_in"] != best_path[1]["x_out"] or best_path[1]["y_in"] != best_path[1]["y_out"]:
self.is_teleporter = True
END COMMENT'''
#temp = get_closest(board.diamonds, board_bot["position"]) #board.diamonds[0]
#goal_position = (temp["x"], temp["y"])
#print("Closest Diamonds: "+str(goal_position))
###############################################################################
#
# Let's figure out the shortest path to that goal_position (usually a diamond)
#
###############################################################################
# Calculate move according to goal position
#delta_x, delta_y = get_direction(my_pos[0], my_pos[1], goal_position[0], goal_position[1])
#going_to = {"x":current_position["x"]+delta_x,"y":current_position["y"]+delta_y}
#first_going_to = {"x":current_position["x"]+delta_x,"y":current_position["y"]+delta_y}
#temp_going_to = {"x":current_position["x"]+delta_x,"y":current_position["y"]+delta_y}
### TODO
        # Fix it so the bot moves in a random direction if it gets stuck on another bot --- improve!
        # Avoid portals (in case we don't want to step into them on this particular move)
        # Count ms from the start of the main loop, then run immediately once minUpdateTime (or so) has elapsed
|
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import AllowAny
from django.http import HttpResponse
from rest_framework import status
@api_view()
@permission_classes([AllowAny,])
def HealthCheckView(request):
return Response(data=None, status=status.HTTP_200_OK)
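# Editor's wiring sketch (assumes a project urls.py; the 'health/' prefix is
# an arbitrary choice):
#
#     from django.urls import path
#     urlpatterns = [path('health/', HealthCheckView)]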
|
import logging
import sys
from common.io.net import NetReader
from core.exceptions.config_exceptions import ConfigNoFileNameGivenException
from core.io.config import ConfigReader
from core.io.lines import LineReader
from core.io.periodic_ean import PeriodicEANReader, PeriodicEANWriter
from core.io.ptn import PTNReader
from core.model.impl.simple_dict_graph import SimpleDictGraph
from core.util.config import Config
from common.util.line_reconstructor import reconstruct_lines
from read_timetable.util.timetable_reconstructor import reconstruct_timetable
if __name__ == '__main__':
logger = logging.getLogger(__name__)
if len(sys.argv) < 2:
raise ConfigNoFileNameGivenException()
ConfigReader.read(sys.argv[1])
logger.info("Begin reading configuration")
timetable_file_name = Config.getStringValueStatic("filename_visum_timetable_file")
period_length = Config.getIntegerValueStatic("period_length")
time_units_per_minute = Config.getIntegerValueStatic("time_units_per_minute")
logger.info("Finished reading configuration")
logger.info("Begin reading input data")
timetable_net = NetReader.parse_file(timetable_file_name)
ptn = PTNReader.read(ptn=SimpleDictGraph())
line_concept = LineReader.read(ptn)
ean = PeriodicEANReader.read(periodic_ean=SimpleDictGraph())[0]
logger.info("Finished reading input data")
logger.info("Begin reconstructing timetable data")
line_dict = reconstruct_lines(line_concept, ptn, timetable_net)
reconstruct_timetable(line_dict, timetable_net, ean, ptn, period_length, time_units_per_minute)
logger.info("Finished reconstructing timetable data")
logger.info("Begin writing output data")
PeriodicEANWriter.write(ean, write_events=False, write_activities=False, write_timetable=True)
logger.info("Finished writing output data")
|
import asyncio
from collections import defaultdict
import contextlib
import hashlib
import io
import os
from pathlib import Path
import subprocess
import textwrap
import unittest
import peru.async as async
import peru.plugin as plugin
import shared
from shared import SvnRepo, GitRepo, HgRepo, assert_contents
class TestDisplayHandle(io.StringIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
def test_plugin_fetch(context, type, fields, dest):
handle = TestDisplayHandle()
async.run_task(
plugin.plugin_fetch(context, type, fields, dest, handle))
return handle.getvalue()
def test_plugin_get_reup_fields(context, type, fields):
handle = TestDisplayHandle()
return async.run_task(
plugin.plugin_get_reup_fields(context, type, fields, handle))
class PluginsTest(shared.PeruTest):
def setUp(self):
self.content = {"some": "stuff", "foo/bar": "baz"}
self.content_dir = shared.create_dir(self.content)
self.cache_root = shared.create_dir()
self.plugin_context = plugin.PluginContext(
cwd='.',
plugin_cache_root=self.cache_root,
parallelism_semaphore=asyncio.BoundedSemaphore(
plugin.DEFAULT_PARALLEL_FETCH_LIMIT),
plugin_cache_locks=defaultdict(asyncio.Lock),
tmp_root=shared.create_dir())
plugin.debug_assert_clean_parallel_count()
def tearDown(self):
plugin.debug_assert_clean_parallel_count()
def do_plugin_test(self, type, plugin_fields, expected_content, *,
fetch_dir=None):
fetch_dir = fetch_dir or shared.create_dir()
output = test_plugin_fetch(
self.plugin_context, type, plugin_fields, fetch_dir)
assert_contents(fetch_dir, expected_content)
return output
def test_git_plugin(self):
GitRepo(self.content_dir)
self.do_plugin_test("git", {"url": self.content_dir}, self.content)
def test_empty_git_rev(self):
empty_dir = shared.create_dir()
GitRepo(empty_dir)
self.do_plugin_test('git', {'url': empty_dir}, {})
def test_hg_plugin(self):
HgRepo(self.content_dir)
self.do_plugin_test("hg", {"url": self.content_dir}, self.content)
def test_svn_plugin(self):
repo = SvnRepo(self.content_dir)
self.do_plugin_test('svn', {'url': repo.url}, self.content)
def test_svn_plugin_reup(self):
repo = SvnRepo(self.content_dir)
plugin_fields = {'url': repo.url}
output = test_plugin_get_reup_fields(
self.plugin_context, 'svn', plugin_fields)
self.assertDictEqual({'rev': '1'}, output)
def test_git_plugin_with_submodule(self):
content_repo = GitRepo(self.content_dir)
# Git has a small bug: The .gitmodules file is always created with "\n"
# line endings, even on Windows. With core.autocrlf turned on, that
# causes a warning when the file is added/committed, because those line
# endings would get replaced with "\r\n" when the file was checked out.
# We can just turn autocrlf off for this test to silence the warning.
content_repo.run('git', 'config', 'core.autocrlf', 'false')
submodule_dir = shared.create_dir({'another': 'file'})
submodule_repo = GitRepo(submodule_dir)
content_repo.run(
'git', 'submodule', 'add', '-q', submodule_dir, 'subdir/')
content_repo.run('git', 'commit', '-m', 'submodule commit')
expected_content = self.content.copy()
expected_content['subdir/another'] = 'file'
with open(os.path.join(self.content_dir, '.gitmodules')) as f:
expected_content['.gitmodules'] = f.read()
self.do_plugin_test('git', {'url': self.content_dir}, expected_content)
# Now move the submodule forward. Make sure it gets fetched again.
shared.write_files(submodule_dir, {'more': 'stuff'})
submodule_repo.run('git', 'add', '-A')
submodule_repo.run('git', 'commit', '-m', 'more stuff')
subprocess.check_output(
['git', 'pull', '-q'],
cwd=os.path.join(self.content_dir, 'subdir'))
content_repo.run('git', 'commit', '-am', 'submodule update')
expected_content['subdir/more'] = 'stuff'
self.do_plugin_test('git', {'url': self.content_dir}, expected_content)
def test_git_plugin_multiple_fetches(self):
content_repo = GitRepo(self.content_dir)
head = content_repo.run('git', 'rev-parse', 'HEAD')
plugin_fields = {"url": self.content_dir, "rev": head}
output = self.do_plugin_test("git", plugin_fields, self.content)
self.assertEqual(output.count("git clone"), 1)
self.assertEqual(output.count("git fetch"), 0)
# Add a new file to the directory and commit it.
shared.write_files(self.content_dir, {'another': 'file'})
content_repo.run('git', 'add', '-A')
content_repo.run('git', 'commit', '-m', 'committing another file')
# Refetch the original rev. Git should not do a git-fetch.
output = self.do_plugin_test("git", plugin_fields, self.content)
self.assertEqual(output.count("git clone"), 0)
self.assertEqual(output.count("git fetch"), 0)
        # Now delete the rev field. Git should default to master and fetch.
del plugin_fields["rev"]
self.content["another"] = "file"
output = self.do_plugin_test("git", plugin_fields, self.content)
self.assertEqual(output.count("git clone"), 0)
self.assertEqual(output.count("git fetch"), 1)
def test_hg_plugin_multiple_fetches(self):
content_repo = HgRepo(self.content_dir)
head = content_repo.run(
'hg', 'identify', '--debug', '-r', '.'
).split()[0]
plugin_fields = {'url': self.content_dir, 'rev': head}
output = self.do_plugin_test('hg', plugin_fields, self.content)
self.assertEqual(output.count('hg clone'), 1)
self.assertEqual(output.count('hg pull'), 0)
# Add a new file to the directory and commit it.
shared.write_files(self.content_dir, {'another': 'file'})
content_repo.run('hg', 'commit', '-A', '-m', 'committing another file')
# Refetch the original rev. Hg should not do a pull.
output = self.do_plugin_test('hg', plugin_fields, self.content)
self.assertEqual(output.count('hg clone'), 0)
self.assertEqual(output.count('hg pull'), 0)
        # Now delete the rev field. Hg should default to 'default' and pull.
del plugin_fields['rev']
self.content['another'] = 'file'
output = self.do_plugin_test('hg', plugin_fields, self.content)
self.assertEqual(output.count('hg clone'), 0)
self.assertEqual(output.count('hg pull'), 1)
def test_git_plugin_reup(self):
repo = GitRepo(self.content_dir)
master_head = repo.run('git', 'rev-parse', 'master')
plugin_fields = {'url': self.content_dir}
# By default, the git plugin should reup from master.
expected_output = {'rev': master_head}
output = test_plugin_get_reup_fields(
self.plugin_context, 'git', plugin_fields)
self.assertDictEqual(expected_output, output)
# Add some new commits and make sure master gets fetched properly.
repo.run('git', 'commit', '--allow-empty', '-m', 'junk')
repo.run('git', 'checkout', '-q', '-b', 'newbranch')
repo.run('git', 'commit', '--allow-empty', '-m', 'more junk')
new_master_head = repo.run('git', 'rev-parse', 'master')
expected_output['rev'] = new_master_head
output = test_plugin_get_reup_fields(
self.plugin_context, 'git', plugin_fields)
self.assertDictEqual(expected_output, output)
# Now specify the reup target explicitly.
newbranch_head = repo.run('git', 'rev-parse', 'newbranch')
plugin_fields['reup'] = 'newbranch'
expected_output['rev'] = newbranch_head
output = test_plugin_get_reup_fields(
self.plugin_context, 'git', plugin_fields)
self.assertDictEqual(expected_output, output)
def test_hg_plugin_reup(self):
repo = HgRepo(self.content_dir)
default_tip = repo.run(
'hg', 'identify', '--debug', '-r', 'default'
).split()[0]
plugin_fields = {'url': self.content_dir}
# By default, the hg plugin should reup from default.
expected_output = {'rev': default_tip}
output = test_plugin_get_reup_fields(
self.plugin_context, 'hg', plugin_fields)
self.assertDictEqual(expected_output, output)
        # Add some new commits and make sure 'default' gets fetched properly.
shared.write_files(self.content_dir, {
'randomfile': "hg doesn't like empty commits"})
repo.run('hg', 'commit', '-A', '-m', 'junk')
shared.write_files(self.content_dir, {
'randomfile': "hg still doesn't like empty commits"})
repo.run('hg', 'branch', 'newbranch')
repo.run('hg', 'commit', '-A', '-m', 'more junk')
new_default_tip = repo.run(
'hg', 'identify', '--debug', '-r', 'default'
).split()[0]
expected_output['rev'] = new_default_tip
output = test_plugin_get_reup_fields(
self.plugin_context, 'hg', plugin_fields)
self.assertDictEqual(expected_output, output)
# Now specify the reup target explicitly.
newbranch_tip = repo.run(
'hg', 'identify', '--debug', '-r', 'tip'
).split()[0]
plugin_fields['reup'] = 'newbranch'
expected_output['rev'] = newbranch_tip
output = test_plugin_get_reup_fields(
self.plugin_context, 'hg', plugin_fields)
self.assertDictEqual(expected_output, output)
def test_curl_plugin_fetch(self):
curl_content = {'myfile': 'content'}
test_dir = shared.create_dir(curl_content)
test_url = (Path(test_dir) / 'myfile').as_uri()
fields = {'url': test_url}
self.do_plugin_test('curl', fields, curl_content)
# Run the test again with an explicit hash and an explicit filename.
digest = hashlib.sha1()
digest.update(b'content')
real_hash = digest.hexdigest()
fields['sha1'] = real_hash
fields['filename'] = 'newname'
self.do_plugin_test('curl', fields, {'newname': 'content'})
# Now run it with the wrong hash, and confirm that there's an error.
fields['sha1'] = 'wrong hash'
with self.assertRaises(plugin.PluginRuntimeError):
self.do_plugin_test('curl', fields, {'newname': 'content'})
def test_curl_plugin_fetch_archives(self):
for type in 'zip', 'tar':
fields = {
'url': (shared.test_resources / ('with_exe.' + type)).as_uri(),
'unpack': type,
}
fetch_dir = shared.create_dir()
self.do_plugin_test('curl', fields, {
'not_exe.txt': 'Not executable.\n',
'exe.sh': 'echo Executable.\n',
}, fetch_dir=fetch_dir)
shared.assert_not_executable(
os.path.join(fetch_dir, 'not_exe.txt'))
shared.assert_executable(os.path.join(fetch_dir, 'exe.sh'))
def test_curl_plugin_fetch_evil_archive(self):
# There are several evil archives checked in under tests/resources. The
# others are checked directly as part of test_curl_plugin.py.
fields = {
'url': (shared.test_resources / '.tar').as_uri(),
'unpack': 'tar',
}
with self.assertRaises(plugin.PluginRuntimeError):
self.do_plugin_test('curl', fields, {})
def test_curl_plugin_reup(self):
curl_content = {'myfile': 'content'}
test_dir = shared.create_dir(curl_content)
test_url = (Path(test_dir) / 'myfile').as_uri()
digest = hashlib.sha1()
digest.update(b'content')
real_hash = digest.hexdigest()
fields = {'url': test_url}
output = test_plugin_get_reup_fields(
self.plugin_context, 'curl', fields)
self.assertDictEqual({'sha1': real_hash}, output)
# Confirm that we get the same thing with a preexisting hash.
fields['sha1'] = 'preexisting junk'
output = test_plugin_get_reup_fields(
self.plugin_context, 'curl', fields)
self.assertDictEqual({'sha1': real_hash}, output)
def test_cp_plugin(self):
self.do_plugin_test("cp", {"path": self.content_dir}, self.content)
@unittest.skipIf(os.name == 'nt', 'the rsync plugin is written in bash')
def test_rsync_plugin(self):
self.do_plugin_test("rsync", {"path": self.content_dir}, self.content)
def test_empty_plugin(self):
self.do_plugin_test("empty", {}, {})
def test_missing_required_field(self):
# The 'url' field is required for git.
try:
self.do_plugin_test('git', {}, self.content)
except plugin.PluginModuleFieldError as e:
assert 'url' in e.message, 'message should mention missing field'
else:
assert False, 'should throw PluginModuleFieldError'
def test_unknown_field(self):
# The 'junk' field isn't valid for git.
bad_fields = {'url': self.content_dir, 'junk': 'junk'}
try:
self.do_plugin_test('git', bad_fields, self.content)
except plugin.PluginModuleFieldError as e:
assert 'junk' in e.message, 'message should mention bad field'
else:
assert False, 'should throw PluginModuleFieldError'
def test_user_defined_plugin(self):
plugin_prefix = 'peru/plugins/footype/'
fetch_file = plugin_prefix + 'fetch.py'
reup_file = plugin_prefix + 'reup.py'
plugin_yaml_file = plugin_prefix + 'plugin.yaml'
fake_config_dir = shared.create_dir({
fetch_file:
'#! /usr/bin/env python3\nprint("hey there!")\n',
reup_file: textwrap.dedent('''\
#! /usr/bin/env python3
import os
outfile = os.environ['PERU_REUP_OUTPUT']
print("name: val", file=open(outfile, 'w'))
'''),
plugin_yaml_file: textwrap.dedent('''\
sync exe: fetch.py
reup exe: reup.py
required fields: []
''')})
os.chmod(os.path.join(fake_config_dir, fetch_file), 0o755)
os.chmod(os.path.join(fake_config_dir, reup_file), 0o755)
fetch_dir = shared.create_dir()
        # We need to trick peru into loading plugins from the fake config
        # dir. We do this by setting an env var, which depends on the
        # platform.
if os.name == 'nt':
# Windows
config_path_variable = 'LOCALAPPDATA'
else:
# non-Windows
config_path_variable = 'XDG_CONFIG_HOME'
with temporary_environment(config_path_variable, fake_config_dir):
output = test_plugin_fetch(
self.plugin_context, 'footype', {}, fetch_dir)
self.assertEqual('hey there!\n', output)
output = test_plugin_get_reup_fields(
self.plugin_context, 'footype', {})
self.assertDictEqual({'name': 'val'}, output)
def test_no_such_plugin(self):
with self.assertRaises(plugin.PluginCandidateError):
test_plugin_fetch(
self.plugin_context, 'nosuchtype!', {}, os.devnull)
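# Temporarily sets an environment variable for the duration of the with
# block and restores the previous state on exit, even if the body raises.
# NOT_SET is a sentinel so an unset variable can be distinguished from one
# set to the empty string.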
@contextlib.contextmanager
def temporary_environment(name, value):
NOT_SET = object()
old_value = os.environ.get(name, NOT_SET)
os.environ[name] = value
try:
yield
finally:
if old_value is NOT_SET:
del os.environ[name]
else:
os.environ[name] = old_value
|
from django import (
forms,
)
from django.conf import (
settings,
)
from django.contrib import (
admin,
)
from django.db import (
models,
)
from adminsortable2.admin import (
SortableAdminMixin,
SortableInlineAdminMixin,
)
from django_simple_file_handler.file_types import (
CHECK_DOC,
CHECK_WEB_IMAGE,
)
from django_simple_file_handler.validators import (
CheckExtMIME,
)
from .models import (
BulkEmail,
EmailDocument,
EmailImage,
MonthlyStat,
SiteProfile,
Subscriber,
Subscription,
)
class BaseAdmin(admin.ModelAdmin):
actions = None
readonly_fields = [
'created',
'updated',
]
bottom_fieldsets = [
(
'Date and time information', {
'fields': [
'created',
'updated',
],
'classes': [
'collapse',
],
}
),
]
fieldsets = bottom_fieldsets
list_per_page = 20
class SiteProfileAdmin(BaseAdmin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.top_fieldsets = [
(
None, {
'fields': [
'protocol',
'domain',
'name',
]
}
),
]
self.fieldsets = self.top_fieldsets + self.bottom_fieldsets
search_fields = [
'protocol',
'domain',
'name',
]
list_display = [
'name',
'domain',
]
ordering = [
'name',
]
admin.site.register(
SiteProfile,
SiteProfileAdmin,
)
class SubscriptionAdmin(SortableAdminMixin, BaseAdmin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.readonly_fields = [
'subscriber_count',
'secret_key',
] + self.readonly_fields
self.top_fieldsets = [
(
None, {
'fields': [
'list_name',
'descriptive_text',
'publicly_visible',
'use_pages',
'subscriber_count',
]
}
),
(
'MailChimp sync', {
'fields': [
'mc_sync',
'mc_user',
'mc_api',
'mc_list',
'secret_key',
],
'classes': [
'collapse',
]
}
),
(
'Advanced settings', {
'fields': [
'email_directory',
'page_directory',
'associated_model',
],
'classes': [
'collapse',
]
}
),
]
self.fieldsets = self.top_fieldsets + self.bottom_fieldsets
search_fields = [
'list_name',
]
list_display = [
'list_name',
'subscriber_count',
'publicly_visible',
'list_link',
]
admin.site.register(
Subscription,
SubscriptionAdmin,
)
class SubscriberAdminForm(forms.ModelForm):
subscriptions = forms.ModelMultipleChoiceField(
queryset=Subscription.objects.order_by(
'list_name',
),
label='Subscriptions',
required=False,
widget=admin.widgets.FilteredSelectMultiple(
'subscriptions',
False,
)
)
class Meta:
model = Subscriber
exclude = [
'subscriber_key',
'mc_email',
'mc_synced',
]
class SubscriberAdmin(BaseAdmin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.readonly_fields = [
'subscription_lists',
] + self.readonly_fields
self.top_fieldsets = [
(
None, {
'fields': [
'first_name',
'last_name',
'subscriber_email',
'subscriptions',
]
}
),
]
self.fieldsets = self.top_fieldsets + self.bottom_fieldsets
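    # View-only requests fall back to the default ModelForm so that the
    # custom FilteredSelectMultiple subscriptions field is only offered to
    # users who can actually change the record.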
def get_form(self, request, obj=None, **kwargs):
if obj and not self.has_change_permission(request, obj):
return super().get_form(request, obj, **kwargs)
return SubscriberAdminForm
search_fields = [
'first_name',
'last_name',
'subscriber_email',
]
list_display = [
'subscriber_email',
'first_name',
'last_name',
'subscription_lists',
]
ordering = [
'subscriber_email',
]
admin.site.register(
Subscriber,
SubscriberAdmin,
)
def get_image_widths():
try:
width_choices = settings.EMAILER_IMAGE_WIDTHS
except AttributeError:
width_choices = [
(1200, 'Banner'),
(900, 'Large'),
(600, 'Medium'),
(300, 'Small'),
]
return width_choices
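# Example override (hypothetical widths and labels) in a project's
# settings.py; any list of (pixel-width, label) tuples should work:
#
#     EMAILER_IMAGE_WIDTHS = [
#         (1080, 'Full width'),
#         (540, 'Half width'),
#     ]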
class EmailImageInlineForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.fields['saved_file'].validators.append(
            CheckExtMIME(allowed_attributes=CHECK_WEB_IMAGE),
        )
image_width = forms.ChoiceField(
label='Image size',
choices=get_image_widths(),
)
class Meta:
exclude = []
class EmailImageInline(admin.StackedInline):
form = EmailImageInlineForm
model = EmailImage
fieldsets = [
(
None, {
'fields': [
'image_width',
'description',
'caption',
'saved_file',
]
}
),
]
formfield_overrides = {
models.CharField: {
'widget': forms.TextInput(
attrs={
'size': '95',
},
),
},
models.TextField: {
'widget': forms.Textarea(
attrs={
'rows': 3,
'cols': 95,
},
),
},
}
extra = 0
max_num = 1
class EmailDocumentInlineForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.fields['saved_file'].validators.append(
            CheckExtMIME(allowed_attributes=CHECK_DOC),
        )
class Meta:
exclude = []
class EmailDocumentInline(SortableInlineAdminMixin, admin.TabularInline):
form = EmailDocumentInlineForm
model = EmailDocument
fieldsets = [
(
None, {
'fields': [
'title',
'extra_text',
'saved_file',
'sort_order',
]
}
),
]
formfield_overrides = {
models.TextField: {
'widget': forms.Textarea(
attrs={
'rows': 1,
},
),
},
}
extra = 0
class BulkEmailAdminForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['subscription_list'].queryset = Subscription.objects.filter(
associated_model__contains=self.instance.__module__,
).filter(
associated_model__contains=self.instance.__class__.__name__,
)
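        # Restricting the queryset to Subscription rows whose associated_model
        # string mentions this instance's module and class keeps unrelated
        # lists out of the dropdown; empty_label=None below forces a concrete
        # choice.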
self.fields['subscription_list'].empty_label = None
class Meta:
model = BulkEmail
exclude = [
'sendable',
'sending',
'sent',
'send_history',
]
widgets = {
'headline': forms.TextInput(
attrs={
'size': '95',
},
),
'secondary_headline': forms.Textarea(
attrs={
'rows': 3,
'cols': 95,
},
),
'update_text': forms.Textarea(
attrs={
'rows': 3,
'cols': 95,
},
),
'publication_date': admin.widgets.AdminDateWidget,
'deletion_date': admin.widgets.AdminDateWidget,
}
class BulkEmailAdmin(BaseAdmin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.readonly_fields = [
'subscription_name',
'short_headline',
'page_preview',
'email_preview',
] + self.readonly_fields
self.top_fieldsets = [
(
None, {
'fields': [
'subscription_list',
'headline',
'secondary_headline',
'update_text',
'body_text',
]
}
),
]
self.middle_fieldsets = [
(
None, {
'fields': [
'published',
'publication_date',
'deletion_date',
]
}
),
]
        self.fieldsets = (
            self.top_fieldsets + self.middle_fieldsets + self.bottom_fieldsets
        )
def get_form(self, request, obj=None, **kwargs):
if obj and not self.has_change_permission(request, obj):
return super().get_form(request, obj, **kwargs)
return BulkEmailAdminForm
inlines = [
EmailImageInline,
EmailDocumentInline,
]
search_fields = [
'headline',
'body_text',
]
list_display = [
'short_headline',
'email_preview',
'sent',
'page_preview',
'published',
'subscription_name',
'publication_date',
'deletion_date',
]
ordering = [
'-publication_date',
'-created',
]
admin.site.register(
BulkEmail,
BulkEmailAdmin,
)
class MonthlyStatAdmin(BaseAdmin):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.readonly_fields = [
'month_and_year',
'stat_table',
] + self.readonly_fields
self.top_fieldsets = [
(
None, {
'fields': [
'month_and_year',
'stat_table',
]
}
),
]
self.fieldsets = self.top_fieldsets + self.bottom_fieldsets
list_display = [
'month_and_year',
]
ordering = [
'-year_int',
'-month_int',
]
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class Media:
css = {
'all': ('admin/css/django_simple_bulk_emailer.css',)
}
admin.site.register(
MonthlyStat,
MonthlyStatAdmin,
)
|
try:
import openbayestool
except ImportError:
openbayestool = None
else:
from openbayestool import log_param, log_metric, clear_metric
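# When openbayestool is importable, forward accuracy metrics and
# hyperparameters to it; otherwise the no-op stubs below let training code
# call these helpers unconditionally.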
if openbayestool:
def log_train_acc(acc):
log_metric('train acc max', max(acc))
log_metric('train acc new', acc[-1])
def log_test_acc(acc):
log_metric('test acc max', max(acc))
log_metric('test acc new', acc[-1])
def log_args(param):
log_param('epoch', param.epoch)
log_param('n-way', param.n_way)
log_param('k-spt', param.k_spt)
log_param('k-qry', param.k_qry)
log_param('img size', param.imgsz)
log_param('img classes', param.imgc)
log_param('task num', param.task_num)
log_param('meta learning rate', param.meta_lr)
log_param('update learning rate', param.update_lr)
log_param('update step', param.update_step)
log_param('update_step_test', param.update_step_test)
log_param('data', param.data)
else:
def log_train_acc(acc):
pass
def log_test_acc(acc):
pass
    def log_args(param):
pass
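# Minimal usage sketch (hypothetical values; any object exposing the
# attributes log_args reads will do, e.g. an argparse namespace):
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(
#         epoch=10, n_way=5, k_spt=1, k_qry=15, imgsz=84, imgc=3,
#         task_num=4, meta_lr=1e-3, update_lr=0.4, update_step=5,
#         update_step_test=10, data='omniglot')
#     log_args(args)
#     log_train_acc([0.5, 0.6])
#     log_test_acc([0.4, 0.55])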
|