text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python3
"""Read "source _ target..." lines from stdin and print two histograms:
edge-distance frequencies and per-source target-count frequencies for
sources in the range 101..highest."""
import sys

nums = {}
highest = 0
for line in sys.stdin:
    parts = line.strip().split()
    source = int(parts[0])
    # Column 1 is skipped; columns 2+ are the target ids.
    nums[source] = {int(v) for v in parts[2:]}
    highest = max(highest, source)

counts = {0: 0}
dists = {0: 0}
for i in range(101, highest + 1):
    targets = nums.get(i)
    if targets is None:
        # Sources with no input line count as having zero targets.
        counts[0] += 1
        continue
    counts[len(targets)] = counts.get(len(targets), 0) + 1
    for num in targets:
        dist = i - num
        dists[dist] = dists.get(dist, 0) + 1

for dist in dists:
    print('dist', dist, dists[dist])
for count in counts:
    print('count', count, counts[count])
|
import pygame
# RGB color constants (Spanish names: black, green, red, blue, white).
NEGRO = [0, 0, 0]
VERDE = [0, 255, 0]
ROJO = [255, 0, 0]
AZUL = [0, 0, 255]
BLANCO = [255, 255, 255]
def escalamiento(ptoPvte, puntos, tamanoEscala):
    """Scale every point in *puntos* about the pivot *ptoPvte*.

    Each point is translated so the pivot is the origin, multiplied by
    *tamanoEscala*, and translated back.  Returns a new list of [x, y]
    pairs; the inputs are not modified.
    """
    px, py = ptoPvte[0], ptoPvte[1]
    return [
        [(pt[0] - px) * tamanoEscala + px,
         (pt[1] - py) * tamanoEscala + py]
        for pt in puntos
    ]
if __name__ == '__main__':
    # Interactive demo: the user clicks five points; the pentagon is drawn
    # in white and a copy scaled 2x about the first clicked point in blue.
    pygame.init()
    pantalla = pygame.display.set_mode([640, 480])
    pygame.display.flip()
    flag = 0                 # number of points captured so far (0..5)
    puntosCapturados = []    # the clicked points of the current polygon
    puntoFin = []            # scaled polygon returned by escalamiento()
    pos = []                 # unused in the visible code
    fin = False
    while not fin:
        for event in pygame.event.get():
            pygame.display.flip()
            if event.type == pygame.QUIT:
                fin = True
            if event.type == pygame.MOUSEBUTTONDOWN:
                if flag < 5:
                    puntosCapturados.append(event.pos)
                    # Mark each captured point with a small green circle.
                    pygame.draw.circle(pantalla, VERDE, event.pos, 2)
                    flag += 1
                    pygame.display.flip()
                if flag == 5:
                    # Fifth click: draw original and 2x-scaled polygons,
                    # then reset to capture the next five points.
                    puntoFin = escalamiento(puntosCapturados[0], puntosCapturados, 2)
                    pygame.draw.polygon(pantalla, BLANCO, puntosCapturados, 1)
                    pygame.draw.polygon(pantalla, AZUL, puntoFin, 1)
                    puntoFin = []
                    puntosCapturados = []
                    flag = 0
                    pygame.display.flip()
|
"""Treadmill docker runtime."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .runtime import DockerRuntime
__all__ = [
'DockerRuntime'
]
|
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
from _eeg_systems import predefined_connectivity
|
import os
import click
from pprint import pprint
import logging
from treadmill.infra import constants, connection, vpc, subnet
from treadmill.infra.setup import ipa, ldap, node, cell
from treadmill.infra.utils import mutually_exclusive_option, cli_callbacks
from treadmill.infra.utils import security_group
from treadmill import cli
_LOGGER = logging.getLogger(__name__)
_OPTIONS_FILE = 'manifest'
def init():
    """Cloud CLI module"""
@click.group()
@click.option('--domain', required=True,
              envvar='TREADMILL_DNS_DOMAIN',
              callback=cli_callbacks.validate_domain,
              help='Domain for hosted zone')
@click.pass_context
def cloud(ctx, domain):
    """Manage Treadmill on cloud"""
    # Stash the validated DNS domain so every subcommand can read it.
    ctx.obj['DOMAIN'] = domain
@cloud.group()
@click.option('--proid', default='treadmld',
              show_default=True,
              help='Proid user for treadmill')
@click.pass_context
def configure(ctx, proid):
    """Configure Treadmill EC2 Objects"""
    # Propagate the proid to all `configure` subcommands via the context.
    ctx.obj['PROID'] = proid
@configure.command(name='vpc')
@click.option(
    '--name',
    required=True,
    help='VPC name',
    callback=cli_callbacks.validate_vpc_name
)
@click.option('--region', help='Region for the vpc')
@click.option('--vpc-cidr-block', default='172.23.0.0/16',
              show_default=True,
              help='CIDR block for the vpc')
@click.option('-m', '--' + _OPTIONS_FILE,
              cls=mutually_exclusive_option.MutuallyExclusiveOption,
              mutually_exclusive=['region',
                                  'vpc_cidr_block',
                                  'name'],
              help="Options YAML file. ")
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def configure_vpc(ctx, name, region, vpc_cidr_block,
                  manifest):
    """Configure Treadmill VPC"""
    domain = ctx.obj['DOMAIN']
    if region:
        connection.Connection.context.region_name = region
    connection.Connection.context.domain = domain
    _vpc = vpc.VPC.setup(
        name=name,
        cidr_block=vpc_cidr_block,
    )
    # NOTE(review): pprint() prints and returns None, so click.echo() here
    # only emits a blank line — pprint.pformat() was probably intended.
    click.echo(
        pprint(_vpc.show())
    )
@configure.command(name='ldap')
@click.option('--vpc-name', 'vpc_id',
              required=True,
              callback=cli_callbacks.convert_to_vpc_id,
              help='VPC name')
@click.option('--key', required=True, help='SSH Key Name')
@click.option('--name', required=True, help='LDAP Instance Name')
@click.option('--image', required=True,
              help='Image to use for instances e.g. RHEL-7.4')
@click.option('--subnet-name', help='Subnet Name for LDAP',
              required=True)
@click.option('--region', help='Region for the vpc')
@click.option('--instance-type',
              default=constants.INSTANCE_TYPES['EC2']['micro'],
              show_default=True,
              help='AWS ec2 instance type')
@click.option('--tm-release',
              callback=cli_callbacks.create_release_url,
              help='Treadmill release to use')
@click.option('--app-root', default='/var/tmp',
              show_default=True,
              help='Treadmill app root')
@click.option('--ldap-cidr-block', default='172.23.1.0/24',
              show_default=True,
              help='CIDR block for LDAP')
@click.option('--ipa-admin-password',
              callback=cli_callbacks.ipa_password_prompt,
              envvar='TREADMILL_IPA_ADMIN_PASSWORD',
              help='Password for IPA admin')
@click.option('-m', '--' + _OPTIONS_FILE,
              cls=mutually_exclusive_option.MutuallyExclusiveOption,
              # BUG FIX: a missing comma after 'ipa_admin_password' silently
              # concatenated it with 'ldap_cidr_block' into one bogus entry
              # ('ipa_admin_passwordldap_cidr_block'), so neither option was
              # actually mutually exclusive with --manifest.
              mutually_exclusive=['region',
                                  'vpc_name',
                                  'key',
                                  'name',
                                  'image',
                                  'subnet_name',
                                  'instance_type',
                                  'tm_release',
                                  'app_root',
                                  'ipa_admin_password',
                                  'ldap_cidr_block'],
              help="Options YAML file. ")
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def configure_ldap(ctx, vpc_id, key, name, image, subnet_name, region,
                   instance_type, tm_release, app_root,
                   ldap_cidr_block, ipa_admin_password, manifest):
    """Configure Treadmill LDAP: provision a single LDAP instance in the
    given VPC/subnet and echo the resulting subnet details."""
    domain = ctx.obj['DOMAIN']
    proid = ctx.obj['PROID']
    if region:
        connection.Connection.context.region_name = region
    connection.Connection.context.domain = domain
    _ldap = ldap.LDAP(
        name=name,
        vpc_id=vpc_id,
    )
    _ldap.setup(
        key=key,
        count=1,  # LDAP is always a single instance
        image=image,
        instance_type=instance_type,
        tm_release=tm_release,
        app_root=app_root,
        cidr_block=ldap_cidr_block,
        ipa_admin_password=ipa_admin_password,
        proid=proid,
        subnet_name=subnet_name
    )
    # NOTE(review): pprint() returns None — see configure_vpc; pformat()
    # was probably intended here as well.
    click.echo(
        pprint(_ldap.subnet.show())
    )
@configure.command(name='cell')
@click.option('--vpc-name', 'vpc_id',
              required=True,
              callback=cli_callbacks.convert_to_vpc_id,
              help='VPC Name')
@click.option('--key', required=True, help='SSH Key Name')
@click.option('--image', required=True,
              help='Image to use for new instances e.g. RHEL-7.4')
@click.option('--subnet-name', help='Cell(Subnet) Name',
              required=True)
@click.option('--count', default='3', type=int,
              show_default=True,
              help='Number of Treadmill masters to spin up')
@click.option('--region', help='Region for the vpc')
@click.option('--name', default='TreadmillMaster',
              show_default=True,
              help='Treadmill master name')
@click.option('--instance-type',
              default=constants.INSTANCE_TYPES['EC2']['micro'],
              show_default=True,
              help='AWS ec2 instance type')
@click.option('--tm-release',
              callback=cli_callbacks.create_release_url,
              help='Treadmill release to use')
@click.option('--app-root',
              default='/var/tmp',
              show_default=True,
              help='Treadmill app root')
@click.option('--cidr-block', default='172.23.0.0/24',
              show_default=True,
              help='CIDR block for the cell')
@click.option('--ipa-admin-password',
              callback=cli_callbacks.ipa_password_prompt,
              envvar='TREADMILL_IPA_ADMIN_PASSWORD',
              help='Password for IPA admin')
@click.option('-m', '--' + _OPTIONS_FILE,
              cls=mutually_exclusive_option.MutuallyExclusiveOption,
              # BUG FIX: the list referenced 'cell_subnet_name', which matches
              # no parameter of this command (the option above is declared as
              # 'subnet_name'), so --subnet-name was never actually mutually
              # exclusive with --manifest.
              mutually_exclusive=['region',
                                  'vpc_name',
                                  'name',
                                  'key',
                                  'count',
                                  'image',
                                  'instance_type',
                                  'tm_release',
                                  'app_root',
                                  'cidr_block',
                                  'subnet_name',
                                  'ipa_admin_password'],
              help="Options YAML file. ")
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def configure_cell(ctx, vpc_id, key, image, subnet_name,
                   count, region, name, instance_type, tm_release,
                   app_root, cidr_block,
                   ipa_admin_password, manifest):
    """Configure Treadmill Cell: provision the zookeeper ensemble and the
    master instances in the given subnet, then echo the cell details."""
    domain = ctx.obj['DOMAIN']
    proid = ctx.obj['PROID']
    if region:
        connection.Connection.context.region_name = region
    connection.Connection.context.domain = domain
    _cell = cell.Cell(
        vpc_id=vpc_id,
        subnet_name=subnet_name
    )
    result = {}
    # Zookeeper ensemble first — the masters depend on it.
    _cell.setup_zookeeper(
        name=constants.TREADMILL_ZOOKEEPER,
        key=key,
        count=count,
        image=image,
        instance_type=instance_type,
        subnet_cidr_block=cidr_block,
        ipa_admin_password=ipa_admin_password,
        proid=proid,
    )
    _cell.setup_master(
        name=name,
        key=key,
        count=count,
        image=image,
        instance_type=instance_type,
        tm_release=tm_release,
        app_root=app_root,
        subnet_cidr_block=cidr_block,
        ipa_admin_password=ipa_admin_password,
        proid=proid,
    )
    result['Cell'] = _cell.show()
    # NOTE(review): pprint() returns None — see configure_vpc.
    click.echo(
        pprint(result)
    )
@configure.command(name='domain')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              required=True, help='VPC Name')
@click.option('--key', required=True, help='SSH key name')
@click.option('--image', required=True,
              help='Image to use for new master instance e.g. RHEL-7.4')
@click.option('--subnet-name', help='Subnet Name', required=True)
@click.option('--name', default='TreadmillIPA',
              show_default=True,
              help='Name of the instance')
@click.option('--region', help='Region for the vpc')
@click.option('--subnet-cidr-block', help='Cidr block of subnet for IPA',
              show_default=True,
              default='172.23.2.0/24')
@click.option('--count', help='Count of the instances',
              show_default=True,
              default=1)
@click.option('--ipa-admin-password',
              callback=cli_callbacks.validate_ipa_password,
              envvar='TREADMILL_IPA_ADMIN_PASSWORD',
              help='Password for IPA admin')
@click.option('--tm-release',
              callback=cli_callbacks.create_release_url,
              help='Treadmill Release')
@click.option('--instance-type',
              default=constants.INSTANCE_TYPES['EC2']['medium'],
              show_default=True,
              help='Instance type')
@click.option('-m', '--' + _OPTIONS_FILE,
              cls=mutually_exclusive_option.MutuallyExclusiveOption,
              # BUG FIX: a missing comma after 'subnet_cidr_block' silently
              # concatenated it with 'subnet_name' into one bogus entry, so
              # neither option was actually mutually exclusive with --manifest.
              # NOTE(review): this list uses 'vpc_id' while the sibling
              # commands use 'vpc_name' — confirm which form
              # MutuallyExclusiveOption matches against.
              mutually_exclusive=['region',
                                  'vpc_id',
                                  'name',
                                  'key',
                                  'count',
                                  'image',
                                  'instance_type',
                                  'tm_release',
                                  'subnet_cidr_block',
                                  'subnet_name',
                                  'ipa_admin_password'],
              help="Options YAML file. ")
@click.pass_context
def configure_domain(ctx, vpc_id, key, image, subnet_name, name, region,
                     subnet_cidr_block, count, ipa_admin_password,
                     tm_release, instance_type, manifest):
    """Configure Treadmill Domain (IPA)

    NOTE(review): unlike the sibling commands this one is not wrapped in
    @cli.ON_CLI_EXCEPTIONS — confirm whether that is intentional.
    """
    domain = ctx.obj['DOMAIN']
    proid = ctx.obj['PROID']
    connection.Connection.context.domain = domain
    if region:
        connection.Connection.context.region_name = region
    # Fall back to the environment, then to an interactive prompt.
    if not ipa_admin_password:
        ipa_admin_password = os.environ.get(
            'TREADMILL_IPA_ADMIN_PASSWORD',
            click.prompt(
                'Create IPA admin password ',
                hide_input=True,
                confirmation_prompt=True
            )
        )
    _ipa = ipa.IPA(name=name, vpc_id=vpc_id)
    _ipa.setup(
        count=count,
        ipa_admin_password=ipa_admin_password,
        tm_release=tm_release,
        key=key,
        instance_type=instance_type,
        image=image,
        cidr_block=subnet_cidr_block,
        proid=proid,
        subnet_name=subnet_name
    )
    # NOTE(review): pprint() returns None — see configure_vpc.
    click.echo(
        pprint(_ipa.show())
    )
@configure.command(name='node')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              required=True, help='VPC Name')
@click.option('--key', required=True, help='SSH Key Name')
@click.option('--image', required=True,
              help='Image to use for new node instance e.g. RHEL-7.4')
@click.option('--subnet-name', required=True, help='Cell(Subnet) Name')
@click.option('--region', help='Region for the vpc')
@click.option('--name', default='TreadmillNode',
              show_default=True,
              help='Node name')
@click.option('--instance-type',
              default=constants.INSTANCE_TYPES['EC2']['large'],
              show_default=True,
              help='AWS ec2 instance type')
@click.option('--tm-release',
              callback=cli_callbacks.create_release_url,
              help='Treadmill release to use')
@click.option('--app-root', default='/var/tmp/treadmill-node',
              show_default=True,
              help='Treadmill app root')
@click.option('--ipa-admin-password',
              callback=cli_callbacks.ipa_password_prompt,
              envvar='TREADMILL_IPA_ADMIN_PASSWORD',
              help='Password for IPA admin')
@click.option('--with-api', required=False, is_flag=True,
              show_default=True,
              default=False, help='Provision node with Treadmill APIs')
@click.option('-m', '--' + _OPTIONS_FILE,
              cls=mutually_exclusive_option.MutuallyExclusiveOption,
              # BUG FIX: a missing comma after 'ipa_admin_password' silently
              # concatenated it with 'with_api' into one bogus entry, so
              # neither option was actually mutually exclusive with --manifest.
              mutually_exclusive=['region',
                                  'vpc_name',
                                  'name',
                                  'key',
                                  'image',
                                  'instance_type',
                                  'tm_release',
                                  'app_root',
                                  'subnet_name',
                                  'ipa_admin_password',
                                  'with_api'],
              help="Options YAML file. ")
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def configure_node(ctx, vpc_id, key, image, subnet_name, region, name,
                   instance_type, tm_release, app_root,
                   ipa_admin_password, with_api, manifest):
    """Configure new Node in Cell"""
    domain = ctx.obj['DOMAIN']
    proid = ctx.obj['PROID']
    connection.Connection.context.domain = domain
    if region:
        connection.Connection.context.region_name = region
    # Fall back to the environment, then to an interactive prompt.
    if not ipa_admin_password:
        ipa_admin_password = os.environ.get(
            'TREADMILL_IPA_ADMIN_PASSWORD',
            click.prompt('IPA admin password ', hide_input=True)
        )
    _node = node.Node(name, vpc_id)
    _node.setup(
        key=key,
        image=image,
        instance_type=instance_type,
        tm_release=tm_release,
        app_root=app_root,
        ipa_admin_password=ipa_admin_password,
        with_api=with_api,
        proid=proid,
        subnet_name=subnet_name
    )
    # NOTE(review): pprint() returns None — see configure_vpc.
    click.echo(
        pprint(_node.subnet.show())
    )
@cloud.group()
def delete():
    """Delete Treadmill EC2 Objects"""
    # Pure command group; subcommands do the work.
    pass
@delete.command(name='vpc')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              required=True, help='VPC Name')
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def delete_vpc(ctx, vpc_id):
    """Delete VPC"""
    # --vpc-name is converted to the VPC id by the callback.
    domain = ctx.obj['DOMAIN']
    connection.Connection.context.domain = domain
    vpc.VPC(id=vpc_id).delete()
@delete.command(name='cell')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              required=True, help='VPC Name')
@click.option('--subnet-name', required=True,
              help='Subnet Name of cell')
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def delete_cell(ctx, vpc_id, subnet_name):
    """Delete Cell (Subnet)"""
    connection.Connection.context.domain = ctx.obj['DOMAIN']
    subnet.Subnet(vpc_id=vpc_id, name=subnet_name).destroy()
@delete.command(name='domain')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              required=True, help='VPC Name')
@click.option('--subnet-name',
              required=True, help='Subnet Name of Domain')
@click.option('--name', help='Name of Instance',
              show_default=True,
              default="TreadmillIPA")
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def delete_domain(ctx, vpc_id, subnet_name, name):
    """Delete IPA"""
    connection.Connection.context.domain = ctx.obj['DOMAIN']
    _ipa = ipa.IPA(name=name, vpc_id=vpc_id)
    _ipa.destroy(subnet_name=subnet_name)
@delete.command(name='ldap')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              required=True, help='VPC Name')
@click.option('--subnet-name',
              help='Subnet Name of LDAP')
@click.option('--name', help='Name of Instance',)
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def delete_ldap(ctx, vpc_id, subnet_name, name):
    """Delete LDAP"""
    # NOTE(review): unlike delete_domain, --subnet-name and --name are
    # optional here — confirm LDAP()/destroy() handle None values.
    connection.Connection.context.domain = ctx.obj['DOMAIN']
    _ldap = ldap.LDAP(name=name, vpc_id=vpc_id)
    _ldap.destroy(subnet_name=subnet_name)
@delete.command(name='node')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              required=True, help='VPC Name')
@click.option('--name', help='Instance Name', required=False)
@click.option('--instance-id', help='Instance ID', required=False)
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def delete_node(ctx, vpc_id, name, instance_id):
    """Delete Node.

    Requires either --name or --instance-id to identify the instance.
    """
    domain = ctx.obj['DOMAIN']
    if not name and not instance_id:
        # BUG FIX: the implicitly-concatenated string literals were missing
        # a separating space, logging "...--instance-id ofNode Instance...".
        _LOGGER.error('Provide either --name or --instance-id of '
                      'Node Instance and try again.')
        return
    connection.Connection.context.domain = domain
    _node = node.Node(name=name, vpc_id=vpc_id)
    _node.destroy(instance_id=instance_id)
@cloud.group('list')
def _list():
    """Show Treadmill Cloud Resources"""
    # Named `_list` to avoid shadowing the builtin; the CLI name is 'list'.
    pass
@_list.command(name='vpc')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              help='VPC Name')
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def vpc_resources(ctx, vpc_id):
    """Show VPC(s)"""
    domain = ctx.obj['DOMAIN']
    connection.Connection.context.domain = domain
    if vpc_id:
        # Show a single VPC in detail.
        # NOTE(review): pprint() prints and returns None, so `result` is
        # always None here and click.echo() emits a blank line.
        result = pprint(vpc.VPC(id=vpc_id).show())
        click.echo(result)
    else:
        # No VPC given: list id/name of every VPC.
        _vpcs = vpc.VPC.all()
        result = list(map(lambda v: {'id': v.id, 'name': v.name}, _vpcs))
        click.echo({'Vpcs': result})
@_list.command(name='cell')
@click.option('--vpc-name', 'vpc_id',
              callback=cli_callbacks.convert_to_vpc_id,
              help='VPC Name')
@click.option('--subnet-name',
              help='Subnet Name of cell')
@click.pass_context
@cli.ON_CLI_EXCEPTIONS
def cell_resources(ctx, vpc_id, subnet_name):
    """Show Cell"""
    domain = ctx.obj['DOMAIN']
    connection.Connection.context.domain = domain
    if subnet_name:
        # A specific subnet was requested: show it and stop.
        click.echo(
            pprint(
                subnet.Subnet(name=subnet_name, vpc_id=vpc_id).show()
            )
        )
        return
    # Otherwise list cells for the given VPC, or for all VPCs.
    if vpc_id:
        vpcs = [vpc_id]
    else:
        vpcs = [_vpc.id for _vpc in vpc.VPC.all()]
    result = []
    for v in vpcs:
        subnets = vpc.VPC(id=v).list_cells()
        if subnets:
            result.append({
                'VpcId': v,
                'Subnets': subnets
            })
    click.echo(pprint(result))
@cloud.group()
def port():
    """enable/disable EC2 instance port"""
    pass
@port.command(name='enable')
@click.option(
    '-a', '--anywhere', is_flag=True,
    default=True,
    show_default=True,
    help='From Anywhere?'
)
@click.option('--protocol', help='Protocol',
              show_default=True,
              default='tcp')
@click.option('-p', '--port', required=True, help='Port')
@click.option('-s', '--security-group-id', required=True,
              help='Security Group ID')
def enable_port(security_group_id, port, protocol, anywhere):
    """Enable Port from my ip"""
    # NOTE(review): --anywhere defaults to True, so the flag cannot be
    # turned off from the CLI — confirm whether that is intended.
    security_group.enable(port, security_group_id, protocol, anywhere)
@port.command(name='disable')
@click.option(
    '-a', '--anywhere',
    is_flag=True,
    default=True,
    show_default=True,
    help='From Anywhere?'
)
@click.option('--protocol', help='Protocol',
              show_default=True,
              default='tcp')
@click.option('-p', '--port', required=True, help='Port')
@click.option('-s', '--security-group-id', required=True,
              help='Security Group ID')
def disable_port(security_group_id, port, protocol, anywhere):
    """Disable Port from my ip"""
    security_group.disable(port, security_group_id, protocol, anywhere)
return cloud
|
from django.core.management.commands.runserver import BaseRunserverCommand
class Command(BaseRunserverCommand):
    # Subclasses Django's base runserver command unchanged except for the
    # help text; BaseRunserverCommand does not serve static files itself.
    help = "Starts a lightweight Web server for development without automatically serving static files."
|
__author__ = 'Hanzhiyun'
# Demonstrates basic expressions and the print()/type() builtins.
print(3+7)          # 10
print(type(3+7))    # <class 'int'>
print(2-1)          # 1
print("this is a chunk of text")
print(type("this is a chunk of text"))  # <class 'str'>
class Solution(object):
    def firstBadVersion(self, n):
        """Return the first bad version in 1..n.

        https://leetcode.com/problems/first-bad-version/
        Relies on the externally provided isBadVersion(version) API and
        assumes at least one bad version exists in 1..n.

        Classic lower-bound binary search: one API call per iteration
        (the original probed both mid and mid + 1 on every loop, doubling
        the number of API calls).
        """
        low, high = 1, n
        while low < high:
            mid = (low + high) // 2
            if isBadVersion(mid):
                # mid may itself be the first bad version — keep it in range.
                high = mid
            else:
                # Everything up to and including mid is good.
                low = mid + 1
        return low
from src.util.Logging import info
from src.layout.PlannedGraph import PlannedGraph
import src.plan.GraphUtils as GraphUtils
import networkx as nx
from difflib import SequenceMatcher
from collections import OrderedDict
import random
def LCS(str1, str2):
    """Return the longest contiguous common block of *str1* and *str2*.

    Result is a difflib Match (fields a, b, size), or None when the two
    sequences share nothing.
    """
    matcher = SequenceMatcher(None, str1, str2)
    longest = matcher.find_longest_match(0, len(str1), 0, len(str2))
    return longest if longest.size != 0 else None
def get_node_id_mapping(sourceIds, targetIds, match):
    """Build an ordered source-id -> target-id mapping over a match window.

    *match* is a difflib-style Match (fields a, b, size) aligning a run of
    *sourceIds* with a run of *targetIds*; returns None when *match* is None.
    """
    if match is None:  # was `match == None`; identity test is the idiom
        return None
    mapping = OrderedDict()
    for i in range(match.size):
        mapping[sourceIds[match.a + i]] = targetIds[match.b + i]
    return mapping
class GraphAssembler:
    """Holds the raw nodes/edges/regions and the graph under construction."""

    def __init__(self, nodes, edges, regions, dimensions):
        # nodes: mapping keyed by node name (iterated by name below).
        self.nodes = nodes
        self.edges = edges
        self.regions = regions
        self.dimensions = dimensions
        # Per-name instance counter used when materializing node instances.
        self.node_counts = {node_name: 0 for node_name in self.nodes}
        # NOTE(review): nx.OrderedMultiDiGraph was removed in networkx 3.x;
        # this code appears to require networkx < 3 — confirm the pin.
        self.graph = nx.OrderedMultiDiGraph()
        # Set once a loop (identity) region has been merged in.
        self.graph_contains_loops = False
class MergeAssembler(GraphAssembler):
    """Assembler that merges per-region fact subgraphs into one graph by
    finding longest-common-substring overlaps between region paths."""

    def create_fact_subgraphs(self):
        # Build one subgraph per region from the shared node/edge pools.
        for region in self.regions:
            region.construct_region_graph(self.nodes, self.edges, self.node_counts)

    def node_ids_to_lcs_string(self, node_ids, graph):
        # Flatten a node-id path into two parallel "strings" for LCS:
        # id_string alternates node-id, edge-id, node-id, ...; name_string
        # carries node names (edge ids stand for themselves) so that LCS
        # matches by name while the mapping is recovered by id.
        if len(node_ids) < 1:
            return ([], [])
        node = graph.nodes[node_ids[0]]['data']
        id_string = [node_ids[0]]
        name_string = [node.node_name]
        for i in range(len(node_ids) - 1):
            nodeA_id = node_ids[i]
            nodeB_id = node_ids[i + 1]
            edge_data = graph.get_edge_data(nodeA_id, nodeB_id)
            # Multigraph: arbitrarily take the first parallel edge's key.
            edge_id = list(dict(edge_data).keys())[0]
            nodeB_data = graph.nodes[nodeB_id]['data']
            id_string.append(edge_id)
            name_string.append(edge_id)
            id_string.append(nodeB_id)
            name_string.append(nodeB_data.node_name)
        return (id_string, name_string)

    def get_max_subgraph_mapping(self, graph, subGraph, paths, cutoff = None):
        # Find the largest mapping from the region subgraph's path(s) onto
        # some simple path of the assembled graph, via name-level LCS.
        pathAIds = paths[0]
        pathBIds = [] if len(paths) < 2 else paths[1]
        cutoff = max(len(pathAIds), len(pathBIds)) if cutoff == None else cutoff
        (stringAIds, stringANames) = self.node_ids_to_lcs_string(pathAIds, subGraph)
        (stringBIds, stringBNames) = self.node_ids_to_lcs_string(pathBIds, subGraph)
        mappings_A = []
        mappings_B = []
        # Enumerate every simple path (plus each single node) in the graph
        # and collect candidate id mappings for path A and path B.
        for node in graph:
            targets = [x for x in graph if x != node]
            nx_paths = nx.all_simple_paths(graph, source = node, target = targets, cutoff = cutoff)
            all_paths = list(nx_paths) + [[node]]
            for path in all_paths:
                (pathStringIds, pathStringNames) = self.node_ids_to_lcs_string(path, graph)
                matchA = LCS(stringANames, pathStringNames)
                matchB = LCS(stringBNames, pathStringNames)
                mapping_A = get_node_id_mapping(stringAIds, pathStringIds, matchA)
                mapping_B = get_node_id_mapping(stringBIds, pathStringIds, matchB)
                if mapping_A != None:
                    mappings_A.append(mapping_A)
                if mapping_B != None:
                    mappings_B.append(mapping_B)
        maximum_mapping = {}
        if len(mappings_A) + len(mappings_B) > 0:
            maximum_mapping = max({}, *mappings_A, *mappings_B, key = len)
        ties = [maximum_mapping]
        # Try to combine an A-mapping with a compatible B-mapping; track
        # size ties so the preference rule below can pick among them.
        for mapA in mappings_A:
            for mapB in mappings_B:
                if self.is_mapping_compatible(mapA, mapB, pathAIds, stringAIds, stringBIds):
                    new_max = OrderedDict(list(mapA.items()) + list(mapB.items()))
                    if len(new_max) > len(maximum_mapping):
                        maximum_mapping = new_max
                        ties = [maximum_mapping]
                    elif len(new_max) == len(maximum_mapping):
                        ties.append(new_max)
        # prefer merge if it's the entirety of either path
        for m in ties:
            # Keys alternate node-id, edge-id, ...; [::2] recovers node ids.
            x = list(m.keys())[::2]
            if x == pathAIds or x == pathBIds:
                return m
        return maximum_mapping

    def is_mapping_compatible(self, mapA, mapB, pathAIds, stringA, stringB):
        # Two partial mappings may be combined only if they touch (share a
        # node name), agree on shared keys, leave no gaps, and do not start
        # along the same route.
        maps_adjacent = False
        for ka in mapA.keys():
            for kb in mapB.keys():
                # Ids look like "<name>-<n>"; compare by base name.
                nameA = ka.split('-')[0]
                nameB = kb.split('-')[0]
                if nameA == nameB:
                    maps_adjacent = True
                    break
        if not maps_adjacent:
            return False
        caiusA = set(mapA.keys())
        caiusB = set(mapB.keys())
        keys_union = caiusA.union(caiusB)
        if len(stringA) > 2:
            # Reject combined mappings with a hole: node, missing edge, node.
            for i in range(len(stringA) - 2):
                if stringA[i] in keys_union and stringA[i + 2] in keys_union and not stringA[i + 1] in keys_union:
                    return False
        # Overlapping keys must map to identical targets.
        for k in caiusA.intersection(caiusB):
            if mapA[k] != mapB[k]:
                return False
        listA = list(mapA)
        listB = list(mapB)
        if listA[0] == pathAIds[0]:
            if len(listA) > 1 and len(listB) > 1 and listA[1] == listB[1]:
                # must take different routes initially
                return False
        # if listA[-1] == pathAIds[-1]:
        #     if len(listA) > 1 and len(listB) > 1 and listA[-2] == listB[-2]:
        #         # must take different routes initially
        #         return False
        elif listA[0] == listB[0]: # if it's not the path start node, that can't share a start node
            return False
        return True

    def merge_fact_subgraph(self, graph, subGraph, mapping):
        # TODO fix merging - doesn't always respect facts in reverse ordered regions
        # Copy subGraph into graph, renaming ids through `mapping`; nodes
        # already present only get their region sets unioned.
        for node_id in subGraph:
            mapped_id = node_id if not node_id in mapping else mapping[node_id]
            if mapped_id not in graph:
                node_data = subGraph.nodes[node_id]
                graph.add_node(mapped_id, data = node_data['data'], pos = node_data['pos'], regions = node_data['regions'])
            else:
                regionsA = subGraph.nodes[node_id]['regions']
                regionsB = graph.nodes[mapped_id]['regions']
                graph.nodes[mapped_id]['regions'] = regionsA | regionsB
        for start, end, edge_id, edge_data in subGraph.edges(keys = True, data = 'data'):
            mapped_start = start if not start in mapping else mapping[start]
            mapped_end = end if not end in mapping else mapping[end]
            graph.add_edge(mapped_start, mapped_end, edge_id, data = edge_data)
        return graph

    def merge_regions(self, graph, regions):
        # Merge each region's subgraph into `graph` using the best mapping.
        for region in regions:
            subgraph = region.graph
            # loops break facts if there are already loops present, so only allow single node merges
            cutoff = 0 if region.is_identity() and self.graph_contains_loops else None
            if region.is_identity():
                self.graph_contains_loops = True
            mapping = self.get_max_subgraph_mapping(graph, region.graph, region.path_node_ids, cutoff = cutoff)
            region.apply_node_mapping(mapping) # TODO
            self.merge_fact_subgraph(graph, subgraph, mapping)

    def get_random_node_instance(self, graph, node_name):
        # Pick a random already-placed instance of `node_name`, or None.
        nodes = [x for x, y in graph.nodes(data = 'data') if y.node_name == node_name]
        if len(nodes) == 0:
            return None
        return random.choice(nodes)

    def add_unused_edges(self):
        # TODO improve nonregion edges
        # Attach edges that belong to no region, anchoring each to an
        # existing start instance when one exists, else an end instance.
        self.nonregion_edges = [eid for eid in self.edges if not any(eid in r.edge_set() for r in self.regions)]
        for edge_id in self.nonregion_edges:
            edge = self.edges[edge_id]
            start_node_name = edge.start.node_name
            end_node_name = edge.end.node_name
            start_node_id = self.get_random_node_instance(self.graph, start_node_name)
            end_node_id = None
            if start_node_id == None:
                end_node_id = self.get_random_node_instance(self.graph, end_node_name)
            GraphUtils.add_edge(self.graph, edge, self.node_counts, start = start_node_id, end = end_node_id)

    def getGraph(self):
        # Assemble the full graph: big pair-regions first, stray edges,
        # then loop regions last (see merge_regions loop caveat).
        if len(self.regions) > 0:
            self.create_fact_subgraphs()
            loop_regions = filter(lambda x : x.is_identity(), self.regions)
            pair_regions = filter(lambda x : not x.is_identity(), self.regions)
            sort_options = {'key': lambda x : len(x.graph), 'reverse': True}
            self.merge_regions(self.graph, list(sorted(pair_regions, **sort_options)))
            self.add_unused_edges()
            self.merge_regions(self.graph, list(sorted(loop_regions, **sort_options)))
        else:
            self.add_unused_edges()
        N = len(self.graph.nodes)
        E = len(self.graph.edges)
        info('Constructed graph with %s nodes and %s edges' % (N, E))
        return PlannedGraph(self.graph, self.regions, self.dimensions)
# coding=utf-8
"""Small Python-basics demo: input, printing, slicing, loops, functions.

BUG FIX: this was Python 2 code mangled by a failed 2-to-3 conversion —
every `print` was split onto its own line, leaving the following
expression as a silent no-op, and `raw_input` does not exist in
Python 3.  Rewritten with input() and print() calls.
"""
import sys

user = input('Enter login name:')
# Enter login name:root
print('Your login is:', user)
print(3 ** 2)
pystr = 'python'
print(pystr[1:])
aList = [1, 2, 3, 4]
print(aList)
# Original Python 2 line ended with a trailing comma (no newline);
# replicated with end=' '.
print(aList[2:], end=' ')
if 2 <= 4:
    print('this is using if')
counter = 0
while counter < 3:
    print('loop#%d' % (counter))
    counter += 1
def add(x):
    return (x + x)
function = add(4.25)
print(function)
sys.stdout.write('Hello World\n')
|
##
## connect the object detections in multiple video frames
## obtained by a pretrained object detector
##
## Author: Abhishek Dutta <adutta@robots.ox.ac.uk>
## Date: 10 Dec. 2018
##
import threading
import os
import csv
import json # for debug
import cv2
import numpy as np
import math
import urllib.request # to download model file
import shutil # to copy files
import svt.models as models
import torch
from functools import partial
import pickle
from torch.autograd import Variable
import torch.nn.functional as F
class siamrpn_tracker():
def __init__(self, model_path=None, config=None):
    """Create a SiamRPN tracker.

    :param model_path: filesystem path of the pretrained model (required;
        raises ValueError when None).
    :param config: dict of settings; keys read here and in helpers include
        'download_model_if_missing', 'model_url', 'force_model_download',
        'gpu_id' and 'verbose'.
    """
    if model_path is None:
        raise ValueError('model_path must be provided')
    else:
        self.model_path = model_path
    self.state = {} # state of the tracker initialized with a template
    self.pretrained_model = {} # placeholder for pretrained model loaded into CPU or GPU
    self.config = config
    # Provisional default; _setup_gpu() below sets the real value.
    self.use_gpu = True
    if self.config['download_model_if_missing']:
        self._download_model_if_missing(model_url=self.config['model_url'],
                                        model_path=self.model_path,
                                        force_update=self.config['force_model_download'])
    self._setup_gpu()
    self._preload_model();
def init_tracker(self, template_img, template_bbox):
    """Reset tracker state and initialize it with a template image + box.

    NOTE(review): the bbox fields are read from indices 1..4 (index 0 is
    skipped), so callers apparently pass [label_or_id, x, y, w, h] —
    confirm against the caller.
    """
    self.state = {}
    template_bbox = [ int(template_bbox[1]),
                      int(template_bbox[2]),
                      int(template_bbox[3]),
                      int(template_bbox[4]) ]
    ## Initialize state with pytorch model
    self.state['model'] = self.pretrained_model;
    self._init_tracker_with_template(template_img, template_bbox);
def track(self, search):
    """Track the template in frame *search*.

    Updates self.state in place and returns
    (new_target_position, new_target_size, best_score).
    """
    # Context margin around the previous target, then the side of the
    # square crop that the model's search window is scaled from.
    context_length = 0.5 * ( self.state['target_size'][0] + self.state['target_size'][1] )
    search_square_crop_size = np.sqrt( (self.state['target_size'][0] + context_length) * (self.state['target_size'][1] + context_length) )
    scale_z = self.state['model_template_size'] / search_square_crop_size
    search_pad = ( self.state['model_search_size'] - self.state['model_template_size'] ) / 2
    search_pad_scaled = search_pad / scale_z
    search_length = self._round_python27( search_square_crop_size + 2*search_pad_scaled )
    search_subwindow = self._transform_img_for_model_input(search,
                                                           self.state['target_position'],
                                                           self.state['model_search_size'],
                                                           search_length,
                                                           self.state['template_img_channel_avg'])
    # track object
    search_subwindow_tensor = Variable( search_subwindow.unsqueeze(0) )
    if self.use_gpu:
        score, delta = self.state['model'].track( search_subwindow_tensor.cuda() )
    else:
        score, delta = self.state['model'].track( search_subwindow_tensor )
    search_pos = self.state['target_position']
    search_size = self.state['target_size'] * scale_z
    # Flatten network outputs: delta -> (4, N) box regressions,
    # score -> (N,) foreground probabilities after softmax.
    delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1).data.cpu().numpy()
    score = F.softmax(score.permute(1, 2, 3, 0).contiguous().view(2, -1).permute(1, 0), dim=1).data[:, 1].cpu().numpy()
    # Decode the regression offsets relative to the anchor boxes.
    delta[0, :] = delta[0, :] * self.state['anchors'][:, 2] + self.state['anchors'][:, 0]
    delta[1, :] = delta[1, :] * self.state['anchors'][:, 3] + self.state['anchors'][:, 1]
    delta[2, :] = np.exp(delta[2, :]) * self.state['anchors'][:, 2]
    delta[3, :] = np.exp(delta[3, :]) * self.state['anchors'][:, 3]
    def change(r):
        # Symmetric penalty helper: treats r and 1/r alike.
        return np.maximum(r, 1./r)
    def sz(w, h):
        pad = (w + h) * 0.5
        sz2 = (w + pad) * (h + pad)
        return np.sqrt(sz2)
    def sz_wh(wh):
        pad = (wh[0] + wh[1]) * 0.5
        sz2 = (wh[0] + pad) * (wh[1] + pad)
        return np.sqrt(sz2)
    # size penalty
    s_c = change( sz(delta[2, :], delta[3, :]) / sz_wh(search_size) ) # scale penalty
    r_c = change( (search_size[0] / search_size[1]) / (delta[2, :] / delta[3, :]) ) # ratio penalty
    penalty = np.exp(-(r_c * s_c - 1) * self.state['penalty_k'])
    pscore = penalty * score
    # window float
    pscore = pscore * (1 - self.state['window_influence']) + self.state['window'] * self.state['window_influence']
    best_pscore_id = np.argmax(pscore)
    target = delta[:, best_pscore_id] / scale_z
    search_size = search_size / scale_z
    lr = penalty[best_pscore_id] * score[best_pscore_id] * self.state['lr'] # lr for OTB
    # NOTE(review): `lr` above is computed but never used — the size
    # updates below use the constant self.state['lr'] instead; confirm
    # which learning rate is intended.
    res_x = target[0] + search_pos[0]
    res_y = target[1] + search_pos[1]
    res_w = search_size[0] * (1 - self.state['lr']) + target[2] * self.state['lr']
    res_h = search_size[1] * (1 - self.state['lr']) + target[3] * self.state['lr']
    new_target_position = np.array([res_x, res_y])
    new_target_size = np.array([res_w, res_h])
    # Clamp the position to the image and the size to a 10px minimum.
    new_target_position[0] = max(0, min(self.state['image_width'] , new_target_position[0]))
    new_target_position[1] = max(0, min(self.state['image_height'], new_target_position[1]))
    new_target_size[0] = max(10, min(self.state['image_width'], new_target_size[0]))
    new_target_size[1] = max(10, min(self.state['image_height'], new_target_size[1]))
    # update state
    self.state['target_position'] = new_target_position
    self.state['target_size'] = new_target_size
    self.state['score'] = score[best_pscore_id]
    self.state['track_count'] = self.state['track_count'] + 1
    return new_target_position, new_target_size, score[best_pscore_id]
def _download_model_if_missing(self, model_url, model_path, force_update=False):
try:
if force_update:
self._download_latest_model(model_url, model_path)
else:
if not os.path.exists(model_path) or os.path.getsize(model_path) == 0:
self._download_latest_model(model_url, model_path)
except:
raise ValueError('Failed to download tracker model file')
def _download_latest_model(self, url, file_path):
    """Download the model at *url* to *file_path*, creating parent dirs.

    :raises ValueError: chained from the underlying error on any failure.
    """
    try:
        print('Downloading latest model file from [%s]' % (url))
        # create parent folder in file_path, if it does not exist
        file_path_parent = os.path.dirname(file_path)
        if not os.path.isdir(file_path_parent):
            os.makedirs(file_path_parent)
        with urllib.request.urlopen(url) as response, open(file_path, 'wb') as f:
            print('Saving latest model to [%s]' % (file_path))
            shutil.copyfileobj(response, f)
    except Exception as exc:
        # BUG FIX: was a bare `except:` (also caught SystemExit /
        # KeyboardInterrupt and hid the root cause); narrowed and chained.
        raise ValueError(
            'Failed to download tracker model file from [%s] and save to [%s]'
            % (url, file_path)) from exc
def _setup_gpu(self):
"""Choose the torch device: the configured CUDA GPU when available, otherwise CPU.

Sets self.use_gpu and self.device (and self.gpu_id on the CPU path).
"""
try:
# gpu_id == -1 is the explicit "CPU only" setting
if torch.cuda.is_available() and self.config['gpu_id'] != -1:
self.use_gpu = True
self.device = torch.device('cuda:' + str(self.config['gpu_id']))
if self.config['verbose']:
print('Using GPU %d' % (self.config['gpu_id']))
else:
self.use_gpu = False
# NOTE(review): self.gpu_id is only assigned on this CPU branch; the GPU
# branch leaves it unset — confirm whether it should mirror config['gpu_id']
self.gpu_id = -1
self.device = torch.device('cpu')
if self.config['verbose']:
print('Using CPU only')
except:
# NOTE(review): bare except re-raised as a generic ValueError loses the
# original cause; consider `except Exception as e: raise ... from e`
raise ValueError('Failed to setup GPU %d' %(self.config['gpu_id']))
def _load_image(self, fn):
    """Read the image file at path *fn* via OpenCV (BGR ndarray; cv2.imread returns None when unreadable)."""
    return cv2.imread(fn)
## routines to preload pytorch model
def _preload_model(self):
"""Build the tracking network from an embedded JSON config and load its pretrained weights."""
if self.config['verbose']:
print('Preloading model [ %s ] ... ' % (self.model_path), end='', flush=True)
## @todo: get rid of absolute path to model file
## load configuration
# Anchor geometry and tracking hyper-parameters, embedded as a JSON literal.
# NOTE(review): "adapation" here vs 'adaption' in _init_tracker_with_template,
# which re-hard-codes similar values — confirm which configuration actually wins.
cfg_json_str = '''
{
"anchors": {
"stride": 8,
"ratios": [0.33, 0.5, 1, 2, 3],
"scales": [8],
"round_dight": 0
},
"hp": {
"instance_size": 255,
"search_model": "adapation",
"penalty_k": 0.31,
"window_influence": 0.448,
"lr": 0.14
}
}'''
cfg = json.loads(cfg_json_str)
# models.Custom is the project-defined network; only the anchor config is used here
self.pretrained_model = models.Custom(anchors=cfg['anchors'])
self._load_pretrained_model(self.model_path)
self.pretrained_model.to(self.device) # move the pretrained model to GPU (if available)
if self.config['verbose']:
print('done', flush=True)
def _check_keys(self, model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
#print('missing keys:')
#print(missing_keys)
#print('unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
#print('used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def _remove_prefix(self, state_dict, prefix):
''' Old style model is stored with all names of parameters share common prefix 'module.' '''
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def _load_pretrained_model(self, pretrained_path):
"""Load checkpoint weights from *pretrained_path* into self.pretrained_model (non-strict)."""
## bug fix
## see https://github.com/CSAILVision/places365/issues/25#issuecomment-333871990
# NOTE(review): this monkey-patches pickle.load/pickle.Unpickler for the WHOLE
# process, not just this call — any later pickle use inherits encoding='latin1'
pickle.load = partial(pickle.load, encoding="latin1")
pickle.Unpickler = partial(pickle.Unpickler, encoding="latin1")
#model = torch.load(model_file, map_location=lambda storage, loc: storage, pickle_module=pickle)
#pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(self.device), pickle_module=pickle)
# map checkpoint tensors onto the device selected by _setup_gpu
if self.use_gpu:
pretrained_dict = torch.load(pretrained_path,
map_location=lambda storage,
loc: storage.cuda(),
pickle_module=pickle)
else:
pretrained_dict = torch.load(pretrained_path,
map_location=lambda storage,
loc: storage.cpu(),
pickle_module=pickle)
# checkpoints saved via DataParallel wrap weights in a 'state_dict' entry
# and prefix every key with 'module.'
if "state_dict" in pretrained_dict.keys():
pretrained_dict = self._remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = self._remove_prefix(pretrained_dict, 'module.')
self._check_keys(self.pretrained_model, pretrained_dict)
# strict=False: tolerate model keys missing from the checkpoint
self.pretrained_model.load_state_dict(pretrained_dict, strict=False)
# see: https://github.com/python/cpython/blob/master/Python/pymath.c
def _copysign_python27(self, x, y):
if ( y > 0. or ( y == 0. and math.atan2(y, -1.) > 0. ) ):
return math.fabs(x);
else:
return -math.fabs(x);
#
# `round()` method in python:
# if python2.7, round(0.5) = 1.0
# [if two multiples are equally close, rounding is done away from 0 -- https://docs.python.org/2/library/functions.html#round]
# if python3.7, round(0.5) = 0.0
# [if two multiples are equally close, rounding is done toward the even choice -- https://docs.python.org/3/library/functions.html#round]
#
def _round_python27(self, x):
    """Round half away from zero, matching Python 2.7's round().

    Python 3's round() uses banker's rounding (round-half-to-even), so
    round(0.5) differs between the two versions; this helper restores the
    2.7 behaviour the geometry code was written against.
    """
    magnitude = math.fabs(x)
    rounded = math.floor(magnitude)
    if magnitude - rounded >= 0.5:
        rounded += 1.0
    return self._copysign_python27(rounded, x)
# To track an object, the user selects a region containing this object in a
# given image frame. This region is called template. The user selected template
# can be of any size and aspect ratio. Therefore, this template needs to be
# transformed into an image size that is accepted as input to the model
#
# Input
# img full frame image containing the template
# bbox_center center coordinates of the bounding box containing the template
# model_square_input_size size of the model input (square shaped image) for template
# square_crop_size size of the square to which the user selected template is expanded (to get additional context)
# Returns
# a square image of size [model_square_input_size x model_square_input_size]
# containing the user selected object and some context around it
def _transform_img_for_model_input(self, img, bbox_center, model_square_input_size, square_crop_size, img_channel_avg):
"""Crop a square context window around *bbox_center*, pad with the channel
average where the window leaves the image, resize to the model input size,
and return a float CxHxW torch tensor."""
# if the template is near image boundary, image channel average of the
# template image is used to fill the empty regions
if isinstance(bbox_center, float):
bbox_center = [bbox_center, bbox_center]
# NOTE(review): names look swapped — shape[0] is the row count (height) and
# shape[1] the column count (width); the values are used consistently below
# (template_height with x/columns, template_width with y/rows), so behaviour
# is correct, but confirm before renaming.
template_width = img.shape[0]
template_height = img.shape[1]
template_center_to_boundary_length = (square_crop_size + 1) / 2
# context window [xmin..xmax] x [ymin..ymax], rounded with py2.7 semantics
context_xmin = self._round_python27(bbox_center[0] - template_center_to_boundary_length)
context_xmax = context_xmin + square_crop_size - 1
context_ymin = self._round_python27(bbox_center[1] - template_center_to_boundary_length)
context_ymax = context_ymin + square_crop_size - 1
# how far the window sticks out past each image edge
left_pad = int( max(0., -context_xmin) )
top_pad = int(max(0., -context_ymin))
right_pad = int(max(0., context_xmax - template_height + 1))
bottom_pad = int(max(0., context_ymax - template_width + 1))
# shift window coordinates into the padded image's coordinate frame
context_xmin = context_xmin + left_pad
context_xmax = context_xmax + left_pad
context_ymin = context_ymin + top_pad
context_ymax = context_ymax + top_pad
r, c, k = img.shape
if any([top_pad, bottom_pad, left_pad, right_pad]):
## fill average image colour if the template region is near the boundary of image
te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8) # 0 is better than 1 initialization
te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = img
if top_pad:
te_im[0:top_pad, left_pad:left_pad + c, :] = img_channel_avg
if bottom_pad:
te_im[r + top_pad:, left_pad:left_pad + c, :] = img_channel_avg
if left_pad:
te_im[:, 0:left_pad, :] = img_channel_avg
if right_pad:
te_im[:, c + left_pad:, :] = img_channel_avg
square_img_data = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
else:
# window fully inside the image: slice directly, no padding needed
square_img_data = img[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
if not np.array_equal(model_square_input_size, square_crop_size):
model_input = cv2.resize(square_img_data, (model_square_input_size, model_square_input_size))
else:
model_input = square_img_data
model_input_tensor = torch.from_numpy( np.transpose(model_input, (2, 0, 1)) ).float() # Channel x Height x Width
return model_input_tensor
## generate anchors
## [arguments]
## total_stride : (search_input_size - template_input_size) / total_stride + 1 = size of square feature map
## anchor_scale_list : list of scales by which all initial anchors will be scaled
## anchor_aspect_ratio_list : list of aspect ratio for all initial anchors
## square_feature_map_length : the dimension of final scores generated by classification and regression branches of region proposal network
##
## [description]
## the area of all generated anchors must be same as the area of initial anchor
## therefore, for all generate anchors of dimension Aw x Ah, and aspect ratio = aspect_ratio
## Ah = Aw * aspect_ratio ---- (1) by definition of aspect ratio
## Aw * Ah = initial_anchor_area ---- (2) because the area of anchor remains constant
##
## where, initial_anchor_area = 4 * total_stride
##
## Therefore, substituting values of aspect_ratio and initial_anchor_area in (1) and (2)
## and substituting (1) in (2), we get
##
## Aw * Aw * aspect_ratio = initial_anchor_area
## or, Aw = sqrt( initial_anchor_area / aspect_ratio )
## and substituting value of Aw in (2), we get the value of Ah of the new anchor
##
## we scale each anchor using the scale provided in anchor_scale_list
def _generate_anchor(self, total_stride, anchor_scale_list, anchor_aspect_ratio_list, square_feature_map_length):
anchor_count = len(anchor_aspect_ratio_list) * len(anchor_scale_list)
anchors = np.zeros((anchor_count, 4), dtype=np.float32)
initial_anchor_area = total_stride * total_stride;
anchor_count = 0
for anchor_aspect_ratio in anchor_aspect_ratio_list:
anchor_width = int( np.sqrt( initial_anchor_area / anchor_aspect_ratio ) )
anchor_height = int( anchor_width * anchor_aspect_ratio )
for anchor_scale in anchor_scale_list:
anchor_scaled_height = anchor_height * anchor_scale
anchor_scaled_width = anchor_width * anchor_scale
anchors[anchor_count, 0] = 0 # will be updated later
anchors[anchor_count, 1] = 0 # will be updated later
anchors[anchor_count, 2] = anchor_scaled_width
anchors[anchor_count, 3] = anchor_scaled_height
anchor_count = anchor_count + 1
feature_map_anchors = np.zeros( (anchor_count, square_feature_map_length, square_feature_map_length, 4), dtype=np.float32 )
center_of_feature_map = (square_feature_map_length - 1 ) / 2 # Li uses ori = square_feature_map_length / 2
offset_from_center_of_feature_map = -center_of_feature_map * total_stride;
for anchor_index in range(anchor_count):
anchor = anchors[anchor_index]
for i in range(square_feature_map_length):
for j in range(square_feature_map_length):
anchors_ij = np.copy(anchor)
# update the (x,y) coordinate of each anchor for feature map location (i,j)
anchors_ij[0] = offset_from_center_of_feature_map + total_stride * j
anchors_ij[1] = offset_from_center_of_feature_map + total_stride * i
feature_map_anchors[anchor_index, i, j] = anchors_ij
feature_map_anchors = np.reshape(feature_map_anchors, (-1, 4)) # collapse the (i,j) dimension of feature map as it is not needed
return feature_map_anchors
def _init_tracker_with_template(self, template, template_bbox):
"""Initialise tracking state from the first frame.

*template* is the full first-frame image; *template_bbox* is the user-selected
box as [x, y, w, h]. Populates self.state (hyper-parameters, anchors, cosine
window) and feeds the cropped template through the model's template branch.
"""
self.state['model'].eval();
if self.use_gpu:
self.state['model'] = self.state['model'].cuda();
self.state['image_width'] = template.shape[1]
self.state['image_height'] = template.shape[0]
# convert [x, y, w, h] to centre coordinates
target_cx = template_bbox[0] + template_bbox[2]/2
target_cy = template_bbox[1] + template_bbox[3]/2
target_w = template_bbox[2]
target_h = template_bbox[3]
self.state['template_position'] = np.array( [target_cx, target_cy] )
self.state['template_size'] = np.array( [target_w , target_h ] )
self.state['target_position'] = self.state['template_position']
self.state['target_size'] = self.state['template_size']
# tracking hyper-parameters (NOTE(review): these mirror the JSON config in
# _preload_model, which spells the mode "adapation" — confirm which wins)
self.state['model_template_size'] = 127 # 127x127
self.state['model_search_size'] = 255 # 255x255
self.state['total_stride'] = 8
self.state['penalty_k'] = 0.31
self.state['window_influence'] = 0.448
self.state['lr'] = 0.14
self.state['search_model'] = 'adaption'
self.state['anchor_aspect_ratio_list'] = [0.33, 0.5, 1, 2, 3]
self.state['anchor_scale_list'] = [8, ]
self.state['anchor_count'] = len(self.state['anchor_aspect_ratio_list']) * len(self.state['anchor_scale_list'])
# adapt search-region size to relative object size
if self.state['search_model'] == 'adaption':
if ( (self.state['target_size'][0] * self.state['target_size'][1]) / (float(self.state['image_width'] * self.state['image_height'])) ) < 0.004:
self.state['model_search_size'] = 287 # small object big search region
else:
self.state['model_search_size'] = 271
# 17x17 for model_search_size = 255
# 19x19 for model_search_size = 271 # OTB2017 dataset
self.state['square_feature_map_length'] = int( (self.state['model_search_size'] - self.state['model_template_size']) / self.state['total_stride'] + 1)
self.state['anchors'] = self._generate_anchor(self.state['total_stride'],
self.state['anchor_scale_list'],
self.state['anchor_aspect_ratio_list'],
self.state['square_feature_map_length'])
self.state['context'] = 0.5
context_length = 0.5 * ( self.state['target_size'][0] + self.state['target_size'][1] )
square_crop_size = round( np.sqrt( (self.state['target_size'][0] + context_length) * (self.state['target_size'][1] + context_length) ) ) # see equation (15) of [Li et al. 2018]
## initialize model with template image
self.state['template_img_channel_avg'] = np.mean(template, axis=(0, 1))
template_subwindow = self._transform_img_for_model_input(template,
self.state['target_position'],
self.state['model_template_size'],
square_crop_size,
self.state['template_img_channel_avg'])
template_subwindow_tensor = Variable(template_subwindow.unsqueeze(0))
if self.use_gpu:
self.state['model'].temple( template_subwindow_tensor.cuda() )
else:
self.state['model'].temple( template_subwindow_tensor )
# cosine window
self.state['window'] = np.outer(np.hanning(self.state['square_feature_map_length']), np.hanning(self.state['square_feature_map_length']))
self.state['window'] = np.tile(self.state['window'].flatten(), self.state['anchor_count'])
self.state['track_count'] = 0
|
from mongoengine import *
from flask_mongoengine import MongoEngine
db = MongoEngine()
class OneImage(db.EmbeddedDocument):
"""Embedded document wrapping a single stored image with a forced 100x100 thumbnail."""
element = db.ImageField(thumbnail_size=(100, 100, True))
def get_el(self):
"""Return the name of the stored image file."""
return self.element.name
class UserV(db.Document):
    """User document with name fields and three image representations
    (single image, embedded image list, and a string/URL fallback)."""
    second_name = db.StringField(required=True, max_length=50)
    first_name = db.StringField(required=True, max_length=50)
    last_name = db.StringField(required=True, max_length=50)
    # consistency fix: the bare `ImageField` only resolved through the
    # `from mongoengine import *` star import; every other field uses the
    # flask_mongoengine `db.` alias, so use it here too
    image = db.ImageField(thumbnail_size=(200, 200, True))
    image2 = db.ListField(db.EmbeddedDocumentField(OneImage))
    image3 = db.StringField(required=False, max_length=500)
|
# Flask
from flask import Flask, redirect, url_for, request, render_template, Response, jsonify, redirect
from model1 import imgg, pimgg
import base64
import json
import time
# Declare a flask app
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the upload page; on POST, save the uploaded image and run both models on it."""
    if request.method != 'POST':
        return render_template('index.html')
    uploaded = request.files['file']
    # timestamped name avoids collisions between uploads
    new_graph_name = "graph" + str(time.time()) + ".png"
    uploaded.save("./" + new_graph_name)
    pimgg(new_graph_name)
    imgg(new_graph_name)
    return render_template('index.html', result=1, pimggg=new_graph_name)
# dev entry point: run Flask's built-in server
if __name__ == '__main__':
app.run()
|
# -*- coding: utf-8 -*-
class Solution:
    """LeetCode 2129 — Capitalize the Title."""

    def capitalizeTitle(self, title: str) -> str:
        """Lower-case words of length 1-2; otherwise capitalize the first letter only."""
        fixed = []
        for word in title.split():
            fixed.append(word.lower() if len(word) <= 2 else word.capitalize())
        return " ".join(fixed)
if __name__ == "__main__":
# sanity checks taken from the LeetCode examples
solution = Solution()
assert "Capitalize The Title" == solution.capitalizeTitle("capiTalIze tHe titLe")
assert "First Letter of Each Word" == solution.capitalizeTitle(
"First leTTeR of EACH Word"
)
assert "i Love Leetcode" == solution.capitalizeTitle("i lOve leetcode")
|
from django.shortcuts import render, redirect
# the index function is called when root is visited
# CONTROLLER!!
def index(request):
    """Render the landing page (the 100-star banner marks each request in the console)."""
    print("*" * 100)
    return render(request, "vinmyMVC/index.html")
def show(request):
"""Render the user-list page; the print logs the HTTP method for debugging."""
print (request.method)
return render(request, "vinmyMVC/show_users.html")
def create(request):
    """On POST, store the submitted first name in the session; always redirect to the index."""
    print(request.method)
    if request.method == "POST":
        # star separators make the POST payload easy to spot in the console
        print("*" * 10)
        print(request.POST)
        print("*" * 10)
        request.session['name'] = request.POST['first_name']
    # both the POST and non-POST paths redirected identically in the original
    return redirect('/')
|
import os
from typing import Dict
from be.model import table
from be.model import tuple
from be.model import user
import sqlite3 as sqlite
class Store:
    """In-memory table registry plus the sqlite schema bootstrap for the backend.

    Registers the User/Order/Goods in-memory tables and ensures the
    corresponding sqlite tables exist on disk (file "be.db").
    """

    tables: Dict[str, table.Table]
    database: str

    def __init__(self):
        self.database = "be.db"
        self.tables = dict()
        self.create("User")
        self.create("Order")
        self.create("Goods")
        self.init_tables()

    def init_tables(self):
        """Create the user/goods/orders sqlite tables if they do not exist yet."""
        # fix: acquire the connection OUTSIDE the try block — previously a
        # failure inside get_db_conn() left `conn` unbound and the except
        # clause crashed with UnboundLocalError on conn.rollback()
        conn = self.get_db_conn()
        try:
            conn.execute(
                "CREATE TABLE IF NOT EXISTS user ("
                "username TEXT PRIMARY KEY, password TEXT,"
                " token TEXT, terminal TEXT);"
            )
            conn.execute(
                "CREATE TABLE IF NOT EXISTS goods ("
                "goodsId TEXT PRIMARY KEY, goodsName TEXT NOT NULL,"
                "goodsAuth TEXT, goodsPrice TEXT NOT NULL, goodsNum TEXT, goodsType TEXT, goodsDsr TEXT, sellerName TEXT);"
            )
            conn.execute(
                "CREATE TABLE IF NOT EXISTS orders ("
                "orderId TEXT, buyername TEXT ,"
                "sellerName TEXT, orderStatus TEXT , goodsName TEXT, goodsPrice TEXT, totalValue TEXT, addr TEXT,"
                "PRIMARY KEY(orderId, goodsName));"
            )
            conn.commit()
        except sqlite.Error as e:
            print(e)
            conn.rollback()
        finally:
            # fix: the connection used to leak; close it whether or not DDL succeeded
            conn.close()

    def create(self, table_name: str):
        """Register an in-memory table under *table_name*."""
        self.tables[table_name] = table.Table(table_name)

    def get_table(self, table_name: str) -> table.Table:
        """Return the registered table, or None when the name is unknown."""
        return self.tables.get(table_name)

    def get_db_conn(self) -> sqlite.Connection:
        """Open a fresh sqlite connection to the backing database file."""
        return sqlite.connect(self.database)
# module-level singleton used by the convenience wrappers below
database_instance = Store()
def get_db_conn():
"""Open a fresh sqlite connection via the singleton Store."""
return database_instance.get_db_conn()
def get_row(table_name: str, key: str) -> tuple.Tuple:
"""Delegate a keyed lookup to the named in-memory table."""
return database_instance.get_table(table_name).get(key)
def del_row(table_name: str, key: str) -> bool:
"""Delegate a keyed delete to the named in-memory table."""
return database_instance.get_table(table_name).remove(key)
def put_row(table_name, row: tuple.Tuple):
"""Delegate an insert/replace of *row* to the named in-memory table."""
database_instance.get_table(table_name).put(row)
# NOTE(review): `(bool, tuple.Tuple)` is not a valid typing annotation;
# typing.Tuple[bool, tuple.Tuple] was probably intended — confirm before changing
def put_row_absent(table_name, row: tuple.Tuple) -> (bool, tuple.Tuple):
"""Delegate an insert-if-absent of *row* to the named in-memory table."""
return database_instance.get_table(table_name).put_absent(row)
|
# NumPy intro
import os

# NumPy ("Numerical Python") is a library for working with arrays.
# Plain Python lists are comparatively slow; NumPy's ndarray is faster.
# Install it with `pip install numpy` if it is missing, then import it:
import numpy as np

# array() accepts any array-like object (list, tuple, ...) and returns an ndarray
arr = np.array([1,2,3])
print(arr)

# --- Dimensions: the nesting depth of the array ---

# 0-D array (a scalar): a single element
arr0 = np.array(42)
print("A 0-D Array",arr0)

# 1-D array: its elements are 0-D arrays
arr1 = np.array([1,2,3,4,5])
print("A 1-D Array",arr1)

# 2-D array: its elements are 1-D arrays (matrices / 2nd-order tensors)
arr2 = np.array([[1,2,3],[4,5,6]])
print("A 2-D Array",arr2)

# 3-D array: its elements are 2-D arrays (3rd-order tensors)
arr3 = np.array([[[1,2,3],[4,5,6]],[[1,2,3],[4,5,6]]])
print("A 3-D Array",arr3)

# the ndim attribute reports the number of dimensions
os.system('clear')
print(arr.ndim)
print(arr1.ndim)
print(arr2.ndim)
print(arr3.ndim)
|
# -*- python -*-
# Assignment: Find Characters
# Write a program that:
# Takes:
# - a list of strings
# - a string containing a single character
# Produces:
# - prints a new list of all the strings containing that character.
# Here's an example:
# input
# input
l1 = ['hello', 'world', 'my', 'name', 'is', 'Anna']
char1 = 'o'
# expected output: ['hello', 'world']


def find_chars(l, c):
    """Print (and return) the strings in *l* that contain the character *c*.

    Ported to Python 3: the original used Python 2 print statements, which
    are a syntax error under Python 3. Also returns the filtered list —
    backward compatible, since existing callers ignored the old None return.
    """
    n = [s for s in l if c in s]
    print("Debug: l =", l)
    print("Debug: c =", c)
    print("Debug: n =", n)
    return n


find_chars(l1, char1)
|
from celery import shared_task
from lxml import etree as et
import requests
from .models import CurrencyMap
from django.core.exceptions import FieldDoesNotExist
CURRENCY_STORAGE_URL = "https://www.cbr-xml-daily.ru/daily.xml"
@shared_task
def sample_task():
"""Minimal Celery task used to verify the worker is alive."""
print("The sample task just ran.")
@shared_task
def update_currency():
"""Fetch the Central Bank of Russia daily XML feed and upsert rates into CurrencyMap."""
currency_xml = requests.get(CURRENCY_STORAGE_URL, allow_redirects=True)
tree = et.fromstring(currency_xml.content)
for element in tree:
currency_code, value = None, None
# each currency element carries CharCode (e.g. "USD") and Value children
for child in element:
if child.tag == "CharCode":
currency_code = child.text
if child.tag == "Value":
value = child.text
if currency_code and value:
try:
currency = CurrencyMap.objects.get(currency_code=currency_code)
if currency:
# the feed uses a decimal comma (e.g. "75,43")
currency.value = float(value.replace(',', '.'))
except Exception:
# lookup failed (typically DoesNotExist): create a fresh record
currency = CurrencyMap(currency_code=currency_code, value=float(value.replace(',', '.')))
# NOTE(review): confirm save() also executes on the update path above —
# if it only runs in the except branch, updated rates are never persisted
currency.save()
|
from django.shortcuts import render
from rest_framework import viewsets,generics
from rest_framework.response import Response
from .models import Head_content
from .serializers import HeadContentSerializer
# Create your views here.
class HeadTabarViewSet(viewsets.ViewSet):
"""Read-only endpoint returning the head/tab-bar entries currently in serving."""
def list(self, request):
"""Serialize every Head_content row with is_in_serving == 1."""
queryset = Head_content.objects.filter(is_in_serving=1)
serializer = HeadContentSerializer(queryset,many=True)
return Response(serializer.data)
|
import roomba
import time  # NOTE(review): unused here; kept in case it is relied on elsewhere


# Fix: init_serial/start_data/stop/end_data/set_mode live in the roomba
# module, but the original called them as bare names after `import roomba`,
# which raised NameError on the first call. Qualify each call with the module.
# (TODO confirm: the author may instead have intended `from roomba import *`.)
roomba.init_serial()
roomba.start_data()
roomba.stop()
roomba.end_data()
roomba.set_mode(0)
# coding=utf-8
"""
最长不含重复字符的子字符串
请从字符串中找出一个最长的不包含重复字符的子字符串,计算该最长子字符串的长度。假设字符串中只包含从’a’到’z’的字符。例如,在字符串中”arabcacfr”,最长非重复子字符串为”acfr”,长度为4。
"""
class Solution(object):
    """Sliding-window solver for the longest substring without repeated characters.

    E.g. in "arabcacfr" the longest non-repeating substring is "acfr" (length 4).
    """

    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int
        """
        last_seen = {}        # char -> index of its most recent occurrence
        window_start = -1     # index just before the current window
        best = 0
        for i, ch in enumerate(s):
            seen_at = last_seen.get(ch)
            # a repeat inside the window shrinks it to start after the repeat
            if seen_at is not None and seen_at > window_start:
                window_start = seen_at
            last_seen[ch] = i
            best = max(best, i - window_start)
        return best
if __name__ == '__main__':
    # fix: the original used the Python 2 print statement (a syntax error on
    # Python 3); print(...) with a single argument behaves the same on both
    print(Solution().lengthOfLongestSubstring("tmmzuxt"))
|
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import json
import smtplib
from tornado.template import Loader, os
from logging import Logger
from core import Data
class MailPublisher(object):
    """Sends templated plain-text + HTML e-mails to users, honouring opt-out flags."""

    def __init__(self, logger, db, config_path):
        """
        @type db: Data
        @type logger: Logger
        """
        self.logger = logger
        self.db = db
        try:
            # fix: close the config file deterministically (it used to leak)
            with open(os.path.join(config_path, 'email.json')) as f:
                self.config = json.load(f)
            self.logger.debug('Mail module config: {0}'.format(self.config))
        except Exception as e:
            self.logger.error('Failed to initialize mail module: {0}'.format(e))

    def send(self, gid, subject, template_name, params, check_accept=True):
        """Render *template_name* with *params* and e-mail it to the user *gid*.

        When check_accept is True the user's opt-in record is consulted first.
        All errors are logged, never raised to the caller.
        """
        try:
            if check_accept:
                accept = self.db.get_terms_accept(gid)
                if not accept or not accept['email']:
                    self.logger.warning('Warning: Not sending email, user opt-out: {0}'.format(gid))
                    return
            gid_info = self.db.get_gid_info(gid)
            # fix: the original condition was `if 'name' and 'email' in gid_info:`
            # which only tested 'email' ('name' is a truthy constant); a record
            # without a name then raised KeyError below. Check both keys.
            if 'name' in gid_info and 'email' in gid_info:
                params['name'] = gid_info['name']
                self.logger.info('Info: Emailing to [{0}], gid [{1}]...'.format(gid_info['email'], gid))
                self._do_send(params, gid_info['email'], subject, template_name)
                self.logger.info('Info: Email to [{0}] is sent...'.format(gid_info['email']))
            else:
                self.logger.warning('Warning: Not sending email, email unknown: {0}'.format(gid))
        except Exception as e:
            self.logger.error('Error: Exception in MailPublisher.send(): {0}'.format(e))

    def _do_send(self, params, to, subject, template_name):
        """Build a multipart/alternative message from the .txt/.html templates and send it over SMTP."""
        # Create message container - the correct MIME type is multipart/alternative.
        msg = MIMEMultipart('alternative')
        msg['Subject'] = subject
        msg['From'] = self.config['from'].encode('utf-8', 'ignore')
        msg['To'] = to
        # Create the body of the message (a plain-text and an HTML version).
        loader = Loader('./templates/mail')
        text = loader.load(template_name + '.txt').generate(**params)
        html = loader.load(template_name + '.html').generate(**params)
        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(text, 'plain')
        part2 = MIMEText(html, 'html')
        # According to RFC 2046, the last part of a multipart message, in this case
        # the HTML message, is best and preferred — so attach it last.
        msg.attach(part1)
        msg.attach(part2)
        if self.config['dummy']:
            # dry-run mode: log instead of sending
            self.logger.warning('WARNING: Not sending e-mail to {0}, subj: {1}'.format(msg['To'], subject))
        else:
            # NOTE(review): the .encode(...) calls are Python-2 idioms; under
            # Python 3 they hand bytes to smtplib/email — confirm the target
            # interpreter before changing them
            s = smtplib.SMTP(host=self.config['host'].encode('utf-8', 'ignore'), port=self.config['port'])
            s.starttls()
            s.login(self.config['login'].encode('utf-8', 'ignore'), self.config['password'].encode('utf-8', 'ignore'))
            # the sender is BCC'd by including msg['From'] in the recipient list
            s.sendmail(msg['From'], [msg['To'], msg['From']], msg.as_string())
            s.quit()
# Copyright (c) 2021 kamyu. All rights reserved.
#
# Google Code Jam 2021 Qualification Round - Problem E. Cheating Detection
# https://codingcompetitions.withgoogle.com/codejam/round/000000000043580a/00000000006d1155
#
# Time: O(S * Q + SlogS + QlogQ)
# Space: O(S + Q)
#
# Difference with neighbors in easiest and hardest 5% questions method
# Accuracy: 982/1000 = 98.2%
#
def diff(player1, player2, extreme_questions):
    """Absolute difference between two players' total scores over the extreme questions."""
    total1 = sum(player1[j] for j in extreme_questions)
    total2 = sum(player2[j] for j in extreme_questions)
    return abs(total1 - total2)
def neighbor_diffs(scores, players, extreme_questions, i):
"""Average extreme-question score difference between player rank *i* and its
neighbours in the skill-sorted *players* order (relies on the global S)."""
diffs = cnt = 0
if i-1 >= 0:
diffs += diff(scores[players[i-1]], scores[players[i]], extreme_questions)
cnt += 1
if i+1 < S:
diffs += diff(scores[players[i]], scores[players[i+1]], extreme_questions)
cnt += 1
# cnt is 1 at the extremes of the ranking, 2 otherwise
return float(diffs)/cnt
def cheating_detection():
"""Read one test case from stdin and return the 1-based index of the likely cheater.

Python 2 code (xrange/raw_input/map-as-list): the cheater is the player whose
performance on the easiest/hardest 5% of questions differs most from
skill-adjacent neighbours.
"""
scores = []
p_count = [0]*S
q_count = [0]*Q
for i in xrange(S):
scores.append(map(int, list(raw_input().strip())))
for j, c in enumerate(scores[i]):
p_count[i] += c
q_count[j] += c
# rank players by total score, questions by how often they were answered correctly
players = sorted(range(S), key=lambda x:p_count[x])
questions = sorted(range(Q), key=lambda x:q_count[x])
# easiest 5% plus hardest 5% of questions
extreme_questions = [questions[j] for j in xrange(int(Q*EXTREME_RATIO))] + [questions[j] for j in xrange(Q-int(Q*EXTREME_RATIO), Q)]
result = 0
for i in xrange(S):
if neighbor_diffs(scores, players, extreme_questions, i) > neighbor_diffs(scores, players, extreme_questions, result):
result = i
return players[result]+1
# fraction of questions treated as "extreme" at each end of the difficulty ranking
EXTREME_RATIO = 0.05
# Python 2 script: input() evaluates the line (here, the ints T and P)
S, Q, T, P = 100, 10000, input(), input()
for case in xrange(T):
print 'Case #%d: %s' % (case+1, cheating_detection())
|
# Read an integer from stdin and print its floor-half.
c = int(input())  # fix: Python 3's input() returns str; `str // 2` raised TypeError
a = c // 2
print(a)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 04 12:21:33 2016
An implementation of Sequential T-test Analysis of Regime Shifts
as described in the paper:
"A sequential algorithm for testing climate regime shifts"
S.N. Rodionov
Geophys. Rev. Ltrs. V31 N9
May 2004
@author: kristencutler
"""
import scipy.stats
import numpy as np
import math
class RegimeShiftChecker(object):
"""Sequential t-test analysis of regime shifts (STARS, Rodionov 2004).

data: the time series; limit: the cut-off length l; probability: the
significance level p. After construction, self.rsiList holds the indices
where regime shifts were detected.
"""
def __init__(self, data, limit, probability):
self.data = data
self.limit = limit
self.p = probability
self.rsiList = []
self.getDiff()
self.getRegimeMean(0, self.limit)
self.checkLimits()
def getDiff(self):
"""Determine the difference between mean values of two regimes. T is
the value of T-distrubtion, p is probability and l is cut-off length.
Find the variance for every number l interval and get the average."""
t = scipy.stats.t.interval(1.0-(self.p), 2*(int(self.limit))-2)[1]
# NOTE(review): window length is hard-coded to 10 here (and in the /10.0
# below) instead of self.limit — confirm whether that is intentional
first, last, varianceList = 0, 10, []
for elem in self.data:
# NOTE(review): scipy.var was deprecated and removed from modern SciPy;
# np.var is the drop-in replacement — update before upgrading SciPy
varianceList.append(scipy.var(self.data[first:last]))
first += 1
last += 1
self.averageVar = round(np.mean(varianceList), 2)
self.diff = t * math.sqrt(2.0*(self.averageVar/10.0))
def getRegimeMean(self, first, last):
""""Calculate the mean of a regime and the levels that should be reached
to qualify as a regime shift."""
self.xr1 = np.mean(self.data[int(first):int(last)])
self.greaterLimit = round(self.xr1 + self.diff, 2)
self.lesserLimit = round(self.xr1 - self.diff, 2)
self.bounds = [self.greaterLimit, self.lesserLimit]
def checkLimits(self):
"""For every value starting with year l + 1, check if it exceeds
bounds. If it does not exceed, recalculate the
average of the regime (call getMean) to include value (i) and previous
values (l - 1) else calculate the RSI."""
currentshift = ''
self.point = self.limit
while (int(self.point) <= len(self.data)-1):
# NOTE(review): rsiList[0] is the FIRST detected shift; using the most
# recent shift (rsiList[-1]) may have been intended — confirm
if self.rsiList and self.point <= self.rsiList[0] + self.limit:
self.getRegimeMean(self.rsiList[0], (min(self.rsiList[0] + self.limit, len(self.data)-1)))
else:
self.getRegimeMean(self.point - self.limit, self.point)
if self.data[int(self.point)] > self.greaterLimit or self.data[int(self.point)] < self.lesserLimit:
testrange = range(int(self.point), (min(int(self.point) + int(self.limit), len(self.data)-1) + 1))
rsiflag, direction = self.calcRSI(testrange, self.bounds)
# only record a shift when its direction differs from the previous one
if rsiflag and currentshift != direction:
self.rsiList.append(self.point)
currentshift = direction
self.point += 1
def calcRSI(self, sub_index, bounds):
"""Calculate the Regime Shift Index"""
# sign of the candidate shift: downward if the first value broke the lower bound
x_sign = -1 if self.data[sub_index[0]] < min(bounds) else 1
limit = min(bounds) if self.data[sub_index[0]] < min(bounds) else max(bounds)
RSI = 0
for idx in sub_index:
x_star = x_sign * (self.data[idx] - limit)
marginal_RSI = x_star / (np.sqrt(self.averageVar) * self.limit)
RSI += marginal_RSI
# a negative cumulative RSI rejects the candidate shift
if RSI < 0:
return False, None
strDir = 'up' if x_sign > 0 else 'down'
return True, strDir
# pdo = [.04, .79, .82, .86, .63, .73, .92, -.3, 1.36, .23, -.25, -1.11, -1.72, -.03, 0.34, -.41, -.64, -.79, -1.13, -1.07, -1.18, -.66,
# 1.05, .75, 1.29, -.05, .3, 1.07, .96, .97, .97, .08, -.26, .29, .17, 1.01, 1.79, 0, .5, 1.36, 2.03, 2.14, 1.01, -.18, .18, -1.02,
# -.91, -.73, -.11, -2.01, -2.13, -1.54, -2.01, -.57, -1.32, .2, -2.48, -1.82, 0.25, .69, .3, 1.18, -1.29, -.33, .01, -1.24, -.82,
# -.2, -.95, -1.26, .61, -1.9, -1.99, -.46, -1.22, -.84, -1.14, 1.65, .34, -.58, -.11, .59, .34, .56, 1.5, 1.27,
# 1.12, 1.88, .93, -.95, -.3, -2.02, .05, .05, 1.21, -.49, .59, .23, .83, -.32, -2., .6, .27, 2.09, .43, .44]
# testRegime = RegimeShift(pdo, 10.0, 0.05)
# years = range(1900, 2006)
# for i in testRegime.rsiList:
# print(years[int(i)])
# result = [1910, 1922, 1943, 1958, 1977, 1989, 2003]
|
# add = lambda x, y : x + y
def double(x):
"""Return twice *x*."""
return x * 2
sequence = [1, 3, 5, 9]
# three equivalent ways to double every element; each rebinding replaces the last
doubled = [double(x) for x in sequence]
doubled = map(double, sequence)  # NOTE: in Python 3 this is a lazy map object, not a list
doubled = list(map(lambda x: x * 2, sequence))
print(doubled)
|
# Generated by Django 3.1.6 on 2021-02-08 04:10
from django.db import migrations, models
# Auto-generated migration: switch profile.photo to a TextField whose default
# points at a placeholder avatar URL.
class Migration(migrations.Migration):
dependencies = [
('main_app', '0012_auto_20210208_0405'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='photo',
field=models.TextField(default='https://t4america.org/wp-content/uploads/2016/10/Blank-User.jpg'),
),
]
|
from db import db
class TutorModel(db.Model):
"""SQLAlchemy model for a tutor record (name, contact e-mail, and subject taught)."""
__tablename__ = "tutors"
id = db.Column(db.Integer, primary_key=True)
#unique full name
# NOTE(review): the comment says "unique" but no unique=True constraint is
# declared on the column — confirm intent before relying on uniqueness
name = db.Column(db.String(32))
first_name = db.Column(db.String(32))
last_name = db.Column(db.String(32))
email = db.Column(db.String(32))
subject = db.Column(db.String(32))
def __init__(self, name, first_name, last_name, email, subject):
self.name = name
self.first_name = first_name
self.last_name = last_name
self.email = email
self.subject = subject
def save_to_db(self):
"""Insert or update this tutor in the database."""
db.session.add(self)
db.session.commit()
def delete_from_db(self):
"""Remove this tutor from the database."""
db.session.delete(self)
db.session.commit()
def json(self):
"""Serializable dict view of the tutor (the surrogate id is excluded)."""
return {
"name": self.name,
"first_name": self.first_name,
"last_name": self.last_name,
"email": self.email,
"subject": self.subject
}
@classmethod
def get_by_name(cls, name):
"""Look up a tutor by exact full name; returns None when absent."""
return cls.query.filter_by(name=name).first()
|
import logging
import os
import json
import uuid
from datetime import datetime
from shapely.geometry.point import Point
from shapely.wkt import loads
import pandas as pd
import numpy as np
from geopy.distance import vincenty
# redis
from redis_client import RedisClient
import pickle
from vibebot import EventBot
from vibepy.class_postgres import PostgresManager
from vibepy.read_config import read_config
from batch import Batch
from traineval.output_to_postgres import update_postgres
from sbbrequest import sbb_response
# module-wide singletons: ini-driven config, shared Postgres manager, module logger
CONFIG = read_config(ini_filename='application.ini', ini_path=os.path.dirname(__file__))
DB = PostgresManager(CONFIG, 'database')
logger = logging.getLogger(__name__)
class ScheduleMatchingBot(EventBot):
def __init__(self):
"""Wire up the rabbit queue callbacks, preload geometry data, and connect to redis."""
queues_callbacks = {
CONFIG.get('rabbit', 'rabbit_mot_exchange'): self.callback_new_mot,
"spf_response_exchange": self.callback_process_mot
}
# load geometries initially to be used for entire session
# (read_geo_valid is defined elsewhere in this class)
self.geoms = self.read_geo_valid()
# Queue on the exchange the bot are reading from
# %%RABBIT_MOT_EXCHANGE%%-bot-%%SCHEDULE_MATCHING_BOT_ID%%
super(ScheduleMatchingBot, self).__init__('sched_matching', CONFIG, queues_callbacks)
self.redis_client = RedisClient(CONFIG.get('redis','redis_host'), CONFIG.get('redis','redis_port'))
logger.info("Event bot created")
def callback_new_mot(self, json_body):
logging.debug("[received] new MoT segments %r" % json_body)
# Extract the batch ID and the list mo MoT segments froim the json!
try:
batch_id = str(uuid.uuid4())
if not json_body['mot_segments']:
logging.debug('mot_segments is empty. Skipping schedule matching.')
return []
list_mot_id, loc_bounds = self.geo_valid(json_body['mot_segments'])
if not list_mot_id:
logging.debug('no mot id. Skipping schedule matching.')
return []
except Exception as e:
logging.error(e)
err = 'Unable to parse JSON into a batch id and list of mot ids'
logging.error(err)
# we are going to write the error to a separate queue
return []
b = Batch(batch_id, list_mot_id, loc_bounds, CONFIG, DB)
b.init_trips()
# we have to clear out the DB attr since it can't be pickled
b.DB = None
# now we are doing this in redis
self.redis_client.upload_to_redis(batch_id, {'status': 0, 'batch': pickle.dumps(b)})
# send reqs
b.send_trip_requests()
# Empty list when not sending any message out otherwise rabbit consumer doesn't like it
return []
def callback_process_mot(self, json_body):
logging.debug("[received] new SBB response")
status = 1
try:
# first let's put this in redis
self.redis_client.upload_to_redis(json_body.get('uuid'), json_body.get('xml'))
batch_id, trip_id, max_res, leave_at = json_body.get('uuid').split("_")
if not batch_id:
return []
# get obj
# now in redis
b_binary = self.redis_client.get_hm_obj(batch_id, 'batch')[0]
if not b_binary:
logging.warning('batch not found ({bi})'.format(bi=batch_id))
return []
else:
b = pickle.loads(b_binary)
# now we can lock up this batch for processing
# we need to update DB
b.DB = DB
# ok, let's see which trip this is
trip = b.trip_objs[trip_id]
if trip.request_params[(max_res, leave_at)] == 0:
# this request hasn't been processed yet
resp = sbb_response.SBBResponse(json_body['xml'].encode('utf-8'))
good_to_go = resp.check_if_error()
if good_to_go > 0:
# we are either going to retry or skip
if good_to_go == 2:
return [] # skipping
else:
# we will republish the request
trip.republish_req([(max_res, leave_at)])
return []
trip.build_single_itinerary(resp, max_res, leave_at)
if trip.requests_processed == len(trip.request_params):
# we've processed everything
b.trips_processed += 1
trip.complete_processing()
if b.trips_processed == len(b.trip_objs):
# we are done processing
logging.debug("batch %s ready for processing" % b.batch_id)
b.process_trips()
status = 2
# we need to write the updated obj back in redis
b.DB = None # bye, you can't be pickled
self.redis_client.upload_to_redis(batch_id, {'status' : status, 'batch' : pickle.dumps(b)})
except Exception as e:
logging.error(e)
err = 'Unable to process batch'
logging.error(err)
return []
del b
return []
def read_geo_valid(self):
"""
:param CONFIG: The parsed config file
:return: A list of geometries (polygons) defining out geo valid regions
"""
path = os.path.dirname(os.path.realpath(__file__))
filenames = CONFIG.get('geovalidity', 'VALID_REGION_WKT').split(',')
geoms = []
for filename in filenames:
filepath = path + '/filters/wkt/' + filename + '.wkt'
try:
f = open(filepath)
geoms.append(loads(f.read()))
except:
logging.critical('Could not read shapefile {filepath}'.format(filepath=filepath))
raise IOError
return geoms
def geo_valid(self, json_input):
"""
In order to make a guess at MoT we have to have OSM data. geoms is a list of geometries (closed polygons)
that define the regions of validity of this model. This function returns a list of visit id's that are valid
TODO: If we don't specify a lat and lon, go look this up.
:param visits: A list of dictionaries containing a lat, a lon and a visit id
:param CONFIG: The parsed config file
:return: Will output a list of visits that are within the regions of validity
"""
# geoms = read_geo_valid(CONFIG)
p = pd.DataFrame(json_input)
p = p[p['mot'] == 'train']
if p.empty:
logging.debug('No train journey within json')
return [], []
# Is within region of geovalidity
is_within = p.apply(lambda x: np.all([geom.contains(Point(x['start_lon'], x['start_lat'])) and
geom.contains(Point(x['end_lon'], x['end_lat'])) for geom in self.geoms]),
axis=1).values
# is greater than the min distance
is_gt_min_dist = p.apply(lambda x: vincenty((x['start_lat'], x['start_lon']),(x['end_lat'], x['end_lon'])).meters
> CONFIG.getint('geovalidity', 'MIN_DIST_BTW_PTS'),
axis=1).values
# If trips need to be discarded, log in in the failed trips table
if any(~is_within) or any(~is_gt_min_dist):
p['failure_cause'] = ''
p['datetime_created'] = datetime.utcnow()
error_msg = 'Not within region of geo-validity ({}). '
p.loc[~is_within, 'failure_cause'] += error_msg.format(CONFIG.get('geovalidity', 'VALID_REGION_WKT'))
error_msg = 'Start and end point of MoT segment too close (within {} meters of each-other). '
p.loc[~is_gt_min_dist, 'failure_cause'] += error_msg.format(CONFIG.getint('geovalidity', 'MIN_DIST_BTW_PTS'))
col_list = ['mot_segment_id', 'failure_cause', 'datetime_created', 'bound_from_id', 'bound_to_id']
failed_trips = p.loc[p['failure_cause'] != '', col_list]
update_postgres(failed_trips, 'train_trips_failed', DB)
mask = np.logical_and(is_within, is_gt_min_dist)
loc_bounds = p.loc[mask, ['mot_segment_id', 'bound_from_id', 'bound_to_id']]
return p.loc[mask, 'mot_segment_id'].tolist(), loc_bounds
|
# class Coordinates(object):
# def __init__(self, x, y):
# self.x = x
# self.y = y
# def __str__(self, x, y):
# return '<' + str(self.x) + '+' + str(self.y) + '>'
# point1 = Coordinates(5,5)
# point2 = Coordinates(6,6)
# origin = Coordinates(0,0)
# print(origin)
class Dog:
    """A pet dog with a name and an age."""

    def __init__(self, name, age):
        """Remember the dog's name and age."""
        self.name = name
        self.age = age

    def printName(self):
        """Print this dog's name to stdout."""
        print(self.name)

    def bark(self):
        """Print a warning message that names this dog."""
        warning = 'Get the hack out of here! Or you will deal with ' + str(self.name)
        print(warning)
# Demo: create a dog and exercise both methods.
rocky = Dog('Rocky', 4)
rocky.printName()
rocky.bark()
|
#!/usr/bin/python
# This is used as a setup for servers!
from distutils.core import setup
import socket
# Register package metadata with distutils. No py_modules/scripts are
# listed, so this setup() call records metadata only.
setup(name='pykonverse',
      description='chat client/server',
      author='nulltf, originally created by russjr08',
      author_email='jayitinc@jayitstudios.com',
      url='jayitstudios.com',
      )
class default:
    """Fallback connection settings used when the operator provides none."""
    port = 1337
    host = "localhost"
# Seed the module-level connection settings from the defaults;
# getServInfo() below prompts the operator for real values.
host = default.host
port = default.port
def getServInfo(host, port):
    """Prompt the operator for server connection info.

    The hostname is always taken from the local machine; the port is read
    from stdin, falling back to ``default.port`` on a blank answer.

    :returns: ``(host, port)`` tuple with ``port`` as an int.
    """
    host = socket.gethostname()
    answer = raw_input('Enter Port (default=1337): ')
    # raw_input() returns '' (never None) on a blank line, so the original
    # `port == None` test could never apply the default. It also left the
    # port as a string, which crashed the '%i' format in writeConfig().
    if not answer:
        port = default.port
    else:
        port = int(answer)
    return host, port
getServInfo(host, port)
def writeConfig(host, port):
ans = raw_input("Write settings to config? [Y\N]: ")
if ("y" in ans):
with open('server.cfg', 'w') as cfg:
cfg.write('port %i\r\n'%port + 'host %s\r\n'%host)
print('Writing to config!')
elif ("n" in ans):
print("\r\n***You will have to manually input the given server info for every time you run the server.***")
writeConfig(host, port) |
def add_to_dict(name, bid):
    """Record one bidder's bid in the module-level auction_dict.

    A later bid under the same name overwrites the earlier one.
    (Stray trailing comma removed from the original parameter list.)
    """
    auction_dict[name] = bid
auction_finished = False
auction_dict = {}


def _clear_screen():
    """Best-effort 'screen clear' between bidders so bids stay secret.

    The original called an undefined clear(), which raised NameError.
    """
    print("\n" * 40)


while not auction_finished:
    print("Welcome to the secret auction program\n")
    name_input = input('What is your name? ').lower()
    bid_input = int(input('What is your bid? $'))
    decision_input = input('Are there any other bidders? Type "yes" or "no" ').lower()
    add_to_dict(name_input, bid_input)
    # (removed the print(auction_dict) debug line -- it revealed every
    # "secret" bid to the next bidder)
    if decision_input == 'yes':
        _clear_screen()
    else:
        _clear_screen()
        # Original wrote `auction_finished == False`, a no-op comparison
        # that left the loop running forever.
        auction_finished = True
        # Original reset `winner = 0` inside the loop and compared a bid
        # against a name; pick the highest bidder directly instead.
        winner = max(auction_dict, key=auction_dict.get)
        print(f'The winner is {winner} with a bid of ${auction_dict[winner]}')
|
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from pants.backend.terraform.goals.deploy import DeployTerraformFieldSet
from pants.backend.terraform.testutil import rule_runner_with_auto_approve, standard_deployment
from pants.core.goals.deploy import Deploy, DeployProcess
from pants.engine.internals.native_engine import Address
from pants.testutil.rule_runner import RuleRunner, mock_console
# Re-export the shared pytest fixtures under the local names the tests below use.
rule_runner = rule_runner_with_auto_approve
standard_deployment = standard_deployment
def test_run_terraform_deploy(rule_runner: RuleRunner, standard_deployment, tmpdir) -> None:
    """Test end-to-end running of a deployment."""
    rule_runner.write_files(standard_deployment.files)
    # "yes" on stdin answers Terraform's interactive apply confirmation.
    with mock_console(rule_runner.options_bootstrapper, stdin_content="yes") as (_, m):
        result = rule_runner.run_goal_rule(
            Deploy, args=["src/tf:stg", *rule_runner.options_bootstrapper.args]
        )
        # assert Pants thinks we succeeded
        assert result.stdout.splitlines() == []
        assert "✓ src/tf:stg deployed" in result.stderr.splitlines()
        # assert Terraform did things
        with open(standard_deployment.state_file) as raw_state_file:
            raw_state = raw_state_file.read()
        assert raw_state, "Terraform state file not found where expected."
        state = json.loads(raw_state)
        assert len(state["resources"]) == 1, "Resource not found in terraform state"
def test_deploy_terraform_forwards_args(rule_runner: RuleRunner, standard_deployment) -> None:
    """The terraform invocation carries -chdir, -var-file and passthrough args."""
    rule_runner.write_files(standard_deployment.files)
    target = rule_runner.get_target(Address("src/tf", target_name="stg"))
    field_set = DeployTerraformFieldSet.create(target)
    deploy_process = rule_runner.request(DeployProcess, [field_set])
    assert deploy_process.process
    argv = deploy_process.process.process.argv
    assert "-chdir=src/tf" in argv, "Did not find expected -chdir"
    assert "-var-file=stg.tfvars" in argv, "Did not find expected -var-file"
    assert "-auto-approve" in argv, "Did not find expected passthrough args"
    # assert standard_deployment.state_file.check()
def test_deploy_terraform_with_module(rule_runner: RuleRunner) -> None:
    """Test that we can deploy a root module with a nearby shared module."""
    files = {
        "src/tf/root/BUILD": """terraform_deployment(root_module=":mod")\nterraform_module(name="mod")""",
        "src/tf/root/main.tf": """module "mod0" { source = "../mod0" }""",
        "src/tf/mod0/BUILD": """terraform_module()""",
        "src/tf/mod0/main.tf": """resource "null_resource" "dep" {}""",
    }
    rule_runner.write_files(files)
    # "yes" on stdin answers Terraform's interactive apply confirmation.
    with mock_console(rule_runner.options_bootstrapper, stdin_content="yes") as (_, m):
        result = rule_runner.run_goal_rule(
            Deploy, args=["src/tf::", *rule_runner.options_bootstrapper.args]
        )
        # assert Pants thinks we succeeded
        assert result.stdout.splitlines() == []
        # assert deployment succeeded
        assert "✓ src/tf/root:root deployed" in result.stderr.splitlines()
        # assert module was not deployed
        assert not any("src/tf/mod0" in line for line in result.stderr.splitlines())
|
__author__ = "Narwhale"
def loop_merge_sort(l1, l2):
    """Merge two already-sorted lists into one sorted list.

    Runs in O(len(l1) + len(l2)). The original popped from the front of
    each list (`pop(0)` is O(n), making the merge quadratic) and emptied
    both inputs as a side effect; this version leaves them untouched.

    :param l1: first sorted list
    :param l2: second sorted list
    :return: new sorted list containing all elements of both inputs
    """
    merged = []
    i = j = 0
    while i < len(l1) and j < len(l2):
        if l1[i] < l2[j]:
            merged.append(l1[i])
            i += 1
        else:
            merged.append(l2[j])
            j += 1
    # At most one of these slices is non-empty.
    merged.extend(l1[i:])
    merged.extend(l2[j:])
    return merged
# Demo: merge two pre-sorted lists and print the result.
l1 = [1,3,5,7,9]
l2 = [2,4,6,8,10,14,15,78]
l = loop_merge_sort(l1,l2)
print(l)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
class KinematicsDataAdjuster:
    """Cleaning/normalisation pipeline for one kinematics export.

    Wraps a pandas DataFrame; every method below mutates or replaces
    ``self.data_frame`` in place and is intended to be called in the
    order used by the script further down.
    """

    def __init__(self, data_frame):
        # The raw export as read from CSV.
        self.data_frame = data_frame

    def column_adjuster(self, keep_cols, new_column_names):
        """Keep only `keep_cols` and rename them via the `new_column_names` mapping."""
        self.data_frame = self.data_frame[keep_cols]
        self.data_frame = self.data_frame.rename(columns = new_column_names)

    def key_data_adder(self, key_data):
        """Reduce 'RH.index' to its first three characters (the animal id),
        cast it to int64 and merge in the per-animal `key_data` table."""
        self.data_frame['RH.index'] = self.data_frame['RH.index'].map(lambda rh_index: rh_index[:3])
        self.data_frame['RH.index'] = self.data_frame['RH.index'].astype('int64')
        self.data_frame = self.data_frame.merge(key_data, on='RH.index')

    def measure_adjuster(self, adjust_variable, measure='displacement'):
        """Scale `adjust_variable` by measure/min(measure); day-3 rows keep factor 1."""
        self.data_frame['adjustor_column'] = self.data_frame[measure].map(lambda value: value/min(self.data_frame[measure]))
        self.data_frame.loc[self.data_frame['day']==3, 'adjustor_column'] = 1
        self.data_frame[adjust_variable+'_adjust'] = self.data_frame[adjust_variable]*self.data_frame['adjustor_column']

    def indexator(self, variable, denominator):
        """Index `variable` against a day-3 baseline, per animal ('day3mean')
        or against the global minimum ('day3min')."""
        dictionary = dict(list(self.data_frame.groupby('RH.index')))
        keys = dictionary.keys()
        def normalizer(dict, dict_key):
            # NOTE(review): the parameter name `dict` shadows the builtin.
            if(denominator=='day3mean'):
                divisor = dict[dict_key].loc[dict[dict_key]['day'] == 3, variable].mean()
            elif(denominator=='day3min'):
                # NOTE(review): this is the minimum over ALL animals, not the
                # per-animal day-3 minimum the name suggests -- confirm intent.
                divisor = self.data_frame.min()[variable]
            dict[dict_key][variable] = dict[dict_key][variable] / divisor
            return dict[dict_key]
        self.data_frame = pd.concat(list(map(lambda key: normalizer(dictionary, key), keys)), axis=0)

    def column_cleanup(self, keep_columns):
        """Drop every column not listed in `keep_columns`."""
        self.data_frame = self.data_frame[keep_columns]

    def aggregate_per_animal(self):
        """Return per-animal, per-day medians (one row per animal/day/group)."""
        return self.data_frame.groupby(['RH.index', 'day', 'group'], as_index=False).median()

    def summary(self):
        """Return per-day, per-group means across animals."""
        return self.data_frame.groupby(['day', 'group'], as_index=False).mean()
#0. Importing key data (RH.index, group, force and displacment)
animal_key = pd.read_csv('animal_key_kinematics.csv')
#1. Managing ILIAC CREST HEIGHT
#A. Importing data, creating variables
# NOTE(review): the side-view export is UTF-16 encoded; the bottom-view one is not.
data_set_iliac = pd.read_csv('merge_side_view.csv', encoding='utf-16')
keep_columns_iliac = ['RH.index', 'day', 'Dist [cm].1', 'Dist [cm].2', 'Dist [cm].3', 'Dist [cm].4', 'Dist [cm].5']
new_column_names_iliac = {'Dist [cm].1':'iliac_crest_height', 'Dist [cm].2':'iliac_crest_2_trochanter_major', 'Dist [cm].3':'trochanter_major_2_knee',
                          'Dist [cm].4':'knee_2_ankle', 'Dist [cm].5':'ankle_2_toe'}
#B. Creating instance, calling methods
instance_iliac = KinematicsDataAdjuster(data_set_iliac)
instance_iliac.column_adjuster(keep_columns_iliac, new_column_names_iliac)
instance_iliac.key_data_adder(animal_key)
instance_iliac.measure_adjuster('iliac_crest_height')
instance_iliac.indexator('iliac_crest_height_adjust', 'day3min')
instance_iliac.column_cleanup(['RH.index', 'day', 'group', 'iliac_crest_height_adjust'])
data_aggregate_iliac = instance_iliac.aggregate_per_animal()
data_summary_iliac = instance_iliac.summary()
#2. Managing INTER KNEE DISTANCE
#A. Importing data, creating variables
data_set_knee = pd.read_csv('merge_bottom_view.csv')
keep_columns_knee = ['Dist [cm].1', 'RH.index', 'day']
new_column_names_knee = {'Dist [cm].1': 'inter_knee_distance'}
#B. Creating instance, calling methods
instance_knee = KinematicsDataAdjuster(data_set_knee)
instance_knee.column_adjuster(keep_columns_knee, new_column_names_knee)
instance_knee.key_data_adder(animal_key)
instance_knee.measure_adjuster('inter_knee_distance')
instance_knee.indexator('inter_knee_distance_adjust', 'day3mean')
instance_knee.column_cleanup(['RH.index', 'day', 'group', 'inter_knee_distance_adjust'])
data_aggregate_knee = instance_knee.aggregate_per_animal()
data_summary_knee = instance_knee.summary()
#3. Defining color palette for plotting
# One color per study group, drawn from the ends of two diverging palettes.
palette_BrBG = pd.DataFrame(list(sns.color_palette("BrBG", 7)))
palette_RdBu_r = pd.DataFrame(list(sns.color_palette("RdBu_r", 7)))
palette_custom_1 = [tuple(palette_BrBG.iloc[0,:]), tuple(palette_RdBu_r.iloc[0,:]), tuple(palette_RdBu_r.iloc[6,:])]
def group_2_color(argument):
    '''Map a study-group label to its plot color (None for unknown labels).'''
    group_colors = {
        'sci': palette_custom_1[0],
        'sci_medium': palette_custom_1[1],
        'sci_msc': palette_custom_1[2]
    }
    return group_colors.get(argument)
# The three cohorts every plot below is drawn for.
study_groups = ['sci', 'sci_medium', 'sci_msc']
#4. Plotting distributions
def distribution_plot(plot_data, x_var, x_label, color_palette, x_limits):
    """Ridge-style KDE plot of `x_var`: one row per day, one hue per group.

    :param plot_data: DataFrame with 'day' and 'group' columns plus `x_var`.
    :param x_var: column to estimate the density of.
    :param x_label: x-axis label for the bottom row.
    :param color_palette: seaborn palette mapping groups to colors.
    :param x_limits: [min, max] pair passed to plt.xlim.
    """
    out_plot = sns.FacetGrid(plot_data, row='day', hue='group', aspect=4, size=1.5, palette=color_palette)
    # Filled KDE plus a white outline on top of it.
    out_plot.map(sns.kdeplot, x_var, clip_on=False, shade=True, alpha=0.5, lw=1.5, bw=0.2, kernel='cos')
    out_plot.map(sns.kdeplot, x_var, clip_on=False, color='w', lw=2, bw=0.2, kernel='cos')
    for row, day in enumerate(['3', '7', '14', '21', '28']):
        out_plot.axes[row, 0].set_ylabel('Day ' + day, size=15, fontweight='bold')
    out_plot.set_titles('')
    out_plot.set(yticks=[])
    out_plot.despine(left=True)
    plt.xlabel(x_label, size=20, fontweight='bold')
    plt.xticks(list(np.arange(0.5, 5, 0.25)))
    plt.xlim(x_limits)
# Render (and optionally save) the per-measure distribution plots.
distribution_plot(data_aggregate_iliac, 'iliac_crest_height_adjust', 'Iliac crest height index', palette_custom_1, [0.7,3])
#plt.savefig('distribution_plot_iliac.jpg', dpi=1000)
distribution_plot(data_aggregate_knee, 'inter_knee_distance_adjust', 'Inter knee distance index', palette_custom_1, [0.4,2.5])
#plt.savefig('distribution_plot_knee.jpg', dpi=1000)
#5. Plotting over time
def day_adjuster(dataset):
    """Jitter the 'day' column in place so the groups don't overlap on the
    time axis: 'sci' rows shift +1 day, 'sci_medium' rows shift -1 day."""
    sci_rows = dataset['group'] == 'sci'
    medium_rows = dataset['group'] == 'sci_medium'
    dataset.loc[sci_rows, ['day']] = dataset.loc[sci_rows, ['day']] + 1
    dataset.loc[medium_rows, ['day']] = dataset.loc[medium_rows, ['day']] - 1
# Apply the group-wise day jitter to the technical, per-animal and summary
# frames of both measures before plotting them over time.
datasets_iliac = [instance_iliac.data_frame, data_aggregate_iliac, data_summary_iliac]
datasets_knee = [instance_knee.data_frame, data_aggregate_knee, data_summary_knee]
[day_adjuster(data_set) for data_set in datasets_iliac]
[day_adjuster(data_set) for data_set in datasets_knee]
def overtime_plot(data_technical, data_biological, data_summary, study_group, y_variable,
                  y_label='Iliac crest height index'):
    """Layered scatter/line plot of `y_variable` over time for one study group.

    Three layers with increasing marker size: technical replicates (small,
    faint), per-animal medians (triangles), and group means (pentagons)
    joined by a line.

    :param data_technical: frame of raw technical replicates.
    :param data_biological: per-animal aggregate frame.
    :param data_summary: per-group summary frame.
    :param study_group: group label to filter all three frames on.
    :param y_variable: column to plot on the y axis.
    :param y_label: y-axis label. Defaults to the iliac-crest label the
        original hard-coded -- which mislabelled the knee plots; pass
        'Inter knee distance index' when plotting knee data.
    """
    #Creating plot data
    plot_data_technical = data_technical[data_technical['group']==study_group]
    plot_data_biological = data_biological[data_biological['group']==study_group]
    plot_data_summary = data_summary[data_summary['group']==study_group]
    #Creating plots
    plt.scatter('day', y_variable, data = plot_data_technical, color = group_2_color(study_group), s=50, alpha=0.1)
    plt.scatter('day', y_variable, data = plot_data_biological, color = group_2_color(study_group), s=300,
                alpha=0.4, marker="^")
    plt.scatter('day', y_variable, data=plot_data_summary, color=group_2_color(study_group), s=1000,
                alpha=0.8, marker="p")
    plt.plot('day', y_variable, data=plot_data_summary, color=group_2_color(study_group),alpha=0.8, lw=5)
    #Plot adjust
    sns.despine(left=True)
    plt.xlabel('Day (Post SCI)', size=15, fontweight='bold')
    plt.ylabel(y_label, size=15, fontweight='bold')
    plt.xticks(list(np.arange(0, 49, 7)))
    plt.yticks(list(np.arange(0.75, 3.5, 0.5)))
# Draw the over-time layers for every study group, for both measures.
list(map(lambda group: overtime_plot(instance_iliac.data_frame, data_aggregate_iliac,data_summary_iliac,group, 'iliac_crest_height_adjust'), study_groups))
list(map(lambda group: overtime_plot(instance_knee.data_frame, data_aggregate_knee,data_summary_knee, group, 'inter_knee_distance_adjust'), study_groups))
# A high-level class for easily conducting
# convergence scans.
##################################################
import numpy as np
from . import DREAMIO
from . import runiface
from DREAM.DREAMSettings import DREAMSettings
from DREAM.DREAMOutput import DREAMOutput
class ConvergenceScan:
    """High-level helper for conducting convergence scans over DREAM runs.

    A baseline simulation is run first; each registered scan parameter is
    then varied (by powers of two around its baseline value) and the
    registered output parameters are compared between consecutive runs to
    judge convergence.
    """

    def __init__(self, settings, inparams=None, outparams=None, scanUntilConvergence=False,
                 verbose=True):
        """
        Creates a new ConvergenceScan object with 'settings' representing
        the settings for the baseline scenario.
        :param DREAMSettings settings: Baseline ``DREAMSettings`` object to base all convergence runs on.
        :param list inparams: Either a string (or a list of strings), specifying the set(s) of parameters to scan, or ``None``, which sets no parameters (and they must then be explicitly set using ``addScanParameter()`` later).
        :param list outparams: Either a string (or a list of strings), specifying the set(s) of parameters to measure for convergence. Alternatively, ``None`` clears all output parameters (which must then be explicitly set using ``addOutputParameter()`` later).
        :param bool scanUntilConvergence: If ``True``, does not limit the number of runs to do and updates the value of each parameter until the output parameter changes less than the given tolerance.
        :param bool verbose: If ``True``, prints progress message to stdout when running.
        """
        self.settings = settings
        self.scanParameters = dict()
        self.outputParameters = dict()
        self.result = {}
        self.baselineOutput = None
        self.verbose = verbose
        # Maximum number of iterations if running scan adaptively
        self.NMAX = 10
        # Set scan parameters
        arr = inparams
        if type(inparams) is not list:
            arr = [inparams]
        for inparam in arr:
            # 'hottail'/'runaway' are shorthands that register both the
            # momentum and pitch resolutions of the corresponding grid.
            if inparam == 'hottail':
                self.addScanParameter(name='hottailgrid.pgrid.np', f=_CS_setiHottailNp, baselineValue=settings.hottailgrid.pgrid.np, scanUntilConvergence=scanUntilConvergence)
                self.addScanParameter(name='hottailgrid.xigrid.nxi', f=_CS_setiHottailNxi, baselineValue=settings.hottailgrid.xigrid.nxi, scanUntilConvergence=scanUntilConvergence)
            elif inparam == 'runaway':
                self.addScanParameter(name='runawaygrid.pgrid.np', f=_CS_setiRunawayNp, baselineValue=settings.runawaygrid.pgrid.np, scanUntilConvergence=scanUntilConvergence)
                self.addScanParameter(name='runawaygrid.xigrid.nxi', f=_CS_setiRunawayNxi, baselineValue=settings.runawaygrid.xigrid.nxi, scanUntilConvergence=scanUntilConvergence)
            elif inparam == 'hottailgrid.np':
                self.addScanParameter(name='hottailgrid.pgrid.np', f=_CS_setiHottailNp, baselineValue=settings.hottailgrid.pgrid.np, scanUntilConvergence=scanUntilConvergence)
            elif inparam == 'hottailgrid.nxi':
                self.addScanParameter(name='hottailgrid.xigrid.nxi', f=_CS_setiHottailNxi, baselineValue=settings.hottailgrid.xigrid.nxi, scanUntilConvergence=scanUntilConvergence)
            elif inparam == 'runawaygrid.np':
                self.addScanParameter(name='runawaygrid.pgrid.np', f=_CS_setiRunawayNp, baselineValue=settings.runawaygrid.pgrid.np, scanUntilConvergence=scanUntilConvergence)
            elif inparam == 'runawaygrid.nxi':
                self.addScanParameter(name='runawaygrid.xigrid.nxi', f=_CS_setiRunawayNxi, baselineValue=settings.runawaygrid.xigrid.nxi, scanUntilConvergence=scanUntilConvergence)
            elif inparam == 'nr':
                self.addScanParameter(name='radialgrid.nr', f=_CS_setiNr, baselineValue=settings.radialgrid.nr, scanUntilConvergence=scanUntilConvergence)
            elif inparam == 'nt':
                self.addScanParameter(name='timestep.nt', f=_CS_setiNt, baselineValue=settings.timestep.nt, scanUntilConvergence=scanUntilConvergence)
            elif inparam is None: continue
            else:
                # Fall back to resolving the parameter by its dotted name.
                self.addScanParameter(name=inparam, scanUntilConvergence=scanUntilConvergence)
                #raise ConvergenceScanException("Unrecognized scan parameter set: '{}'.".format(inparam))
        # Set output parameters
        arr = outparams
        if type(outparams) is not list:
            arr = [outparams]
        for outparam in arr:
            if outparam is None: continue
            else:
                self.addOutputParameter(name=outparam)

    def addOutputParameter(self, name: str, f=None, reltol=1e-2):
        """
        Adds an output parameter to check convergence for.
        :param str name: Name of output parameter (used as an identifier, but does not have to correspond to the parameter's actual name in DREAM).
        :param function f: A function which, given a DREAMOutput object, returns a single float value corresponding to this output parameter.
        :param float reltol: Relative tolerance to demand if 'scanUntilConvergence' is ``True`` for any of the scan parameters.
        """
        if f is None:
            # Try to get parameter by name
            f = lambda do : _CS_getoByName(do, name)
        self.outputParameters[name] = {'f': f, 'reltol': reltol}

    def addScanParameter(self, name: str, f=None, baselineValue=None, scanUntilConvergence=False, nvalues=3, startindex=-1):
        """
        Adds an input parameter to scan in.
        :param str name: Name of parameter (used as an identifier, but does not have to correspond to the parameter's actual name in DREAM).
        :param function f: A function which, given an index, a ``DREAMSettings`` object and a baseline value, updates the scan parameter in the settings object. The index can take both positive and negative values, with ``0`` corresponding to the baseline value (negative values thus correspond to *lower resolution* while positive values correspond to *higher resolution*). The function should return a tuple consisting of the modified settings object (which may be the same as the input object) and the value representing the changes made to the ``DREAMSettings`` object (for identification purposes in plots).
        :param baselineValue: Baseline value of the parameter (for passing on to ``f``).
        :param bool scanUntilConvergence: If ``True``, does not limit the number of runs to do and updates the value of each parameter until the output parameter changes less than the given tolerance.
        :param int nvalues: Number of values to scan over. Ignored if ``scanUntilConvergence = True``.
        :param int startindex: First index to run from. Default: ``-1`` (so that runs are ``-1``, ``0``, ``1``, ..., ``nvalues-2``)
        """
        if f is None:
            f = lambda idx, ds, v : _CS_setiByName(idx, ds, v, name)
        if baselineValue is None:
            baselineValue = self._getBaselineValue(name)
        self.scanParameters[name] = {'f': f, 'baseline': baselineValue, 'scanUntilConvergence': scanUntilConvergence, 'nvalues': int(nvalues), 'startindex': int(startindex)}
        self.result[name] = {}

    def _getBaselineValue(self, name):
        """
        Returns the baseline value for the named input parameter.
        """
        obj = _CS_getObjectByName(self.settings, name)
        if np.isscalar(obj):
            return obj
        else:
            raise ConvergenceScanException("Unrecognized type of input parameter '{}': {}.".format(name, type(obj)))

    def getOutputParameters(self):
        """
        Get a dictionary containing details about the output parameters used in the scan.
        :return: A dict which specifies the settings for the output parameters to consider as measures of convergence.
        :rtype: dict
        """
        oparams = {}
        for opname, op in self.outputParameters.items():
            # Only the tolerances are serializable; the extractor functions are not.
            oparams[opname] = {'reltol': op['reltol']}
        return oparams

    def _processOutput(self, index, scanParameter, scanValue, output):
        """
        Process the output of a single simulation.
        index: Index of run.
        scanParameter: Name of scan parameter settings specifying the scan.
        output: DREAMOutput object resulting from the simulation.
        RETURNS False if the relative variation in any output parameter,
        compared to the previous (or next) index, exceeds its relative
        tolerance, or if there is no previous or next scan index.
        Otherwise, returns True (meaning that the parameter is converged).
        """
        sp = self.scanParameters[scanParameter]
        converged = True
        if scanParameter not in self.result:
            self.result[scanParameter] = {}
        for opname, op in self.outputParameters.items():
            f = op['f']
            oval = f(output)
            # Store output value in result
            if opname not in self.result[scanParameter]:
                self.result[scanParameter][opname] = {'index': [], 'scanval': [], 'outval': []}
            if not np.isscalar(scanValue):
                scanValue = scanValue[0]
            self.result[scanParameter][opname]['index'].append(index)
            self.result[scanParameter][opname]['scanval'].append(scanValue)
            self.result[scanParameter][opname]['outval'].append(oval)
            # Check if simulation is converged
            if len(self.result[scanParameter][opname]['outval']) == 1:
                # First run in this scan: nothing to compare against yet.
                converged = False
            else:
                v = self.result[scanParameter][opname]['outval'][-2:]
                reltol = op['reltol']
                # We should really have an absolute tolerance for this...
                cv = False
                Delta = 1
                if v[1] == 0:
                    Delta = np.abs(v[0])
                    cv = (Delta < reltol)
                else:
                    Delta = np.abs(v[0]/v[1] - 1)
                    cv = (Delta < reltol)
                converged = converged and cv
                self._status('Output parameter {} {} converged in scan parameter {} (Delta = {})'.format(opname, 'is' if cv else 'is not', scanParameter, Delta))
        return converged

    def run(self):
        """
        Run the convergence scan.
        """
        self.result = {}
        # Run baseline case
        self._status(':: Running baseline case...')
        self.baselineOutput = runiface.runiface(self.settings, quiet=not self.verbose)
        # Iterate over scan parameters
        for scanParameter, sp in self.scanParameters.items():
            s = sp['startindex']
            if sp['scanUntilConvergence']:
                # Adaptive mode: keep refining until converged or NMAX runs.
                n = self.NMAX + s
                i = s
                converged = False
                while i < n and not converged:
                    output, scanValue = self._runScan(i, scanParameter)
                    converged = self._processOutput(i, scanParameter, scanValue, output)
                    i += 1
            else:
                # Fixed mode: run exactly 'nvalues' cases starting at 'startindex'.
                n = sp['nvalues']
                for i in range(s, n+s):
                    output, scanValue = self._runScan(i, scanParameter)
                    self._processOutput(i, scanParameter, scanValue, output)

    def _runScan(self, index, scanParameter):
        """
        Run an individual DREAM simulation corresponding to index
        'index' in the scan parameter 'scanParameter'.
        index: Index in scan of this simulation.
        scanParameter: Name of scan parameter settings specifying the scan.
        """
        sp = self.scanParameters[scanParameter]
        self._status(':: Scan {} ({}/{}) in parameter {}'.format(index, index-sp['startindex']+1, sp['nvalues'], scanParameter))
        # Skip the baseline case
        if index == 0:
            self._status(':: Skipping baseline case')
            return self.baselineOutput, sp['baseline']
        f = sp['f']
        # Copy DREAMSettings object
        ns = DREAMSettings(self.settings, chain=False)
        # Modify the settings
        ns, scanValue = f(index, ns, sp['baseline'])
        # NOTE(review): unlike the baseline run, this call does not pass
        # quiet=not self.verbose -- confirm whether that is intentional.
        return runiface.runiface(ns), scanValue

    def save(self, filename):
        """
        Saves this convergence scan to an HDF5 file.
        :param str filename: Name of file to save scan results to.
        """
        d = self.todict()
        DREAMIO.SaveDictAsHDF5(filename=filename, data=d)

    def setVerbose(self, verbose=True):
        """
        If verbose is ``True``, the scan will print progress messages to stdout when running.
        :param bool verbose: Value to set verbosity to.
        """
        self.verbose = verbose

    def _status(self, msg):
        """Print a progress message when verbose mode is enabled."""
        if self.verbose:
            print(msg)

    def todict(self):
        """
        Converts the results of this scan to a dictionary object which can easily be saved to file.
        """
        oparams = self.getOutputParameters()
        return {
            'result': self.result,
            'outputParameters': oparams
        }
class ConvergenceScanException(Exception):
    """Raised for invalid parameters or unsupported operations in a convergence scan."""

    def __init__(self, msg):
        # The original called super(Exception, self).__init__(msg), which
        # starts MRO resolution *after* Exception and thus skips
        # Exception.__init__; use the standard zero-argument form instead.
        super().__init__(msg)
def _CS_getObjectByName(do, name, paramType='input'):
lst = []
if '.' in name: lst = name.split('.')
else: lst = [name]
obj = do
for o in lst:
print('o = {}, obj = {}'.format(o, obj))
if o in obj:
obj = obj[o]
else:
raise ConvergenceScanException("Unrecognized {} parameter specified: '{}'.".format(paramType, o))
return obj
def _CS_setObjectByName(ds, name, val, paramType='input'):
    """Set the scalar setting at dotted path *name* on the settings tree *ds*.

    Traverses all but the last path component with ``in``/``[]`` lookups,
    then writes the final attribute.
    :raises ConvergenceScanException: If a path component is missing or the
        target is not a scalar.
    """
    lst = []
    if '.' in name: lst = name.split('.')
    else: lst = [name]
    obj = ds
    for o in lst[:-1]:
        if o in obj:
            obj = obj[o]
        else:
            raise ConvergenceScanException("Unrecognized {} parameter specified: '{}'.".format(paramType, name))
    if not np.isscalar(obj[lst[-1]]):
        raise ConvergenceScanException("The input parameter '{}' is of an unrecognized type: {}.".format(name, type(obj)))
    # NOTE(review): reads go through obj[...] but the write goes through
    # obj.__dict__ -- assumes the settings nodes map item access onto
    # attributes; confirm against the DREAMSettings container protocol.
    obj.__dict__[lst[-1]] = val
# Helper functions for updating resolution parameters
def _CS_setiByName(index: int, settings: DREAMSettings, baseline, name: str):
    """Scale the named scalar setting by 2**index (floored at 1) and apply it."""
    scaled = int(np.round(baseline * np.float_power(2, index)))
    newval = max(1, scaled)
    _CS_setObjectByName(settings, name, newval, 'input')
    return settings, newval
def _CS_setiHottailNp(index: int, settings: DREAMSettings, baseline):
    """Set the hottail grid momentum resolution to baseline*2**index (at least 1)."""
    scaled = int(np.round(baseline * np.float_power(2, index)))
    newval = max(1, scaled)
    settings.hottailgrid.setNp(newval)
    return settings, newval
def _CS_setiHottailNxi(index: int, settings: DREAMSettings, baseline):
    """Set the hottail grid pitch resolution to baseline*2**index (at least 1)."""
    scaled = int(np.round(baseline * np.float_power(2, index)))
    newval = max(1, scaled)
    settings.hottailgrid.setNxi(newval)
    return settings, newval
def _CS_setiRunawayNp(index: int, settings: DREAMSettings, baseline):
    """Set the runaway grid momentum resolution to baseline*2**index (at least 1)."""
    scaled = int(np.round(baseline * np.float_power(2, index)))
    newval = max(1, scaled)
    settings.runawaygrid.setNp(newval)
    return settings, newval
def _CS_setiRunawayNxi(index: int, settings: DREAMSettings, baseline):
    """Set the runaway grid pitch resolution to baseline*2**index (at least 1)."""
    scaled = int(np.round(baseline * np.float_power(2, index)))
    newval = max(1, scaled)
    settings.runawaygrid.setNxi(newval)
    return settings, newval
def _CS_setiNr(index: int, settings: DREAMSettings, baseline):
    """Set the radial grid resolution to baseline*2**index (at least 1)."""
    scaled = int(np.round(baseline * np.float_power(2, index)))
    newval = max(1, scaled)
    settings.radialgrid.setNr(newval)
    return settings, newval
def _CS_setiNt(index: int, settings: DREAMSettings, baseline):
    """Set the number of time steps to baseline*2**index (at least 1)."""
    scaled = int(np.round(baseline * np.float_power(2, index)))
    newval = max(1, scaled)
    settings.timestep.setNt(newval)
    return settings, newval
def _CS_getoByName(do: DREAMOutput, param: str) -> float:
    """Fetch the output parameter at dotted path *param* from *do*.

    Returns the final element along every axis of the stored array. This
    selection makes most sense for time-evolving scalar data, where it
    corresponds to the final value of the scalar parameter.
    """
    data = _CS_getObjectByName(do, param, 'output')[:]
    final_element = (-1,) * data.ndim
    return data[final_element]
|
from keras.preprocessing.image import ImageDataGenerator
import keras.applications as keras_applications
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
import pandas as pd
import sys
from timeit import default_timer as timer
from keras import backend as K
from keras.callbacks import CSVLogger
import json
from keras.callbacks import ModelCheckpoint
from keras.optimizers import RMSprop, Adam, Adadelta
from keras.layers.convolutional import Convolution2D
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from sklearn.model_selection import train_test_split
import os
from keras.utils import plot_model
from keras import optimizers
def create_model():
    """Build and compile the grayscale 200x200 CNN regression model.

    Architecture: one input conv layer, three more 8-filter conv layers,
    dropout, four 16-filter conv layers, dropout, then dense 256 -> 128 -> 1
    with a linear output (single regression target).
    """
    n_filters = 8
    kernel = 5
    img_dim = 200
    model = Sequential()
    # First conv layer carries the input shape (single grayscale channel).
    model.add(Convolution2D(n_filters, kernel, kernel,
                            border_mode='valid',
                            input_shape=(img_dim, img_dim, 1)))
    model.add(Activation('relu'))
    # Three more conv layers at the base filter count.
    for _ in range(3):
        model.add(Convolution2D(n_filters, kernel, kernel))
        model.add(Activation('relu'))
    model.add(Dropout(0.25))
    # Four conv layers with doubled filter count.
    for _ in range(4):
        model.add(Convolution2D(n_filters * 2, kernel, kernel))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    # Single linear unit: regression output.
    model.add(Dense(1))
    model.add(Activation('linear'))
    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    #model.compile(loss='mean_squared_error', optimizer=Adadelta(), metrics=['mean_squared_error'])
    model.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mean_squared_error'])
    return model
# --- experiment configuration --------------------------------------------
start = timer()  # wall-clock reference for the runtime log written at the end
input_shape = (200, 200, 1)
#i=int(sys.argv[1])-1
#classes=int(sys.argv[2])
rootoutput = 'outputs/'
rootdataset = 'dataset/'
expprefix = "customeregression1"
datapath = "rotationtraindata"
#classes_name=["nonsym","H"]
normalizefactor = 1  # or 90
# Derived paths for logs, model checkpoints and the dataset splits.
timerfile = f"{rootoutput}{expprefix}/timer.csv"
outdir = f"{rootoutput}{expprefix}/output/"
checkpoint_dir = f"{rootoutput}{expprefix}/models/"
validation_data_dir = f"{rootdataset}{datapath}/valid"
train_path = f"{rootdataset}{datapath}/"
test_path = f"{rootdataset}{datapath}/test"
# Create the output folders on first run.
for needed_dir in (outdir, checkpoint_dir):
    if not os.path.exists(needed_dir):
        os.makedirs(needed_dir)
img_width, img_height = 200, 200
train_data_dir = train_path
nb_train_samples = 12000  # 14000
nb_validation_samples = 1600
epochs = 20
batch_size = 16
def recall_m(y_true, y_pred):
    """Recall = TP / (TP + FN), computed with backend ops on 0/1-clipped tensors."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon guards against division by zero when there are no positives
    return tp / (possible + K.epsilon())
def precision_m(y_true, y_pred):
    """Precision = TP / (TP + FP), computed with backend ops on 0/1-clipped tensors."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # epsilon guards against division by zero when nothing is predicted positive
    return tp / (predicted + K.epsilon())
def f1_m(y_true, y_pred):
    """F1 score: harmonic mean of precision and recall (epsilon-guarded)."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
# --- build, train and persist the regression model ------------------------
modelname = expprefix
model = create_model()
# Log every epoch to CSV and keep only the checkpoint with the lowest val MSE.
csv_logger = CSVLogger(outdir + modelname + 'log.csv', append=True, separator=';')
checkpoint = ModelCheckpoint(checkpoint_dir + modelname + "_checkpoint.best.hdf5", monitor='val_mean_squared_error', verbose=1, save_best_only=True, mode='min')
# NOTE: this recompile overrides the SGD optimizer set inside create_model();
# Adadelta is what is actually used for training.
model.compile(loss='mean_squared_error', optimizer=Adadelta(), metrics=['mean_squared_error'])
#model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy',f1_m,precision_m, recall_m])
model.summary()
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)
# Load the whole training set as one oversized batch so the regression
# targets can be parsed out of the file names below.
train_generator = train_datagen.flow_from_directory(train_path, classes=[""], target_size=(img_width, img_height), batch_size=10000, class_mode='sparse', color_mode="grayscale")
X, _ = train_generator.next()
# The target (rotation angle) is encoded as the 6th '-'-separated token of
# each file name, e.g. '...-...-...-...-...-42.png' -> 42.
y = [int(f.split("-")[5].replace(".png", "")) / normalizefactor for f in train_generator.filenames]
cv_size = 2000
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=cv_size, random_state=56741)
print("X_train=", len(X_train))
# BUG FIX: the original called model.fit(X, y, ...), training on the FULL
# data set (validation samples included), which leaks the validation split
# into training and makes val_mean_squared_error meaningless.  Train on the
# training split only.
history = model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=epochs, verbose=1, validation_data=(X_valid, y_valid), callbacks=[csv_logger, checkpoint])
model.save_weights(checkpoint_dir + modelname + '_model_saved_weight.h5')
#model.fit_generator(train_generator, verbose=1, steps_per_epoch = nb_train_samples // batch_size, epochs = epochs, validation_data = validation_generator, validation_steps = nb_validation_samples // batch_size, callbacks=[csv_logger,checkpoint])
# Persist the per-epoch metric history for later plotting.
with open(outdir + "/history_" + modelname + '.json', 'w') as f:
    json.dump(history.history, f)
# Append the total runtime in hours to the shared timer log.
with open(timerfile, 'a+') as modified:
    modified.write(modelname + ", " + str(round((timer() - start) / (60 * 60), 2)) + "\n")
|
from db import db
class PlanetImage(db.Model):
    """ORM model for an image attached (optionally) to a task."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Path/URL of the stored image file; required.
    img_ref = db.Column(db.String, nullable=False)
    # Optional free-text caption for the image.
    description = db.Column(db.String, nullable=True)
    # Optional owning task; nullable so images can exist unattached.
    task_id = db.Column(db.BigInteger, db.ForeignKey('task.id'), nullable=True)
    # Convenience accessor for the related Task row.
    task = db.relationship('Task')
|
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Automated daily health check-in for the JLU e-hall portal.
driver = webdriver.Chrome()
# Implicitly wait up to 10 s for elements to appear.
driver.implicitly_wait(10)
# Service-hall portal address.
url = "https://ehall.jlu.edu.cn/jlu_portal/index"
driver.get(url)
# Log in (placeholders: replace with the real username/password).
driver.find_element_by_id("username").send_keys("用户名")
driver.find_element_by_id("password").send_keys("密码")
button1 = driver.find_element_by_id("login-submit")
button1.click()
# After login, open the undergraduate daily health check-in entry.
driver.find_elements_by_class_name("center")[10].click()
# Remember the current window handle so we can detect the popup.
sreach_windows = driver.current_window_handle
# Handles of all windows open at this point.
# NOTE(review): this is captured BEFORE the click below; if that click is
# what opens the new window, its handle will not be in this list - confirm
# the popup is already open here.
all_handles = driver.window_handles
# Click "handle online".
driver.find_element_by_link_text("在线办理").click()
# Switch to the popup window if the browser did not do so automatically.
for handle in all_handles:
    if handle != sreach_windows:
        driver.switch_to.window(handle)
# Fill in the address fields (province / city / county / street).
province = driver.find_element_by_xpath('/html/body/div[4]/form/div/div[3]/div[2]/div[1]/div[1]/div['
                                        '2]/table/tbody/tr[2]/td/div/table/tbody/tr[11]/td[2]/div/div/div/div/input')
city = driver.find_element_by_xpath('/html/body/div[4]/form/div/div[3]/div[2]/div[1]/div[1]/div[2]/table/tbody/tr['
                                    '2]/td/div/table/tbody/tr[11]/td[4]/div/div/div/div/input')
county = driver.find_element_by_xpath('/html/body/div[4]/form/div/div[3]/div[2]/div[1]/div[1]/div[2]/table/tbody/tr['
                                      '2]/td/div/table/tbody/tr[11]/td[6]/div/div/div/div/input')
street = driver.find_element_by_xpath('/html/body/div[4]/form/div/div[3]/div[2]/div[1]/div[1]/div[2]/table/tbody/tr['
                                      '2]/td/div/table/tbody/tr[11]/td[8]/div/input')
province.send_keys(Keys.CONTROL, 'a')
province.send_keys("省")
province.send_keys(Keys.ENTER)
city.send_keys(Keys.CONTROL, 'a')
city.send_keys("市")
city.send_keys(Keys.ENTER)
county.send_keys(Keys.CONTROL, 'a')
county.send_keys("区")
county.send_keys(Keys.ENTER)
street.send_keys(Keys.CONTROL, 'a')
street.send_keys("详细地址")
street.send_keys(Keys.ENTER)
# BUG FIX: is_selected() only *queries* the checked state and discards the
# result - the original never actually selected either radio button.
# Click each one unless it is already selected.
# "Visited a medium/high-risk area in the last 14 days" -> No.
risk_n = driver.find_element_by_id('V1_CTRL43')
if not risk_n.is_selected():
    risk_n.click()
# Body-temperature status -> normal.
temp_status_y = driver.find_element_by_id('V1_CTRL28')
if not temp_status_y.is_selected():
    temp_status_y.click()
# Submit the form.
submit = driver.find_element_by_link_text('提交')
submit.click()
time.sleep(1)
# Confirm the "OK" dialog.
driver.find_element_by_xpath("//*[contains(text(),'好')]/../button ").click()
|
def newfile():
    """Rewrite running-config.cfg into newconfigfile.cfg with renumbered addresses.

    Replacements (applied to every line, preserving the original layout):
      '192.' and '172.' prefixes -> '10.'
      mask 255.255.255.0         -> 255.0.0.0
      mask 255.255.0.0           -> 255.0.0.0

    The transformed text is also echoed to stdout.
    """
    # BUG FIX: the original split the file on *all* whitespace and re-joined
    # with newlines, which put every token on its own line and destroyed the
    # config's structure.  Splitting on lines preserves the layout.
    # Files are now opened via `with` so they are always closed/flushed.
    with open('running-config.cfg') as src:
        lines = src.read().splitlines()
    for i, line in enumerate(lines):
        # NOTE: replacements match anywhere in the line, exactly like the
        # original token-level replaces.
        line = line.replace('192.', '10.')
        line = line.replace('172.', '10.')
        line = line.replace('255.255.255.0', '255.0.0.0')
        line = line.replace('255.255.0.0', '255.0.0.0')
        lines[i] = line
    result = '\n'.join(lines)
    print(result)
    with open('newconfigfile.cfg', 'w+') as dst:
        dst.write(result)
# Run the conversion immediately when the script is executed.
newfile()
|
import sys
from decimal import *
# 25 significant digits so arithmetic on very large inputs stays exact.
getcontext().prec = 25
def eprint(*values, **options):
    """print() drop-in that writes to standard error."""
    print(*values, file=sys.stderr, **options)
class Queue:
    """Multiset of positive integers stored as (value, count) pairs.

    The backing list `l` is kept sorted ascending by value, so pop()
    always yields the current maximum.  Zeros are silently discarded
    on insertion.
    """
    def __init__(self):
        # Sorted list of (value, count) tuples.
        self.l = []
    def put(self, n):
        """Insert one occurrence of n (no-op for n == 0)."""
        return self.put_with_count(n, 1)
    def pop(self):
        """Remove and return the largest value, decrementing its count."""
        value, count = self.l.pop()
        if count > 1:
            self.l.append((value, count - 1))
        return value
    def put_with_count(self, n, c):
        """Insert c occurrences of n, keeping the list sorted (no-op for n == 0)."""
        if n == 0:
            return self
        for i, (value, count) in enumerate(self.l):
            if value == n:
                # Merge with the existing entry.
                self.l[i] = (n, count + c)
                return self
            if value > n:
                # Found the insertion point.
                self.l.insert(i, (n, c))
                return self
        # Largest value so far: goes at the end.
        self.l.append((n, c))
    def pop_with_count(self):
        """Remove and return the largest (value, count) pair."""
        return self.l.pop()
    def empty(self):
        """True when nothing is queued."""
        return not self.l
def cunstruct_result_list(n):
    """Expand n by repeated halving and tally the (hi, lo) split pairs.

    Repeatedly splits every pending value v into its two "halves"
    (v/2 and v/2 - 1 when even, v//2 twice when odd), propagating
    multiplicities through a sorted Queue.  Returns a list of
    [hi, lo, count] triples kept in ascending (hi, lo) order.
    """
    result = []
    q = Queue()
    q.put(n)
    while not q.empty():
        value, count = q.pop_with_count()
        # Split the value into its larger/smaller halves.
        if value % 2 == 0:
            hi, lo = value / 2, value / 2 - 1
        else:
            hi, lo = value // 2, value // 2
        # Both halves are processed later, carrying this value's count.
        q.put_with_count(hi, count)
        q.put_with_count(lo, count)
        # Insert (hi, lo, count) into the sorted result, merging duplicates.
        for i, (r_hi, r_lo, _) in enumerate(result):
            if r_hi == hi and r_lo == lo:
                result[i][2] += count
                break
            elif r_hi > hi or r_lo > lo:
                result.insert(i, [hi, lo, count])
                break
        else:
            result.append([hi, lo, count])
    return result
def solve(n, k):
    """Return the (hi, lo) pair containing the k-th split (largest pairs first)."""
    result = cunstruct_result_list(n)
    seen = 0
    # Walk the pairs from largest to smallest, accumulating counts.
    while result:
        hi, lo, count = result.pop()
        seen += count
        if seen >= k:
            return (hi, lo)
def main():
    """Read T test cases of (n, k) from stdin and print each answer pair."""
    t = int(input())
    for case in range(1, t + 1):
        #eprint(str(case))
        n, k = [Decimal(token) for token in input().split(" ")]
        hi, lo = solve(n, k)
        print("Case #{}: {} {}".format(case, hi, lo))


if __name__ == "__main__":
    main()
|
from . import models
from . import controllers
from . import wizard
from odoo import api, fields, SUPERUSER_ID
def add_book_hook(cr, registry):
    """Post-init hook: seed the library.book model with two demo records."""
    env = api.Environment(cr, SUPERUSER_ID, {})
    demo_books = [
        {
            'name': 'Java Course',
            'pages': 98,
            'date_release': fields.Date.today(),
        },
        {
            'name': 'HTML for dummies',
            'pages': 567,
            'date_release': fields.Date.today(),
        },
    ]
    env['library.book'].create(demo_books)
#! /usr/bin/env python
#coding:utf-8
import sys
import os
import re
import urllib2
import urllib
import requests
import cookielib
import getpass
import json
from bs4 import BeautifulSoup
import socket
# Abort any stalled network request after 100 seconds.
socket.setdefaulttimeout(100.0)
## This block works around Python 2 UnicodeDecodeErrors on Chinese text by
## forcing the process-wide default encoding to UTF-8 (translated note).
reload(sys)
sys.setdefaultencoding("utf8")
#####################################################
# Base URL of the site being scraped.
domain = 'https://leetcode.com/'
class xSpider(object):
def __init__(self):
'''initiation'''
self.name = ''
self.passwprd = ''
self.cj = cookielib.LWPCookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
urllib2.install_opener(self.opener)
def setLoginInfo(self,username,password):
'''set user information'''
self.name = username
self.pwd = password
def preLogin(self):
'''to get csrfmiddlewaretoken'''
req = urllib2.Request('https://leetcode.com/accounts/login/')
req.add_header('Host','leetcode.com')
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
response = urllib2.urlopen(req)
login_page = response.read()
pattern = re.compile('<input.*?csrfmiddlewaretoken.*?/>')
item = re.findall(pattern, login_page)
print 'get csrfmiddlewaretoken success!'
return item[0][item[0].find('value=') + 7 : -4]
def login(self, csrfmiddlewaretoken):
'''login'''
loginurl = 'https://leetcode.com/accounts/login/'
loginparams = {'csrfmiddlewaretoken':csrfmiddlewaretoken,'login':self.name, 'password':self.pwd}
req = urllib2.Request(loginurl, urllib.urlencode(loginparams))
req.add_header('Host','leetcode.com')
req.add_header('Origin','https://leetcode.com')
req.add_header('Referer','https://leetcode.com/accounts/login/')
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
response = urllib2.urlopen(req)
self.operate = self.opener.open(req)
thePage = response.read()
print 'login success!'
return thePage
def getAcceptedQuetionList(self, problem_page):
'''get Accepted Quetion List'''
question_soup = BeautifulSoup(problem_page)
trList = question_soup.find('table', attrs={'id': 'problemList'}).find_all('tr')
trList.pop(0)
acceptedQuetionList = []
for tr in trList:
if tr.span.attrs['class'][0] == 'ac':
acceptedQuetionList.append(tr.a.attrs['href'])
print 'getAcceptedQuetionList success'
return acceptedQuetionList
def getSubmissionId(self, questionName):
'''download each submission question id'''
quesURL = domain + questionName + '/submissions/'
req = urllib2.Request(quesURL)
req.add_header('Host','leetcode.com')
req.add_header('Origin','https://leetcode.com')
req.add_header('Referer','https://leetcode.com/accounts/login/')
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
response = urllib2.urlopen(req)
self.operate = self.opener.open(req)
submissionPage = response.read()
submission_soup = BeautifulSoup(submissionPage)
trList = submission_soup.find('table', attrs={'id': 'result_testcases'}).find_all('tr')
trList.pop(0)
for tr in trList:
if tr.find_next('a').find_next('a').string == 'Accepted':
return tr.find_next('a').find_next('a').attrs['href']
def getCode(self, submissionId):
'''get description and code'''
codeURL = domain + submissionId
req = urllib2.Request(codeURL)
req.add_header('Host','leetcode.com')
req.add_header('Origin','https://leetcode.com')
req.add_header('Referer','https://leetcode.com/accounts/login/')
req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36')
response = urllib2.urlopen(req)
self.operate = self.opener.open(req)
codePage = response.read()
code_soup = BeautifulSoup(codePage)
metaList = code_soup.find('meta', attrs={'name': 'description'})
metap = re.compile('(\r\n)+')
subdsc = metap.sub('\n*', str(metaList)[15:-22])
description = '/**\n*' + subdsc + '\n*/\n'
pattern = re.compile('vm.code.cpp .*?;')
codeStr = str(re.findall(pattern, codePage)[0])
codeReal = codeStr[15:-2]
return description, eval("u'%s'"%codeReal).replace('\r\n','\n')
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'Usage ./lcSpider.py USERNAME'
sys.exit(0)
userSpider = xSpider()
username = sys.argv[1]
password = getpass.getpass('Password:')
userSpider.setLoginInfo(username,password)
csrfmiddlewaretoken = userSpider.preLogin()
questionPage = userSpider.login(csrfmiddlewaretoken)
acceptedQuetionList = userSpider.getAcceptedQuetionList(questionPage)
FileExistNames = os.listdir('./leetcode')
for acceptedQuetion in acceptedQuetionList:
if acceptedQuetion[10:-1] + '.cpp' not in FileExistNames:
print 'get ' + acceptedQuetion[10:-1] +'......'
submissionId = userSpider.getSubmissionId(acceptedQuetion)
description, myCode = userSpider.getCode(submissionId)
codeFile = open('leetcode/' + acceptedQuetion[10:-1] + '.cpp', 'w')
codeFile.write(description)
codeFile.write(myCode)
codeFile.close
print 'get ' + acceptedQuetion[10:-1] + '.cpp success'
|
from sklearn import preprocessing
from clustering import *
import pandas as pd
import numpy as np
import cPickle as pickle
def get_ratings(df):
    """Build an (athlete x cluster-label) activity-count matrix scaled to [0, 5].

    Rows follow the sorted unique athlete_ids, columns the cluster labels;
    each cell counts how many activities that athlete has with that label.
    (Python 2 code: relies on dict.iteritems.)
    """
    n_labels = len(df.label.unique())
    athlete_ids = np.array(sorted(df.athlete_id.unique()))
    n_athletes = len(athlete_ids)
    # (athlete_id, label) -> number of activities carrying that label.
    ath_labels = df.groupby(['athlete_id', 'label']).count()['id'].to_dict()
    ratings = np.zeros((n_athletes, n_labels))
    for k,v in ath_labels.iteritems():
        # Row index = position of the athlete id in the sorted id array;
        # column index = the label itself (labels assumed 0..n_labels-1).
        ratings[np.where(athlete_ids==k[0])[0][0], k[1]] = v
    # scale ratings matrix to between 0-5
    min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0, 5))
    ratings_scaled = min_max_scaler.fit_transform(ratings)
    return ratings_scaled
def get_idx_to_activities_dict(df):
    """Map each cluster label to the array of activity ids carrying that label.

    NOTE(review): groupby(...).agg with a dict ({'Frequency': 'count'}) was
    deprecated in pandas 0.20 and later removed - this only runs on old pandas.
    """
    idx_to_activities = {}
    # label -> Series indexed by activity id, holding per-id counts.
    df_groupby = df.groupby(['label', 'id'])['label'].agg({'Frequency':'count'}).to_dict('series')['Frequency']
    for label in np.sort(df['label'].unique()):
        idx_to_activities[label] = df_groupby[label].index.values
    return idx_to_activities
def fast_similarity(ratings, kind='item', epsilon=1e-9):
    """Cosine-similarity matrix over rows ('user') or columns ('item') of ratings.

    epsilon is a small constant added to the co-occurrence matrix to avoid
    divide-by-zero when a row/column has an all-zero profile.
    """
    if kind == 'user':
        overlap = ratings.dot(ratings.T) + epsilon
    elif kind == 'item':
        overlap = ratings.T.dot(ratings) + epsilon
    # Row vector of L2 norms taken from the Gram matrix diagonal.
    norms = np.sqrt(np.diagonal(overlap))[np.newaxis, :]
    # Normalise by both the row and the column norms.
    return overlap / norms / norms.T
if __name__ == '__main__':
    # Offline pipeline: cluster activities, build item-item similarity
    # matrices for runs and rides, and persist everything for the web app.
    # (Python 2 code: uses cPickle.)
    # load data from clustering.py
    co_runs_df, co_rides_df = load_data()
    # append labels and return clustering models from clustering.py
    co_runs_df, co_rides_df, runs_clusterer, rides_clusterer = get_labels(co_runs_df, co_rides_df)
    # generate ratings matrix's for both runs and rides
    co_runs_ratings = get_ratings(co_runs_df)
    co_rides_ratings = get_ratings(co_rides_df)
    # generate similarity matrix's for both runs and rides
    item_similarity_runs = fast_similarity(co_runs_ratings, kind='item')
    item_similarity_rides = fast_similarity(co_rides_ratings, kind='item')
    # label -> activity-id arrays, needed to translate indices back to rides/runs.
    idx_to_runs = get_idx_to_activities_dict(co_runs_df)
    idx_to_rides = get_idx_to_activities_dict(co_rides_df)
    # Export index file to return runs
    with open('data/runs_mapper.pkl', 'wb') as f:
        pickle.dump(idx_to_runs, f, protocol=pickle.HIGHEST_PROTOCOL)
    # Export index file to return rides
    with open('data/rides_mapper.pkl', 'wb') as f:
        pickle.dump(idx_to_rides, f, protocol=pickle.HIGHEST_PROTOCOL)
    # Export run similarity matrix via numpy save
    with open('data/item_similarity_runs.npy', 'wb') as f:
        np.save(f, item_similarity_runs)
    # Export ride similarity matrix via numpy save
    with open('data/item_similarity_rides.npy', 'wb') as f:
        np.save(f, item_similarity_rides)
    # Export run clustering model via pickle
    with open('data/runs_clusterer.pkl', 'wb') as f:
        pickle.dump(runs_clusterer, f)
    # Export ride clustering model via pickle
    with open('data/rides_clusterer.pkl', 'wb') as f:
        pickle.dump(rides_clusterer, f)
|
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function, absolute_import
import os
from IPython.nbconvert.preprocessors.base import Preprocessor
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class EfernanPreprocessor(Preprocessor):
    """nbconvert preprocessor that removes cells whose
    homeworkReport.showCell metadata equals 'nothing'."""
    def preprocess(self, nb, resources):
        """Drop tagged cells from every worksheet; returns (nb, resources)."""
        print ("Global individual cell metadata:")
        for worksheet in nb.worksheets:
            # Collect the indices of the cells to delete.
            doomed = []
            for index, cell in enumerate(worksheet.cells):
                meta = cell.metadata
                if 'homeworkReport' in meta and 'showCell' in meta['homeworkReport']:
                    if meta['homeworkReport']['showCell'] == 'nothing':
                        doomed.append(index)
            # Delete from the end so earlier indices stay valid.
            for index in reversed(doomed):
                del worksheet.cells[index]
            print('{} cells removed from worksheet.'.format(len(doomed)))
        return nb, resources
|
from django.views.generic import TemplateView
class IndexView(TemplateView):
    """Landing page; exposes the site owner's username to the template."""
    template_name = "index.html"

    def get_context_data(self, **kwargs):
        # FIX: accept and forward **kwargs so URL keyword arguments reach
        # the base context instead of being silently dropped (Django's
        # TemplateView calls get_context_data(**kwargs)).
        ctxt = super().get_context_data(**kwargs)
        ctxt["username"] = "phuoclv"
        return ctxt
class AboutView(TemplateView):
    """Static about page; exposes the skills list to the template."""
    template_name = "about.html"

    def get_context_data(self, **kwargs):
        # FIX: forward **kwargs to super() so URL keyword arguments are
        # included in the base context (they were previously discarded).
        ctxt = super().get_context_data(**kwargs)
        ctxt["skills"] = [
            "Python",
            "Django",
            "Java"
        ]
        return ctxt
import scrapy
import datetime
import re
class ShoesSpider(scrapy.Spider):
    """Scrapes work-boot product cards from wildberries.ru into structured dicts."""
    name = 'Shoes'
    allowed_domains = ['www.wildberries.ru']
    start_urls = ['http://www.wildberries.ru']
    # Number of catalogue pages to walk.
    pages_count = 6
    # Region/store cookies pinning the Moscow storefront.
    cookie = {'__region': '64_75_4_38_30_33_70_1_22_31_66_40_71_69_80_48_68',
              '__store': '119261_122252_122256_117673_122258_122259_121631_122466_122467_122495_122496_122498_122590_122591_122592_123816_123817_123818_123820_123821_123822_124093_124094_124095_124096_124097_124098_124099_124100_124101_124583_124584_125611_116433_6159_507_3158_117501_120762_119400_120602_6158_121709_2737_117986_1699_1733_686_117413_119070_118106_119781',
              '__wbl': 'cityId%3D77%26regionId%3D77%26city%3D%D0%9C%D0%BE%D1%81%D0%BA%D0%B2%D0%B0%26phone%3D84957755505%26latitude%3D55%2C7247%26longitude%3D37%2C7882',
              'ncache': '119261_122252_122256_117673_122258_122259_121631_122466_122467_122495_122496_122498_122590_122591_122592_123816_123817_123818_123820_123821_123822_124093_124094_124095_124096_124097_124098_124099_124100_124101_124583_124584_125611_116433_6159_507_3158_117501_120762_119400_120602_6158_121709_2737_117986_1699_1733_686_117413_119070_118106_119781%3B64_75_4_38_30_33_70_1_22_31_66_40_71_69_80_48_68%3B1.0--%3B12_3_18_15_21%3B%3B0',
              'route': '5a25b90133d5524bd16297bd4f3f280681faf08e'
              }
    def start_requests(self):
        """Queue one request per catalogue page."""
        for page in range(1, 1 + self.pages_count):
            url = f'https://www.wildberries.ru/catalog/muzhchinam/spetsodezhda/rabochaya-obuv?sort=popular&page={page}'
            # BUG FIX: the original referenced self.cookie_msc, an attribute
            # that does not exist (the dict is named `cookie`), raising
            # AttributeError on the first request.
            yield scrapy.Request(url, callback=self.parse_pages, dont_filter=True,
                                 cookies=self.cookie,
                                 meta={'dont_merge_cookies': True})
    def parse_pages(self, response):
        """Follow each product link on a catalogue page."""
        for href in response.xpath("//a[contains(@class, 'ref_goods_n_p')]/@href").extract():
            url = response.urljoin(href)
            # BUG FIX: same self.cookie_msc -> self.cookie rename as above.
            yield scrapy.Request(url, callback=self.parse, dont_filter=True,
                                 cookies=self.cookie,
                                 meta={'dont_merge_cookies': True})
    def parse(self, response, **kwargs):
        """Extract one product card into the result dict."""
        timestamp = datetime.datetime.now().timestamp()
        RPC = response.xpath("//div[@class = 'article']/span/text()").extract_first()
        color = response.xpath("//span[@class = 'color']/text()").extract_first()
        url = response.url
        # Product name is everything before the first '/'; colour is appended.
        title = ''.join(self.remove_empty_strs(response.xpath("//span[@class = 'name ']/text()").extract_first().split('/')[0]))
        if color:
            title = f"{title}, {color}"
        marketing_tags = self.remove_empty_strs(self.clear_list_from_spaces(
            response.xpath("//li[contains(@class,'about-advantages-item')]/text()").extract()))
        brand = response.xpath("//span[@class = 'brand']/text()").extract_first()
        section = self.remove_empty_strs(self.clear_list_from_spaces(
            response.xpath("//span[@class = 'name ']/text()").extract_first().split('/')[1:]))
        current_price = self.get_digits_from_str(response.xpath("//span[@class = 'final-cost']/text()").extract_first())
        # original_price is None for non-discounted items (no <del> element).
        original_price = self.get_digits_from_str(response.xpath("//del[@class = 'c-text-base']/text()").extract_first())
        sale_tag = ""
        if original_price:
            # NOTE(review): this reports current/original*100 ("you pay X%"),
            # not the discount percentage - confirm that is the intent.
            sale_tag = f"Скидка: {round(current_price / original_price * 100)}%"
        # In stock when at least one size is not disabled.
        all_sizes = len(response.xpath("//div[contains(@class, 'size-list')]/label").extract())
        miss_sizes = len(response.xpath("//div[contains(@class, 'size-list')]/label[contains(@class, 'disabled')]").extract())
        in_stock = miss_sizes < all_sizes
        main_image = response.xpath("//img[@class = 'preview-photo j-zoom-preview']/@src").extract_first()
        set_images = response.xpath("//span[@class = 'slider-content']/img/@src").extract()
        view = response.xpath("//span[@class = 'slider-content thumb_3d']/img/@src").extract_first()
        video = ''
        description = response.xpath("//div[contains(@class, 'j-description collapsable-content description-text')]/p/text()").extract_first()
        # Specification table: <b> labels paired positionally with text values.
        keys = response.xpath("//div[contains(@class, 'pp')]/span/b/text()").extract()
        value = self.remove_empty_strs(self.clear_list_from_spaces(
            response.xpath("//div[contains(@class, 'pp')]/span/text()").extract()))
        metadata = {"Артикул": RPC, "Цвет": color}
        metadata.update({keys[i]: value[i] for i in range(len(keys))})
        variants = len(response.xpath("//li[contains(@class, 'color-v1')]/a").extract())
        result = {
            "timestamp": timestamp,
            "RPC": RPC,
            "color": color,
            "url": url,
            "title": title,
            "marketing tags": marketing_tags,
            "brand": brand,
            "section": section,
            "price data": {"current": current_price,
                           "original": original_price,
                           "sale_tag": sale_tag},
            "stock": {"in stock": in_stock,
                      "count": 0},
            "assets": {"main image": main_image,
                       "set images": set_images,
                       "view360": view,
                       "video": video},
            "metadata": {"description": description,
                         "metadata": metadata},
            "variants": variants
        }
        yield result
    def get_digits_from_str(self, string):
        """Extract the numeric part of a price string ('\xa0' separates thousands).

        FIX: returns None for a missing/empty string instead of crashing -
        callers truth-test the result (e.g. `if original_price:`), so a page
        without an original price previously raised AttributeError here.
        """
        if not string:
            return None
        digits = string.replace('\xa0', '')
        digits = float(re.search(r'\d+', digits).group(0))
        return digits
    def clear_list_from_spaces(self, other_list):
        """Strip whitespace and embedded newlines from every string in the list."""
        new_list = []
        [new_list.append(elem.replace('\n', '').strip()) for elem in other_list]
        return new_list
    def remove_empty_strs(self, other_list):
        """Drop falsy (empty) strings from the list."""
        new_list = []
        [new_list.append(string) for string in other_list if string]
        return new_list
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `eust` package."""
import pytest
from click.testing import CliRunner
import tempfile
import shutil
import eust
from eust import cli
@pytest.fixture
def temp_repo(request):
    """Fixture: point eust's data_dir at a fresh temp directory for one test."""
    temp_dir = tempfile.mkdtemp()
    def fin():
        # Teardown: remove the whole temp tree once the test ends.
        shutil.rmtree(temp_dir)
    def temp_dir_conf():
        # Config loader overriding only the data directory.
        return {'data_dir': temp_dir}
    # Insert at index 0 so this loader takes priority, then re-read config.
    eust.conf.loaders.insert(0, temp_dir_conf)
    eust.conf.load()
    request.addfinalizer(fin)
    return temp_dir
def test_nuts_download_and_read(temp_repo):
    """Reading NUTS codes fails before download and is key-type agnostic after."""
    for year in (2006, '2006'):
        with pytest.raises(FileNotFoundError):
            eust.read_nuts_codes(year)
    eust.download_nuts_codes()
    by_int = eust.read_nuts_codes(2006)
    by_str = eust.read_nuts_codes('2006')
    # int and str keys must return the very same table.
    assert by_int.shape == by_str.shape
    assert (by_int.stack() == by_str.stack()).all()
# Small Eurostat table used by the download tests.
_TEST_TABLE_DOWNLOAD = 'educ_thpar'
def test_table_download_and_read(temp_repo):
    """Data/metadata raise before download; afterwards dimensions must agree."""
    for reader in (eust.read_table_data, eust.read_table_metadata):
        with pytest.raises(FileNotFoundError):
            reader(_TEST_TABLE_DOWNLOAD)
    eust.download_table(_TEST_TABLE_DOWNLOAD)
    metadata = eust.read_table_metadata(_TEST_TABLE_DOWNLOAD)
    data = eust.read_table_data(_TEST_TABLE_DOWNLOAD)
    dims_in_metadata = set(metadata['dimensions'].index.unique('dimension'))
    # 'time' legitimately appears in the data without metadata.
    dims_without_metadata = {'time'}
    dims_in_data = set(data.index.names)
    assert dims_in_data <= (dims_in_metadata | dims_without_metadata)
def test_table_read_twice(temp_repo):
    """Two reads must be identical: the first one builds the hdf5 cache
    from the tsv.gz file, the second reads the cache."""
    eust.download_table(_TEST_TABLE_DOWNLOAD)
    first = eust.read_table_data(_TEST_TABLE_DOWNLOAD)
    second = eust.read_table_data(_TEST_TABLE_DOWNLOAD)
    assert first.shape == second.shape
    assert (first.stack() == second.stack()).all()
def test_command_line_interface():
    """The CLI runs cleanly and --help prints usage text."""
    runner = CliRunner()
    assert runner.invoke(cli.main).exit_code == 0
    help_result = runner.invoke(cli.main, ['--help'])
    assert help_result.exit_code == 0
    assert '--help Show this message and exit.' in help_result.output
|
import numpy as np
from sklearn.cross_validation import LeaveOneOut, KFold
# Toy demonstration of scikit-learn's pre-0.18 CV iterators (Python 2 code:
# bare print statements).  NOTE(review): sklearn.cross_validation was removed
# in sklearn 0.20; the modern equivalents live in sklearn.model_selection.
X = np.array([[0., 0.], [1., 1.], [-1., -1.], [2., 2.]])
Y = np.array([0, 1, 0, 1])
# One fold per sample: train on n-1 points, test on the remaining one.
loo = LeaveOneOut(len(Y))
print "Leave-One-Out indices"
for train, test in loo:
    print("%s %s" % (train, test))
# Two folds of equal size over the 4 samples.
kf = KFold(len(Y), n_folds=2)
print "Kfold indices"
for train, test in kf:
    print("%s %s" % (train, test))
|
###############################################################################
# В інтервалі від 1 до 10 визначити числа
# • парні, які діляться на 2,
# • непарні, які діляться на 3,
# • числа, які не діляться на 2 та 3.
# For each number 1..10 report divisibility by 2, by 3, and by neither.
for number in range(1, 11):
    if number % 2 == 0:
        print(f"{number} ділиться націло на 2")
    if number % 3 == 0:
        print(f"{number} ділиться націло на 3")
    if number % 2 != 0 and number % 3 != 0:
        print(f"{number} не ділиться націло на 2 і 3")
###############################################################################
# Напишіть скрипт, який перевіряє логін, який вводить користувач.
# Якщо логін вірний (First), то привітайте користувача.
# Якщо ні, то виведіть повідомлення про помилку.
# (використайте цикл while)
# Re-prompt until the correct login ("First") is entered, then greet.
login_name = input("Your login: ")
while login_name != "First":
    login_name = input("Incorrect. Try again: ")
print("Success!")
###############################################################################
# Перший випадок.
# Написати програму, яка буде зчитувати числа поки не зустріне від’ємне число.
# При появі від’ємного числа програма зупиняється
# (якщо зустрічається 0 програма теж зупиняється).
# Read numbers until a negative one appears.
# NOTE(review): the task statement above says a 0 should also stop the
# program, but the inner loop below instead keeps prompting until a non-zero
# value arrives - confirm which behaviour is actually intended.
user_number = (input("Введіть число: "))
while int(user_number) >= 0:
    print (f"{user_number} - додатнє число. Введіть інше число.")
    user_number = (input("Інше число: "))
    # Zero is not accepted: ask for a smaller value until it changes.
    while int(user_number) == 0:
        user_number = input("Ще трішки менше: ")
else:
    # while-else: runs when the loop condition turns false (negative input).
    print("Ви ввели від'ємне число")
###############################################################################
# Створити список цілих чисел, які вводяться з терміналу
# та визначити серед них максимальне та мінімальне число.
# Collect user-supplied integers into a list, then report its extremes.
some_list = []
amount_of_numbers = int(input("How much numbers will be in your list?: "))
for _ in range(amount_of_numbers):
    some_list.append(int(input("Value: ")))
print(f"Maximal number in your list is: {max(some_list)}")
print(f"Minamal number in your list is: {min(some_list)}")
###############################################################################
###############################################################################
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .ligne import Ligne
# Line items of French tax form 2044 (revenus fonciers).  The repeated line
# number 250 is intentional: those four entries all feed the same box.
L211_loyer_brut = Ligne(211, "loyer brut")
L221_frais_administration = Ligne(221, "frais d'administration")
L222_autre_frais_gestion = Ligne(222, "autre frais de gestion")
L223_prime_assurance = Ligne(223, "Prime d'assurance")
L224_travaux = Ligne(224, "Travaux reparation, entretien, amelioration")
L227_taxe_fonciere = Ligne(227, "Taxe fonciere")
L229_copropriete_provision = Ligne(229, "copropriete: provision pour charge")
L230_copropriete_regularisation = Ligne(230, "copropriete: regularisation des provision pour charges")
L250_interet_emprunt = Ligne(250, "interet d'emprunt")
L250_assurance_emprunteur = Ligne(250, "assurance emprunteur")
L250_frais_dossier = Ligne(250, "Frais de dossier")
L250_frais_garantie = Ligne(250, "Frais de garantie")
class Annexe_2044:
    '''Accumulator for form 2044 line items; computes the form's totals.

    https://www.corrigetonimpot.fr/impot-location-vide-appartement-proprietaire-calcul/
    210 Recettes (Titre)
    211 Loyer brut (annee concernee, arriere et percu d'avance)
    215 Total des recettes (211 a 214)
    220 Frais et charges (Titre)
    221 Frais d'administration  222 Autres frais de gestion
    223 Prime d'assurance (PNO) 224 Travaux
    227 Taxe fonciere           229/230 Copropriete provisions/regularisation
    240 Total frais et charges: 221 a 229 - 230
    250 Interet d'emprunt, assurance emprunteur, frais dossier, frais garantie
    261 Revenu foncier taxable: 215-240-250
    '''
    def __init__(self, database):
        # Source of tax rates (prelevement_sociaux_taux).
        self._database = database
        # Recorded entries: {'type': Ligne, 'valeur': amount}.
        self._lignes = []
    def add_ligne(self, type_, valeur):
        """Record one amount against a form line."""
        self._lignes.append({'type': type_, 'valeur': valeur})
    def get_ligne(self, lignes):
        """Sum of every recorded amount whose line is in *lignes* (one or a list)."""
        wanted = lignes if isinstance(lignes, list) else [lignes]
        return sum(entry['valeur'] for entry in self._lignes if entry['type'] in wanted)
    @property
    def total_recettes(self):
        '''Ligne 215 = 211 a 214'''
        return self.get_ligne(L211_loyer_brut)
    @property
    def total_frais_et_charges(self):
        '''Ligne 240 = (221..229) - 230'''
        charges = self.get_ligne([L221_frais_administration,
                                  L222_autre_frais_gestion,
                                  L223_prime_assurance,
                                  L224_travaux,
                                  L227_taxe_fonciere,
                                  L229_copropriete_provision])
        return charges - self.get_ligne(L230_copropriete_regularisation)
    @property
    def total_charges_emprunt(self):
        '''Ligne 250: every loan-related charge.'''
        return self.get_ligne([L250_interet_emprunt, L250_assurance_emprunteur,
                               L250_frais_dossier, L250_frais_garantie])
    @property
    def total_charges_taux(self):
        """Fraction of gross receipts eaten by charges."""
        return 1 - (self.revenu_foncier_taxable / self.total_recettes)
    @property
    def revenu_foncier_taxable(self):
        '''Ligne 260 = 215 - 240 - 250'''
        return self.total_recettes - self.total_frais_et_charges - self.total_charges_emprunt
    @property
    def prelevement_sociaux(self):
        """Social levies owed on the taxable rental income."""
        return self.revenu_foncier_taxable * self._database.prelevement_sociaux_taux
|
from __future__ import print_function
import sys
from os.path import join as oj
import torch
import torch.nn as nn
import torch.optim as optim
# pytorch stuff
import torch.utils.data as data
import torchvision.models as tv_models
from torch.autograd import Variable
# set up data
sys.path.insert(1, oj(sys.path[0], '..')) # insert parent path
import data
# transformations = transforms.Compose([transforms.Scale(32), transforms.Normalize(0, 1)])
dset = data.GlaucomaDataset('data')
print('data len', len(dset), 'im shape', dset[0][0].shape)
net = tv_models.alexnet() # alexnet.alexnet()
train_loader = torch.utils.data.DataLoader(dset,
batch_size=12,
shuffle=True,
num_workers=4
# pin_memory=True # CUDA only
)
# set up training params
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# train
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels = data
# print(inputs.shape)
# wrap them in Variable
inputs, labels = Variable(inputs), Variable(labels)
inputs = inputs.type(torch.ByteTensor)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.data[0]
if i % 100 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
|
# -*- coding: utf-8 -*-
# Reads m, n, s from the first stdin line, then m data lines.
# Starting at the s-th line (1-based), prints every n-th value that has
# not been printed before; a repeated value gives its slot back so the
# next new value is printed instead.
cin=input().strip().split(' ')
m,n,s=map(int,cin)
user={}  # values already printed (dict used as a set)
cnt=0    # eligible-line counter once the window at s opens
for i in range(m):
    cin=input().strip()
    if i >= s-1:
        if cnt%n ==0:
            if cin not in user:
                print(cin)
                user[cin]=cin
            else:
                cnt-=1  # duplicate: roll back so the slot is reused
        cnt += 1
if not user:
    print("Keep going...")
# Advent of Code day 7: find the root(s) of the program tower.
# progdict maps each program name -> its parent (None when no parent known).
with open("day7input.txt", "r") as f:
    input_data = f.read()

progdict = {}
for line in input_data.split("\n"):
    if not line.strip():
        continue  # robustness: a trailing blank line crashed line.split()[0]
    print(line)
    arrow = line.find('>')
    if arrow > 0:
        # "name (weight) -> a, b, c": record this parent for each child.
        children = line[arrow + 2:].split(', ')
        print(children)
        parent = line[:arrow - 2].split()[0]
        if parent not in progdict:
            progdict[parent] = None
        for child in children:
            progdict[child] = parent
    else:
        # Leaf line "name (weight)": register with no parent yet.
        child = line.split()[0]
        if child not in progdict:
            progdict[child] = None

print(progdict)
# BUG FIX: compare with None using 'is', not '=='.
print([k for k, v in progdict.items() if v is None])
class AStarPlanner(object):
    """Placeholder A* planner: currently returns just [start, goal]."""

    def __init__(self, planning_env, visualize):
        self.planning_env = planning_env
        self.visualize = visualize
        self.nodes = dict()

    def Plan(self, start_config, goal_config):
        # TODO: Implement A* here. The final return value should be a
        # k x n numpy array: k waypoints in an n-dimensional C-space.
        return [start_config, goal_config]
|
# huff_functions.py
# Contains functions for building huffman trees
import heapq as hq
import bitstring as bs
# Build forest of nodes of form (frequency, index, left child, right child)
def build_forest(freqs):
    """Heapify symbol frequencies into leaf nodes (freq, symbol, None, None)."""
    forest = []
    for symbol, freq in freqs.items():
        hq.heappush(forest, (freq, symbol, None, None))
    return forest
# Builds a huffman tree from a forest (heap)
def buildhufftree(forest):
    """Merge the two lightest heap nodes until a single tree remains.

    Internal nodes get decreasing negative placeholder indices so that
    equal-frequency ties still compare deterministically.
    """
    next_placeholder = -1
    while len(forest) > 1:
        left = hq.heappop(forest)
        right = hq.heappop(forest)
        hq.heappush(forest, (left[0] + right[0], next_placeholder, left, right))
        next_placeholder -= 1
    return forest
def buildhufftree_full(freqs):
    """Convenience wrapper: frequency dict -> single-tree forest."""
    return buildhufftree(build_forest(freqs))
# Builds a huff table from a huffman tree
# NOTE: Change to use bitstrings
# Builds a huff table from a huffman tree
# NOTE: Change to use bitstrings
def buildhufftable(forest):
    """Map every symbol in the tree at forest[0] to its '0'/'1' code string."""
    table = {}
    buildhufftable_rec(forest[0], "", table)
    return table
# Recursive helper method for building huff table
def buildhufftable_rec(node, cur_sequence, huff_table):
    """Depth-first walk: append '0' on the left edge, '1' on the right."""
    left, right = node[2], node[3]
    if not left and not right:
        # Leaf: node[1] holds the symbol.
        huff_table[node[1]] = cur_sequence
        return
    buildhufftable_rec(left, cur_sequence + "0", huff_table)
    buildhufftable_rec(right, cur_sequence + "1", huff_table)
# Gets a list of code lengths from a huffman tree
# Gets a list of code lengths from a huffman tree
def getcodelengths(tree):
    """Map each symbol in the tree at tree[0] to its code length (depth)."""
    lengths = {}
    getcodelengths_rec(tree[0], 0, lengths)
    return lengths
# Recursive helper method for getting code lengths
def getcodelengths_rec(node, cur_length, len_table):
    """Record each leaf's depth; internal nodes recurse one level deeper."""
    left, right = node[2], node[3]
    if not left and not right:
        len_table[node[1]] = cur_length
        return
    getcodelengths_rec(left, cur_length + 1, len_table)
    getcodelengths_rec(right, cur_length + 1, len_table)
# Given an ordered symbol set list and a dictionary of symbol/code length pairs,
# construct a list of code lengths w/same order as symbol set
def lengthslist(symbols, lengths):
    """Return code lengths aligned with *symbols*; 0 for symbols without a code.

    Replaces the manual membership-test/append loop with dict.get in a
    comprehension (one lookup per symbol instead of two).
    """
    return [lengths.get(symbol, 0) for symbol in symbols]
# Given an ordered symbol set list and a list of code lengths,
# constructs a dictionary containing the corresponding canonical huffman code
# with codes stored as bitstrings.
# Algorithm from DEFLATE docs
def makecanonical(symbols, lengths):
    """Build the canonical Huffman code for *symbols* with the given lengths.

    Follows the DEFLATE construction (RFC 1951 section 3.2.2): codes of the
    same length are consecutive integers, assigned in symbol order. Returns
    a dict mapping symbol -> bs.Bits of exactly its code length; symbols
    with length 0 get no code.

    Fixes: stray debug print() calls removed; max length and the zeroed
    count table are now computed with max() and list multiplication.
    """
    max_length = max(lengths, default=0)

    # Count number of codes with each bit length.
    bitlength_counts = [0] * (max_length + 1)
    for length in lengths:
        bitlength_counts[length] += 1
    bitlength_counts[0] = 0  # zero-length entries encode "no code"

    # Numerical value of the smallest code of each bit length.
    code = 0
    next_code = [0]
    for bits in range(1, max_length + 1):
        code = (code + bitlength_counts[bits - 1]) << 1
        next_code.append(code)

    # Assign consecutive code values within each length, in symbol order.
    canon_codes = {}
    for symbol, length in zip(symbols, lengths):
        if length != 0:
            canon_codes[symbol] = next_code[length]
            next_code[length] += 1

    # Pack each integer code into a fixed-width bitstring.
    return {
        symbol: bs.Bits(uint=canon_codes[symbol], length=length)
        for symbol, length in zip(symbols, lengths)
        if symbol in canon_codes
    }
# Given a canonical huffman code, returns a tree reflecting that code
# NOTE: Nodes are of the form (data, left child, right child)
# Given a canonical huffman code, returns a tree reflecting that code
# NOTE: Nodes are of the form (data, left child, right child)
def makecanonicaltree(canonical_code):
    """Rebuild a decode tree from a canonical code table.

    Nodes are mutable lists [symbol, left, right]; internal nodes carry -1.
    A 0-bit descends left (index 1), a 1-bit descends right (index 2).

    Fix: the pointless canonical_code.copy() is gone -- the dict is only
    read, so iterating it directly is safe.
    """
    root = [-1, None, None]
    for symbol, code in canonical_code.items():
        node = root
        for bit in code:
            branch = 2 if bit else 1
            if node[branch] is None:
                node[branch] = [-1, None, None]
            node = node[branch]
        node[0] = symbol  # leaf reached: store the symbol
    return root
|
from lib.multi import Threader
from lib.auth import *
import lib.execption as err
import lib.action as YT
import lib.cli as cli
import threading
import signal
cli.banner()
action = cli.ask_action()  # 'l' = like, 's' = subscribe (see dispatch below)
threader = Threader(cli.ask_threads())
# accounts_path = cli.ask_accounts_file()
accounts_path = 'examples/accounts.txt'
# action_path = cli.ask_action_file()
action_path = 'examples/actions.txt'
# Global success/failure tallies, updated only via counters() under clock.
slogin = 0
flogin = 0
saction = 0
faction = 0
clock = threading.Lock()
lock = threading.Lock()  # NOTE(review): never used below -- dead variable?
botgaurd = Botgaurd()
server = botgaurd.server_start()
def counters(name,value=1):
    """Thread-safely bump one global counter and refresh the status line.

    name is one of 'login-t', 'login-f', 'action-t', 'action-f'.
    """
    global slogin
    global flogin
    global saction
    global faction
    global clock
    # Event name -> module-level counter variable name (mutated via globals()).
    mapping = {
        'login-t': 'slogin',
        'login-f': 'flogin',
        'action-t': 'saction',
        'action-f': 'faction',
    }
    with clock:
        globals()[mapping[name]] += value
        cli.show_status(slogin, flogin, saction, faction)
def youtube_session(email,password):
    """Authenticate one Google account and open a YouTube session.

    Returns the ServiceLogin result on success, -1 on login failure;
    outcomes are tallied via counters().
    """
    try:
        authenticator = GAuth(email, password)
        authenticator.set_botguard_server(server)
        google = authenticator.Glogin()  # NOTE(review): result unused -- side effects only?
        status = authenticator.ServiceLogin('youtube','https://www.youtube.com/signin?app=desktop&next=%2F&hl=en&action_handle_signin=true')
        counters('login-t')
        return status
    except err.LoginFailed:
        counters('login-f')
        return -1
def like_wrapper(email,password,video_id):
    """Like *video_id* with one account; "unauthenticated" on login failure."""
    session = youtube_session(email, password)
    if session == -1:
        cli.debug("Like: [%s:%s]:UNAUTH -> %s:0" %(email,password,video_id) )
        # NOTE(review): youtube_session() already counted this login failure;
        # counting 'login-f' again here double-counts it -- confirm intent.
        counters('login-f')
        return "unauthenticated"
    status = YT.like(video_id, session)
    counters('action-t') if status == 1 else counters('action-f')
    cli.debug("Like: [%s]:LOGGED -> %s:%i" %(email,video_id,status))
def subscribe_wrapper(email,password,channel_id):
    """Subscribe to *channel_id* with one account.

    Mirrors like_wrapper: returns "unauthenticated" when the login fails.
    """
    session = youtube_session(email, password)
    if session == -1:
        cli.debug("Sub: [%s:%s]:UNAUTH -> %s:0" %(email,password,channel_id) )
        # NOTE(review): like_wrapper counts 'login-f' here instead -- the two
        # wrappers should probably tally the same counter; confirm intent.
        counters('action-f')
        # BUG FIX: previously returned "authenticated" for a *failed* login,
        # the opposite of like_wrapper's "unauthenticated" for the same case.
        return "unauthenticated"
    status = YT.subscribe(channel_id, session)
    counters('action-t') if status == 1 else counters('action-f')
    cli.debug("Sub: [%s]:LOGGED -> %s:%i" %(email,channel_id,status))
def on_exit(sig, frame):
    """SIGINT handler: stop the botguard server, then exit cleanly."""
    botgaurd.server_shutdown()
    cli.sys.exit(0)
signal.signal(signal.SIGINT, on_exit)
# Queue one task per (target id, account) pair, then wait for the pool.
for identifier in cli.read_action_file(action_path):
    for credentials in cli.read_acounts_file(accounts_path):
        if action == "l":
            threader.put(like_wrapper,[credentials[0],credentials[1],identifier])
        elif action == "s":
            threader.put(subscribe_wrapper,[credentials[0],credentials[1],identifier])
threader.finish_all()
botgaurd.server_shutdown()
from setuptools import setup
from setuptools import find_packages
# Dependencies installed with the 'hmi' package.
# NOTE(review): lint/doc tools (pep8, yapf, sphinx*) sit in install_requires;
# they would normally belong in an extras_require dev/docs group -- confirm.
requires = [
    'pyramid',
    'asgiref',
    'uvicorn[watchgodreload]',
    'watchdog',
    'pyramid_mako',
    'zope.interface',
    'zope.sqlalchemy',
    'zope.deprecation',
    'SQLAlchemy',
    'transaction',
    'filelock',
    'importlib-metadata',
    'bcrypt',
    'redis',
    'msgpack-python',
    'numpy',
    'pep8',
    'yapf',
    'sphinx',
    'sphinx-js',
    'sphinx-astropy',
    'sphinx_rtd_theme',
]

setup(
    name='hmi',
    version='0.0',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=requires,
)
# coding: utf-8
import itchat
from itchat.content import TEXT
from yabot.monitors.ya import YaMonitor
from yabot.monitors.vote import VoteMonitor
class WechatBot(object):
    """Relays WeChat group text messages to per-chatroom monitor plugins."""

    def __init__(self):
        # chatroom Uin -> list of monitor plugins attached to that room
        self.chatroom_uin_monitors = {}
        # member Uin -> monitors (private chats; not implemented yet)
        self.member_uin_monitors = {}

    @classmethod
    def init(cls):
        """Log in to WeChat (cached session, terminal QR code) and build a bot."""
        itchat.auto_login(hotReload=True, enableCmdQR=True)
        return cls()

    def monitor_chatroom(self, chatroom_uin):
        # Attach the default monitor set to one chatroom.
        self.chatroom_uin_monitors[chatroom_uin] = [
            YaMonitor(chatroom_uin),
            VoteMonitor(),
        ]

    def monitor_member(self, member_id):
        pass

    def reply_chatroom_msg(self, msg):
        """Dispatch one group message to the room's monitors.

        Texts starting with u'整 ' are commands (prefix stripped, routed to
        process_command); everything else goes to process_text. Non-empty
        monitor return values are sent back to the room.
        """
        chatroom_username = msg['FromUserName']
        chatroom_d = itchat.search_chatrooms(userName=chatroom_username)
        if not chatroom_d:
            return
        text = msg['Text'].strip()
        from_nickname = msg['ActualNickName']
        is_command = text.startswith(u'整 ')
        # NOTE(review): itchat.search_chatrooms may return a list; indexing
        # with 'Uin' assumes a dict-like result -- confirm the itchat version.
        chatroom_uin = chatroom_d['Uin']
        if chatroom_uin not in self.chatroom_uin_monitors:
            return
        if is_command:
            text = text[2:]  # drop the two-character u'整 ' prefix
        for monitor in self.chatroom_uin_monitors[chatroom_uin]:
            if is_command:
                ret = monitor.process_command(from_nickname, text)
            else:
                ret = monitor.process_text(from_nickname, text)
            if ret:
                itchat.send_msg(ret, chatroom_username)

    def reply_member_msg(self, msg):
        pass

    def run(self):
        """Register group and private text handlers, then enter itchat's loop."""
        itchat.msg_register([TEXT], isGroupChat=True)(self.reply_chatroom_msg)
        itchat.msg_register([TEXT])(self.reply_member_msg)
        itchat.run()
|
#!/usr/bin/python
# Project Euler 97: last ten digits of the non-Mersenne prime 28433*2^7830457 + 1.
# Three-argument pow() does modular exponentiation in O(log e) multiplies,
# replacing the original 7,830,457-iteration doubling loop (same result).
ans = 28433 * pow(2, 7830457, 10000000000) % 10000000000 + 1
print(ans)
|
a = [1, 2, 3]
b = [4, 5, 6]
# Unpacking concatenates the two lists, same as a + b.
print([*a, *b])
|
import torch
import torch.nn as nn
class PropConv(nn.Module):
def __init__(self,
in_features,
out_features=1,
K=10,
bias=False):
super().__init__()
assert out_features == 1, "'out_features' must be 1"
self.in_features = in_features
self.out_features = out_features
self.w = nn.Linear(in_features, out_features, bias=bias)
self.K = K
def reset_parameters(self):
self.w.reset_parameters()
def forward(self, x, adj):
propagations = [x]
for _ in range(self.K):
x = adj.mm(x)
propagations.append(x)
h = torch.stack(propagations, dim=1)
retain_score = self.w(h).permute(0, 2, 1).contiguous()
out = (retain_score @ h).squeeze(1)
return out
def __repr__(self):
return f"{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})"
|
from spack import *
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../common'))
from scrampackage import write_scram_toolfile
class HeppdtToolfile(Package):
    """Spack package that emits the SCRAM toolfile describing HepPDT."""

    # Dummy payload: spack requires a fetchable url even for toolfile-only packages.
    url = 'file://' + os.path.dirname(__file__) + '/../../common/junk.xml'
    version('1.0', '68841b7dcbd130afd7d236afe8fd5b949f017615', expand=False)
    depends_on('heppdt')

    def install(self,spec,prefix):
        """Render heppdt.xml from the installed heppdt spec's version/prefix."""
        # Template substitutions; '$$' below escapes a literal '$' so SCRAM
        # sees $HEPPDT_BASE etc. after string.Template expansion.
        values = {}
        values['VER'] = spec['heppdt'].version
        values['PFX'] = spec['heppdt'].prefix
        fname = 'heppdt.xml'
        contents = str("""
<tool name="heppdt" version="${VER}">
<lib name="HepPDT"/>
<lib name="HepPID"/>
<client>
<environment name="HEPPDT_BASE" default="${PFX}"/>
<environment name="LIBDIR" default="$$HEPPDT_BASE/lib"/>
<environment name="INCLUDE" default="$$HEPPDT_BASE/include"/>
</client>
<runtime name="HEPPDT_PARAM_PATH" value="$$HEPPDT_BASE"/>
<runtime name="ROOT_INCLUDE_PATH" value="$$INCLUDE" type="path"/>
<use name="root_cxxdefaults"/>
<flags SKIP_TOOL_SYMLINKS="1"/>
</tool>
""")
        write_scram_toolfile(contents, values, fname, prefix)
|
# pi-funcs.py
# File containing useful functions
# function to remap one range to another
def remap(value, fromLow, fromHigh, toLow, toHigh):
    """Linearly map *value* from [fromLow, fromHigh] onto [toLow, toHigh]."""
    # Normalize into 0-1 within the source range, then scale to the target.
    normalized = float(value - fromLow) / float(fromHigh - fromLow)
    return toLow + normalized * (toHigh - toLow)
|
import sys
import math
def stripped_lines(filename):
    """Yield each line of *filename* with surrounding whitespace stripped.

    Iterates the file object directly instead of materializing readlines().
    """
    with open(filename) as f:
        for line in f:
            yield line.strip()
def parse_file(filename):
    """Parse the puzzle input: (earliest departure time, bus ids or None for 'x')."""
    lines = list(stripped_lines(filename))  # list(...) over a copy comprehension
    available_time = int(lines[0])
    buses = [None if bus == 'x' else int(bus) for bus in lines[1].split(',')]
    return available_time, buses
def p1(inputs):
    """Part 1: (shortest wait) * (bus id) for the first bus we can catch."""
    available_time, buses = inputs
    min_wait = math.inf
    min_bus = None
    for bus in buses:
        if bus is None:  # BUG FIX: identity comparison, was '== None'
            continue
        # Minutes until this bus's next departure at/after available_time.
        waiting = bus - (available_time % bus)
        if waiting < min_wait:
            min_wait = waiting
            min_bus = bus
    return min_wait * min_bus
def get_max_offset(raw_buses):
    '''
    Return the index of the maximum bus id ('x'/None entries count as 0).
    '''
    ids = [0 if b is None else int(b) for b in raw_buses]  # 'is None', not '=='
    return ids.index(max(ids))
def parse(raw_buses, max_offset):
    '''
    Remove Nones and sort by bus number (decreasing).
    Elements are (bus number, offset relative to max_offset) tuples.
    '''
    # enumerate + 'is not None' replaces the index loop with '== None'.
    buses = [(bus, i - max_offset)
             for i, bus in enumerate(raw_buses)
             if bus is not None]
    buses.sort(reverse=True)
    return buses
def simplify(buses):
    '''
    If the bus id is a multiple of the offset, the offset may as well be
    zero. Fold all zero-offset buses into a single lcm bus at the head.
    '''
    combined = 1
    remaining = []
    for bus, offset in buses:
        if offset == 0 or bus % abs(offset) == 0:
            combined = math.lcm(combined, bus)  # absorb into the head bus
        else:
            remaining.append((bus, offset))
    return [(combined, 0)] + remaining
def p2(inputs):
    """Part 2: earliest timestamp satisfying every bus's offset constraint.

    Strides by the combined zero-offset bus (buses[0]) and checks the
    remaining constraints each step.
    """
    _, raw_buses = inputs
    max_offset = get_max_offset(raw_buses)
    buses = simplify(parse(raw_buses, max_offset))
    step = buses[0][0]
    t = 0
    while any((t + offset) % bus for bus, offset in buses[1:]):
        t += step
    return t - max_offset
def main(args):
    """Solve both parts for the input file given as argv[1]."""
    inputs = parse_file(args[1])
    print(f'part one: {p1(inputs)}')
    print(f'part two: {p2(inputs)}')

# part one: 207
# part two: 530015546283687

if __name__ == '__main__':
    main(sys.argv)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'

# SQL: insert one traceable_object_type row (deleted forced to FALSE) and
# return the full inserted record. $1/$2 are positional parameters --
# presumably executed through asyncpg; confirm against the caller.
create_traceable_object_type_element_query = """
INSERT INTO public.traceable_object_type AS tobt
(name, active, deleted)
VALUES
($1, $2, FALSE) RETURNING *;
"""
|
import pygame
class Wallnut(pygame.sprite.Sprite):
    """Defensive wall-nut plant: soaks zombie bites, cracking as HP drops."""

    def __init__(self):
        super(Wallnut, self).__init__()
        # Base frame; its rect also defines the scaled size of damage images.
        self.image = pygame.image.load('resources/images/wall_nut/WallNut_00.png').convert_alpha()
        # Idle animation frames (13), shown only while at full health.
        self.images = [pygame.image.load('resources/images/wall_nut/WallNut_{:02d}.png'.format(i)).convert_alpha()
                       for i in range(0, 13)]
        # Damage states: intact body, lightly cracked, heavily cracked.
        self.crackedimg = [pygame.transform.smoothscale(
            pygame.image.load('resources/images/wall_nut/Wallnut_body.png').convert_alpha(),
            (self.image.get_rect().width, self.image.get_rect().height)),
            pygame.transform.smoothscale(
                pygame.image.load('resources/images/wall_nut/Wallnut_cracked1.png').convert_alpha(),
                (self.image.get_rect().width, self.image.get_rect().height)),
            pygame.transform.smoothscale(
                pygame.image.load('resources/images/wall_nut/Wallnut_cracked2.png').convert_alpha(),
                (self.image.get_rect().width, self.image.get_rect().height))]
        self.rect = self.images[0].get_rect()
        # Hit points; loses one per living attached zombie per update tick.
        self.energy = 8 * 15
        # Zombies currently gnawing on this wall-nut (attached externally).
        self.zombies = set()

    def update(self, *args, **kwargs) -> None:
        # One point of damage per living attached zombie this tick.
        for zombie in self.zombies:
            if zombie.isalive == False:
                continue
            self.energy -= 1
        if self.energy <= 0:
            # Destroyed: remove the sprite and let the zombies advance again.
            self.kill()
            for zombie in self.zombies:
                zombie.ismeetwallnut = False
        if self.energy == 8 * 15:
            # Full health: idle animation (args[0] is the frame counter).
            # NOTE(review): animation stops after the first point of damage;
            # if the intact look should persist down to 6*15 HP, this likely
            # wants '>=' -- confirm intended behavior.
            self.image = self.images[args[0] % len(self.images)]
        elif 6 * 15 <= self.energy < 8 * 15:
            self.image = self.crackedimg[0]
        elif 3 * 15 <= self.energy < 6 * 15:
            self.image = self.crackedimg[1]
        else:
            self.image = self.crackedimg[2]
|
from .getTRMM_PF import getTRMM_PF
from .getGPM_PF import getGPM_PF
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
from pycloud.pycloud.mongo import Model, ObjectID
# ###############################################################################################################
# Represents a device authorized into the system.
################################################################################################################
class CloudletCredential(Model):
    """Credentials a device stores for one paired cloudlet (by FQDN)."""

    # Meta class is needed so that minimongo can map this class onto the database.
    class Meta:
        collection = "cloudlet_credentials"
        external = ['_id', 'cloudlet_fqdn', 'encryption_password']
        mapping = {
        }

    def __init__(self, *args, **kwargs):
        """Default both fields to None; Model.__init__ may fill them from kwargs."""
        self.cloudlet_fqdn = None
        self.encryption_password = None
        super(CloudletCredential, self).__init__(*args, **kwargs)

    @staticmethod
    def by_id(internal_id):
        """Locate a credential by its internal DB id; None when the id is invalid."""
        rid = internal_id
        if not isinstance(rid, ObjectID):
            try:
                rid = ObjectID(rid)
            # FIX: narrowed from bare 'except:' so SystemExit/KeyboardInterrupt
            # are no longer swallowed; malformed ids still return None.
            except Exception:
                return None
        return CloudletCredential.find_one({'_id': rid})

    @staticmethod
    def by_cloudlet_fqdn(cloudlet_fqdn):
        """Locate credentials by cloudlet FQDN; None when the lookup fails."""
        try:
            credential = CloudletCredential.find_one({'cloudlet_fqdn': cloudlet_fqdn})
        except Exception:  # FIX: narrowed from bare 'except:'
            return None
        return credential

    @staticmethod
    def find_and_remove(cloudlet_fqdn):
        """Atomically remove and return the matching credentials document."""
        return CloudletCredential.find_and_modify(query={'cloudlet_fqdn': cloudlet_fqdn}, remove=True)
|
#!/usr/bin/env python
"""
from Foundation import *
from ScriptingBridge import *
word = SBApplication.applicationWithBundleIdentifier_('com.microsoft.word')
#word.activate()
#the_count = word.__getattribute__('recentFiles')
#the_count = word.countNumberedItems_numberType_level_('recentFiles', 1, 1)
print the_count
#help(word)
"""
import os

# AppleScript heredoc: count Microsoft Word's recent files via osascript.
the_count = """osascript<<END
tell application "Microsoft Word"
return count of recent file
end tell
END"""

# AppleScript heredoc: report whether Microsoft Word is running.
p_list = """osascript<<END
tell application "System Events"
set processlist to name of every process
end tell
if "Microsoft Word" is in processlist then
return "it's here"
else
return "not here"
end if
END"""

# NOTE(review): os.system returns the exit status, not the script's output;
# the AppleScript result only appears because osascript writes to the tty.
print os.system(p_list)
#print os.system(the_count)
# NOTE(review): this tests the literal source string p_list (which *does*
# contain "Microsoft Word"), not the osascript result, so it always prints
# "false". Capture output with subprocess to get the real answer.
if "Microsoft Word" not in p_list:
    print "true"
else:
    print "false"
|
import sys
import copy
from .utils import con_dimension, cursor
class ConsoleDriver:
    """Double-buffered terminal renderer.

    shadowBuff receives writes for the next frame; renderedBuffer holds the
    last frame pushed to the terminal. Used as a context manager so the
    cursor is hidden on entry and restored on exit.
    """

    def __init__(self, **kwargs):
        self.specialCharacterMode = kwargs.get('specialCharacterMode', False)
        self.ignoreOverflow = kwargs.get('ignoreOverflow', False)
        self.historyLength = kwargs.get('historyLength', 10)
        self.history = []
        self._generate_layout()

    def __enter__(self):
        cursor.hide_cursor()
        cursor.clear()
        return self

    def __exit__(self, *args):
        cursor.show_cursor()

    def _generate_layout(self):
        """Size both cell buffers to the current terminal dimensions."""
        self.conWidth, self.conHeight = con_dimension.get_terminal_size()  # Y = Width, X = Height
        cols = self.conWidth - 1 if self.specialCharacterMode else self.conWidth
        self.shadowBuff = [[' ' for _ in range(cols)] for _ in range(self.conHeight)]
        self.renderedBuffer = copy.deepcopy(self.shadowBuff)
        self.cursorPos = [0, 0]

    @property
    def width(self):
        return self.conWidth

    @property
    def height(self):
        return self.conHeight

    def _con_write(self, data):
        sys.stdout.write(data)

    def _con_move(self, y, x):
        """ANSI absolute cursor positioning escape (row;col)."""
        return "\033[%d;%dH" % (y, x)

    def _is_overflow(self, y, x):
        """True when (y, x) lies outside the buffer."""
        return y >= self.conHeight or x >= self.conWidth

    def optimizedDraw(self):
        """Return escape sequences repainting only the cells that changed.

        BUG FIX: the inner enumerate previously walked self.shadowBuff
        (whole rows) instead of the current row's cells, so each cell was
        compared against a row and the change map was nonsense. The stray
        debug print of the change map is also removed.
        """
        changes = [self._con_move(i, j) + cell
                   for i, row in enumerate(self.shadowBuff)
                   for j, cell in enumerate(row)
                   if self.renderedBuffer[i][j] != cell]
        return "".join(changes)

    def draw(self):
        """Repaint from the top-left, then latch shadow -> rendered."""
        self._con_write(self._con_move(0, 0))
        # NOTE(review): this paints renderedBuffer (the previous frame)
        # before latching, so output lags one draw() behind shadowBuff --
        # confirm whether transform_shadow(self.shadowBuff) was intended.
        self._con_write(self.transform_shadow(self.renderedBuffer))
        #self._con_write(self.optimizedDraw())
        self.renderedBuffer = copy.deepcopy(self.shadowBuff)

    def write_char(self, data, direction='x', diffpos=None, CJK=False):
        """Write one cell at the cursor (or at *diffpos*) and advance.

        direction 'x' advances along the row, anything else down the
        column, wrapping at the buffer edge. CJK double-width glyphs get
        an empty follower cell when advancing horizontally.
        """
        direction = 0 if direction == "x" else 1
        pos = self.cursorPos if diffpos is None else diffpos
        # Advance the shared cursor only when the caller did not pin a position.
        if direction == 1 and diffpos is None:
            if self._is_overflow(*pos):
                self.cursorPos = [0, pos[1] + 1]  # wrap to top of next column
            else:
                self.cursorPos = [pos[0] + 1, pos[1]]
        elif direction == 0 and diffpos is None:
            if self._is_overflow(*pos):
                self.cursorPos = [pos[0] + 1, 0]  # wrap to start of next row
            else:
                self.cursorPos = [pos[0], pos[1] + 1]
        try:
            self.shadowBuff[pos[0]][pos[1]] = data
            if CJK and direction == 0:
                self.write_char('')  # filler cell behind a double-width glyph
        # FIX: narrowed from bare 'except:' (IndexError is the expected case).
        except Exception:
            if not self.ignoreOverflow:
                raise Exception(f'Attempting to write outside of buffer. {pos} -> {[self.conHeight - 1, self.conWidth - 1]}')

    def write_string(self, data, direction='x', CJK=False):
        """Write a string one character at a time from the current cursor."""
        for char in data:
            self.write_char(char, direction, None, CJK)

    def transform_shadow(self, shadow):
        """Flatten a 2-D cell buffer into newline-joined rows."""
        return "\n".join("".join(row) for row in shadow)
|
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
def _numerical_gradient_no_batch(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val
return grad
def numerical_gradient(f, X):
    """Gradient of f at X: single sample when 1-D, row-wise when batched."""
    if X.ndim == 1:
        return _numerical_gradient_no_batch(f, X)
    grad = np.zeros_like(X)
    for idx, row in enumerate(X):
        grad[idx] = _numerical_gradient_no_batch(f, row)
    return grad
def function(x):
    """Sum of squares; summed per row when given a batch."""
    return np.sum(x ** 2) if x.ndim == 1 else np.sum(x ** 2, axis=1)
def tangent_line(f, x):
    """Return t -> d*t + (f(x) - d*x), the tangent of f at x.

    Fix: the leftover debug print of the gradient is removed.
    """
    d = numerical_gradient(f, x)
    y = f(x) - d * x
    return lambda t: d * t + y
if __name__ == '__main__':
    # Sample a 2-D grid and draw the negated gradient field of sum(x**2).
    x0 = np.arange(-3, 3.5, 0.35)
    x1 = np.arange(-3, 3.5, 0.35)
    X, Y = np.meshgrid(x0, x1)
    X = X.flatten()
    Y = Y.flatten()
    # Rows are (x0, x1) points; transpose back so grad[0]/grad[1] are the components.
    grad = numerical_gradient(function, np.array([X, Y]).T).T
    plt.figure()
    # Arrows point downhill (negative gradient), toward the minimum at the origin.
    plt.quiver(X, Y, -grad[0], -grad[1], angles="xy",color="#666666")
    plt.xlim([-3, 3])
    plt.ylim([-3, 3])
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.grid()
    plt.draw()
    plt.show()
|
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import math
'''
REQUIREMENTS:
python3
urllib.request
bs4
math
AUTHOR:
Javo
'''
# Scrape target and paging limits.
target = "https://example.com"
max_groups = 200          # groups requested from the group index page
members_per_page = 10000  # member page size (the 'num' query parameter)
user_agent = "Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11"
def get_members(url, groups, page_count):
    """Collect unique member slugs across *groups* on the target forum.

    Fetches each group's member listing (page size *page_count*) and pulls
    slugs out of 'members/<slug>/' links. Private groups are reported and
    skipped. Returns a sorted, de-duplicated list.
    """
    members = []
    for group in groups:
        members_url = str(url + "/groups/" + group + "/members/")
        print("Fetching: " + members_url)
        req = Request(members_url, headers={'User-Agent': user_agent})
        webpage = urlopen(req).read()
        soup = BeautifulSoup(webpage, "html.parser")
        try:
            forum_members_count = soup.find(id='members').find('span').decode_contents().replace(",", "")
            print("The group has " + str(forum_members_count) + " members")
            members_url_page = members_url + "?mlpage=1&num=" + str(page_count)
            print("Fetching member page: " + members_url_page)
            req = Request(members_url_page, headers={'User-Agent': user_agent})
            webpage = urlopen(req).read()
            soup = BeautifulSoup(webpage, "html.parser")
            for link in soup.find_all('a', href=True):
                href = link["href"]
                i = href.find("members/")
                if i < 0:
                    continue
                slug = href[i + 8:len(href) - 1]  # strip prefix and trailing '/'
                # FIX: 'slug not in members' (was the awkward 'not href in members');
                # skips empty slugs and pager links too.
                if slug != "" and slug.find("?mlpage=") < 0 and slug not in members:
                    members.append(slug)
        # FIX: narrowed from bare 'except:'; AttributeError fires when the
        # '#members' element is absent (private group).
        except Exception:
            print("! '" + group + "' is private group. Need an account to access it.")
    members = list(dict.fromkeys(members))
    members.sort()
    return members
def get_groups(url):
    """Return sorted, de-duplicated group slugs linked from *url*.

    On any fetch/parse failure an error is printed and the (possibly
    empty) list gathered so far is returned.
    """
    groups = []
    try:
        req = Request(url, headers={'User-Agent': user_agent})
        webpage = urlopen(req).read()
        soup = BeautifulSoup(webpage, "html.parser")
        for link in soup.find_all("a", href=True):
            href = link["href"]
            i = href.find("groups/")
            if i < 0:
                continue
            slug = href[i + 7:len(href) - 1]  # strip prefix and trailing '/'
            if slug != "" and slug.find("?grpage=1") < 0:
                groups.append(slug)
        groups = list(dict.fromkeys(groups))
        groups.sort()
    except Exception:  # FIX: narrowed from bare 'except:'
        print("ERROR: Could not connect to the url")
    return groups
# Crawl: enumerate groups from the index page, then harvest their members.
groups = get_groups(target + "/groups/?grpage=1&num=" + str(max_groups))
print("Found " + str(len(groups)) + " groups")
for group in groups:
    print(group)
if len(groups) > 0:
    members = get_members(target, groups, members_per_page)
    print("Total users found " + str(len(members)) + " users")
    for member in members:
        print(member)
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
import sys
from dataclasses import dataclass
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, PANTS_SUCCEEDED_EXIT_CODE, ExitCode
from pants.base.specs import Specs
from pants.base.specs_parser import SpecsParser
from pants.build_graph.build_configuration import BuildConfiguration
from pants.core.util_rules.environments import determine_bootstrap_environment
from pants.engine.env_vars import CompleteEnvironmentVars
from pants.engine.goal import CurrentExecutingGoals
from pants.engine.internals import native_engine
from pants.engine.internals.native_engine import PyExecutor, PySessionCancellationLatch
from pants.engine.internals.scheduler import ExecutionError
from pants.engine.internals.selectors import Params
from pants.engine.internals.session import SessionValues
from pants.engine.streaming_workunit_handler import (
StreamingWorkunitHandler,
WorkunitsCallback,
WorkunitsCallbackFactories,
)
from pants.engine.unions import UnionMembership
from pants.goal.builtin_goal import BuiltinGoal
from pants.goal.run_tracker import RunTracker
from pants.init.engine_initializer import EngineInitializer, GraphScheduler, GraphSession
from pants.init.logging import stdio_destination_use_color
from pants.init.options_initializer import OptionsInitializer
from pants.init.specs_calculator import calculate_specs
from pants.option.global_options import DynamicRemoteOptions, DynamicUIRenderer, GlobalOptions
from pants.option.options import Options
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass
class LocalPantsRunner:
    """Handles a single pants invocation running in the process-local context.

    LocalPantsRunner is used both for single runs of Pants without `pantsd` (where a Scheduler is
    created at the beginning of the run and destroyed at the end), and also for runs of Pants in
    `pantsd` (where a Scheduler is borrowed from `pantsd` creation time, and left running at the
    end).
    """

    options: Options
    options_bootstrapper: OptionsBootstrapper
    # How long `run()` waits for session tail tasks before reporting metrics.
    session_end_tasks_timeout: float
    build_config: BuildConfiguration
    run_tracker: RunTracker
    specs: Specs
    graph_session: GraphSession
    executor: PyExecutor
    union_membership: UnionMembership
    # True when a warmed scheduler was handed to us by the daemon.
    is_pantsd_run: bool
    working_dir: str

    @classmethod
    def create(
        cls,
        env: CompleteEnvironmentVars,
        working_dir: str,
        options_bootstrapper: OptionsBootstrapper,
        options_initializer: OptionsInitializer | None = None,
        scheduler: GraphScheduler | None = None,
        cancellation_latch: PySessionCancellationLatch | None = None,
    ) -> LocalPantsRunner:
        """Creates a new LocalPantsRunner instance by parsing options.

        By the time this method runs, logging will already have been initialized in either
        PantsRunner or DaemonPantsRunner.

        :param env: The environment for this run.
        :param options_bootstrapper: The OptionsBootstrapper instance to reuse.
        :param scheduler: If being called from the daemon, a warmed scheduler to use.
        """
        global_bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
        # Reuse the daemon's executor when a warmed scheduler is provided; otherwise build one.
        executor = (
            scheduler.scheduler.py_executor
            if scheduler
            else GlobalOptions.create_py_executor(global_bootstrap_options)
        )
        options_initializer = options_initializer or OptionsInitializer(
            options_bootstrapper,
            executor,
        )
        build_config = options_initializer.build_config(options_bootstrapper, env)
        union_membership = UnionMembership.from_rules(build_config.union_rules)
        options = options_initializer.options(
            options_bootstrapper, env, build_config, union_membership, raise_=True
        )
        stdio_destination_use_color(options.for_global_scope().colors)

        run_tracker = RunTracker(options_bootstrapper.args, options)
        native_engine.maybe_set_panic_handler()

        # Option values are usually computed lazily on demand, but command line options are
        # eagerly computed for validation.
        with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
            for scope, values in options.scope_to_flags.items():
                if values:
                    # Only compute values if there were any command line options presented.
                    options.for_scope(scope)

        # Verify configs.
        if global_bootstrap_options.verify_config:
            options.verify_configs(options_bootstrapper.config)

        # If we're running with the daemon, we'll be handed a warmed Scheduler, which we use
        # to initialize a session here.
        is_pantsd_run = scheduler is not None
        if scheduler is None:
            # Cold start: resolve dynamic remote-execution options and build the engine graph.
            dynamic_remote_options, _ = DynamicRemoteOptions.from_options(
                options, env, remote_auth_plugin_func=build_config.remote_auth_plugin_func
            )
            bootstrap_options = options.bootstrap_option_values()
            assert bootstrap_options is not None
            scheduler = EngineInitializer.setup_graph(
                bootstrap_options, build_config, dynamic_remote_options, executor
            )

        with options_initializer.handle_unknown_flags(options_bootstrapper, env, raise_=True):
            global_options = options.for_global_scope()
        graph_session = scheduler.new_session(
            run_tracker.run_id,
            dynamic_ui=global_options.dynamic_ui,
            ui_use_prodash=global_options.dynamic_ui_renderer
            == DynamicUIRenderer.experimental_prodash,
            use_colors=global_options.get("colors", True),
            # The session must capture workunits at the most verbose level any consumer needs.
            max_workunit_level=max(
                global_options.streaming_workunits_level,
                global_options.level,
                *(
                    LogLevel[level.upper()]
                    for level in global_options.log_levels_by_target.values()
                ),
            ),
            session_values=SessionValues(
                {
                    OptionsBootstrapper: options_bootstrapper,
                    CompleteEnvironmentVars: env,
                    CurrentExecutingGoals: CurrentExecutingGoals(),
                }
            ),
            cancellation_latch=cancellation_latch,
        )

        specs = calculate_specs(
            options_bootstrapper=options_bootstrapper,
            options=options,
            session=graph_session.scheduler_session,
            working_dir=working_dir,
        )

        return cls(
            options=options,
            options_bootstrapper=options_bootstrapper,
            session_end_tasks_timeout=global_bootstrap_options.session_end_tasks_timeout,
            build_config=build_config,
            run_tracker=run_tracker,
            specs=specs,
            graph_session=graph_session,
            executor=executor,
            union_membership=union_membership,
            is_pantsd_run=is_pantsd_run,
            working_dir=working_dir,
        )

    def _perform_run(self, goals: tuple[str, ...]) -> ExitCode:
        """Run the given goals once, or repeatedly up to `loop_max` times with `--loop`."""
        global_options = self.options.for_global_scope()
        if not global_options.get("loop", False):
            return self._perform_run_body(goals, poll=False)

        iterations = global_options.loop_max
        exit_code = PANTS_SUCCEEDED_EXIT_CODE
        while iterations:
            # NB: We generate a new "run id" per iteration of the loop in order to allow us to
            # observe fresh values for Goals. See notes in `scheduler.rs`.
            self.graph_session.scheduler_session.new_run_id()
            try:
                exit_code = self._perform_run_body(goals, poll=True)
            except ExecutionError as e:
                logger.error(e)
            iterations -= 1

        return exit_code

    def _perform_run_body(self, goals: tuple[str, ...], poll: bool) -> ExitCode:
        """Execute the goal rules once; `poll` enables the `--loop` watch mode."""
        return self.graph_session.run_goal_rules(
            union_membership=self.union_membership,
            goals=goals,
            specs=self.specs,
            poll=poll,
            poll_delay=(0.1 if poll else None),
        )

    def _get_workunits_callbacks(self) -> tuple[WorkunitsCallback, ...]:
        # Load WorkunitsCallbacks by requesting WorkunitsCallbackFactories, and then constructing
        # a per-run instance of each WorkunitsCallback.
        params = Params(
            self.union_membership,
            determine_bootstrap_environment(self.graph_session.scheduler_session),
        )
        (workunits_callback_factories,) = self.graph_session.scheduler_session.product_request(
            WorkunitsCallbackFactories, [params]
        )
        # Factories may decline to produce a callback; filter out the falsy results.
        return tuple(filter(bool, (wcf.callback_factory() for wcf in workunits_callback_factories)))

    def _run_builtin_goal(self, builtin_goal: str) -> ExitCode:
        """Instantiate and run a BuiltinGoal subsystem (e.g. `help`) outside the rule graph."""
        scope_info = self.options.known_scope_to_info[builtin_goal]
        assert scope_info.subsystem_cls
        scoped_options = self.options.for_scope(builtin_goal)
        goal = scope_info.subsystem_cls(scoped_options)
        assert isinstance(goal, BuiltinGoal)
        return goal.run(
            build_config=self.build_config,
            graph_session=self.graph_session,
            options=self.options,
            specs=self.specs,
            union_membership=self.union_membership,
        )

    def _run_inner(self) -> ExitCode:
        """Dispatch to a builtin goal or the requested goals; map failures to exit codes."""
        if self.options.builtin_goal:
            return self._run_builtin_goal(self.options.builtin_goal)

        goals = tuple(self.options.goals)
        if not goals:
            return PANTS_SUCCEEDED_EXIT_CODE

        try:
            return self._perform_run(goals)
        except Exception as e:
            logger.error(e)
            return PANTS_FAILED_EXIT_CODE
        except KeyboardInterrupt:
            # Reachable despite the clause above: KeyboardInterrupt derives from
            # BaseException, not Exception.
            print("Interrupted by user.\n", file=sys.stderr)
            return PANTS_FAILED_EXIT_CODE

    def run(self, start_time: float) -> ExitCode:
        """Execute the full invocation: start run tracking, run goals, flush workunits,
        and (outside pantsd) tear down the executor."""
        spec_parser = SpecsParser(working_dir=self.working_dir)
        specs = []
        for spec_str in self.options.specs:
            spec, is_ignore = spec_parser.parse_spec(spec_str)
            # Ignore specs are recorded with a leading "-" for the run tracker.
            specs.append(f"-{spec}" if is_ignore else str(spec))

        self.run_tracker.start(run_start_time=start_time, specs=specs)
        global_options = self.options.for_global_scope()

        streaming_reporter = StreamingWorkunitHandler(
            self.graph_session.scheduler_session,
            run_tracker=self.run_tracker,
            specs=self.specs,
            options_bootstrapper=self.options_bootstrapper,
            callbacks=self._get_workunits_callbacks(),
            report_interval_seconds=global_options.streaming_workunits_report_interval,
            allow_async_completion=(
                global_options.pantsd and global_options.streaming_workunits_complete_async
            ),
            max_workunit_verbosity=global_options.streaming_workunits_level,
        )
        try:
            with streaming_reporter:
                engine_result = PANTS_FAILED_EXIT_CODE
                try:
                    engine_result = self._run_inner()
                finally:
                    # Let session tail tasks (e.g. async cache writes) finish before
                    # collecting metrics and closing out the run.
                    self.graph_session.scheduler_session.wait_for_tail_tasks(
                        self.session_end_tasks_timeout
                    )
                    metrics = self.graph_session.scheduler_session.metrics()
                    self.run_tracker.set_pantsd_scheduler_metrics(metrics)
                    self.run_tracker.end_run(engine_result)

            return engine_result
        finally:
            if not self.is_pantsd_run:
                # Tear down the executor. See #16105.
                self.executor.shutdown(3)
|
from django.conf.urls import url
from . import views
# URL routes for the shooting-club app, grouped by feature area.
urlpatterns = [
    # Home and authentication
    url(r'^$', views.home, name='home'),
    url(r'^register/$', views.UserFormView.as_view(), name='register'),
    url(r'^login/$', views.login_view, name='login'),
    url(r'^logout/$', views.logout_view, name='logout'),
    # Entries (Eintraege)
    url(r'^eintraege/$', views.eintraege_list, name='eintraege_list'),
    url(r'^eintraege/(?P<pk>\d+)/$', views.eintraege_detail, name='eintraege_detail'),
    url(r'^eintrag/new/$', views.eintrag_new, name='eintrag_new'),
    # Shooters (Schuetzen)
    url(r'^schuetze/$', views.schuetzen_list, name='schuetzen_list'),
    url(r'^schuetze/(?P<pk>\d+)/$', views.schuetzen_detail, name='schuetzen_detail'),
    url(r'^schuetze/new/$', views.schuetze_new, name='schuetze_new'),
    # Teams (Mannschaften)
    url(r'^mannschaft/$', views.mannschaft_list, name='mannschaft_list'),
    url(r'^mannschaft/(?P<pk>\d+)/$', views.mannschaft_detail, name='mannschaft_detail'),
    url(r'^mannschaft/new/$', views.mannschaft_new, name='mannschaft_new'),
    # Payments (Einzahlungen)
    url(r'^einzahlung/$', views.einzahlungen_list, name='einzahlungen_list'),
    url(r'^einzahlung/(?P<pk>\d+)/$', views.einzahlung_detail, name='einzahlung_detail'),
    url(r'^einzahlung/new/$', views.einzahlung_new, name='einzahlung_new'),
    # Misc
    url(r'^email/$', views.email, name='email'),
]
import sqlite3
import unittest
import final_proj_4_14 as proj
class TestTables(unittest.TestCase):
    """Sanity checks on the Restaurants, Events and PostalCodes tables of food_event.db.

    Fixes over the original:
    - the connect error handler caught the undefined name `Error`; the correct
      base class is `sqlite3.Error`;
    - each fetchall() now reads from its own executed cursor result — the
      original re-used the first (already exhausted) result object, so the
      later result lists were always empty.
    """

    def setUp(self):
        # One connection/cursor per test; fail the test cleanly on DB errors.
        try:
            self.conn = sqlite3.connect('food_event.db')
            self.cur = self.conn.cursor()
        except sqlite3.Error as e:
            self.fail(e)

    def tearDown(self):
        self.conn.close()

    def testRestaurant(self):
        sql1 = '''
            SELECT COUNT(*), city
            FROM Restaurants
        '''
        result_lst = self.cur.execute(sql1).fetchall()
        sql2 = '''
            SELECT COUNT(*), state
            FROM Restaurants
        '''
        result_lst2 = self.cur.execute(sql2).fetchall()
        sql3 = '''
            SELECT city
            FROM Restaurants
        '''
        result_lst3 = self.cur.execute(sql3).fetchall()
        self.assertGreater(result_lst[0][0], 50)
        self.assertIs(type(result_lst[0][1]), str)
        # State abbreviations are two characters long.
        self.assertIs(len(result_lst2[0][1]), 2)
        self.assertIn(('San Francisco',), result_lst3)
        self.assertGreater(len(result_lst3), 100)

    def testEvents(self):
        sql1 = '''
            SELECT PostalCode, city, state
            FROM Events
        '''
        result_lst = self.cur.execute(sql1).fetchall()
        self.assertIs(len(result_lst[0][2]), 2)
        self.assertIs(type(result_lst[0][1]), str)
        self.assertGreater(len(result_lst), 100)
        self.assertIs(type(result_lst[0]), tuple)
        self.assertIs(type(result_lst[0][0]), str)

    def testPostalCodes(self):
        sql1 = '''
            SELECT PostalCode
            FROM PostalCodes
        '''
        result_lst = self.cur.execute(sql1).fetchall()
        sql2 = '''
            SELECT Id
            FROM PostalCodes
        '''
        result_lst2 = self.cur.execute(sql2).fetchall()
        self.assertIs(type(result_lst[0][0]), str)
        self.assertIn(('48331',), result_lst)
        self.assertIs(type(result_lst2[0][0]), int)
        self.assertGreater(len(result_lst[0][0]), 0)
        self.assertGreater(len(str(result_lst2[0][0])), 0)
# Only run the suite when executed directly, not when this module is imported.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
#!/usr/bin/env python
# coding: utf-8
# ### Welcome to Hiro's Calculator
#
# This calculator will contain simple function for mathematical operations
# In[4]:
from sklearn.base import BaseEstimator
import sklearn.metrics
import numpy as np
import sklearn.metrics
from sklearn.datasets import fetch_openml
class DummyClassifier(BaseEstimator):
    """Baseline classifier that predicts the negative class for every sample."""

    def fit(self, X, y=None):
        """No-op fit; returns self per the scikit-learn estimator contract
        (the original returned None, which breaks fit-chaining and Pipelines)."""
        return self

    def predict(self, X):
        # Always predict False ("not the target class") for every input row.
        return np.zeros((len(X), 1), dtype=bool)
# Running figure number so each MNIST_PlotDigit call opens a new window.
fig_cnt = 0
def MNIST_PlotDigit(data):
    """Render one 784-pixel MNIST digit as a 28x28 grayscale image."""
    import matplotlib
    import matplotlib.pyplot as plt
    global fig_cnt
    # Give every call its own figure so earlier plots are not overwritten.
    plt.figure(fig_cnt)
    fig_cnt += 1
    pixels = data.reshape(28, 28)
    plt.imshow(pixels, cmap=matplotlib.cm.binary, interpolation="nearest")
    plt.axis("off")
    plt.show()
def MNIST_GetDataSet():
    """Download MNIST from OpenML and return it as (X, y).

    X is flattened to shape (70000, 784) if it arrives as (70000, 28, 28).
    Pixel values are left in [0, 255]; scale with `X / 255.` at the call
    site if needed.
    """
    # Load data from https://www.openml.org/d/554
    # return_X_y takes a boolean (the original passed the int 1).
    X, y = fetch_openml('mnist_784', return_X_y=True)
    print(f"X.shape={X.shape}")
    if X.ndim == 3:
        # Flatten (n, 28, 28) images into (n, 784) feature vectors.
        print("reshaping X..")
        assert y.ndim == 1
        X = X.reshape((X.shape[0], X.shape[1] * X.shape[2]))
    assert X.ndim == 2
    print(f"X.shape={X.shape}")
    return (X, y)
|
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
from matplotlib.colors import hsv_to_rgb
from torchvision import transforms
def resize_flow(flow, new_shape):
    """Bilinearly resize a (N, 2, H, W) flow field and rescale its vectors.

    Flow vectors are in pixel units, so after resampling, the u (x) channel is
    scaled by the width ratio and the v (y) channel by the height ratio.
    """
    _, _, old_h, old_w = flow.shape
    target_h, target_w = new_shape
    resized = torch.nn.functional.interpolate(flow, (target_h, target_w),
                                              mode='bilinear', align_corners=True)
    scale_h = old_h / float(target_h)
    scale_w = old_w / float(target_w)
    resized[:, 0] /= scale_w
    resized[:, 1] /= scale_h
    return resized
def flow_to_image(flow):
    """
    Convert a batched optical-flow tensor into a color-coded image.

    :param flow: tensor of shape (N, 2, H, W); only the first sample is rendered
    :return: uint8 H x W x 3 color image
    """
    flow = flow.data[0].cpu().float().numpy().transpose(1, 2, 0)
    u = flow[:, :, 0]
    v = flow[:, :, 1]

    # Zero out "unknown" flow marked with huge sentinel magnitudes.
    UNKNOW_FLOW_THRESHOLD = 1e7
    pr1 = abs(u) > UNKNOW_FLOW_THRESHOLD
    pr2 = abs(v) > UNKNOW_FLOW_THRESHOLD
    idx_unknown = (pr1 | pr2)
    u[idx_unknown] = v[idx_unknown] = 0

    # Normalize by the maximum magnitude. eps belongs inside the divisor:
    # the original computed `u / maxrad + eps`, which divides by zero for an
    # all-zero flow field (canonical Middlebury code uses u / (maxrad + eps)).
    rad = np.sqrt(u ** 2 + v ** 2)
    maxrad = max(-1, np.max(rad))
    u = u / (maxrad + np.finfo(float).eps)
    v = v / (maxrad + np.finfo(float).eps)

    img = compute_color(u, v)

    # Black out the pixels whose flow was unknown.
    idx = np.repeat(idx_unknown[:, :, np.newaxis], 3, axis=2)
    img[idx] = 0

    return np.uint8(img)
def compute_color(u, v):
    """
    compute optical flow color map
    :param u: horizontal optical flow, normalized to roughly [-1, 1]
    :param v: vertical optical flow, normalized to roughly [-1, 1]
    :return: (H, W, 3) float array of RGB values in [0, 255]
    """
    height, width = u.shape
    img = np.zeros((height, width, 3))
    # NaN flow is treated as zero and rendered black at the end.
    NAN_idx = np.isnan(u) | np.isnan(v)
    u[NAN_idx] = v[NAN_idx] = 0
    colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)
    rad = np.sqrt(u ** 2 + v ** 2)
    # Map the flow angle to a fractional position on the color wheel.
    a = np.arctan2(-v, -u) / np.pi
    fk = (a + 1) / 2 * (ncols - 1) + 1
    k0 = np.floor(fk).astype(int)
    k1 = k0 + 1
    k1[k1 == ncols + 1] = 1
    f = fk - k0
    for i in range(0, np.size(colorwheel, 1)):
        tmp = colorwheel[:, i]
        # Linear blend between the two neighboring wheel entries (1-based index).
        col0 = tmp[k0 - 1] / 255
        col1 = tmp[k1 - 1] / 255
        col = (1 - f) * col0 + f * col1
        # Desaturate toward white with decreasing magnitude inside the unit circle;
        # dim out-of-range magnitudes.
        idx = rad <= 1
        col[idx] = 1 - rad[idx] * (1 - col[idx])
        notidx = np.logical_not(idx)
        col[notidx] *= 0.75
        img[:, :, i] = np.uint8(np.floor(255 * col * (1 - NAN_idx)))
    return img
def make_color_wheel():
    """
    Generate the Middlebury color wheel used for optical-flow visualization.
    :return: (ncols, 3) array of RGB values in [0, 255]
    """
    # Number of steps between each pair of adjacent anchor colors.
    RY, YG, GC, CB, BM, MR = 15, 6, 4, 11, 13, 6
    ncols = RY + YG + GC + CB + BM + MR
    wheel = np.zeros([ncols, 3])

    # Each segment holds one channel at 255 while another ramps up (+1) or down (-1).
    segments = [
        (RY, 0, 1, +1),  # red -> yellow: green ramps up
        (YG, 1, 0, -1),  # yellow -> green: red ramps down
        (GC, 1, 2, +1),  # green -> cyan: blue ramps up
        (CB, 2, 1, -1),  # cyan -> blue: green ramps down
        (BM, 2, 0, +1),  # blue -> magenta: red ramps up
        (MR, 0, 2, -1),  # magenta -> red: blue ramps down
    ]
    row = 0
    for length, const_ch, ramp_ch, direction in segments:
        wheel[row:row + length, const_ch] = 255
        ramp = np.floor(255 * np.arange(0, length) / length)
        wheel[row:row + length, ramp_ch] = ramp if direction > 0 else 255 - ramp
        row += length
    return wheel
"""
def flow_to_image(flow, max_flow=None):
flow = flow.data[0].cpu().float().numpy().transpose(1,2,0)
if max_flow is not None:
max_flow = max(max_flow, 1.)
else:
max_flow = np.max(flow)
n = 8
u, v = flow[:, :, 0], flow[:, :, 1]
mag = np.sqrt(np.square(u) + np.square(v))
angle = np.arctan2(v, u)
im_h = np.mod(angle / (2 * np.pi) + 1, 1)
im_s = np.clip(mag * n / max_flow, a_min=0, a_max=1)
im_v = np.clip(n - im_s, a_min=0, a_max=1)
im = hsv_to_rgb(np.stack([im_h, im_s, im_v], 2))
return (im * 255).astype(np.uint8)
"""
def denormalize(tensor, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    """Undo per-channel normalization IN PLACE (t = t * std + mean) and return the tensor.

    Defaults are the standard ImageNet channel statistics.
    """
    for channel, channel_mean, channel_std in zip(tensor, mean, std):
        channel.mul_(channel_std).add_(channel_mean)
    return tensor
def tensor2im(input_image, imtype=np.uint8, normalize=False):
    """"Converts a Tensor array into a numpy image array.

    Parameters:
        input_image (tensor) --  the input image tensor array
        imtype (type)        --  the desired type of the converted numpy array
        normalize (bool)     --  if True, undo ImageNet normalization for 3-channel tensors

    NOTE(review): only CUDA tensor types are converted; a CPU tensor falls into
    the `else` branch and is returned unchanged — confirm this is intended.
    """
    if not isinstance(input_image, np.ndarray):
        if isinstance(input_image, torch.cuda.FloatTensor):  # get the data from a variable
            image_tensor = input_image.data
        elif isinstance(input_image, torch.cuda.LongTensor):
            image_tensor = input_image.data
            # presumably a fixed-point scale factor for integer (e.g. disparity)
            # tensors — TODO confirm the meaning of the 1/16 divisor.
            image_tensor = image_tensor.float() / 16
        else:
            return input_image
        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
        if image_numpy.shape[0] == 1:  # grayscale to RGB
            image_numpy = np.tile(image_numpy, (3, 1, 1))
        if image_tensor[0].size(0) == 3 and normalize:  # denormalize RGB
            image_numpy = denormalize(image_tensor[0])
            image_numpy = image_numpy.cpu().float().numpy()
        image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0  # post-processing: tranpose & scaling
    else:  # if it is a numpy array, do nothing
        image_numpy = input_image
    return image_numpy.astype(imtype)
def diagnose_network(net, name='network'):
    """Print the mean of the average absolute gradients of a network.

    Parameters:
        net (torch network) -- Torch network
        name (str)          -- label printed above the statistic
    """
    total = 0.0
    n_params = 0
    # Average |grad| per parameter tensor, skipping parameters without gradients.
    for param in net.parameters():
        if param.grad is not None:
            total += torch.mean(torch.abs(param.grad.data))
            n_params += 1
    if n_params > 0:
        total = total / n_params
    print(name)
    print(total)
def save_image(image_numpy, image_path, aspect_ratio=1.0, ori_h=None, ori_w=None):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array (H x W x C)
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- stretch one axis when != 1.0
        ori_h, ori_w (int)        -- if both given, final resize to (ori_w, ori_h)
    """
    image_pil = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape

    # NOTE(review): PIL's resize takes (width, height), but (h, ...) is passed
    # first here — looks like the axes may be swapped for non-square images;
    # confirm against upstream usage before changing.
    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    if aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    if (ori_h is not None) and (ori_w is not None):
        image_pil = image_pil.resize((ori_w, ori_h), Image.BILINEAR)
    image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
    """Print summary statistics and/or the shape of a numpy array.

    Parameters:
        val (bool) -- print mean/min/max/median/std of the values
        shp (bool) -- print the shape of the array
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat)))
def mkdirs(paths):
    """Create every directory in *paths* if it does not already exist.

    Parameters:
        paths (str list) -- a list of directory paths; a single str is also accepted
    """
    # A lone string is treated as one path, not iterated character by character.
    if isinstance(paths, str) or not isinstance(paths, list):
        mkdir(paths)
    else:
        for path in paths:
            mkdir(path)
def mkdir(path):
    """Create a single directory (and any missing parents) if it does not exist.

    Uses exist_ok=True, which is atomic with respect to concurrent creators —
    the original `if not os.path.exists(path): os.makedirs(path)` pattern can
    race and raise if another process creates the directory in between.

    Parameters:
        path (str) -- a single directory path
    """
    os.makedirs(path, exist_ok=True)
|
import requests
from time import sleep
import datetime
import hashlib
import hmac
import json
import requests
# Bitkub REST API endpoint and credentials.
# NOTE(review): credentials are hard-coded in source; rotate them and load
# from environment variables or a config file before sharing this script.
API_HOST = 'https://api.bitkub.com'
API_KEY = '8a0b8f4649d9c8c4858181ee286c6714'
API_SECRET = b'c4e815924288b34c32d942a4cb29a624'
# Symbols to watch (only THB_ADA is used below).
mycoins = ['THB_ADA']
def checkPrice():
    """Fetch the latest THB_ADA price from the public Bitkub ticker.

    Returns a 'SYMBOL: price' string suitable for a notification message.
    """
    response = requests.get(API_HOST + '/api/market/ticker')
    result = response.json()
    sym = 'THB_ADA'
    # `last` is the most recent traded price for the symbol.
    last = result[sym]['last']
    return sym + ': ' + str(last)
def lineNotify(message):
    """Send *message* through LINE Notify and return the HTTP response."""
    return _lineNotify({'message': message})
def _lineNotify(payload, file=None):
    """POST *payload* (and an optional file) to the LINE Notify API."""
    url = 'https://notify-api.line.me/api/notify'
    # NOTE(review): the access token is hard-coded; rotate it and load it from
    # an environment variable instead of committing it to source control.
    token = 'ceE0ixKWTLi74RRj8mDgEIMkv6wIVXJd2QngytIruJU'
    headers = {'Authorization' : 'Bearer ' + token}
    return requests.post(url, headers=headers, data = payload, files=file)
# Poll the price and push a LINE notification once per hour.
# Guarded so importing this module no longer blocks forever.
if __name__ == '__main__':
    while True:
        lineNotify(checkPrice())
        sleep(60*60)
|
from flask import Flask, render_template
from flask_wtf import FlaskForm
from wtforms import StringField, Form, BooleanField, PasswordField, validators
from wtforms.validators import DataRequired
# Flask application instance for this module.
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the landing page."""
    return render_template("index.html")
@app.route('/login')
def login():
    """Serve the login page.

    NOTE(review): the field definitions below are created as local variables
    and immediately discarded — they are never attached to a Form class nor
    passed to the template, so they currently have no effect. They likely
    belong in a FlaskForm subclass; confirm against login.html.
    """
    username = StringField('Username', [validators.Length(min=4, max=25)])
    email = StringField('Email Address', [validators.Length(min=6, max=35)])
    password = PasswordField('New Password', [
        validators.DataRequired(),
        validators.EqualTo('confirm', message='Passwords must match')
    ])
    return render_template("login.html")
if __name__ == "__main__":
    # Development server only; do not use debug=True in production.
    app.run(debug=True)
|
# Generated by Django 2.1.7 on 2019-04-20 11:11
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the `accessories` and `footware` models.

    dependencies = [
        ('amazon', '0012_auto_20190420_1634'),
    ]

    operations = [
        migrations.DeleteModel(
            name='accessories',
        ),
        migrations.DeleteModel(
            name='footware',
        ),
    ]
|
from selenium import webdriver
import unittest
import json
class APITest(unittest.TestCase):
    """Browser-level smoke tests for the JSON and browsable API roots."""

    def setUp(self):
        self.browser = webdriver.Firefox()

    def tearDown(self):
        self.browser.quit()

    def is_correct_json(self, string):
        """
        Check if the string is a well formed json
        """
        try:
            json.loads(string)
        except ValueError:
            return False
        return True

    def test_can_access_api_root_json(self):
        # A client check out the api,
        self.browser.get('http://localhost:8000/api?format=json')
        # then check if the response is JSON.
        # (named body_text — the original bound this to `json`, shadowing the
        # json module within the method scope)
        body_text = self.browser.find_element_by_tag_name('pre').text
        self.assertTrue(self.is_correct_json(body_text))
        needed_content = [
            'launchsite',
            'operationalstatus',
            'orbitalstatus',
            'catalogentry',
            'tle',
            'datasource',
        ]
        # and if the correct data is available
        for element in needed_content:
            self.assertIn('api/%s/?format=json' % element, body_text)

    def test_can_access_api_root_browsable(self):
        # A user checks out the browsable API
        self.browser.get('http://localhost:8000/api?format=api')
        # then check the page's title
        self.assertIn('Api Root', self.browser.title)
        # and if the content is html
        self.assertIn('<body', self.browser.page_source)
        stripped = ''.join(self.browser.page_source.split())
        self.assertTrue(stripped.endswith('</html>'))
if __name__ == '__main__':
    # Suppress ResourceWarnings from the webdriver sockets.
    unittest.main(warnings='ignore')
|
import os
import pickle
import numpy as np
from keras import callbacks
from keras import optimizers
from keras import regularizers
from keras.utils import np_utils
from keras import layers
from keras import models
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import LearningRateScheduler
import tools
# Locate the pickled training set next to this script.
dir_path = os.path.dirname(os.path.abspath(__file__))
train_data_path = os.path.join(dir_path, "train_data")
# The pickle file holds two consecutive objects: the images, then the labels.
with open(train_data_path, 'rb') as f:
    train_data = pickle.load(f)
    train_label = pickle.load(f)
train_data = tools.reshape(train_data)
print('train data shape:', train_data.shape)
print(train_data.shape[0], 'train samples')
# Preprocessing order matters: cast, scale to [0, 1], then standardize
# per channel with the training-set statistics.
train_data = train_data.astype('float32')
train_data /= 255.0
cifar_mean = train_data.mean(axis=(0, 1, 2), keepdims=True)
cifar_std = train_data.std(axis=(0, 1, 2), keepdims=True)
print("Mean:", cifar_mean)
print("Std:", cifar_std)
train_data = (train_data - cifar_mean) / (cifar_std + 1e-8)
########### TRAIN ############
# Output artifacts and training hyper-parameters (CIFAR-100 style: 100 classes).
model_file = "./trial_6_model.h5"
result_filename = "trial_6_results.csv"
np.random.seed(2017)
batch_size = 128 # batch size
num_classes = 100 # number of classes
epochs = 200 # epoch size
def schedule(epoch):
    """Step learning-rate schedule for SGD.

    Returns 0.1 for epochs <= 60, 0.01 up to 120, 0.001 up to 160, and
    0.0001 afterwards. The original returned None for epoch > 200, which
    makes Keras' LearningRateScheduler raise; the final step now covers all
    remaining epochs.
    """
    if epoch <= 60:
        return 0.1
    if epoch <= 120:
        return 0.01
    if epoch <= 160:
        return 0.001
    return 0.0001
# One-hot encode the integer class labels.
train_label = np_utils.to_categorical(train_label, num_classes)
# Light augmentation: shift up to 4 pixels each way (zero-padded) and mirror.
data_generator = ImageDataGenerator(
    # featurewise_center=False,  # set input mean to 0 over the dataset
    # samplewise_center=False,  # set each sample mean to 0
    # featurewise_std_normalization=False,  # divide inputs by std of the dataset
    # samplewise_std_normalization=False,  # divide each input by its std
    # zca_whitening=False,  # apply ZCA whitening
    # rotation_range=5,  # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=4.0 / 32,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=4.0 / 32,  # randomly shift images vertically (fraction of total height)
    fill_mode='constant',
    cval=0,
    horizontal_flip=True,  # randomly flip images
    # vertical_flip=False
)
# ******************* The VGG 19 Model with Regularization **********************
# VGG-style stack: 5 conv blocks (64-128-256-512-512 filters) with batch norm,
# dropout and 2x2 max pooling, followed by a 512-unit dense head.
weight_decay = 0.0005  # L2 penalty applied to each conv kernel
model = models.Sequential()
# block 1
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay),
                        input_shape=train_data.shape[1:]))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
# block 2
model.add(
    layers.Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(
    layers.Conv2D(128, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
# block 3
model.add(
    layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(
    layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
# model.add(layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
# model.add(layers.BatchNormalization())
# model.add(layers.Dropout(0.4))
model.add(
    layers.Conv2D(256, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
# block 4
model.add(
    layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(
    layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
# model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
# model.add(layers.BatchNormalization())
# model.add(layers.Dropout(0.4))
model.add(
    layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
# block 5
model.add(
    layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(
    layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
# model.add(layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
# model.add(layers.BatchNormalization())
# model.add(layers.Dropout(0.4))
model.add(
    layers.Conv2D(512, (3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
# final block
model.add(layers.Dropout(0.5))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.5))
# model.add(layers.Dense(4096, activation='relu'))
# model.add(layers.BatchNormalization())
# model.add(layers.Dropout(0.5))
model.add(layers.Dense(100, activation='softmax'))
# ********************************************************************************
# Plain SGD with momentum; the learning rate is driven by `schedule` below.
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(lr=0.1, momentum=0.9, decay=0.0, nesterov=False),
              metrics=['accuracy'])
model.summary()
data_generator.fit(train_data)
# lr_reducer = callbacks.ReduceLROnPlateau(verbose=1, monitor='loss', factor=np.sqrt(0.1),
#                                          cooldown=0, patience=10, min_lr=1e-6)
# model_checkpoint = callbacks.ModelCheckpoint(model_file, verbose=1, monitor="acc", save_best_only=True, mode='auto')
learning_rate_scheduler = LearningRateScheduler(schedule, verbose=1)
train_callbacks = [learning_rate_scheduler]
# Train on augmented batches; one step consumes one batch of `batch_size` images.
model.fit_generator(data_generator.flow(train_data, train_label,
                                        batch_size=batch_size),
                    steps_per_epoch=train_data.shape[0] // batch_size,
                    callbacks=train_callbacks,
                    epochs=epochs, verbose=1)
model.save(os.path.join(dir_path, model_file))
# Report accuracy on the (augmentation-free) training set.
prd = model.predict(train_data)
predict_result_idx = np.argmax(prd, axis=1)
label_result_idx = np.argmax(train_label, axis=1)
print("Accuracy on training data:", np.sum(predict_result_idx == label_result_idx) / len(train_data))
########### Test on test data ###################
test_data_path = os.path.join(dir_path, "test_data")
with open(test_data_path, 'rb') as f:
    test_data = pickle.load(f)
test_data = tools.reshape(test_data)
print(test_data.shape, 'test samples')
# Apply the SAME preprocessing as the training data, in the same order:
# cast, scale to [0, 1], THEN standardize with the training-set statistics.
# (The original standardized the raw 0-255 values first and divided by 255
# afterwards, so test inputs did not match the training distribution.)
test_data = test_data.astype('float32')
test_data /= 255.0
test_data = (test_data - cifar_mean) / (cifar_std + 1e-8)
print("Predicting test data!!!")
prd = model.predict(test_data)
predict_result_idx = np.argmax(prd, axis=1)
# Write one "id,label" row per test sample; `with` guarantees the file closes.
with open(os.path.join(dir_path, result_filename), "w") as csv_out:
    csv_out.write("ids,labels\n")
    for i in range(test_data.shape[0]):
        csv_out.write("%d,%d\n" % (i, predict_result_idx[i]))
print("CSV Saved!!!")
|
import logging
from os import path
from utils import send_mail, setup_logging
from uploaders import UploaderFuse, UploaderScp, UploaderSshFs
from backuper import Backuper
from filedeleter import filedeleter
import configparser
def worker(backuper, configuration, uploader=None):
    """Run one backup: back up, optionally upload and prune files, then mail
    the log lines produced during this run.

    Parameters:
        backuper      -- object exposing backup() -> path of the produced archive
        configuration -- ConfigParser with [Mail] and [FileDeleter] sections
        uploader      -- optional object exposing upload(path)
    """
    lines_start = 0
    mail_recipients = configuration["Mail"].get("Recipients", None)
    # ConfigParser sections expose getboolean(); the original called the
    # nonexistent .getbool() and raised AttributeError.
    delete_files = configuration["FileDeleter"].getboolean("DeleteFiles", False)
    deleter_expr = configuration["FileDeleter"].get("Expr", None)
    if mail_recipients:
        # Remember the current log length so only this run's lines get mailed.
        # NOTE(review): LOG_FILE is expected to be defined at module level
        # (not visible in this chunk) — confirm.
        with open(LOG_FILE) as logfile:
            lines_start = sum(1 for line in logfile)
    result_path = backuper.backup()
    if uploader:
        uploader.upload(result_path)
    if delete_files:
        filedeleter.delete(deleter_expr, path.dirname(result_path), False)
    if not mail_recipients:
        return
    with open(LOG_FILE) as log_file:
        lines = log_file.readlines()
    # The original did `message += lines[lines_start:]`, concatenating a list
    # onto a str (TypeError); join the new log lines into one message instead.
    message = "".join(lines[lines_start:])
    send_mail(mail_recipients, message)
def main():
    """Read ./config.ini and run a backup worker for every [db-*] section."""
    cnf = configparser.ConfigParser()
    # read() takes a filename; the original passed the path string to
    # read_file(), which expects an already-open file object.
    cnf.read("./config.ini")
    gencnf = cnf["General"]
    setup_logging(gencnf["Log"])
    dbnames = [x for x in cnf.sections() if x.startswith("db-")]
    for db in dbnames:
        uploader_name = cnf[db].get("Uploader")
        # globals() is a plain dict: the original getattr(globals(), name)
        # looked up attributes of the dict object itself and always failed.
        uploader_cls = globals()[uploader_name]
        uploader_args = cnf[db].get("UploaderArgs").split(',')
        backup_destination_folder = cnf[db].get("BackupDestenationFolder", '/var/lib/postgresql/data/backup')
        # str has no .remove(); strip the 'db-' prefix to get the database name.
        worker(
            Backuper(db[len("db-"):], backup_destination_folder),
            cnf,
            uploader_cls(*uploader_args)
        )
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Author: Yili Yu
Date: December 7 2015
Description: this module contains unit-testing for test_grades and data_setup functions
"""
import unittest
from unittest import TestCase
from hw10functions import *
class CommandTest(TestCase):
    """Unit tests for data_setup() and test_grades() from hw10functions."""

    def test_data_setup(self):
        # data_setup() returns a table-like object exposing .columns with
        # exactly these four columns.
        data = data_setup()
        self.assertGreater(len(data), 10)
        self.assertEqual(list(data.columns.values), ['CAMIS', 'BORO', 'GRADE', 'DATE'])

    def test_test_grades(self):
        # NOTE(review): expected values suggest test_grades maps a grade history
        # to -1 / 0 / 1 (declining / flat / improving) — confirm in hw10functions.
        self.assertEqual(test_grades(['A','B','C']),-1)
        self.assertEqual(test_grades(['B','B','C']),-1)
        self.assertEqual(test_grades(['C','B','C']),0)
        self.assertEqual(test_grades(['C','B','B','C','A']),1)
if __name__ == '__main__':
    # Run the suite when executed directly.
    unittest.main()
#!/usr/bin/python3
def somme(n):
    """Return the alternating partial sum sum_{k=1..n} (-1)^k / k^2."""
    total = 0
    sign = -1
    for k in range(1, n + 1):
        total += sign / (k * k)
        sign = -sign
    return total
def produit(n):
    """Return the product prod_{k=1..n-1} (k - n) / (k + n) (1 when empty)."""
    result = 1
    for k in range(1, n):
        result *= (k - n) / (k + n)
    return result
def maxi_c(L):
    """Return the largest absolute difference between consecutive elements of L.

    Assumes len(L) >= 2.
    """
    best = abs(L[0] - L[1])
    # Slide over every adjacent pair after the first.
    for prev, cur in zip(L[1:], L[2:]):
        gap = abs(prev - cur)
        if gap > best:
            best = gap
    return best
def median(L):
    """Return the lower median of L.

    An element k is a median when at least half the elements are <= k and at
    least half are >= k; the smallest such element is returned.

    Bug fixed: the original condition `2*up >= len(L) & 2*down >= len(L)`
    used bitwise `&`, which binds tighter than `>=`, so it parsed as a chained
    comparison against `len(L) & 2*down`; the medians list usually came out
    empty and `min([])` raised ValueError.
    """
    medians = []
    for k in L:
        up = sum(1 for i in L if i >= k)
        down = sum(1 for i in L if i <= k)
        if 2 * up >= len(L) and 2 * down >= len(L):
            medians.append(k)
    return min(medians)
def dio2(p):
    """Return all pairs [x, y] with 0 <= x <= y and x^2 + y^2 == 2*p^2."""
    target = 2 * p * p
    pairs = []
    x = 0
    while x * x <= target:
        # Only scan y >= x so each solution appears once.
        y = x
        while x * x + y * y <= target:
            if x * x + y * y == target:
                pairs.append([x, y])
            y += 1
        x += 1
    return pairs
def diovar(p):
    """For each i in 0..p, collect dio2(i) with i appended to every solution."""
    table = []
    for i in range(p + 1):
        solutions = dio2(i)
        for sol in solutions:
            sol.append(i)
        table.append(solutions)
    return table
def nontriv(m):
    """Return the values k in 0..m for which dio2(k) has more than one solution."""
    return [k for k in range(m + 1) if len(dio2(k)) > 1]
def valmax(m):
    """Return every k in 0..m whose dio2 solution count equals the running maximum."""
    best = 0
    winners = []
    for k in range(m + 1):
        count = len(dio2(k))
        if count > best:
            # New record: restart the winner list.
            best = count
            winners = []
        if count == best:
            winners.append(k)
    return winners
def comp(l1, l2):
    """Return True when l1 and l2 have equal length and equal elements in order."""
    if len(l1) != len(l2):
        return False
    for a, b in zip(l1, l2):
        if a != b:
            return False
    return True
def ana1(l1, l2):
    """Return True when l1 and l2 are permutations of one another (anagram test)."""
    # Sorting both sequences and comparing is equivalent to the element-wise
    # comparison the original delegated to comp().
    return sorted(l1) == sorted(l2)
def ana2(l1, l2):
    """Return True when strings l1 and l2 are anagrams, ignoring case and spaces."""
    cleaned1 = sorted(c for c in l1.lower() if c != ' ')
    cleaned2 = sorted(c for c in l2.lower() if c != ' ')
    return cleaned1 == cleaned2


print(ana2('prin t', 'pirnt'))
# -*- coding: utf-8 -*-
from app.models import Poll
from app.notifications import notify_via_email, Events
from app.utils import session, http
from web import config
import web
class View:
    """Read-only display page for a single poll."""

    @session.login_required
    def GET(self, poll_id):
        """Render the poll page, or raise 404 when the poll does not exist."""
        poll = Poll.get(int(poll_id), joined_attrs=["choices", "votes_by_user", "comments"])
        if poll is None:
            raise web.notfound()
        # Assemble the partial views, then wrap them in the site layout.
        body = config.views.poll(
            poll,
            config.views.poll_header(poll),
            config.views.poll_vote_unit(poll),
            config.views.poll_votes(poll),
            config.views.comments(poll, config.views.comment),
        )
        return config.views.layout(body)
class Vote:
    """AJAX endpoint that records the current user's vote on a poll."""

    @session.login_required
    @http.jsonify
    def POST(self):
        """Record the user's choices and return the refreshed votes view as
        JSON. Raises 404 for a missing poll and 403 on invalid choice input."""
        # Reads the HTTP request parameters
        http_input = web.input(poll_id=None, poll_user_choices=[])
        poll_id = http_input.poll_id
        poll_user_choices = http_input.poll_user_choices
        # Loads the poll
        if poll_id is None:
            raise web.notfound()
        poll = Poll.get(int(poll_id), joined_attrs=["choices", "votes_by_user"])
        if poll is None:
            raise web.notfound()
        # Passes the user's choices to the model
        try:
            # Parses the choice numbers & makes sure they're valid.
            # BUG FIX: materialize the map() result -- on Python 3 the
            # iterator would be exhausted by the any() check below, silently
            # recording an empty vote.
            poll_user_choices = list(map(int, poll_user_choices))
            if any(i not in range(len(poll.choices)) for i in poll_user_choices):
                raise ValueError(u"Un des entiers passes a la methode /poll/vote n'est pas compris dans l'intervalle %s" % range(len(poll.choices)))
            # Determines if it's the first vote ever in the poll
            someone_already_voted = poll.has_votes
            # Determines if it's the first vote for the user
            user_already_voted = config.session_manager.user in poll.choices_by_user
            # Actual vote action for the user
            poll_vote = poll.vote(config.session_manager.user, [poll.choices[i] for i in poll_user_choices])
            # Registers an email notification
            http.register_hook(lambda: notify_via_email(poll_vote, Events.MODIFIED if user_already_voted else Events.NEW))
            return dict(
                data=config.views.poll_votes(poll, highlight_user=config.session_manager.user if someone_already_voted else None),
                partial=someone_already_voted
            )
        except ValueError as exception:
            raise http.Forbidden(exception)
class Comment:
    """Endpoint that appends a user comment to a poll."""

    @session.login_required
    def POST(self):
        """Attach the posted comment to the poll and return its rendered view."""
        http_input = web.input()
        poll_id = http_input.poll_id
        comment = http_input.comment
        # A missing poll id (or an unknown poll) is a 404.
        if poll_id is None:
            raise web.notfound()
        poll = Poll.get(int(poll_id), joined_attrs=["comments"])
        if poll is None:
            raise web.notfound()
        # Append the comment, then queue the notification e-mail.
        poll_comment = poll.add_comment(config.session_manager.user, comment)
        http.register_hook(lambda: notify_via_email(poll_comment, Events.NEW))
        return config.views.comment(poll_comment)
|
# views.py
import imdb
from django.contrib.auth import logout
from django.shortcuts import render,redirect
from django.http import HttpResponse, HttpResponseRedirect
from .models import MoviesToWatch, Movie
from .wp_model import get_recommendation, get_details, searchMovie, genreSearch
# Create your views here.
# NOTE(review): this view is shadowed by the zero-argument index() defined
# later in this module -- the later definition wins at import time. Confirm
# which one the URLconf references and rename one of them.
def index(response, id):
    """Render the watch-list entry with the given primary key."""
    ls = MoviesToWatch.objects.get(id=id)
    return render(response, "main/list.html", {"ls":ls})
def home(response):
    """Render the landing page with an empty context."""
    context = {}
    return render(response, "main/home.html", context)
def suggest(response, name):
    """Show recommendations for *name*; anonymous users are bounced home."""
    movies = get_recommendation(name)
    if response.user.is_authenticated:
        context = {'name': name, 'Movies': movies}
        return render(response, 'main/suggest.html', context)
    return render(response, 'main/home.html', {'msg': 'Please login!'})
def genre(response):
    """Render the genre-selection form."""
    return render(response,'main/getgenre.html')
def searchByGenre(response, genre):
    """Render the top movies matching *genre* as a tuple of value-tuples."""
    matches = genreSearch(str(genre))
    rows = tuple(tuple(movie.values()) for movie in matches)
    return render(response, 'main/genres.html', {'data': rows, 'genre': genre})
def search(request):
    """Handle the movie-search form.

    POST renders results for the submitted title; GET renders the empty
    search page. The logged-in username (or None) is passed to the template.
    """
    username = request.user.username if request.user.is_authenticated else None
    if request.method == 'POST':
        name = request.POST.get('name')
        results = searchMovie(name)
        names = [movie.values() for movie in results]
        return render(request, 'main/search.html', {
            'uid': username,
            'name': name,
            'Movies': tuple(names),
            'msg': 'Search results for {}:'.format(name),
        })
    # NOTE: the original trailing render of main/home.html was unreachable
    # (both branches return) and has been removed.
    return render(request, 'main/search.html', {'uid': username})
def detail(response, id):
    """Show the detail page for IMDb title *id* plus its recommendations.

    Anonymous users are sent to the home page; a failed lookup falls back
    to the search page.
    """
    if not response.user.is_authenticated:
        return render(response, 'main/home.html', {'msg': 'Please login!'})
    try:
        det = get_details(id)
        det['Movies'] = get_recommendation(det['title'])
        return render(response, 'main/detail.html', det)
    # BUG FIX: narrowed from a bare ``except:`` (which also swallowed
    # SystemExit/KeyboardInterrupt); the dead username computation in the
    # handler was removed.
    except Exception:
        return redirect('/search')
def searchDet(request):
    """POST redirects to the detail page for the submitted title id; GET
    renders the lookup form with the current username (or None)."""
    if request.method == 'POST':
        name = request.POST.get('name')
        return redirect('/detail/' + name)
    username = request.user.username if request.user.is_authenticated else None
    # NOTE: the original trailing render of main/home.html was unreachable
    # (both branches return) and has been removed.
    return render(request, 'main/search_movie.html', {'uid': username})
# NOTE(review): this redefinition replaces the two-argument index() declared
# earlier in this module; only this version is visible after import.
def index(response):
    """Render the main page for authenticated users; others are sent home."""
    if response.user.is_authenticated:
        username = response.user.username
        return render(response,'main/try_new.html',{'uid':username})
    return render(response, 'main/home.html', {'msg':'Please login!'})
def logout_view(request):
    """Log the user out and show the home page with a confirmation message."""
    logout(request)
    context = {'msg': 'You have been logged out!'}
    return render(request, 'main/home.html', context)
def genreSearch(genre):
    """Return up to five IMDb Top-250 movies whose genres include *genre*.

    Each entry is a dict with 'name', 'year' and 'picurl' keys; a default
    poster URL is substituted when IMDb provides no cover image.

    NOTE(review): this definition shadows the genreSearch imported from
    wp_model at the top of the module -- confirm which implementation is
    intended before removing either.
    """
    ia = imdb.IMDb()
    matches = []
    for entry in ia.get_top250_movies():
        if len(matches) == 5:
            break
        film = ia.get_movie(entry.getID())
        if genre in film.get('genres'):
            matches.append({
                'name': film.get('title'),
                'year': film.get('year'),
                'picurl': film.get('cover url'),
            })
            # BUG FIX: compare against None with ``is``, not ``==``.
            if matches[-1]['picurl'] is None:
                matches[-1]['picurl'] = "https://watch--next.herokuapp.com/static/default.png"
    return matches
#!/usr/bin/env python
"""
VolumeShaders.py: This module provides a python wrapper for the GLSL Volume Raycasting
shader as well as an an interface object for initializing and modifying uniform values
on the fly.
"""
__author__ = "John McDermott"
__email__ = "JFMcDermott428@gmail.com"
__version__ = "1.0.0"
__status__ = "Development"
# vizard modules
import viz
# local modules
import FileParser
import OTFMaker
from echo import CallbackProperty
#------------------------------------------------------------------------------------------
# Vertex Shader (GLSL)
#------------------------------------------------------------------------------------------
_VS = """
#version 330 compatibility
//
// Pipelined output to Fragment Processor
//
// interpolated 3D uV coords
smooth out vec3 vUV;
//
// Function Main
//
void main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
gl_ClipVertex = gl_ModelViewMatrix * gl_Vertex;
vUV = gl_Vertex.xyz + vec3(0.5);
gl_FrontColor = vec4(1,1,1,1);
gl_BackColor = vec4(1,1,1,1);
}
"""
#------------------------------------------------------------------------------------------
# Fragment Shader (GLSL)
#------------------------------------------------------------------------------------------
_FS = """
#version 330 compatibility
//
// Pipelined input from Vertex Processor
//
// raster-interpolated 3D uV coords
smooth in vec3 vUV;
//
// Uniforms
//
// 3D texture of volume data
uniform sampler3D volume;
// 1D texture transfer function LUT
uniform sampler1D transfer;
// camera position (computed on CPU)
uniform vec3 cam;
// vector of ray-marching step sizes
uniform vec3 step;
//
// Constants
//
// max samples for each ray march step
const int MAX_SAMPLES = 50;
// minimum texture coordinate
const vec3 texMin = vec3(0);
// maximum texture coordinate
const vec3 texMax = vec3(1);
const float minIntensity = 0.1;
const float maxIntensity = 1.0;
//
// Function Main
//
void main() {
// get 3D texture coordinates
vec3 dataPos = vUV;
// get ray marching direction and step size
vec3 geomDir = normalize((vUV-vec3(0.5))-cam);
vec3 dirStep = geomDir * step;
// initialize stop condition flag
bool stop = false;
// Advance ray through volume
for (int i=0; i < MAX_SAMPLES; i++) {
// update ray position
dataPos = dataPos + dirStep;
// ensure sample position is still within volume
stop = dot(sign(dataPos-texMin), sign(texMax-dataPos)) < 3.0;
if (stop) { break; }
// sample the data volume 3D texture
float sample = texture3D(volume, dataPos).r;
// check if sample is within threshold
if ((sample >= minIntensity) && (sample <= maxIntensity)) {
// store the sampled alpha value
float prev_alpha = sample - (sample * gl_FragColor.a);
// update accumulated fragment color
gl_FragColor.rgb = prev_alpha * vec3(sample) + gl_FragColor.rgb;
gl_FragColor.a += prev_alpha;
// evaluate early ray termination condition
if(gl_FragColor.a > 0.95) { break; }
} else {
i--;
continue;
}
}
}
"""
class RaycastShader(object):
    """Thin wrapper around the GLSL raycast shader program: owns the shader
    and its four uniforms, exposing the mutable ones as Python properties."""
    def __init__(self):
        """ Wrapper interface object for GLSL Raycast shader """
        # create uniforms: the samplers are bound to texture units 0 (volume)
        # and 1 (transfer LUT); cam/step start zeroed and are set by callers.
        self._u_volume = viz.addUniformInt('volume', 0)
        self._u_transfer = viz.addUniformInt('transfer',1)
        self._u_cam = viz.addUniformFloat('cam', [0.0,0.0,0.0])
        self._u_step = viz.addUniformFloat('step',[0.0,0.0,0.0])
        # create shader from the module-level GLSL sources
        self._shader = viz.addShader(vert=_VS, frag=_FS)
        # attach uniforms
        self._shader.attach(self._u_volume)
        self._shader.attach(self._u_transfer)
        self._shader.attach(self._u_cam)
        self._shader.attach(self._u_step)
    #=========================#
    #    Property accessors   #
    #=========================#
    @CallbackProperty
    def u_cam(self):
        # Current camera-position uniform value.
        return self._u_cam.get()
    @CallbackProperty
    def u_step(self):
        # Current ray-march step-size uniform value.
        return self._u_step.get()
    #=========================#
    #    Property mutators    #
    #=========================#
    @u_step.setter
    def u_step(self, val):
        self._u_step.set(val)
    @u_cam.setter
    def u_cam(self, val):
        self._u_cam.set(val)
    #=========================#
    #         Methods         #
    #=========================#
    def apply(self, Node):
        # Attach the shader program to a Vizard node.
        Node.apply(self._shader)
    def unapply(self):
        # Detach the shader from whatever it is applied to.
        self._shader.unapply()
class Raycaster(viz.EventClass):
    """
    Encapsulates the rendering geometry, textures, and shader interface for volume
    raycasting. Inherits from viz.EventClass
    """
    def __init__(self, lutTex1D, volTex3D, v_size=None, view=viz.MainView):
        """Bind the 1D transfer-function LUT and the 3D volume texture to the
        raycast shader and start a timer that tracks the camera position.

        v_size -- [x, y, z] voxel dimensions of the volume (default 512^3).
        view   -- the Vizard view whose position drives the shader camera.
        """
        viz.EventClass.__init__(self)
        # BUG FIX: a mutable list default would be shared across instances.
        if v_size is None:
            v_size = [512.0, 512.0, 512.0]
        self.view = view
        # configure 1D LUT texture
        self.lutTex1D = lutTex1D
        self.lutTex1D.filter(viz.MIN_FILTER, viz.LINEAR)
        self.lutTex1D.filter(viz.MAG_FILTER, viz.LINEAR)
        self.lutTex1D.wrap(viz.WRAP_S, viz.REPEAT)
        # configure 3D volume texture
        self.volTex3D = volTex3D
        self.volTex3D.filter(viz.MIN_FILTER, viz.LINEAR_MIPMAP_LINEAR)
        self.volTex3D.filter(viz.MAG_FILTER, viz.LINEAR)
        self.volTex3D.wrap(viz.WRAP_S, viz.CLAMP)
        self.volTex3D.wrap(viz.WRAP_T, viz.CLAMP)
        self.volTex3D.wrap(viz.WRAP_R, viz.CLAMP)
        # load and configure rendering geometry (unit cube)
        max_dim = max(v_size)
        print([x * 1.0 / max_dim for x in v_size])
        self.w = OTFMaker.get_box(1.0, 1.0, 1.0)
        self.w.enable(viz.DEPTH_TEST)
        self.w.drawOrder(0, bin=viz.BIN_TRANSPARENT)
        self.w.blendFunc(viz.GL_SRC_ALPHA, viz.GL_ONE_MINUS_SRC_ALPHA)
        # create GLSL shader interface object
        self.shader = RaycastShader()
        # set initial shader values -- BUG FIX: query the tracked view rather
        # than hard-coding viz.MainView (identical for the default argument).
        # z is negated here and in update() to match the shader's convention.
        mv = self.view.getPosition()
        self.shader.u_cam = [mv[0], mv[1], -mv[2]]
        self.shader.u_step = [1.0 / x for x in v_size]
        # bind textures to the units expected by the shader's sampler uniforms
        self.w.texture(self.volTex3D, unit=0)
        self.w.texture(self.lutTex1D, unit=1)
        # apply shader to rendering geometry
        self.shader.apply(self.w)
        # register update callback to internal timer
        self.callback(viz.TIMER_EVENT, self.update)
        # start update callback timer
        self.starttimer(1, 0.001, viz.FOREVER)

    def update(self, _):
        """Timer callback: push the current camera position into the shader."""
        # BUG FIX(review): the original called self.getPosition(), which this
        # EventClass subclass does not define; use the tracked view, matching
        # the initialization in __init__.
        v_pos = self.view.getPosition()
        v_pos[2] = -v_pos[2]
        self.shader.u_cam = v_pos
#------------------------------------------------------------------------------------------
# Unit Main
#------------------------------------------------------------------------------------------
def main():
    """Placeholder entry point; this module is meant to be imported, not run."""
    pass
if __name__ == '__main__':
    main()
|
import sys
from time import sleep
import pygame
from bullet import Bullet
from ku import Ku
def check_keydown_events(event, ai_settings, screen, cha, bullets, stats):
    """Respond to key presses: movement flags, firing, quit, and pause."""
    key = event.key
    if key == pygame.K_RIGHT:
        cha.moving_right = True
    elif key == pygame.K_LEFT:
        cha.moving_left = True
    elif key == pygame.K_UP:
        cha.moving_up = True
    elif key == pygame.K_DOWN:
        cha.moving_down = True
    elif key == pygame.K_SPACE:
        fire_bullet(ai_settings, screen, cha, bullets)
    elif key == pygame.K_q:
        sys.exit()
    elif key == pygame.K_p:
        # 'p' toggles the pause/active state.
        stats.game_active = not stats.game_active
def fire_bullet(ai_settings, screen, cha, bullets):
    """Fire a new bullet if the per-screen limit has not been reached."""
    # BUG FIX: '<=' allowed bullets_allowed + 1 simultaneous bullets;
    # '<' enforces the configured limit exactly.
    if len(bullets) < ai_settings.bullets_allowed:
        new_bullet = Bullet(ai_settings, screen, cha)
        bullets.add(new_bullet)
def check_keyup_events(event, cha):
    """Respond to key releases by clearing the matching movement flag."""
    flags = {
        pygame.K_RIGHT: 'moving_right',
        pygame.K_LEFT: 'moving_left',
        pygame.K_UP: 'moving_up',
        pygame.K_DOWN: 'moving_down',
    }
    attr = flags.get(event.key)
    if attr is not None:
        setattr(cha, attr, False)
def check_events(ai_settings, screen, cha, bullets, stats, play_button, kus):
    """Dispatch every pending pygame event to the matching handler."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, cha, bullets, stats)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, cha)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(ai_settings, screen, stats, play_button, cha, kus,
                              bullets, mouse_x, mouse_y)
def check_play_button(ai_settings, screen, stats, play_button, cha, kus, bullets,
                      mouse_x, mouse_y):
    """Start a new game when the player clicks the play button while inactive."""
    clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
    if not clicked or stats.game_active:
        return
    # Hide the cursor while playing and reset the statistics.
    pygame.mouse.set_visible(False)
    stats.reset_stats()
    stats.game_active = True
    # Start from a clean board: no Kus, no stray bullets, fresh fleet.
    kus.empty()
    bullets.empty()
    create_fleet(ai_settings, screen, cha, kus)
    cha.center_cha()
def update_screen(ai_settings, screen, cha, kus, bullets, stats, play_button):
    """Redraw everything for the current frame and flip the display."""
    screen.fill(ai_settings.bg_color)
    # Bullets render beneath the character and the fleet.
    for shot in bullets.sprites():
        shot.draw_bullet()
    cha.blitme()
    kus.draw(screen)
    # The play button is only shown while the game is inactive.
    if not stats.game_active:
        play_button.draw_button()
    pygame.display.flip()
def update_bullets(ai_settings, screen, cha, kus, bullets):
    """Advance all bullets, drop off-screen ones, and resolve Ku hits."""
    bullets.update()
    # Collect off-screen bullets from a copy so removal is safe.
    offscreen = [shot for shot in bullets.copy() if shot.rect.bottom <= 0]
    for shot in offscreen:
        bullets.remove(shot)
    check_bullet_ku_collisions(ai_settings, screen, cha, kus, bullets)
def check_bullet_ku_collisions(ai_settings, screen, cha, kus, bullets):
    """Remove colliding bullet/Ku pairs; respawn the fleet once it is empty."""
    # groupcollide removes both sprites on contact; the returned mapping of
    # hits was stored but never used, so the assignment has been dropped.
    pygame.sprite.groupcollide(bullets, kus, True, True)
    if len(kus) == 0:
        # Destroy existing bullets and create new fleet.
        bullets.empty()
        create_fleet(ai_settings, screen, cha, kus)
def get_number_kus_x(ai_settings, ku_width):
    """Return how many Kus fit in one row, spaced one Ku-width apart."""
    usable_width = ai_settings.screen_width - 2 * ku_width
    return int(usable_width / (2 * ku_width))
def get_number_rows(ai_settings, cha_height, ku_height):
    """Return how many rows of Kus fit above the character."""
    usable_height = ai_settings.screen_height - 2 * ku_height - cha_height
    return int(usable_height / (2 * ku_height))
def create_ku(ai_settings, screen, kus, ku_number, row_number):
    """Create one Ku and position it inside the fleet grid."""
    ku = Ku(ai_settings, screen)
    spacing = 2 * ku.rect.width
    ku.x = ku.rect.width + spacing * ku_number
    ku.rect.x = ku.x
    # Rows are packed at 1.5x sprite height, shifted 50px up from the margin.
    ku.rect.y = ku.rect.height + 1.5 * ku.rect.height * row_number - 50
    kus.add(ku)
def create_fleet(ai_settings, screen, cha, kus):
    """Fill the screen with a grid of Kus, spaced one Ku-width apart."""
    # A throwaway Ku supplies the sprite dimensions for the layout math.
    template = Ku(ai_settings, screen)
    columns = get_number_kus_x(ai_settings, template.rect.width)
    rows = get_number_rows(ai_settings, cha.rect.height, template.rect.height)
    for row_number in range(rows):
        for ku_number in range(columns):
            create_ku(ai_settings, screen, kus, ku_number, row_number)
def check_fleet_edges(ai_settings, kus):
    """Reverse the fleet as soon as any Ku reports touching a screen edge."""
    # any() short-circuits at the first edge hit, like the original break.
    if any(ku.check_edges() for ku in kus.sprites()):
        change_fleet_direction(ai_settings, kus)
def change_fleet_direction(ai_settings, kus):
    """Drop every Ku by the configured amount and reverse travel direction."""
    drop = ai_settings.fleet_drop_speed
    for ku in kus.sprites():
        ku.rect.y += drop
    ai_settings.fleet_direction *= -1
def cha_hit(ai_settings, stats, screen, cha, kus, bullets):
    """Respond to the character being hit by a Ku."""
    if stats.chas_left <= 0:
        # Out of lives: stop the game and restore the cursor.
        stats.game_active = False
        pygame.mouse.set_visible(True)
        return
    stats.chas_left -= 1
    # Wipe the board and rebuild it with the character re-centered.
    kus.empty()
    bullets.empty()
    create_fleet(ai_settings, screen, cha, kus)
    cha.center_cha()
    # Brief pause so the player notices the hit.
    sleep(0.5)
def check_kus_bottom(ai_settings, stats, screen, cha, kus, bullets):
    """Treat any Ku reaching the bottom edge like a hit on the character."""
    bottom = screen.get_rect().bottom
    for ku in kus.sprites():
        if ku.rect.bottom >= bottom:
            cha_hit(ai_settings, stats, screen, cha, kus, bullets)
            break
def update_kus(ai_settings, stats, screen, cha, kus, bullets):
    """Advance the fleet, bouncing at the edges, and resolve collisions."""
    check_fleet_edges(ai_settings, kus)
    kus.update()
    # A direct collision with the character costs a life...
    if pygame.sprite.spritecollideany(cha, kus):
        cha_hit(ai_settings, stats, screen, cha, kus, bullets)
    # ...and so does any Ku slipping past the bottom of the screen.
    check_kus_bottom(ai_settings, stats, screen, cha, kus, bullets)
"""
This file is part of the private API. Please do not refer to any variables defined here directly as they will be
removed on future versions without warning.
"""
# This will eventually be replaced with a call at torchvision.datasets.info("imagenet").categories
_IMAGENET_CATEGORIES = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead",
"electric ray",
"stingray",
"cock",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel",
"kite",
"bald eagle",
"vulture",
"great grey owl",
"European fire salamander",
"common newt",
"eft",
"spotted salamander",
"axolotl",
"bullfrog",
"tree frog",
"tailed frog",
"loggerhead",
"leatherback turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"common iguana",
"American chameleon",
"whiptail",
"agama",
"frilled lizard",
"alligator lizard",
"Gila monster",
"green lizard",
"African chameleon",
"Komodo dragon",
"African crocodile",
"American alligator",
"triceratops",
"thunder snake",
"ringneck snake",
"hognose snake",
"green snake",
"king snake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"rock python",
"Indian cobra",
"green mamba",
"sea snake",
"horned viper",
"diamondback",
"sidewinder",
"trilobite",
"harvestman",
"scorpion",
"black and gold garden spider",
"barn spider",
"garden spider",
"black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie chicken",
"peacock",
"quail",
"partridge",
"African grey",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"American egret",
"bittern",
"crane bird",
"limpkin",
"European gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"red-backed sandpiper",
"redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog",
"Pekinese",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound",
"basset",
"beagle",
"bloodhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound",
"English foxhound",
"redbone",
"borzoi",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound",
"Norwegian elkhound",
"otterhound",
"Saluki",
"Scottish deerhound",
"Weimaraner",
"Staffordshire bullterrier",
"American Staffordshire terrier",
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier",
"Airedale",
"cairn",
"Australian terrier",
"Dandie Dinmont",
"Boston bull",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier",
"Tibetan terrier",
"silky terrier",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla",
"English setter",
"Irish setter",
"Gordon setter",
"Brittany spaniel",
"clumber",
"English springer",
"Welsh springer spaniel",
"cocker spaniel",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog",
"Shetland sheepdog",
"collie",
"Border collie",
"Bouvier des Flandres",
"Rottweiler",
"German shepherd",
"Doberman",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard",
"Eskimo dog",
"malamute",
"Siberian husky",
"dalmatian",
"affenpinscher",
"basenji",
"pug",
"Leonberg",
"Newfoundland",
"Great Pyrenees",
"Samoyed",
"Pomeranian",
"chow",
"keeshond",
"Brabancon griffon",
"Pembroke",
"Cardigan",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf",
"white wolf",
"red wolf",
"coyote",
"dingo",
"dhole",
"African hunting dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian cat",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"ice bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"long-horned beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket",
"walking stick",
"cockroach",
"mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"admiral",
"ringlet",
"monarch",
"cabbage butterfly",
"sulphur butterfly",
"lycaenid",
"starfish",
"sea urchin",
"sea cucumber",
"wood rabbit",
"hare",
"Angora",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"sorrel",
"zebra",
"hog",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram",
"bighorn",
"ibex",
"hartebeest",
"impala",
"gazelle",
"Arabian camel",
"llama",
"weasel",
"mink",
"polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas",
"baboon",
"macaque",
"langur",
"colobus",
"proboscis monkey",
"marmoset",
"capuchin",
"howler monkey",
"titi",
"spider monkey",
"squirrel monkey",
"Madagascar cat",
"indri",
"Indian elephant",
"African elephant",
"lesser panda",
"giant panda",
"barracouta",
"eel",
"coho",
"rock beauty",
"anemone fish",
"sturgeon",
"gar",
"lionfish",
"puffer",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibian",
"analog clock",
"apiary",
"apron",
"ashcan",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint",
"Band Aid",
"banjo",
"bannister",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"barrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap",
"bath towel",
"bathtub",
"beach wagon",
"beacon",
"beaker",
"bearskin",
"beer bottle",
"beer glass",
"bell cote",
"bib",
"bicycle-built-for-two",
"bikini",
"binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsled",
"bolo tie",
"bonnet",
"bookcase",
"bookshop",
"bottlecap",
"bow",
"bow tie",
"brass",
"brassiere",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"bullet train",
"butcher shop",
"cab",
"caldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"carpenter's kit",
"carton",
"car wheel",
"cash machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"cellular telephone",
"chain",
"chainlink fence",
"chain mail",
"chain saw",
"chest",
"chiffonier",
"chime",
"china cabinet",
"Christmas stocking",
"church",
"cinema",
"cleaver",
"cliff dwelling",
"cloak",
"clog",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil",
"combination lock",
"computer keyboard",
"confectionery",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishrag",
"dishwasher",
"disk brake",
"dock",
"dogsled",
"dome",
"doormat",
"drilling platform",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa",
"file",
"fireboat",
"fire engine",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gasmask",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golfcart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"grille",
"grocery store",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower",
"hand-held computer",
"handkerchief",
"hard disc",
"harmonica",
"harp",
"harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoopskirt",
"horizontal bar",
"horse cart",
"hourglass",
"iPod",
"iron",
"jack-o'-lantern",
"jean",
"jeep",
"jersey",
"jigsaw puzzle",
"jinrikisha",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"liner",
"lipstick",
"Loafer",
"lotion",
"loudspeaker",
"loupe",
"lumbermill",
"magnetic compass",
"mailbag",
"mailbox",
"maillot",
"maillot tank suit",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine chest",
"megalith",
"microphone",
"microwave",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter",
"mountain bike",
"mountain tent",
"mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"organ",
"oscilloscope",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle",
"paddlewheel",
"padlock",
"paintbrush",
"pajama",
"palace",
"panpipe",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"passenger car",
"patio",
"pay-phone",
"pedestal",
"pencil box",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"pick",
"pickelhaube",
"picket fence",
"pickup",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate",
"pitcher",
"plane",
"planetarium",
"plastic bag",
"plate rack",
"plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"pop bottle",
"pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"projectile",
"projector",
"puck",
"punching bag",
"purse",
"quill",
"quilt",
"racer",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"rubber eraser",
"rugby ball",
"rule",
"running shoe",
"safe",
"safety pin",
"saltshaker",
"sandal",
"sarong",
"sax",
"scabbard",
"scale",
"school bus",
"schooner",
"scoreboard",
"screen",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe shop",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar dish",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch",
"stove",
"strainer",
"streetcar",
"stretcher",
"studio couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglass",
"sunglasses",
"sunscreen",
"suspension bridge",
"swab",
"sweatshirt",
"swimming trunks",
"swing",
"switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy",
"television",
"tennis ball",
"thatch",
"theater curtain",
"thimble",
"thresher",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toyshop",
"tractor",
"trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright",
"vacuum",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"warplane",
"washbasin",
"washer",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool",
"worm fence",
"wreck",
"yawl",
"yurt",
"web site",
"comic book",
"crossword puzzle",
"street sign",
"traffic light",
"book jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"ice lolly",
"French loaf",
"bagel",
"pretzel",
"cheeseburger",
"hotdog",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce",
"dough",
"meat loaf",
"pizza",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeside",
"promontory",
"sandbar",
"seashore",
"valley",
"volcano",
"ballplayer",
"groom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"hip",
"buckeye",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn",
"earthstar",
"hen-of-the-woods",
"bolete",
"ear",
"toilet tissue",
]
# To be replaced with torchvision.datasets.info("coco").categories
# COCO object-detection label space.  Index 0 is the background class; the
# "N/A" strings are placeholders — presumably they occupy the unused COCO
# category ids so that list index == category id (confirm against COCO docs).
_COCO_CATEGORIES = [
    "__background__",
    "person",
    "bicycle",
    "car",
    "motorcycle",
    "airplane",
    "bus",
    "train",
    "truck",
    "boat",
    "traffic light",
    "fire hydrant",
    "N/A",
    "stop sign",
    "parking meter",
    "bench",
    "bird",
    "cat",
    "dog",
    "horse",
    "sheep",
    "cow",
    "elephant",
    "bear",
    "zebra",
    "giraffe",
    "N/A",
    "backpack",
    "umbrella",
    "N/A",
    "N/A",
    "handbag",
    "tie",
    "suitcase",
    "frisbee",
    "skis",
    "snowboard",
    "sports ball",
    "kite",
    "baseball bat",
    "baseball glove",
    "skateboard",
    "surfboard",
    "tennis racket",
    "bottle",
    "N/A",
    "wine glass",
    "cup",
    "fork",
    "knife",
    "spoon",
    "bowl",
    "banana",
    "apple",
    "sandwich",
    "orange",
    "broccoli",
    "carrot",
    "hot dog",
    "pizza",
    "donut",
    "cake",
    "chair",
    "couch",
    "potted plant",
    "bed",
    "N/A",
    "dining table",
    "N/A",
    "N/A",
    "toilet",
    "N/A",
    "tv",
    "laptop",
    "mouse",
    "remote",
    "keyboard",
    "cell phone",
    "microwave",
    "oven",
    "toaster",
    "sink",
    "refrigerator",
    "N/A",
    "book",
    "clock",
    "vase",
    "scissors",
    "teddy bear",
    "hair drier",
    "toothbrush",
]
# To be replaced with torchvision.datasets.info("coco_kp")
# Binary label space for COCO person keypoint detection.
_COCO_PERSON_CATEGORIES = ["no person", "person"]
# The 17 COCO person keypoints, in the order emitted by keypoint models.
_COCO_PERSON_KEYPOINT_NAMES = [
    "nose",
    "left_eye",
    "right_eye",
    "left_ear",
    "right_ear",
    "left_shoulder",
    "right_shoulder",
    "left_elbow",
    "right_elbow",
    "left_wrist",
    "right_wrist",
    "left_hip",
    "right_hip",
    "left_knee",
    "right_knee",
    "left_ankle",
    "right_ankle",
]
# To be replaced with torchvision.datasets.info("voc").categories
# Background plus the 20 PASCAL VOC object classes, in canonical order.
_VOC_CATEGORIES = [
    "__background__",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
]
# To be replaced with torchvision.datasets.info("kinetics400").categories
# The 400 human-action classes of the Kinetics-400 video dataset,
# in alphabetical order (class index == list index).
_KINETICS400_CATEGORIES = [
    "abseiling",
    "air drumming",
    "answering questions",
    "applauding",
    "applying cream",
    "archery",
    "arm wrestling",
    "arranging flowers",
    "assembling computer",
    "auctioning",
    "baby waking up",
    "baking cookies",
    "balloon blowing",
    "bandaging",
    "barbequing",
    "bartending",
    "beatboxing",
    "bee keeping",
    "belly dancing",
    "bench pressing",
    "bending back",
    "bending metal",
    "biking through snow",
    "blasting sand",
    "blowing glass",
    "blowing leaves",
    "blowing nose",
    "blowing out candles",
    "bobsledding",
    "bookbinding",
    "bouncing on trampoline",
    "bowling",
    "braiding hair",
    "breading or breadcrumbing",
    "breakdancing",
    "brush painting",
    "brushing hair",
    "brushing teeth",
    "building cabinet",
    "building shed",
    "bungee jumping",
    "busking",
    "canoeing or kayaking",
    "capoeira",
    "carrying baby",
    "cartwheeling",
    "carving pumpkin",
    "catching fish",
    "catching or throwing baseball",
    "catching or throwing frisbee",
    "catching or throwing softball",
    "celebrating",
    "changing oil",
    "changing wheel",
    "checking tires",
    "cheerleading",
    "chopping wood",
    "clapping",
    "clay pottery making",
    "clean and jerk",
    "cleaning floor",
    "cleaning gutters",
    "cleaning pool",
    "cleaning shoes",
    "cleaning toilet",
    "cleaning windows",
    "climbing a rope",
    "climbing ladder",
    "climbing tree",
    "contact juggling",
    "cooking chicken",
    "cooking egg",
    "cooking on campfire",
    "cooking sausages",
    "counting money",
    "country line dancing",
    "cracking neck",
    "crawling baby",
    "crossing river",
    "crying",
    "curling hair",
    "cutting nails",
    "cutting pineapple",
    "cutting watermelon",
    "dancing ballet",
    "dancing charleston",
    "dancing gangnam style",
    "dancing macarena",
    "deadlifting",
    "decorating the christmas tree",
    "digging",
    "dining",
    "disc golfing",
    "diving cliff",
    "dodgeball",
    "doing aerobics",
    "doing laundry",
    "doing nails",
    "drawing",
    "dribbling basketball",
    "drinking",
    "drinking beer",
    "drinking shots",
    "driving car",
    "driving tractor",
    "drop kicking",
    "drumming fingers",
    "dunking basketball",
    "dying hair",
    "eating burger",
    "eating cake",
    "eating carrots",
    "eating chips",
    "eating doughnuts",
    "eating hotdog",
    "eating ice cream",
    "eating spaghetti",
    "eating watermelon",
    "egg hunting",
    "exercising arm",
    "exercising with an exercise ball",
    "extinguishing fire",
    "faceplanting",
    "feeding birds",
    "feeding fish",
    "feeding goats",
    "filling eyebrows",
    "finger snapping",
    "fixing hair",
    "flipping pancake",
    "flying kite",
    "folding clothes",
    "folding napkins",
    "folding paper",
    "front raises",
    "frying vegetables",
    "garbage collecting",
    "gargling",
    "getting a haircut",
    "getting a tattoo",
    "giving or receiving award",
    "golf chipping",
    "golf driving",
    "golf putting",
    "grinding meat",
    "grooming dog",
    "grooming horse",
    "gymnastics tumbling",
    "hammer throw",
    "headbanging",
    "headbutting",
    "high jump",
    "high kick",
    "hitting baseball",
    "hockey stop",
    "holding snake",
    "hopscotch",
    "hoverboarding",
    "hugging",
    "hula hooping",
    "hurdling",
    "hurling (sport)",
    "ice climbing",
    "ice fishing",
    "ice skating",
    "ironing",
    "javelin throw",
    "jetskiing",
    "jogging",
    "juggling balls",
    "juggling fire",
    "juggling soccer ball",
    "jumping into pool",
    "jumpstyle dancing",
    "kicking field goal",
    "kicking soccer ball",
    "kissing",
    "kitesurfing",
    "knitting",
    "krumping",
    "laughing",
    "laying bricks",
    "long jump",
    "lunge",
    "making a cake",
    "making a sandwich",
    "making bed",
    "making jewelry",
    "making pizza",
    "making snowman",
    "making sushi",
    "making tea",
    "marching",
    "massaging back",
    "massaging feet",
    "massaging legs",
    "massaging person's head",
    "milking cow",
    "mopping floor",
    "motorcycling",
    "moving furniture",
    "mowing lawn",
    "news anchoring",
    "opening bottle",
    "opening present",
    "paragliding",
    "parasailing",
    "parkour",
    "passing American football (in game)",
    "passing American football (not in game)",
    "peeling apples",
    "peeling potatoes",
    "petting animal (not cat)",
    "petting cat",
    "picking fruit",
    "planting trees",
    "plastering",
    "playing accordion",
    "playing badminton",
    "playing bagpipes",
    "playing basketball",
    "playing bass guitar",
    "playing cards",
    "playing cello",
    "playing chess",
    "playing clarinet",
    "playing controller",
    "playing cricket",
    "playing cymbals",
    "playing didgeridoo",
    "playing drums",
    "playing flute",
    "playing guitar",
    "playing harmonica",
    "playing harp",
    "playing ice hockey",
    "playing keyboard",
    "playing kickball",
    "playing monopoly",
    "playing organ",
    "playing paintball",
    "playing piano",
    "playing poker",
    "playing recorder",
    "playing saxophone",
    "playing squash or racquetball",
    "playing tennis",
    "playing trombone",
    "playing trumpet",
    "playing ukulele",
    "playing violin",
    "playing volleyball",
    "playing xylophone",
    "pole vault",
    "presenting weather forecast",
    "pull ups",
    "pumping fist",
    "pumping gas",
    "punching bag",
    "punching person (boxing)",
    "push up",
    "pushing car",
    "pushing cart",
    "pushing wheelchair",
    "reading book",
    "reading newspaper",
    "recording music",
    "riding a bike",
    "riding camel",
    "riding elephant",
    "riding mechanical bull",
    "riding mountain bike",
    "riding mule",
    "riding or walking with horse",
    "riding scooter",
    "riding unicycle",
    "ripping paper",
    "robot dancing",
    "rock climbing",
    "rock scissors paper",
    "roller skating",
    "running on treadmill",
    "sailing",
    "salsa dancing",
    "sanding floor",
    "scrambling eggs",
    "scuba diving",
    "setting table",
    "shaking hands",
    "shaking head",
    "sharpening knives",
    "sharpening pencil",
    "shaving head",
    "shaving legs",
    "shearing sheep",
    "shining shoes",
    "shooting basketball",
    "shooting goal (soccer)",
    "shot put",
    "shoveling snow",
    "shredding paper",
    "shuffling cards",
    "side kick",
    "sign language interpreting",
    "singing",
    "situp",
    "skateboarding",
    "ski jumping",
    "skiing (not slalom or crosscountry)",
    "skiing crosscountry",
    "skiing slalom",
    "skipping rope",
    "skydiving",
    "slacklining",
    "slapping",
    "sled dog racing",
    "smoking",
    "smoking hookah",
    "snatch weight lifting",
    "sneezing",
    "sniffing",
    "snorkeling",
    "snowboarding",
    "snowkiting",
    "snowmobiling",
    "somersaulting",
    "spinning poi",
    "spray painting",
    "spraying",
    "springboard diving",
    "squat",
    "sticking tongue out",
    "stomping grapes",
    "stretching arm",
    "stretching leg",
    "strumming guitar",
    "surfing crowd",
    "surfing water",
    "sweeping floor",
    "swimming backstroke",
    "swimming breast stroke",
    "swimming butterfly stroke",
    "swing dancing",
    "swinging legs",
    "swinging on something",
    "sword fighting",
    "tai chi",
    "taking a shower",
    "tango dancing",
    "tap dancing",
    "tapping guitar",
    "tapping pen",
    "tasting beer",
    "tasting food",
    "testifying",
    "texting",
    "throwing axe",
    "throwing ball",
    "throwing discus",
    "tickling",
    "tobogganing",
    "tossing coin",
    "tossing salad",
    "training dog",
    "trapezing",
    "trimming or shaving beard",
    "trimming trees",
    "triple jump",
    "tying bow tie",
    "tying knot (not on a tie)",
    "tying tie",
    "unboxing",
    "unloading truck",
    "using computer",
    "using remote controller (not gaming)",
    "using segway",
    "vault",
    "waiting in line",
    "walking the dog",
    "washing dishes",
    "washing feet",
    "washing hair",
    "washing hands",
    "water skiing",
    "water sliding",
    "watering plants",
    "waxing back",
    "waxing chest",
    "waxing eyebrows",
    "waxing legs",
    "weaving basket",
    "welding",
    "whistling",
    "windsurfing",
    "wrapping present",
    "wrestling",
    "writing",
    "yawning",
    "yoga",
    "zumba",
]
|
import sys
import string
# Swaps freq and word so that output rows read "word freq".
# Input rows look like: (freq, word, tag, num files), space-separated.
IN_PATH = "C:/Users/Daway Chou-Ren/Documents/REU/linguistics/all.num.txt"
OUT_PATH = "C:/Users/Daway Chou-Ren/Documents/REU/linguistics/all_num_swapped.txt"
# Fix: the output handle was previously never closed (risking unflushed
# data); both files now use context managers.  Iterating the handle also
# avoids loading the whole file into memory like readlines() did, and the
# unused `sum_freqs` accumulator was removed.
with open(IN_PATH, 'r') as f, open(OUT_PATH, 'w') as output:
    for line in f:
        line = line.lower()
        fields = line.split(" ")  # (freq, word, tag, num files)
        output.write(fields[1] + " " + fields[0] + "\n")
|
import os
import sys

# Make the bundled site-packages importable before anything Django-related
# is imported (django may live inside it).
root = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(root, 'site-packages'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "iexam.settings")

import django
from django.core.wsgi import get_wsgi_application

# The real WSGI entry point served in production.
application = get_wsgi_application()


def app(environ, start_response):
    """Minimal WSGI smoke-test app reporting the Django version.

    Fixes: `django` was referenced below without ever being imported
    (NameError at request time), and WSGI response bodies must be
    iterables of *bytes* under Python 3, not str.
    """
    status = '200 OK'
    response_headers = [('Content-type', 'text/plain')]
    start_response(status, response_headers)
    return [("Hello, django! version: " + str(django.VERSION)).encode('utf-8')]

#import sae
#from iexam import wsgi
#application = sae.create_wsgi_app(wsgi.application)
#application = sae.create_wsgi_app(app)
from datetime import datetime,date,timedelta
import statistics
import networkx as nx
from itertools import combinations
from feature_tools import get_statistical_results_of_list
import pickle
def get_average_age_difference_in_retweets(tweets):
    """Return (mean, median) of the day-differences between the account's
    creation date and the creation dates of the accounts it retweeted.

    tweets: non-empty list of tweet dicts in Twitter API format; tweets[0]
        supplies the owning account's 'created_at'.
    Returns (None, None) when none of the tweets are retweets.
    """
    fmt = '%a %b %d %H:%M:%S +0000 %Y'  # Twitter's created_at format
    my_age = datetime.strptime(tweets[0]['user']['created_at'], fmt)
    age_dif_list = []
    for t in tweets:
        if 'retweeted_status' in t:
            retweeted_age = datetime.strptime(
                t['retweeted_status']['user']['created_at'], fmt)
            age_dif_list.append(abs((my_age - retweeted_age).days))
    if not age_dif_list:
        return None, None
    # Fix: the original wrapped these calls in a bare `except` that printed a
    # message and then fell through, implicitly returning a single None rather
    # than the (mean, median) pair callers unpack.  statistics.mean/median only
    # raise on empty input, which is already guarded above.
    return statistics.mean(age_dif_list), statistics.median(age_dif_list)
def get_network_of_retweeters(retweets, neighbor_tweets):
    """Build an undirected graph of the user's retweet neighborhood.

    Edges connect the owning user (author of retweets[0]) to each account
    they retweeted, and each neighbor account to the accounts it mentions.

    Returns (graph, user_id_str).
    """
    user = retweets[0]['user']['id_str']
    G = nx.Graph()
    for t in retweets:
        retweeter = t['retweeted_status']['user']['id_str']
        G.add_edge(user, retweeter)
    for n in neighbor_tweets:
        neighbor = n['user']['id_str']
        for mention in n['entities']['user_mentions']:
            # Fix: the original passed the whole user_mentions dict as the
            # node, which raises TypeError (dicts are unhashable as graph
            # nodes); use the mentioned account's id string instead.
            G.add_edge(neighbor, mention['id_str'])
    print(nx.info(G))
    return G, user
def graph_features(G):
    """Compute basic structural features of graph *G*.

    Returns a 5-tuple: (density, average clustering coefficient,
    per-node triangle counts, number of nodes, number of edges).
    An empty graph makes networkx divide by zero; in that case the
    fallback (0, 0, [], 0, 0) is returned.
    """
    try:
        stats = (
            nx.density(G),
            nx.average_clustering(G),
            nx.triangles(G),
            G.number_of_nodes(),
            G.number_of_edges(),
        )
    except ZeroDivisionError:
        return 0, 0, [], 0, 0
    return stats
def get_hashtag_network(tweets):
    """Build a hashtag co-occurrence graph from a list of tweet dicts.

    Nodes are hashtag texts; each edge's 'weight' counts how many tweets
    contained both tags.  Returns (graph, list of all edge weights).
    """
    G = nx.Graph()
    for tweet in tweets:
        tags = {h['text'] for h in tweet['entities']['hashtags']}
        G.add_nodes_from(tags)
        if len(tags) > 1:
            # Bump the weight for every pair of tags sharing this tweet.
            for a, b in combinations(tags, 2):
                if G.has_edge(a, b):
                    G[a][b]['weight'] += 1.0
                else:
                    G.add_edge(a, b, weight=1.0)
    all_weights = [attrs['weight'] for _, _, attrs in G.edges(data=True)]
    return G, all_weights
|
import os
import math
import csv
def gen_curve(filename: str):
    """Write 630 CSV rows of (x, cos(x), sin(x)) for x = 0.00 .. 6.29."""
    xs = (i / 100.0 for i in range(630))
    with open(filename, 'w', newline='') as fs:
        csv.writer(fs).writerows((x, math.cos(x), math.sin(x)) for x in xs)
def gen_state(filename: str):
    """Write 30 CSV rows of (i, i % 2, (i // 10) % 2, i // 5)."""
    rows = [[i, i % 2, (i // 10) % 2, i // 5] for i in range(30)]
    with open(filename, 'w', newline='') as fs:
        csv.writer(fs).writerows(rows)
# Fix: os.path.basename() reduced the absolute script directory to its bare
# folder name, so the CSVs were written relative to the *current working
# directory* instead of next to this script.  Use the directory path itself.
base = os.path.dirname(os.path.realpath(__file__))
gen_curve(os.path.join(base, 'cos_sin.csv'))
gen_state(os.path.join(base, 'states.csv'))
|
# -*- coding: utf-8 -*-
import os
import shutil
from tqdm import tqdm
from PIL import Image
import numpy as np
import re
import hashlib
import piexif
# From the TensorFlow "poets" retraining example: modulus used when hashing
# filenames into a stable [0, 100) percentage for train/val/test bucketing.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1
def move_fungi(old_dir, new_dir, validation_percentage, testing_percentage):
    """Split per-class image folders from *old_dir* into new_dir/{train,val,test}.

    Only classes with more than 20 images are kept.  Each image is assigned
    deterministically to a split by hashing its (suffix-stripped) path, the
    scheme used by TensorFlow's "poets" retraining example, so the same file
    always lands in the same split across runs.

    old_dir: directory containing one sub-directory per class.
    new_dir: destination root; deleted and recreated from scratch each run.
    validation_percentage / testing_percentage: split sizes in percent (0-100).
    """
    # Recreate the destination tree from scratch.
    if not os.path.exists(new_dir):
        os.mkdir(new_dir)
    else:
        shutil.rmtree(new_dir)
        os.mkdir(new_dir)
    for i in ['train', 'val', 'test']:
        os.mkdir(os.path.join(new_dir, i))
    for i in tqdm(os.listdir(old_dir)):
        old_name = os.path.join(old_dir, i)
        # Skip sparse classes (20 images or fewer).
        if n_files(old_name) > 20:
            # Pre-create this class's folder in every split.
            for j in ['train', 'val', 'test']:
                new_name = os.path.join(new_dir, j, i)
                if not os.path.exists(new_name):
                    os.mkdir(new_name)
            for j in os.listdir(old_name):
                old_image_name = os.path.join(old_name, j)
                # from TF poets: hash the name (minus any "_nohash_" suffix)
                # into a stable percentage in [0, 100) that picks the split.
                hash_name = re.sub(r'_nohash_.*$', '', old_image_name)
                hash_name_hashed = hashlib.sha1(str.encode(hash_name)).hexdigest()
                percentage_hash = ((int(hash_name_hashed, 16) %
                                    (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                                   (100.0 / MAX_NUM_IMAGES_PER_CLASS))
                # Lowercase the extension.  NOTE(review): this keeps only the
                # first and last dot-separated parts, so "a.b.jpg" becomes
                # "a.jpg" — confirm that collapsing is intended.
                namy = ".".join([j.split(".")[0], j.split(".")[-1].lower()])
                if percentage_hash < validation_percentage:
                    new_image_name = os.path.join(new_dir,
                                                  'val',
                                                  i,
                                                  namy)
                elif percentage_hash < (testing_percentage +
                                        validation_percentage):
                    new_image_name = os.path.join(new_dir,
                                                  'test',
                                                  i,
                                                  namy)
                else:
                    new_image_name = os.path.join(new_dir,
                                                  'train',
                                                  i,
                                                  namy)
                shutil.copy(old_image_name, new_image_name)
                # Strip EXIF metadata from the copied image.
                piexif.remove(new_image_name)
def n_files(direc):
    """Return the number of directory entries directly inside *direc*."""
    return len(os.listdir(direc))
if __name__ == "__main__":
    # Fix: `get_mean_n_std` is not defined anywhere in this module, so the
    # original first line raised NameError before move_fungi could ever run.
    # The call is preserved as a comment alongside its companion np.save line.
    # m, s = get_mean_n_std("../with_fungi_dataset/images/")
    # np.save("./data_info.npy", np.array([m, s]))
    move_fungi("./images_use", "./images_app", 20, 20)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
通过使用浏览器的“审查元素”功能,其中的 network 可以查看网页在请求数据时发送和接收了哪些数据
GET:从服务器请求获取数据
POST:向指定服务器提交被处理的数据
"""
import urllib.request
import urllib.parse
import json
# The "_o" endpoint enforces the salt/sign anti-bot check; using 'translate'
# (without "_o") lets this fixed payload captured from the browser's
# network inspector keep working.
# url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
url = 'http://fanyi.youdao.com/translate?smartresult=dict&smartresult=rule'
payload = {
    'i': 'fuck you',
    'from': 'AUTO',
    'to': 'AUTO',
    'smartresult': 'dict',
    'client': 'fanyideskweb',
    'salt': '15649771276210',
    'sign': '2bb3ce093644dcf0ba07e2760a239a43',
    'ts': '1564977127621',
    'bv': '53539dde41bde18f4a71bb075fcf2e66',
    'doctype': 'json',
    'version': '2.1',
    'keyfrom': 'fanyi.web',
    'action': 'FY_BY_CLICKBUTTION',
}
encoded = urllib.parse.urlencode(payload).encode('utf-8')
# POST the form data and decode the JSON reply.
with urllib.request.urlopen(url, encoded) as response:
    html = response.read().decode('utf-8')
print(html)
target = json.loads(html)
result = target['translateResult'][0][0]
src = result['src']
tgt = result['tgt']
print('元数据:%s\n翻译结果:%s' % (src, tgt))
|
from graphviz import Digraph
from hashlib import md5
def dictToGraph(j, echo=True, coFilter=[]):
    """Convert a ClusterOperator list (parsed JSON) into a graphviz Digraph.

    j: dict with an 'items' list of ClusterOperator objects.
    echo: when True, print the generated DOT source.
    coFilter: optional list of operator names; when non-empty, only matching
        operators are rendered.  (Mutable default is read-only here, so it is
        kept for interface compatibility.)
    Returns the Digraph.
    """
    g = Digraph(format='png')
    g.graph_attr['rankdir'] = 'LR'
    for clusteroperator in j.get('items'):
        coname = clusteroperator['metadata']['name']
        # Cluster Operator doesn't match a non-empty filter: skip it.
        if coFilter and coname not in coFilter:
            continue
        # Fix: the original added a matching operator's node twice — once in
        # the filter branch and once again unconditionally after it.  Add the
        # node exactly once here.
        g.node(md5(coname.encode()).hexdigest(), coname, fontcolor='red')
        status = clusteroperator.get('status')
        if status is None:
            # If for some reason there's no status, skip
            continue
        relatedObjects = status.get('relatedObjects')
        if relatedObjects is None:
            # If for some reason there's no relatedObjects, skip
            continue
        for r in relatedObjects:
            # Extract relevant resource attributes
            # TODO: Add filter for resource types
            group = r.get('group')
            resource = r.get('resource')
            namespace = r.get('namespace')
            name = r.get('name')
            # TODO: Improve this formatting
            fq = 'Name: %s\nType: %s\nGroup: %s\nNS: %s' % (name, resource, group, namespace)
            # Finally, add the resource node and link it to its operator.
            g.node(md5(fq.encode()).hexdigest(), fq, fontcolor='green')
            g.edge(md5(coname.encode()).hexdigest(), md5(fq.encode()).hexdigest())
    if echo:
        print(g.source)
    return g
|
# Dom Parise - 4/13/14
# participant model
#
import math,time
from random import random
from datetime import datetime,timedelta,date
from threading import Timer,Thread
from database import Database
from textmessenger import TextMessenger
# Draw an exponentially distributed waiting time of -ln(U)/p hours,
# where U ~ Uniform(0, 1) — i.e. the inter-arrival time of a Poisson
# process with rate p.
def poisson(p):
    u = random()
    return -math.log(u) / p
# Module-level database handle shared by all Participant instances.
db = Database()
class Participant:
    """One study participant.

    Schedules randomized SMS prompts over a six-week protocol: weeks 1-4
    use Poisson-spaced prompts in a 10am window (max 6/day), week 5 slows
    down (max 3/day), and week 6 sends one prompt per day at a random time.
    Tracks whether a reply is currently expected.
    """

    # initialize the participant's variables
    def __init__ (self, phone, twilio, num_today, start_date, lab_day, money_day):
        ## need to check these variables when working with db
        self.phone = phone                    # participant's phone number
        self.txt = TextMessenger(twilio)      # SMS sender wrapping the twilio handle
        self.num_today = num_today            # int: messages already sent today
        self.day = (datetime.now() - start_date).days  # days elapsed since study start
        self.lab_day = lab_day                # int
        self.money_day = money_day            # int
        # Placeholder strings until set_next_msg() installs real Timer threads;
        # next_message() distinguishes them via isinstance(..., Thread).
        self.message_timer = 'gonna be a thread'
        self.nonresponse_timer = 'gonna be a thread'
        self.expecting = False                # True while awaiting a reply
        # NOTE(review): send_verification() sets self.expectingVerification,
        # which is never initialized here — confirm whether it should be.

    # Returns tomorrow's 'start time' as a datetime.  Side effects: advances
    # self.day by one and resets self.num_today to 0.
    def next_day_start_time (self): #tested
        minutes = 0.0
        if self.day <= 28: # weeks 1-4: random minute between 10am-4pm
            minutes = 600.0 + ( 360.0 * random() )
        elif self.day <= 35: # week 5: 10am sharp
            minutes = 600.0
        elif self.day <= 42: # week 6: random minute between 10am-10pm
            minutes = 600.0 + ( 720.0 * random() )
        tmr = date.today() + timedelta(days=1)
        self.day += 1
        self.num_today = 0
        return datetime(year=tmr.year,month=tmr.month,day=tmr.day,hour=int(minutes/60),minute=int(minutes%60))

    # Given the participant's state in the study, determine the next datetime
    # at which to send a message.
    def next_message_time (self):
        # if after 22:00, get next_day_start_time if not target date
        # otherwise, depending on num_today, determine next time w/ poisson
        now = datetime.now()
        # now = datetime(year=now.year,month=now.month,day=now.day,hour=12) # altered datetime for testing
        next = self.add_poisson( now )  # NOTE(review): shadows the builtin next()
        if next.hour >= 22: # after 10pm: roll over to tomorrow's window
            return self.add_poisson( self.next_day_start_time() )
        else:
            if self.day < 28 and self.num_today < 6: # weeks 1-4, under daily cap
                return next
            elif self.day < 35 and self.num_today < 3: # week 5, under daily cap
                return next
            elif self.day >= 42: # week 6: exactly the day's start time
                return self.next_day_start_time()
            else:
                # daily cap reached: wait for tomorrow, then add a Poisson delay
                return self.add_poisson( self.next_day_start_time() )

    # time is a datetime; returns it shifted forward by a Poisson-process
    # inter-arrival delay whose rate depends on the study week.
    def add_poisson(self,time):
        p = 0
        if self.day < 28:
            p = 1.5
        elif self.day < 35:
            p = 0.25
        # NOTE(review): for self.day >= 35, p stays 0 and poisson(p) divides
        # by zero — confirm this path is unreachable for days 35-41.
        return time + timedelta(minutes=(poisson(p)*60))

    # Cancels any live timers, clears the reply flag, and schedules the next
    # message for this participant.
    def next_message (self):
        if isinstance(self.nonresponse_timer,Thread) and self.nonresponse_timer.is_alive():
            self.nonresponse_timer.cancel()
        if isinstance(self.message_timer,Thread) and self.message_timer.is_alive():
            self.message_timer.cancel()
        self.expecting = False
        self.set_next_msg( self.next_message_time() )

    # Sends the default text message to the participant and marks that a
    # reply is now expected.
    def send_text (self):
        db.log(self.phone, 'sent', 'default')
        self.txt.send_default_sms(self.phone)
        self.expecting = True

    # Called when the participant has not replied in time; logs it and
    # restarts the scheduling process.
    def nonresponse (self):
        db.log(self.phone, 'nonresponse', 'resetting_poisson_process')
        self.next_message()

    # Spawns a Timer thread to send a message at the given datetime, and a
    # second Timer that fires the non-response handler 90 minutes (5400 s)
    # after the message is sent.
    def set_next_msg (self, time):
        secs = (time - datetime.now()).total_seconds()
        self.message_timer = Timer( secs, self.send_text)
        self.nonresponse_timer = Timer( (secs + 5400) , self.nonresponse)
        self.message_timer.start()
        self.nonresponse_timer.start()

    # Sends a verification text message to the participant.
    def send_verification (self):
        # NOTE(review): the log label 'default' looks copy-pasted from
        # send_text(); presumably it should be 'verification' — confirm.
        db.log(self.phone, 'sent', 'default')
        self.txt.send_verification(self.phone)
        self.expectingVerification = True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.