#Pickling
import pickle
class Animal:
def __init__(self, number_of_paws, color):
self.number_of_paws = number_of_paws
self.color = color
class Sheep(Animal):
def __init__(self, color):
Animal.__init__(self, 4, color)
mary = Sheep("white")
print(str.format("My sheep mary is {0} and has {1} paws", mary.color, mary.number_of_paws))
my_pickled_mary = pickle.dumps(mary)
print("Would you like to see her pickled? Here she is!")
print(my_pickled_mary)
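# Round-trip sketch: pickle.loads rebuilds an equivalent object from the bytes above.
my_unpickled_mary = pickle.loads(my_pickled_mary)
print("And here she is again, unpickled: {0} with {1} paws".format(my_unpickled_mary.color, my_unpickled_mary.number_of_paws))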
|
import math
import urllib.request
import os
import glob
import subprocess
import shutil
from tile_convert import bbox_to_xyz, tile_edges
from osgeo import gdal
from argparse import ArgumentParser
from PIL import Image
#---------- CONFIGURATION -----------#
tile_server = "https://tiles-preview.firststreet.org/historic/3/{z}/{x}/{y}.png?access_token=pk.eyJ1IjoiZG9jazQyNDIiLCJhIjoiY2pjazE5eTM2NDl2aDJ3cDUyeDlsb292NiJ9.Jr__XbmAolbLyzPDj7-8kQ"
#tile_server = "https://tiles-preview.firststreet.org/probability/depth/2050/100/{z}/{x}/{y}.png?access_token=pk.eyJ1IjoiZG9jazQyNDIiLCJhIjoiY2pjazE5eTM2NDl2aDJ3cDUyeDlsb292NiJ9.Jr__XbmAolbLyzPDj7-8kQ"
parser = ArgumentParser()
parser.add_argument("--latMin", nargs='?', default="check_string_for_empty")
parser.add_argument("--latMax", nargs='?', default="check_string_for_empty")
parser.add_argument("--lonMin", nargs='?', default="check_string_for_empty")
parser.add_argument("--lonMax", nargs='?', default="check_string_for_empty")
args = parser.parse_args()
#python tiles_to_tiff.py --lonMin -82.612512 --lonMax -82.582203 --latMin 41.272755 --latMax 41.303938
temp_dir = 'temp'
output_dir = 'output'
zoom = 12
lon_min = float(args.lonMin)
lon_max = float(args.lonMax)
lat_min = float(args.latMin)
lat_max = float(args.latMax)
#-----------------------------------#
shutil.rmtree(temp_dir, ignore_errors=True)  # tolerate a missing temp dir on the first run
os.makedirs(temp_dir)
def download_tile(x, y, z, tile_server):
url = tile_server.replace(
"{x}", str(x)).replace(
"{y}", str(y)).replace(
"{z}", str(z))
path = "temp/"+str(x)+"_"+str(y)+"_"+str(z)+".png"
print(path, url)
# path = '{temp_dir}/{x}_{y}_{z}.png'
#path = f'{temp_dir}/{x}_{y}_{z}.png'
urllib.request.urlretrieve(url, path)
return(path)
def merge_tiles(input_pattern, output_path):
print(input_pattern,output_path)
merge_command = ['gdal_merge.py', '-o', output_path]
for name in glob.glob(input_pattern):
merge_command.append(name)
subprocess.call(merge_command)
def georeference_raster_tile(x, y, z, path):
bounds = tile_edges(x, y, z)
filename, extension = os.path.splitext(path)
gdal.Translate(filename + '.tif',
path,
outputSRS='EPSG:4326',
outputBounds=bounds)
#
x_min, x_max, y_min, y_max = bbox_to_xyz(
lon_min, lon_max, lat_min, lat_max, zoom)
for x in range(x_min, x_max + 1):
for y in range(y_min, y_max + 1):
png_path = download_tile(x, y, zoom, tile_server)
img = Image.open(png_path)
if img.getbbox():
georeference_raster_tile(x, y, zoom, png_path)
#
print("Download complete")
#
print("Merging tiles")
merge_tiles('temp/*.tif', output_dir + '/merged_3.tif')
print("Merge complete")
shutil.rmtree(temp_dir)
os.makedirs(temp_dir)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 07:44:34 2020
@author: danie
"""
from itertools import combinations_with_replacement
x = input().split()
string = sorted(x[0])
size = int(x[1])
items = list(combinations_with_replacement(string,size))
for item in items:
print(''.join(item))
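# Example: for the input "HACK 2" the letters sort to "ACHK" and the loop prints
# AA, AC, AH, AK, CC, CH, CK, HH, HK, KK (one combination per line).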
|
from .pdf_element import PdfElement
from ..exc import PdfError
class PdfCatalog(PdfElement):
"""PDF Root catalog element."""
def __init__(self, obj, obj_key=None, document=None):
if obj['Type'] != 'Catalog':
raise PdfError('Type "Catalog" expected, got "{}"'.format(obj['Type']))
if 'Pages' not in obj:
raise PdfError('Catalog dictionaries must contain Pages')
super(PdfCatalog, self).__init__(obj, obj_key, document)
|
#!/usr/bin/env python
'''
Author: Rich Wellum (richwellum@gmail.com)
Adapted and enhanced (fwiw) for use with NX-OS
by Hank Preston (hapresto@cisco.com)
This is a tool to take an NX-OS base VirtualBox image from CCO and create a
new box that has been bootstrapped for use with Vagrant.
- Initial configuration complete
- Mgmt Configured for DHCP
- vagrant account created with password vagrant and pub SSH Key
Tested with nxosv-final.7.0.3.I7.1.box
nxosv-final.7.0.3.I6.1.box
* Note: nxosv-final.7.0.3.I7.3.box (and later) boxes posted to CCO do not need
this script to complete the setup for Vagrant.
Pre-installed requirements:
python-pexpect
vagrant
virtualbox
Within OSX, these tools can be installed with Homebrew (among other options):
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew cask install virtualbox
brew cask install vagrant
brew cask install python
pip install pexpect
Full Description:
Takes a locally downloaded NX-OS source box, completes the setup and base
configuration, and outputs an updated VirtualBox Vagrant box.
Adds an embedded Vagrantfile, that will be included in
box/include/Vagrantfile. This Vagrantfile configures:
. Guest forwarding ports for 22, 80, 443 and 830
. SSH username and password and SSH (insecure) pub key
. Serial console port for configuration (disconnected by default)
This embedded Vagrantfile is compatible with additional non-embedded
Vagrantfiles for more advanced multi-node topologies.
. Backs up existing box files.
. Creates and registers a new VirtualBox VM.
. Adds appropriate memory, display and CPUs.
. Sets one NIC for networking.
. Sets up port forwarding for the guest SSH, NETCONF and RESTCONF.
. Sets up storage - hdd and dvd(for ISO).
. Starts the VM, then uses pexpect to configure NX-OS for
basic networking, with user name vagrant/vagrant and SSH key
. Enables NX-API
. Closes the VM down, once configured.
The resultant box image will come up fully networked and ready for use
with NX-API. Other programmability features of NX-OS can be enabled
manually or through provisioning with Ansible.
NOTE: If more than one interface in the resulting Vagrant box is required
then those additional interfaces need to be added in the actual
Vagrantfile.
'''
from __future__ import print_function
import sys
import os
import time
import subprocess
import getpass
import argparse
import re
import logging
from logging import StreamHandler
import textwrap
try:
import pexpect
except ImportError:
sys.exit('The "pexpect" Python module is not installed. Please install it using pip or OS packaging.')
# The background is set with 40 plus the number of the color,
# and the foreground with 30.
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
# Telnet ports used to access IOS XE via socat
CONSOLE_PORT = 65000
CONSOLE_SOCKET = "/tmp/test"
logger = logging.getLogger(__name__)
class ColorHandler(StreamHandler):
"""
Add colors to logging output
partial credits to
http://opensourcehacker.com/2013/03/14/ultima-python-logger-somewhere-over-the-rainbow/
"""
def __init__(self, colored):
super(ColorHandler, self).__init__()
self.colored = colored
COLORS = {
'WARNING': YELLOW,
'INFO': WHITE,
'DEBUG': BLUE,
'CRITICAL': YELLOW,
'ERROR': RED
}
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
level_map = {
logging.DEBUG: (None, CYAN, False),
logging.INFO: (None, WHITE, False),
logging.WARNING: (None, YELLOW, True),
logging.ERROR: (None, RED, True),
logging.CRITICAL: (RED, WHITE, True),
}
def addColor(self, text, bg, fg, bold):
ctext = ''
if bg is not None:
ctext = self.COLOR_SEQ % (40 + bg)
if bold:
ctext = ctext + self.BOLD_SEQ
ctext = ctext + self.COLOR_SEQ % (30 + fg) + text + self.RESET_SEQ
return ctext
def colorize(self, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
else:
bg, fg, bold = None, WHITE, False
# exception?
if record.exc_info:
formatter = logging.Formatter(format)
record.exc_text = self.addColor(
formatter.formatException(record.exc_info), bg, fg, bold)
record.msg = self.addColor(str(record.msg), bg, fg, bold)
return record
def format(self, record):
if self.colored:
message = logging.StreamHandler.format(self, self.colorize(record))
else:
message = logging.StreamHandler.format(self, record)
return message
def run(cmd, hide_error=False, cont_on_error=False):
"""
Run command to execute CLI and catch errors and display them whether
in verbose mode or not.
Allow the ability to hide errors and also to continue on errors.
"""
s_cmd = ' '.join(cmd)
logger.info("'%s'", s_cmd)
output = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
tup_output = output.communicate()
if output.returncode != 0:
logger.error('Failed (%d):', output.returncode)
else:
logger.debug('Succeeded (%d):', output.returncode)
logger.debug('Output [%s]' % tup_output[0])
if not hide_error and 0 != output.returncode:
logger.error('Error [%s]' % tup_output[1])
if not cont_on_error:
sys.exit('Quitting due to run command error')
else:
logger.debug(
'Continuing despite error cont_on_error=%d', cont_on_error)
return tup_output[0]
def pause_to_debug():
logger.critical("Pause before debug")
logger.critical(
"Use: 'socat unix-connect:/tmp/test stdin' to access the VM")
raw_input("Press Enter to continue.")
# To debug post box creation, add the following line to Vagrantfile
# config.vm.provider "virtualbox" do |v|
# v.customize ["modifyvm", :id, "--uart1", "0x3F8", 4, "--uartmode1", 'tcpserver', 65000]
# end
def cleanup_box():
"""
Destroy Running Box.
"""
logger.debug("Destroying Box")
run(["vagrant", "destroy", "-f"], cont_on_error=True)
def configure_nx(verbose=False, wait=True):
"""
Bring up NX-OS and do some initial config.
Using socat to do the connection as telnet has an
odd double return on vbox
"""
logger.warn('Waiting for NX-OS to boot (may take 3 minutes or so)')
localhost = 'localhost'
PROMPT = r'[\w-]+(\([\w-]+\))?[#>]'
# don't want to rely on specific hostname
# PROMPT = r'(Router|csr1kv).*[#>]'
CRLF = "\r\n"
def send_line(line=CRLF):
child.sendline(line)
if line != CRLF:
logger.info('NX-OS Config: %s' % line)
child.expect(re.escape(line))
def send_cmd(cmd, expect_prompt=True):
if not isinstance(cmd, list):
cmd = list((cmd,))
for c in cmd:
send_line(c)
if expect_prompt:
child.expect(PROMPT)
try:
#child = pexpect.spawn("socat TCP:%s:%s -,raw,echo=0,escape=0x1d" % (localhost, CONSOLE_PORT))
child = pexpect.spawn("socat unix-connect:%s stdin" % (CONSOLE_SOCKET))
if verbose:
child.logfile = open("tmp.log", "w")
# Long time for full configuration, waiting for ip address etc
child.timeout = 600
# wait for indication that boot has gone through
if (wait):
child.expect(r'%POAP-2-POAP_DHCP_DISCOVER_START:', child.timeout)
logger.warn(
'Logging into Vagrant Virtualbox and configuring NX-OS')
# Abort POAP
logger.warn("Aborting POAP")
send_cmd("y", expect_prompt=False)
time.sleep(1)
# Disable Secure Password Enforcement
logger.warn("Disable Secure Password")
send_cmd("n", expect_prompt=False)
time.sleep(1)
# Set admin password
logger.warn("Setting Admin Password")
if (wait):
child.expect(r'Enter the password for', child.timeout)
send_cmd("admin", expect_prompt=False)
time.sleep(2)
if (wait):
child.expect(r'Confirm the password', child.timeout)
logger.warn("Confirming Admin Password")
send_cmd("admin", expect_prompt=False)
time.sleep(3)
send_line()
time.sleep(2)
send_line()
time.sleep(1)
# wait for indication next step is ready
if (wait):
child.expect(r'Would you like to enter the basic configuration dialog', child.timeout)
# Disable Basic System Configuration
logger.warn("Disable Basic Sys Config")
send_cmd("no", expect_prompt=False)
# time.sleep(10)
# wait for indication next step is ready
if (wait):
child.expect(r'User Access Verification', child.timeout)
# Login as admin
logger.warn("Logging in as admin")
send_cmd("admin", expect_prompt=False)
time.sleep(1)
send_cmd("admin", expect_prompt=False)
time.sleep(1)
send_cmd("term width 300")
# enable plus config mode
logger.warn("Deploying Baseline configuration.")
send_cmd("enable")
send_cmd("conf t")
# Perform basic Vagrant Configuration
send_cmd("hostname n9kv1")
send_cmd("interface mgmt 0")
send_cmd("ip address dhcp ")
send_cmd("no shut")
send_cmd("exit")
send_cmd("username vagrant password vagrant role network-admin")
send_cmd("username vagrant sshkey ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key")
# Enable Features
send_cmd("feature nxapi")
# Enable Guest Shell - needed because running with 4G Ram and not auto-installed
# Used to set boot variable correctly
send_cmd("guestshell enable")
time.sleep(1)
# wait for indication that guestshell is ready
if (wait):
child.expect(r"%VMAN-2-ACTIVATION_STATE: Successfully activated virtual service 'guestshell", child.timeout)
logger.info('Guest Shell Enabled')
time.sleep(5)
# Set Boot Variable
logger.warn("Setting boot image")
send_cmd("guestshell run ls /bootflash/nxos*")
boot_image = child.before.split("/")[4].strip()
send_cmd("boot nxos bootflash:/{}".format(boot_image))
# Disable Guest Shell to save resources in base box
send_cmd("guestshell destroy", expect_prompt=False)
time.sleep(1)
send_cmd("y")
time.sleep(1)
# wait for indication that guestshell is destroyed
if (wait):
child.expect(r"%VMAN-2-INSTALL_STATE: Successfully destroyed virtual service 'guestshell", child.timeout)
logger.info('Guest Shell Destroyed')
# done and save
logger.warn("Finishing Config and Saving to Startup-Config")
send_cmd("end")
send_cmd(["copy run start", CRLF])
# just to be sure
logger.warn('Waiting 10 seconds...')
time.sleep(10)
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT('Timeout (%s) exceeded in read().' % str(child.timeout))
def create_Vagrantfile(boxname, vmmemory="4096"):
"""
Create a Basic Vagrantfile.
"""
template = """# -*- mode: ruby -*-\n# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "{boxname}"
config.vm.synced_folder '.', '/vagrant', disabled: true
config.ssh.insert_key = false
config.vm.boot_timeout = 400
config.vm.guest = :other
# turn off the check if the plugin is installed
if Vagrant.has_plugin?("vagrant-vbguest")
config.vbguest.auto_update = false
end
config.vm.provider "virtualbox" do |vb|
vb.memory = "{vmmemory}"
end
end
"""
vagrantfile_contents = template.format(boxname=boxname, vmmemory=vmmemory)
logger.info("Contents of Vagrantfile to be used")
logger.info(vagrantfile_contents)
logger.warn("Creating Vagrantfile")
with open("Vagrantfile", "w") as f:
f.write(vagrantfile_contents)
def box_add(boxname, boxpath):
"""
Add Box to Vagrant Inventory.
"""
logger.debug("Adding box %s to Vagrant." % (boxname))
run(["vagrant", "box", "add", "-f", boxname, boxpath])
def box_remove(boxname):
"""
Remove Box from Vagrant Inventory.
"""
logger.debug("Removing box %s from Vagrant." % (boxname))
run(["vagrant", "box", "remove", "-f", boxname])
def vagrant_up(cont_on_error=False):
"""
Bring Up Vagrant
"""
logger.warn("Starting Vagrant Environment.")
logger.warn(" Note: vagrant may generate an error, that is expected")
logger.warn(" Note: this step may take 3-5 minutes to complete.")
run(["vagrant", "up"], cont_on_error=cont_on_error)
def main(argv):
input_box = ''
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
A tool to create an NX-OS Vagrant VirtualBox box from a base NX-OS Box.
The Base Box will be installed, booted and configured.
"vagrant ssh" provides access to the NX-oS management interface
with internet access. It uses the insecure Vagrant SSH key.
'''),
epilog=textwrap.dedent('''\
E.g.:
box build with local box:
%(prog)s nxosv-final.7.0.3.I7.1.box
'''))
parser.add_argument('BOX_FILE',
help='local Base Box filename')
# parser.add_argument('-o', '--create_ova', action='store_true',
# help='additionally use VBoxManage to export an OVA')
parser.add_argument('-d', '--debug', action='store_true',
help='will pause with the VM in a running state. Use: socat unix-connect:/tmp/test stdin to access')
parser.add_argument('-n', '--nocolor', action='store_true',
help='don\'t use colors for logging')
parser.add_argument('-v', '--verbose',
action='store_const', const=logging.INFO,
default=logging.WARN, help='turn on verbose messages')
args = parser.parse_args()
# setup logging
root_logger = logging.getLogger()
root_logger.setLevel(level=args.verbose)
handler = ColorHandler(colored=(not args.nocolor))
formatter = logging.Formatter("==> %(message)s")
handler.setFormatter(formatter)
root_logger.addHandler(handler)
logger = logging.getLogger("box-builder")
# PRE-CHECK: is socat installed?
logger.warn('Check whether "socat" is installed')
try:
run(['socat', '-V'])
except OSError:
sys.exit(
'The "socat" utility is not installed. Please install it prior to using this script.')
# Get source Box name and determine key details for script
input_box = args.BOX_FILE
box_name = os.path.basename(input_box)
vmname_base = os.path.basename(os.getcwd()) + "_default_"
version = box_name[box_name.find(".")+1:len(box_name)-4]
output_box = "nxos_{}".format(version)
# if debug flag then set the logger to debug
if args.debug:
args.verbose = logging.DEBUG
if not os.path.exists(input_box):
sys.exit('%s does not exist' % input_box)
# Set up paths
base_dir = os.path.join(os.getcwd(), 'created_boxes')
box_dir = os.path.join(base_dir, output_box)
box_out = os.path.join(box_dir, output_box + '.box')
pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
# vbox = os.path.join(box_dir, vmname + '.vbox')
# vdi = os.path.join(box_dir, vmname + '.vdi')
# ova_out = os.path.join(box_dir, vmname + '.ova')
logger.debug('Input Box is %s', input_box)
logger.debug('pathname: %s', pathname)
logger.debug('VM Name Base: %s', vmname_base)
logger.debug('base_dir: %s', base_dir)
logger.debug('box_dir: %s', box_dir)
logger.debug('Source Box Path: %s', box_name)
logger.debug('Exported Box Path: %s', box_out)
# logger.warn('vbox: %s', vbox)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
if not os.path.exists(box_dir):
os.makedirs(box_dir)
# Delete existing Box
if os.path.exists(box_out):
os.remove(box_out)
logger.debug('Found and deleted previous %s', box_out)
# Delete existing OVA
# if os.path.exists(ova_out) and args.create_ova is True:
# os.remove(ova_out)
# logger.debug('Found and deleted previous %s', ova_out)
# Destroy any existing vagrant environment
cleanup_box()
logger.warn(" Note: An error may occur if the Vagrant environment isn't initialized, not problem")
# Create Vagrantfile
create_Vagrantfile(box_name)
# Add Box to Vagrant Inventory
box_add(box_name, input_box)
# Bring up Environment
vagrant_up(cont_on_error=True)
# Determine VM Name from Virtual Box
vms_list_running = run(['VBoxManage', 'list', 'runningvms']).split("\n")
possible_vms = [vm for vm in vms_list_running if vmname_base in vm]
if len(possible_vms) == 1:
# Extract just the VM Name from the output
vmname = possible_vms[0].split()[0][1:len(possible_vms[0].split()[0])-2]
logger.warn("Found VirtualBox VM: {}".format(vmname))
else:
sys.exit("Could not determine the VM Name.")
# Complete Startup
# Configure NX-OS
# do print steps for logging set to DEBUG and INFO
# DEBUG also prints the I/O with the device on the console
# default is WARN
configure_nx(args.verbose < logging.WARN)
# Good place to stop and take a look if --debug was entered
if args.debug:
pause_to_debug()
# Export as new box
logger.warn('Powering down and generating new Vagrant VirtualBox')
logger.warn('Waiting for machine to shutdown')
run(["vagrant", "halt", "-f"])
# Add the embedded Vagrantfile
vagrantfile_pathname = os.path.join(pathname, 'include', 'embedded_vagrantfile_nx')
logger.warn("Exporting new box file. (may take 3 minutes or so)")
run(["vagrant", "package", "--vagrantfile", vagrantfile_pathname, "--output", box_out])
logger.warn('New Vagrant Box Created: %s', box_out)
# Destroy original Source Box
logger.warn("Cleaning up build resources.")
cleanup_box()
box_remove(box_name)
# Delete Vagrantfile used to build box
os.remove("Vagrantfile")
logger.warn('Completed!')
logger.warn(" ")
logger.warn('Add box to system:')
logger.warn(' vagrant box add --name nxos/{version} {boxout} --force'.format(version=version, boxout=box_out))
logger.warn(" ")
logger.warn('Use your new box:')
logger.warn("Make project directory: ")
logger.warn(" mkdir my_project ")
logger.warn(" cd my_project")
logger.warn('Initialize Project Vagrant Environment:')
logger.warn(' vagrant init nxos/{version}'.format(version=version))
logger.warn('Bring up box:')
logger.warn(' vagrant up')
logger.warn('')
logger.warn("Note: Due to a shell error, 'vagrant up' will error the "\
"first time launching a box. Run 'vagrant up' to complete")
logger.warn('Note:')
logger.warn(
' Both the NX-OS SSH and NX-API username and password are vagrant/vagrant')
if __name__ == '__main__':
main(sys.argv[1:])
|
from django.contrib import admin
from . models import Product
# Register your models here.
admin.site.register(Product)
|
#!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework import BitcoinTestFramework
from util import *
from pprint import pprint
from time import sleep
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
# connect to a local machine for debugging
# url = "http://bitcoinrpc:DP6DvqZtqXarpeNWyN3LZTFchCCyCUuHwNF7E8pX99x1@%s:%d" % ('127.0.0.1', 18332)
# proxy = AuthServiceProxy(url)
# proxy.url = url # store URL on proxy for info
# self.nodes.append(proxy)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
self.nodes[2].setgenerate(True, 1)
self.nodes[0].setgenerate(True, 121)
self.sync_all()
watchonly_address = self.nodes[0].getnewaddress()
watchonly_amount = 200
self.nodes[1].importaddress(watchonly_address, "", True)
self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5);
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0);
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0);
self.sync_all()
self.nodes[0].setgenerate(True, 1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(len(dec_tx['vin']), 1) #one vin coin
assert_equal(fee*0.00000001+float(totalOut), 1.5) #the 1.5BTC coin must be taken
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(len(dec_tx['vin']), 2) #two vin coins
assert_equal(fee*0.00000001+float(totalOut), 2.5) #the 1.5BTC+1.0BTC coins must have been taken
##################################
# simple test with a larger coin #
##################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(len(dec_tx['vin']), 1) #one vin coin
assert_equal(fee*0.00000001+float(totalOut), 5.0) #the 5.0BTC coin must have been taken
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(len(dec_tx['vin']), 2) #two vin coins
assert_equal(fee*0.00000001+float(totalOut), 6.0) #the 5.0BTC + 1.0BTC coins must have been taken
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 5.0:
utx = aUtx
break;
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee*0.00000001+float(totalOut), utx['amount']) #compare vin total and totalout+fee
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
break;
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if outputs.has_key(out['scriptPubKey']['addresses'][0]):
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
assert_equal(fee*0.00000001+float(totalOut), 2.5) #this tx must use the 1.0BTC and the 1.5BTC coin
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
if aUtx['amount'] == 5.0:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if outputs.has_key(out['scriptPubKey']['addresses'][0]):
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
assert_equal(fee*0.00000001+float(totalOut), 7.5) #this tx must use the 1.0BTC, 1.5BTC and 5.0BTC coins
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = False
utx2 = False
listunspent = self.nodes[2].listunspent()
for aUtx in listunspent:
if aUtx['amount'] == 1.0:
utx = aUtx
if aUtx['amount'] == 5.0:
utx2 = aUtx
assert_equal(utx!=False, True)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if outputs.has_key(out['scriptPubKey']['addresses'][0]):
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
assert_equal(fee*0.00000001+float(totalOut), 7.5) #this tx must use the 1.0BTC, 1.5BTC and 5.0BTC coins
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
listunspent = self.nodes[2].listunspent()
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
errorString = ""
try:
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
except JSONRPCException,e:
errorString = e.error['message']
assert_equal("Insufficient" in errorString, True);
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2.0}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
result = self.nodes[1].fundrawtransaction(rawtx, True)
assert_equal("hex" in result.keys(), True)
assert_equal("fee" in result.keys(), True)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 1.0}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
result = self.nodes[1].fundrawtransaction(rawtx, True)
assert_equal("hex" in result.keys(), True)
assert_equal("fee" in result.keys(), True)
if __name__ == '__main__':
RawTransactionsTest().main()
|
import pymongo
from datetime import datetime
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
#db = client['primer']
#coll = db['dataset']
db = client['test']
coll = db['restaurants']
result = coll.insert_one(
{
"address": {
"street": "2 Avenue",
"zipcode": "10075",
"building": "1480",
"coord": [-73.9557413, 40.771903]
},
"borough": "Manhattan",
"cuisine": "Italian",
"grades": [
{
"date": datetime.strptime("2014-10-02", "%Y-%m-%d"),
"grade": "A",
"score": 11
},
{
"date": datetime.strptime("2014-10-16", "%Y-%m-%d"),
"grade": "B",
"score": 17
}
],
"name": "Vella",
"restaurant_id": "41704620"
}
)
# find all
#cursor = coll.find()
# find
cursor = coll.find({"borough": "Manhattan"})
for document in cursor:
print(document)
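# A small follow-up sketch using the same collection and standard MongoDB query
# syntax: match on a nested document field and on an element of the grades array.
cursor = coll.find({"address.zipcode": "10075", "grades.grade": "A"})
for document in cursor:
    print(document)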
|
#-----------------------------------------------------------------------
#Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
#
#Author: Daniel M. Pelt
#Contact: D.M.Pelt@cwi.nl
#Website: http://dmpelt.github.io/foam_ct_phantom/
#License: MIT
#
#This file is part of foam_ct_phantom, a Python package for generating
#foam-like phantoms for CT.
#-----------------------------------------------------------------------
from . import generate, infiltrate, project, verticalmovement, expand
import abc
import os.path
import numpy as np
class Phantom(object):
__metaclass__ = abc.ABCMeta
def __init__(self, filename):
self.filename = filename
if not os.path.exists(self.filename):
raise ValueError("{} does not exist.".format(self.filename))
@abc.abstractmethod
def generate_projections(self, outfile, geom):
pass
@abc.abstractmethod
def generate_volume(self, outfile, geom, time=0):
pass
class FoamPhantom(Phantom):
@staticmethod
def generate(filename, seed, nspheres_per_unit=100000, ntrials_per_unit=1000000, maxsize=0.2, zrange=1.5):
generate.genphantom(filename, seed, nspheres_per_unit=nspheres_per_unit, ntrials_per_unit=ntrials_per_unit, maxsize=maxsize, zrange=zrange)
def generate_projections(self, outfile, geom):
generate.gen_dataset(outfile,self.filename, geom)
def generate_volume(self, outfile, geom):
generate.genvol(outfile, self.filename,geom)
def generate_3d(self, nx, ny, pixsize, angle, tilt1, tilt2, maxz=1.5, cutout=0, cutoff=-np.inf):
return generate.gen3d(self.filename, nx, ny, pixsize, angle, tilt1, tilt2, maxz=maxz, cutout=cutout, cutoff=cutoff)
class MovingFoamPhantom(Phantom):
@staticmethod
def generate(filename, phantom_file, seed, zmin, zmax, random_move=0.1, regularization=0.01, npoints=1024):
verticalmovement.generate_verticalmovement(filename, phantom_file, seed, zmin, zmax, random_move=random_move, regularization=regularization, npoints=npoints)
def generate_projections(self, outfile, geom):
verticalmovement.gen_dataset_verticalmovement(outfile,self.filename, geom)
def generate_volume(self, outfile, geom, time=0):
verticalmovement.genvol_verticalmovement(time, outfile, self.filename, geom)
def generate_3d(self, nx, ny, pixsize, angle, tilt1, tilt2, maxz=1.5, cutout=0, cutoff=-np.inf, time=0):
return verticalmovement.gen3d_verticalmovement(time, self.filename, nx, ny, pixsize, angle, tilt1, tilt2, maxz=maxz, cutout=cutout, cutoff=cutoff)
class ExpandingFoamPhantom(Phantom):
@staticmethod
def generate(filename, phantom_file, seed, start_size=0.25, random_move=0.1, regularization=0.01, static_after_fraction=0.1, npoints=1024):
expand.generate_expand(filename, phantom_file, seed, start_size=start_size, random_move=random_move, regularization=regularization, static_after_fraction=static_after_fraction, npoints=npoints)
def generate_projections(self, outfile, geom):
expand.gen_dataset_expand(outfile,self.filename, geom)
def generate_volume(self, outfile, geom, time=0):
expand.genvol_expand(time, outfile, self.filename, geom)
def generate_3d(self, nx, ny, pixsize, angle, tilt1, tilt2, maxz=1.5, cutout=0, cutoff=-np.inf, time=0):
return expand.gen3d_expand(time, self.filename, nx, ny, pixsize, angle, tilt1, tilt2, maxz=maxz, cutout=cutout, cutoff=cutoff)
class InfiltrationFoamPhantom(Phantom):
@staticmethod
def generate(filename, phantom_file, seed, fluid_value, startz=-1.5, rand_width=1, cutoff=1e-5):
infiltrate.generate_inflitration(filename, phantom_file, seed, fluid_value, startz=startz, rand_width=rand_width, cutoff=cutoff)
def generate_projections(self, outfile, geom):
infiltrate.gen_dataset_infiltrate(outfile,self.filename,geom)
def generate_volume(self, outfile, geom, time=0):
infiltrate.genvol_infiltrate(time, outfile, self.filename,geom)
def generate_3d(self, nx, ny, pixsize, angle, tilt1, tilt2, maxz=1.5, cutout=0, cutoff=-np.inf, time=0):
return infiltrate.gen3d_infiltrate(time, self.filename, nx, ny, pixsize, angle, tilt1, tilt2, maxz=maxz, cutout=cutout, cutoff=cutoff)
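if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): 'my_phantom.h5', the seed and the
    # reduced nspheres_per_unit are hypothetical values, and the geometry objects
    # needed by generate_projections/generate_volume are omitted here.
    FoamPhantom.generate('my_phantom.h5', seed=12345, nspheres_per_unit=1000)
    phantom = FoamPhantom('my_phantom.h5')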
|
import os
import typing
from pathlib import Path
import cv2
import numpy as np
from PyQt5 import Qt
import inspect
from PyQt5 import QtGui
from PyQt5.QtGui import QIcon, QPixmap, QImage, qRgb
from PyQt5.QtWidgets import QLayout, QGridLayout, QLayoutItem, QMessageBox, QApplication, QMainWindow, QVBoxLayout, \
QGroupBox, \
QFileDialog, QDialog, QAbstractItemView, QListView, QTreeView
class GUIUtilities:
@staticmethod
def get_icon(img):
app = QApplication.instance()
curr_theme = "light"
if app:
curr_theme = app.property("theme")
assets_path = Path(__file__).parent.parent.joinpath("assets").absolute()
resource_path = assets_path.joinpath(f"icons/{curr_theme}/{img}")
return QIcon(str(resource_path))
@staticmethod
def get_image(img):
app = QApplication.instance()
curr_theme = "light"
if app:
curr_theme = app.property("theme")
assets_path = Path(__file__).parent.parent.joinpath("assets").absolute()
resource_path = assets_path.joinpath(f"icons/{curr_theme}/{img}")
return QPixmap(str(resource_path))
@staticmethod
def clear_layout(layout: QLayout):
while layout.count():
child = layout.takeAt(0)
if child.widget():
child.widget().deleteLater()
@staticmethod
def next_cell(grid_layout: QGridLayout) -> tuple:
cols = grid_layout.columnCount()
rows = grid_layout.rowCount()
for r in range(rows):
for c in range(cols):
item: QLayoutItem = grid_layout.itemAtPosition(r, c)
if item is None:
return r, c
@staticmethod
def show_error_message(message: str, title="Error"):
msg = QMessageBox()
msg.setIcon(QMessageBox.Critical)
msg.setText(message)
msg.setWindowTitle(title)
msg.exec_()
@staticmethod
def show_info_message(message: str, title: str = "Information"):
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(message)
msg.setWindowTitle(title)
msg.exec_()
@staticmethod
def array_to_qimage(im: np.ndarray, copy=False):
gray_color_table = [qRgb(i, i, i) for i in range(256)]
if im is None:
return QImage()
if im.dtype == np.uint8:
if len(im.shape) == 2:
qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_Indexed8)
qim.setColorTable(gray_color_table)
return qim.copy() if copy else qim
elif len(im.shape) == 3:
if im.shape[2] == 3:
qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGB888);
return qim.copy() if copy else qim
elif im.shape[2] == 4:
qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_ARGB32);
return qim.copy() if copy else qim
@staticmethod
def findMainWindow() -> typing.Union[QMainWindow, None]:
# Global function to find the (open) QMainWindow in application
app = QApplication.instance()
for widget in app.topLevelWidgets():
if isinstance(widget, QMainWindow):
return widget
return None
@staticmethod
def wrap_with_groupbox(widget, widget_title):
groupbox = QGroupBox()
groupbox.setTitle(widget_title)
vlayout = QVBoxLayout()
vlayout.addWidget(widget)
groupbox.setLayout(vlayout)
return groupbox
@staticmethod
def select_folders():
file_dialog = QFileDialog()
file_dialog.setFileMode(QFileDialog.DirectoryOnly)
file_dialog.setOption(QFileDialog.DontUseNativeDialog, True)
file_view = file_dialog.findChild(QListView, 'listView')
if file_view:
file_view.setSelectionMode(QAbstractItemView.MultiSelection)
f_tree_view = file_dialog.findChild(QTreeView)
if f_tree_view:
f_tree_view.setSelectionMode(QAbstractItemView.MultiSelection)
paths = []
if file_dialog.exec() == QDialog.Accepted:
paths = file_dialog.selectedFiles()
return paths
@staticmethod
def select_folder(parent=None, title="select the folder"):
return str(QFileDialog.getExistingDirectory(None, title))
@staticmethod
def select_file(ext, title="select the file", parent=None):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
ext = "{} Files (*{})".format(ext, ext) if isinstance(ext, str) else ";;".join(
list(map(lambda e: "{} Files (*.{})".format(e, e), ext)))
path, _ = QFileDialog.getOpenFileName(parent, title, os.path.join(os.path.expanduser('~')), ext,
options=options)
return path
@staticmethod
def select_files(ext, title="select the file", parent=None):
options = QFileDialog.Options()
options |= QFileDialog.DontUseNativeDialog
ext = "{} Files (*{})".format(ext, ext) if isinstance(ext, str) else ";;".join(
list(map(lambda e: "{} Files (*.{})".format(e, e), ext)))
files, _ = QFileDialog.getOpenFileNames(parent, title, os.path.join(os.path.expanduser('~')), ext,
options=options)
return files
@classmethod
def color_icon2gray_icon(cls, icon: QIcon):
# TODO: implement for RGB icons
pixmap = icon.pixmap(icon.availableSizes()[0])
img_array = cls.Qpixmap2array(pixmap)
*_, alpha = cv2.split(img_array)
gray_layer = cv2.cvtColor(img_array, cv2.COLOR_BGR2GRAY)
gray_img = cv2.merge((gray_layer, gray_layer, gray_layer, alpha))
height, width, channel = gray_img.shape
bytesPerLine = 4 * width
qImg = QImage(gray_img.data, width, height, bytesPerLine, QImage.Format_RGBA8888)
pixmap = QtGui.QPixmap.fromImage(qImg)
return QIcon(pixmap)
@classmethod
def Qpixmap2array(cls, pixmap: QPixmap) -> np.ndarray:
image=pixmap.toImage()
channels_count= 4 if image.hasAlphaChannel() else 3
width, height = image.width(), image.height()
buffer=image.bits().asarray(width*height*channels_count)
arr=np.frombuffer(buffer,dtype=np.uint8).reshape((height,width,channels_count))
return arr
@staticmethod
def qt_version():
vers = ['%s = %s' % (k, v) for k, v in vars(Qt).items() if
k.lower().find('version') >= 0 and not inspect.isbuiltin(v)]
print('\n'.join(sorted(vers)))
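if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the dialogs require a running
    # QApplication; the icon/image helpers additionally assume the assets/icons
    # directory layout referenced above.
    import sys
    app = QApplication(sys.argv)
    GUIUtilities.show_info_message("Hello from GUIUtilities", title="Demo")
    print(GUIUtilities.select_folder(title="pick a folder"))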
|
import abc
import os
import time
import requests
import six
from . import sdk_exceptions
from .clients import JobsClient, ModelsClient
from .clients.base_client import BaseClient
from .logger import MuteLogger
class S3FilesDownloader(object):
def __init__(self, logger=MuteLogger()):
self.logger = logger
self.file_download_retries = 8
def download_list(self, sources, destination_dir):
"""
:param tuple[tuple[str,str]] sources: tuple/list of (file_path, file_url) pairs
:param str destination_dir:
"""
for source in sources:
self.download_file(source, destination_dir, max_retries=self.file_download_retries)
def download_file(self, source, destination_dir, max_retries=0):
self._create_directory(destination_dir)
file_path, file_url = source
self.logger.log("Downloading: {}".format(file_path))
# Trying to download several times in case of connection error with S3.
# The error seems to occur randomly but adding a short sleep between retries helps a bit
for _ in range(max_retries + 1):
try:
response = requests.get(file_url)
break
except requests.exceptions.ConnectionError:
self.logger.debug("Downloading {} resulted in error. Trying again...".format(file_path))
time.sleep(0.1)
else: # break statement not executed - ConnectionError `max_retries` times
raise sdk_exceptions.ResourceFetchingError("Downloading {} resulted in error".format(file_path))
self._create_subdirectories(file_path, destination_dir)
self._save_file(response, file_path, destination_dir)
def _create_directory(self, destination_dir):
if os.path.exists(destination_dir) and os.path.isdir(destination_dir):
return
os.makedirs(destination_dir)
def _create_subdirectories(self, file_path, destination_dir):
file_dirname = os.path.dirname(file_path)
file_dir_path = os.path.join(destination_dir, file_dirname)
self._create_directory(file_dir_path)
def _save_file(self, response, file_path, destination_dir):
destination_path = os.path.join(destination_dir, file_path)
try:
with open(destination_path, "wb") as h:
h.write(response.content)
except TypeError:  # in py3 TypeError is raised when writing str in bytes mode, so retry in text mode
with open(destination_path, "w") as h:
h.write(response.content)
@six.add_metaclass(abc.ABCMeta)
class ResourceDownloader(object):
CLIENT_CLASS = None
def __init__(self, api_key, logger=MuteLogger(), ps_client_name=None):
self.api_key = api_key
self.logger = logger
self.ps_client_name = ps_client_name
self.client = self._build_client(self.CLIENT_CLASS, api_key, logger=logger)
def download(self, job_id, destination):
files = self._get_files_list(job_id)
s3_downloader = S3FilesDownloader(logger=self.logger)
s3_downloader.download_list(files, destination)
@abc.abstractmethod
def _get_files_list(self, job_id):
"""
:param str job_id:
:returns: Tuple of (file path, url) pairs
:rtype: tuple[tuple[str,str]]
"""
pass
def _build_client(self, client_class, *args, **kwargs):
"""
:param type[BaseClient] client_class:
"""
client = client_class(*args, **kwargs)
if self.ps_client_name is not None:
client.ps_client_name = self.ps_client_name
return client
class JobArtifactsDownloader(ResourceDownloader):
CLIENT_CLASS = JobsClient
def _get_files_list(self, job_id):
files = self.client.artifacts_list(job_id)
files = tuple((f.file, f.url) for f in files)
return files
class ModelFilesDownloader(ResourceDownloader):
CLIENT_CLASS = ModelsClient
def _get_files_list(self, model_id):
files = self.client.get_model_files(model_id=model_id, links=True)
files = tuple((f.file, f.url) for f in files)
return files
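if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the API key and job id below are
    # hypothetical placeholders; a real key and an existing job with artifacts
    # are required for the download to succeed.
    downloader = JobArtifactsDownloader(api_key="YOUR_API_KEY")
    downloader.download("job-id-placeholder", "./artifacts")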
|
#!/usr/bin/env python3
from chewing import userphrase
def main():
list = userphrase.find_all()
##print(list)
for item in list:
print()
print('phrase:', item[0])
print('bopomofo:', item[1])
if __name__ == '__main__':
main()
|
import sys
sys.path.append('.')
from util.game import Game
from util.func import Case
from util.card import Card, CardList, CardSuit
from util.player import Player
from typing import List, Optional, Tuple
import random
class LevelUp(Game):
### Constants
PLAYERNUM: int = 4
CARDPOOL: List[Card] = [Card(i) for i in range(54)] * 2
BASESCORE: int = 80
LEVELSCORE: int = 40
def __init__(self):
self.players: List[Optional[Player]] = [None] * LevelUp.PLAYERNUM
self.discard_buffer: Optional[CardList] = None
self.curPlayerIndex: Optional[int] = None
self.rankLevel: Tuple[int, int] = (1, 1)
self.dealerIndex: Optional[int] = None
self.rankMain: int = 1
# E.g. (Spade, 0, 1) means the main suit is spade (suited with a single spade by 0-th player)
self.suitMain: Optional[Tuple[CardSuit, int, int]] = None
self.score: int = 0
self.state: str = 'END'
for i in range(len(self.players)):
self.players[i] = Player()
def inform(self, information):
case = Case(self.state)
if case('END'):
case = Case(information)
if case('START'):
self.state = 'DISPATCH'
return (True, self._dispatch(),
{
'suit': self.suitMain,
'rank': self.rankMain,
'level': self.rankLevel
}
)
if case('DISPATCH'):
case = Case(information)
if case('FINISH'):
self.state = 'DISCARD'
if self.dealerIndex is None:
self.dealerIndex = self.suitMain[1]
self.curPlayerIndex = self.dealerIndex
self.players[self.curPlayerIndex].cardInHand += self.discard_buffer
self.discard_buffer = CardList()
for player in self.players:
player.cardFront = CardList()
return (True, None, None)
return (False, None, None)
def _dispatch(self) -> List[int]:
newCardPool: List[Card] = random.sample(LevelUp.CARDPOOL, len(LevelUp.CARDPOOL))
dispatch = [newCardPool[0:25], newCardPool[25:50], newCardPool[50:75], newCardPool[75:100], newCardPool[100:]]
for id, player in enumerate(self.players):
player.cardInHand = CardList(dispatch[id])
self.discard_buffer = CardList(dispatch[-1])
return [[card.ID for card in cards] for cards in dispatch]
def isSuitable(self, cards: List[int], playerID: int, suit: Optional[CardSuit] = None):
#suit: 0 NT, 1 Spade, 2 Heart, 3 Club, 4 Diamond
if suit is None:
return [self.isSuitable(cards, playerID, s) for s in [
CardSuit.Joker, CardSuit.Spade, CardSuit.Heart, CardSuit.Club, CardSuit.Diamond
]]
cardnum = -1
case = Case(suit)
if case(CardSuit.Spade): cardnum = 39 + self.rankMain
elif case(CardSuit.Heart): cardnum = 26 + self.rankMain
elif case(CardSuit.Club): cardnum = 13 + self.rankMain
elif case(CardSuit.Diamond): cardnum = self.rankMain
if self.suitMain is None:
if suit == CardSuit.Joker:
return cards.count(52) == 2 or cards.count(53) == 2
else:
return cardnum in cards
elif self.suitMain[1] == playerID:
if self.suitMain[2] == 2: return False
if suit != self.suitMain[0]: return False
return cards.count(cardnum) == 2
else:
if suit == CardSuit.Joker:
if self.suitMain[0] == CardSuit.Joker:
return cards.count(53) == 2
else:
return cards.count(53) == 2 or cards.count(52) == 2
if self.suitMain[2] == 2: return False
return cards.count(cardnum) == 2
def suitRequest(self, playerID: int, suit: CardSuit):
cards = self.players[playerID].cardInHand.tolist()
if not self.isSuitable(cards, playerID, suit):
return False
for player in self.players:
player.cardFront = CardList()
cardnum = -1
case = Case(suit)
if case(CardSuit.Spade): cardnum = 39 + self.rankMain
elif case(CardSuit.Heart): cardnum = 26 + self.rankMain
elif case(CardSuit.Club): cardnum = 13 + self.rankMain
elif case(CardSuit.Diamond): cardnum = self.rankMain
if suit == CardSuit.Joker:
if cards.count(52) == 2:
self.suitMain = (CardSuit.Joker, playerID, 2)
self.players[playerID].cardFront += CardList([Card(52), Card(52)])
else:
self.suitMain = (CardSuit.Joker, playerID, 2)
self.players[playerID].cardFront += CardList([Card(53), Card(53)])
else:
if self.suitMain is None:
self.suitMain = (suit, playerID, 1)
self.players[playerID].cardFront += Card(cardnum)
else:
self.suitMain = (suit, playerID, 2)
self.players[playerID].cardFront += CardList([Card(cardnum), Card(cardnum)])
front = [player.cardFront.tolist() for player in self.players]
return [front, self.suitMain]
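if __name__ == '__main__':
    # Minimal driving sketch: start a round and deal the cards. _dispatch returns
    # five ID lists (four 25-card hands plus the 8-card reserve for the dealer).
    game = LevelUp()
    ok, hands, info = game.inform('START')
    print(ok, info)
    print([len(hand) for hand in hands])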
|
#!/usr/bin/env python
"""Tests for `musicdwh` package."""
import pytest
import pandas as pd
from pandas._testing import assert_frame_equal
import sqlalchemy as sqla
from musicdwh.musicdwh import import_hb, import_wwc, import_lov, ip_convert_country, import_game, upload_to_db
HB_FILE_PATH_INS = './data/hb/2021/04/28/hb.csv'
HB_FILE_PATH_UPD = './data/hb/2021/04/29/hb.csv'
WWC_FILE_PATH_INS = './data/wwc/2021/04/28/wwc.json'
WWC_FILE_PATH_UPD = './data/wwc/2021/04/29/wwc.json'
DATE_INS = '2021-04-28'
DATE_UPD = '2021-04-29'
def test_import_hb_dataframe_type():
# tests that the return value is a pandas DataFrame
df_import = import_hb(HB_FILE_PATH_INS)
assert "pandas.core.frame.DataFrame" in str(type(df_import))
def test_import_hb_dataframe_data():
# tests that the imported DataFrame contains the expected data
# initialise data as lists.
data = {'id':[1, 2, 3, 4, 5],
'first_name':['Maria', 'Douglas', 'Barbara', 'Jacqueline', 'Janet'],
'last_name':['Russell', 'Cunningham', 'Rice', 'Cook', 'Jones'],
'email':['mrussell0@soup.io', 'dcunningham1@sogou.com', 'brice2@bizjournals.com', 'jcook3@amazon.co.jp', 'jjones4@surveymonkey.com'],
'gender':['Female', 'Male', 'Female', 'Female', 'Female'],
'ip_address':['141.48.134.32', '75.5.5.45', '87.137.224.0', '249.125.240.30', '190.235.91.244'],
'dob':['5/26/1976', '1/25/1980', '4/15/1979', '5/2/1963', '8/17/1968']
}
# Create DataFrame
df = pd.DataFrame(data)
df_import = import_hb(HB_FILE_PATH_INS)
assert_frame_equal(df_import, df)
def test_ip_convert_country():
# tests lookup of the country code from the IP address
# initialise data as lists.
data = {'id':[1, 2, 3, 4, 5],
'first_name':['Maria', 'Douglas', 'Barbara', 'Jacqueline', 'Janet'],
'last_name':['Russell', 'Cunningham', 'Rice', 'Cook', 'Jones'],
'email':['mrussell0@soup.io', 'dcunningham1@sogou.com', 'brice2@bizjournals.com', 'jcook3@amazon.co.jp', 'jjones4@surveymonkey.com'],
'gender':['Female', 'Male', 'Female', 'Female', 'Female'],
'ip_address':['141.48.134.32', '75.5.5.45', '87.137.224.0', '249.125.240.30', '190.235.91.244'],
'dob':['5/26/1976', '1/25/1980', '4/15/1979', '5/2/1963', '8/17/1968'],
'country_code':['DE', 'US', 'DE', 'Undefined', 'PE']
}
# Create DataFrame
df = pd.DataFrame(data)
df_import = import_hb(HB_FILE_PATH_INS)
# get country codes from IP address
ip_code_series = ip_convert_country(df_import['ip_address'], 30, 100)
# append country code to hb dataframe
df_import['country_code']=ip_code_series
assert_frame_equal(df_import, df)
def test_import_game_hb():
# test importing hb data into dataframe by calling parent function
df_import = import_game('hb', DATE_INS, './data')
# initialise data as lists.
data = {'id':[1, 2, 3, 4, 5],
'first_name':['Maria', 'Douglas', 'Barbara', 'Jacqueline', 'Janet'],
'last_name':['Russell', 'Cunningham', 'Rice', 'Cook', 'Jones'],
'email':['mrussell0@soup.io', 'dcunningham1@sogou.com', 'brice2@bizjournals.com', 'jcook3@amazon.co.jp', 'jjones4@surveymonkey.com'],
'gender':['Female', 'Male', 'Female', 'Female', 'Female'],
'ip_address':['141.48.134.32', '75.5.5.45', '87.137.224.0', '249.125.240.30', '190.235.91.244'],
'dob':['5/26/1976', '1/25/1980', '4/15/1979', '5/2/1963', '8/17/1968']
}
# Create DataFrame
df = pd.DataFrame(data)
assert_frame_equal(df_import, df)
def test_import_lov():
# test importing LOV data into dataframe
df_import = import_lov('./data/LOVs/LOV_gender.csv')
data = {'gender':['female', 'male', 'unspecified', 'other']
}
# Create DataFrame
df = pd.DataFrame(data)
assert_frame_equal(df_import, df)
# def test_import_hb_update():
# assert import_hb(HB_FILE_PATH_UPD) == [expected_output]
# def test_import_wwc_new():
# assert import_wwc(WWC_FILE_PATH_INS) == [expected_output]
# def test_import_wwc_update():
# assert import_wwc(WWC_FILE_PATH_UPD) == [expected_output]
# def test_ip_convert_country():
# def test_import_game_wwc():
# def test_upload_to_db():
|
from qtrader.utils.preprocessor import rolling1d
from qtrader.utils.preprocessor import rolling2d
|
import json
import mimetypes
import os
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.apps import apps
from django.core.servers.basehttp import FileWrapper
from django.http import (HttpResponse, HttpResponseNotFound,
HttpResponseBadRequest, StreamingHttpResponse)
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
from django_cas.decorators import login_required
from .models import FileUpload
from .utils import upload_files
@login_required
@require_http_methods(["POST"])
def upload_file(request, app_name, model_name, object_pk):
"""Upload a file"""
try:
m = apps.get_model(app_name, model_name)
except LookupError:
message = _('Model does not exist.')
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': message}))
# Then look up the object by pk
try:
obj = m.objects.get(pk=object_pk)
except ObjectDoesNotExist:
message = _('Object does not exist.')
return HttpResponseNotFound(
json.dumps({'status': 'error', 'message': message}))
files = upload_files(request, obj)
if files is not None:
return HttpResponse(
json.dumps({'status': 'success', 'files': files}))
message = _('Invalid or no file received.')
return HttpResponseBadRequest(
json.dumps({'status': 'error', 'message': message}))
@login_required
@require_http_methods(["POST"])
def delete_file(request, file_id):
"""Delete a file given its object id. TODO:permissions to delete files """
try:
res = FileUpload.objects.get(pk=file_id)
except FileUpload.DoesNotExist:
message = _('The requested file could not be found.')
return HttpResponseNotFound(
json.dumps({'status': 'error', 'message': message}))
res.file.delete()
res.delete()
return HttpResponse(json.dumps({'status': 'success'}))
@login_required
def serve_file(request, path, document_root=None, show_indexes=False):
"""Serve static files """
try:
_file = os.path.join(settings.MEDIA_ROOT, path)
_file_type = mimetypes.guess_type(_file)[0]
chunk_size = 8192
response = StreamingHttpResponse(FileWrapper(open(_file, 'rb'),
chunk_size),
content_type=_file_type)
response['Content-Length'] = os.path.getsize(_file)
except:
return HttpResponseNotFound()
return response
|
"""
__init__
Establish this Flask app.
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from flask_httpauth import HTTPBasicAuth
from flask_json import FlaskJSON
import logging
import os
app = Flask(__name__)
app.config.from_pyfile('../config.py')
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
auth = HTTPBasicAuth()
json = FlaskJSON(app)
from presidency import views, models
|
#
# Copyright (C) 2018 ETH Zurich, University of Bologna
# and GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH (germain.haugou@iis.ee.ethz.ch)
import os.path
c_head_pattern = """
/* THIS FILE HAS BEEN GENERATED, DO NOT MODIFY IT.
*/
/*
* Copyright (C) 2019 GreenWaves Technologies
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
def get_c_desc(name):
return name.replace('\n', ' ').encode('ascii', 'ignore').decode('ascii')
def get_c_name(name):
return name.replace('/', '_').replace('.', '_').encode('ascii', 'ignore').decode('ascii')
class Header(object):
def __init__(self, name, path):
try:
os.makedirs(os.path.dirname(path))
        except OSError:
            # the output directory may already exist
            pass
self.file = open(path, 'w')
self.name = name
self.file.write(c_head_pattern)
def_name = get_c_name(path).upper()
self.file.write('#ifndef __%s__\n' % def_name)
self.file.write('#define __%s__\n' % def_name)
self.file.write('\n')
self.file.write('#if !defined(LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLER__)\n')
self.file.write('\n')
self.file.write('#include <stdint.h>\n')
self.file.write('#include "archi/utils.h"\n')
self.file.write('\n')
self.file.write('#endif\n')
self.file.write('\n')
def close(self):
self.file.write('\n')
self.file.write('#endif\n')
class Constant(object):
def dump_to_header(self, header):
header.file.write('#define %s_%s %s\n' % (get_c_name(header.name).upper(), get_c_name(self.name).upper(), self.value))
class Regfield(object):
def dump_to_header(self, header, reg_name):
field_name = '%s_%s' % (get_c_name(reg_name), get_c_name(self.name).upper())
access_str = ''
if self.access is not None:
access_str = ' (access: %s)' % self.access
if self.desc != '' or access_str != '':
header.file.write('// %s%s\n' % (get_c_desc(self.desc), access_str))
header.file.write('#define %-60s %d\n' % (field_name + '_BIT', self.bit))
header.file.write('#define %-60s %d\n' % (field_name + '_WIDTH', self.width))
header.file.write('#define %-60s 0x%x\n' % (field_name + '_MASK', ((1<<self.width)-1)<<self.bit))
reset = self.reset
if reset is None and self.reg_reset is not None:
reset = (self.reg_reset >> self.bit) & ((1<<self.width) - 1)
if reset is not None:
header.file.write('#define %-60s 0x%x\n' % (field_name + '_RESET', reset))
def dump_macros(self, header, reg_name=None):
header.file.write('\n')
field_name = '%s_%s' % (get_c_name(reg_name), get_c_name(self.name).upper())
header.file.write('#define %-50s (ARCHI_BEXTRACTU((value),%d,%d))\n' % (field_name + '_GET(value)', self.width, self.bit))
header.file.write('#define %-50s (ARCHI_BEXTRACT((value),%d,%d))\n' % (field_name + '_GETS(value)', self.width, self.bit))
header.file.write('#define %-50s (ARCHI_BINSERT((value),(field),%d,%d))\n' % (field_name + '_SET(value,field)', self.width, self.bit))
header.file.write('#define %-50s ((val) << %d)\n' % (field_name + '(val)', self.bit))
class Register(object):
def dump_to_header(self, header, rst=False):
if self.offset is not None:
indent = '' if not rst else ' '
header.file.write('\n')
if self.desc != '':
header.file.write('%s// %s\n' % (indent, get_c_desc(self.desc)))
header.file.write('%s#define %-40s 0x%x\n' % (indent, '%s_%s_OFFSET' % (get_c_name(header.name).upper(), get_c_name(self.name).upper()), self.offset))
def dump_fields_to_header(self, header):
for name, field in self.fields.items():
header.file.write('\n')
reg_name = '%s_%s' % (get_c_name(header.name).upper(), get_c_name(self.name).upper())
field.dump_to_header(reg_name=reg_name, header=header)
def dump_struct(self, header):
header.file.write('\n')
header.file.write('typedef union {\n')
header.file.write(' struct {\n')
current_index = 0
current_pad = 0
for name, field in self.fields.items():
if current_index < field.bit:
header.file.write(' unsigned int padding%d:%-2d;\n' % (current_pad, field.bit - current_index))
current_pad += 1
current_index = field.bit + field.width
header.file.write(' unsigned int %-16s:%-2d; // %s\n' % (get_c_name(field.name).lower(), field.width, get_c_desc(field.desc)))
header.file.write(' };\n')
header.file.write(' unsigned int raw;\n')
header.file.write('} __attribute__((packed)) %s_%s_t;\n' % (get_c_name(header.name).lower(), get_c_name(self.name).lower()))
def dump_vp_class(self, header):
if self.width in [1, 8, 16, 32, 64]:
header.file.write('\n')
header.file.write('class vp_%s_%s : public vp::reg_%d\n' % (get_c_name(header.name).lower(), get_c_name(self.name).lower(), self.width))
header.file.write('{\n')
header.file.write('public:\n')
reg_name = '%s_%s' % (get_c_name(header.name).upper(), get_c_name(self.name).upper())
for name, field in self.fields.items():
field_name = '%s_%s' % (get_c_name(reg_name), get_c_name(field.name).upper())
header.file.write(' inline void %s_set(uint%d_t value) { this->set_field(value, %s_BIT, %s_WIDTH); }\n' % (get_c_name(field.name).lower(), self.width, field_name, field_name))
header.file.write(' inline uint%d_t %s_get() { return this->get_field(%s_BIT, %s_WIDTH); }\n' % (self.width, get_c_name(field.name).lower(), field_name, field_name))
header.file.write('};\n')
def dump_macros(self, header=None):
reg_name = '%s_%s' % (get_c_name(header.name).upper(), get_c_name(self.name).upper())
for name, field in self.fields.items():
field.dump_macros(header, reg_name)
def dump_access_functions(self, header=None):
reg_name = '%s_%s' % (get_c_name(header.name), get_c_name(self.name))
if self.offset is not None:
header.file.write("\n")
            header.file.write("static inline uint32_t %s_get(uint32_t base) { return ARCHI_READ(base, %s_OFFSET); }\n" % (reg_name.lower(), get_c_name(reg_name).upper()))
            header.file.write("static inline void %s_set(uint32_t base, uint32_t value) { ARCHI_WRITE(base, %s_OFFSET, value); }\n" % (reg_name.lower(), get_c_name(reg_name).upper()))
class Regmap(object):
def dump_regs_to_rst(self, rst):
rst.file.write('\n')
rst.file.write('.. toggle-header::\n')
rst.file.write(' :header: *Register map C offsets*\n')
rst.file.write('\n')
rst.file.write(' .. code-block:: c\n')
rst.file.write('\n')
for name, register in self.registers.items():
register.dump_to_header(rst, rst=True)
rst.file.write('\n')
def dump_regs_to_header(self, header):
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// REGISTERS\n')
header.file.write('//\n')
for name, register in self.registers.items():
register.dump_to_header(header)
def dump_regfields_to_header(self, header):
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// REGISTERS FIELDS\n')
header.file.write('//\n')
for name, register in self.registers.items():
register.dump_fields_to_header(header=header)
def dump_structs_to_header(self, header):
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// REGISTERS STRUCTS\n')
header.file.write('//\n')
header.file.write('\n')
header.file.write('#if !defined(LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLER__)\n')
for name, register in self.registers.items():
register.dump_struct(header=header)
header.file.write('\n')
header.file.write('#endif\n')
def dump_vp_structs_to_header(self, header):
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// REGISTERS STRUCTS\n')
header.file.write('//\n')
header.file.write('\n')
header.file.write('#ifdef __GVSOC__\n')
for name, register in self.registers.items():
register.dump_vp_class(header=header)
header.file.write('\n')
header.file.write('#endif\n')
def dump_regmap_to_header(self, header):
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// REGISTERS GLOBAL STRUCT\n')
header.file.write('//\n')
header.file.write('\n')
header.file.write('#if !defined(LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLER__)\n')
header.file.write('\n')
header.file.write('typedef struct {\n')
for name, register in self.registers.items():
desc = ''
if register.desc != '':
desc = ' // %s' % register.desc
header.file.write(' unsigned int %-16s;%s\n' % (get_c_name(register.name).lower(), desc))
header.file.write('} __attribute__((packed)) %s_%s_t;\n' % (get_c_name(header.name), get_c_name(self.name)))
header.file.write('\n')
header.file.write('#endif\n')
def dump_accessors_to_header(self, header):
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// REGISTERS ACCESS FUNCTIONS\n')
header.file.write('//\n')
header.file.write('\n')
header.file.write('#if !defined(LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLER__)\n')
for name, register in self.registers.items():
register.dump_access_functions(header=header)
header.file.write('\n')
header.file.write('#endif\n')
def dump_macros_to_header(self, header):
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// REGISTERS FIELDS MACROS\n')
header.file.write('//\n')
header.file.write('\n')
header.file.write('#if !defined(LANGUAGE_ASSEMBLY) && !defined(__ASSEMBLER__)\n')
for name, register in self.registers.items():
register.dump_macros(header=header)
header.file.write('\n')
header.file.write('#endif\n')
def dump_groups_to_header(self, header):
for name, group in self.regmaps.items():
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// GROUP %s\n' % name)
header.file.write('//\n')
if group.offset is not None:
header.file.write('\n')
                header.file.write('#define %-40s 0x%x\n' % ('%s_%s_OFFSET' % (get_c_name(header.name).upper(), get_c_name(name).upper()), group.offset))
group.dump_to_header(header)
def dump_constants_to_header(self, header):
if len(self.constants) != 0:
header.file.write('\n')
header.file.write('\n')
header.file.write('\n')
header.file.write('//\n')
header.file.write('// CUSTOM FIELDS\n')
header.file.write('//\n')
for name, constant in self.constants.items():
constant.dump_to_header(header=header)
def dump_to_header(self, header, header_path):
header.file.write('#include "%s_regs.h"\n' % (header_path))
header.file.write('#include "%s_regfields.h"\n' % (header_path))
header.file.write('#include "%s_structs.h"\n' % (header_path))
header.file.write('#include "%s_regmap.h"\n' % (header_path))
header.file.write('#include "%s_accessors.h"\n' % (header_path))
header.file.write('#include "%s_macros.h"\n' % (header_path))
header.file.write('#include "%s_groups.h"\n' % (header_path))
header.file.write('#include "%s_constants.h"\n' % (header_path))
def dump_to_header(regmap, name, header_path):
header_file = Header(name, header_path + '.h')
regmap.dump_to_header(header_file, os.path.basename(header_path))
header_file.close()
header_file = Header(name, header_path + '_regs.h')
regmap.dump_regs_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_regfields.h')
regmap.dump_regfields_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_structs.h')
regmap.dump_structs_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_gvsoc.h')
regmap.dump_vp_structs_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_regmap.h')
regmap.dump_regmap_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_accessors.h')
regmap.dump_accessors_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_macros.h')
regmap.dump_macros_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_groups.h')
regmap.dump_groups_to_header(header_file)
header_file.close()
header_file = Header(name, header_path + '_constants.h')
regmap.dump_constants_to_header(header_file)
header_file.close()
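# Illustrative usage (a sketch): given a Regmap instance populated elsewhere from
# a register description, generate the full set of headers. The regmap variable,
# IP name, and output path below are hypothetical.
#
# dump_to_header(regmap, 'soc_ctrl', 'include/archi/soc_ctrl')
# # -> writes soc_ctrl.h plus the _regs, _regfields, _structs, _gvsoc,
# #    _regmap, _accessors, _macros, _groups and _constants headers.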
|
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests
import time
# Returns the page
def fetch_res(url):
return requests.get(url)
# URL structure: root_url + page number (pages 1..numpages)
root_url = "https://news.ycombinator.com/news?p="
numpages = 25
hds, links = [], []
# Generate links for us to fetch
for i in range(1,numpages+1):
links.append(root_url+str(i))
# Timing loop for just request fetching and parsing.
start_time = time.time()
# For each page, fetch the headline, the score, and the site it originated from
for link in links:
page = fetch_res(link)
parsed = bs(page.content, 'html.parser')
headlines = parsed.find_all('a', class_='storylink')
scores = parsed.find_all('span', class_='score')
sitestr = parsed.find_all('span', class_='sitestr')
for a,b,c in zip(headlines, scores, sitestr):
hds.append([a.get_text(), int(b.get_text().split()[0]), c.get_text()])
print(time.time()-start_time,"seconds")
df = pd.DataFrame(hds)
df.columns = ['Title', 'Score', 'Site']
print(df.head(10))
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
##############################################################################
# Integration testing for the MIE workflow API
#
# PRECONDITIONS:
# MIE base stack must be deployed in your AWS account
#
# Boto3 will raise a deprecation warning (known issue). It's safe to ignore.
#
# USAGE:
# cd tests/
# pytest -s -W ignore::DeprecationWarning -p no:cacheprovider
#
###############################################################################
import pytest
import boto3
import json
import time
import math
import requests
import urllib3
import logging
from botocore.exceptions import ClientError
import re
import os
from jsonschema import validate
# local imports
import validation
def test_workflow_execution_api(api, api_schema, workflow_configs):
api = api()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
print("Running /workflow/execution API tests")
for config in workflow_configs:
print("----------------------------------------")
print("\nTEST WORKFLOW EXECUTION CONFIGURATION: {}".format(config))
# Create the workflow
# TODO: Add a check here to see if the workflow is in a deleting state (from a previous test run) and wait
create_workflow_response = api.create_workflow_request(config)
workflow = create_workflow_response.json()
assert create_workflow_response.status_code == 200
#FIXME - validate create_workflow_response
#validation.schema(workflow, api_schema["create_workflow_response"])
# Execute the workflow and wait for it to complete
create_workflow_execution_response = api.create_workflow_execution_request(workflow, config)
workflow_execution = create_workflow_execution_response.json()
assert create_workflow_execution_response.status_code == 200
assert workflow_execution['Status'] == 'Queued'
#FIXME - validate create_workflow_response
#validation.schema(workflow_execution, api_schema["create_workflow_execution_response"])
workflow_execution = api.wait_for_workflow_execution(workflow_execution, 120)
if config["Status"] == "OK":
assert workflow_execution["Status"] == "Complete"
# Check output media for expected types
for outputType in config["Outputs"]:
if outputType != "None":
assert outputType in workflow_execution["Globals"]["Media"]
else:
assert workflow_execution["Status"] == "Error"
# validation.stage_execution(workflow_execution, config, stack_resources, api_schema)
# Delete the workflow
delete_workflow_response = api.delete_stage_workflow_request(workflow)
assert delete_workflow_response.status_code == 200
        # Delete the stage
delete_stage_response = api.delete_stage_request(workflow)
assert delete_stage_response.status_code == 200
#TODO: dynamoDB remove asset
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
__version__ = '0.1.1'
from wptranslate.translator import translate
# Silence pyflakes
translate
|
"""
=================================
Compare (:mod:`macroeco.compare`)
=================================
This module contains functions that compare the goodness of fit of a
distribution/curve to data or the fit of two distributions/curves to each
other.
.. autosummary::
:toctree: generated/
nll
lrt
AIC
AIC_compare
sum_of_squares
r_squared
   preston_bin
   pueyo_bins
"""
from ._compare import (nll, lrt, AIC, AIC_compare,
sum_of_squares, r_squared,
preston_bin, pueyo_bins)
|
import argparse
import copy
import os
import time
from flax import linen as nn
from flax import optim
import jax
import jax.numpy as jnp
import numpy as np
import ray
from alpa import (parallelize, global_config, set_parallelize_options, testing,
DeviceCluster, LocalPhysicalDeviceMesh)
from alpa.model.bert_model import BertConfig, FlaxBertAttention, FlaxBertLayerCollection
from alpa.util import (run_cmd, write_tsv, benchmark_func, list_gpu_info,
count_communication_primitives)
import timeit
GB = 1024 ** 3
tic = time.time()
def log_time_stamp(message):
global tic
if message:
print(f" - {message}: {time.time() - tic:.2f} s")
tic = time.time()
def compute_data_parallel_cost(optimizer, logical_mesh, physical_mesh):
"""For debugging usage."""
shapes = jax.tree_util.tree_map(lambda x : np.prod(x.shape), optimizer.target)
sizes = jax.tree_util.tree_leaves(shapes)
cost = 0
print(logical_mesh.mesh_beta)
for size in sizes:
cost += logical_mesh.all_reduce_cost(size * 4, 0)
#cost += physical_mesh.prof_result.estimate_all_reduce(((0,4), (1,5), (2,6), (3,7),), size / 4, "float32")
#cost += physical_mesh.prof_result.estimate_all_reduce(((0,2,4,6,), (1,3,5,7)), size / 2, "float32")
#cost += physical_mesh.prof_result.estimate_all_reduce(((0,1,2,3,4,5,6,7),), size, "float32")
print(cost)
def benchmark_transformer_one_case(benchmark_case, use_profiling):
log_time_stamp(None)
# Model configs
batch_size, seq_len, hidden_size, num_layers, num_heads, mesh_dim0, mesh_dim1 =\
benchmark_case
# Parallel configs
if args.local:
physical_mesh = LocalPhysicalDeviceMesh(jax.devices())
else:
device_cluster = DeviceCluster()
physical_mesh = device_cluster.get_physical_mesh()
logical_mesh = physical_mesh.get_logical_mesh([mesh_dim0, mesh_dim1],
mesh_topology="tree",
inter_host_bandwidth=1,
intra_host_bandwidth=30)
set_parallelize_options(devices=logical_mesh)
# Load profiling results
if use_profiling:
filename = physical_mesh.get_signature() + ".prof.pkl"
if os.path.exists(filename):
print(f"Load saved profiling results from {filename}")
physical_mesh.load_profiling_result(filename)
physical_mesh.prof_result.make_monotonic()
physical_mesh.prof_result.multiply_scale(1e7)
else:
physical_mesh.profile_collective("all-reduce")
print(f"Save profiling results to {filename}")
physical_mesh.save_profiling_result(filename)
log_time_stamp("Setup device mesh")
@parallelize
def train_step(optimizer, batch, rng_key, apply_fn):
def loss_func(params):
rngs = {"dropout": rng_key}
out = apply_fn(params, batch["hidden_states"], batch["attention_mask"],
deterministic=False, rngs=rngs)[0]
return jnp.mean((out - batch["label"]) ** 2)
grad = jax.grad(loss_func)(optimizer.target)
new_optimizer = optimizer.apply_gradient(grad)
return new_optimizer
# Prepare input batch
batch = {
"hidden_states": jnp.ones((batch_size, seq_len, hidden_size), dtype=np.float32),
"attention_mask": jnp.ones((batch_size, seq_len), dtype=np.int32),
"label": jnp.ones((batch_size, seq_len, hidden_size), dtype=np.float32),
}
log_time_stamp("Prepare input")
# Init model and optimizer
model = FlaxBertLayerCollection(BertConfig(
num_hidden_layers=num_layers,
hidden_size=hidden_size,
intermediate_size=hidden_size * 4,
num_attention_heads=num_heads))
rngkey = jax.random.PRNGKey(0)
params = model.init_dummy(rngkey, batch["hidden_states"], batch["attention_mask"])
optimizer = optim.Adam(1e-2).create(params)
del params
log_time_stamp("Init model and optimizer")
# Compile executable
executable = train_step.get_executable(optimizer, batch, rngkey, model.apply)
log_time_stamp("Compile (driver)")
physical_mesh.sync_workers()
log_time_stamp("Compile (workers)")
# Benchmark step time
for i in range(args.niter):
optimizer = train_step(optimizer, batch, rngkey, model.apply)
costs = executable.get_execution_time_costs(warmup=2)
log_time_stamp("Benchmark")
# Check sharding strategy
objective = testing.last_compiled_auto_sharding_objective or 0.0
real_mem = executable.get_total_allocation_size()
hlo_text = executable.get_hlo_text()
with open("last.hlo", "w") as fout:
fout.write(hlo_text)
n_total, n_all_reduce, n_all_gather, n_reduce_scatter, _ =\
count_communication_primitives(hlo_text)
print(f"#total: {n_total}, #all-reduce: {n_all_reduce}, "
f"#all-gather: {n_all_gather}, #reduce-scatter: {n_reduce_scatter}")
# Log benchmark results
heads = ["Type", "Model Config", "Parallel Config", "Peak Mem",
"Objective", "Mean Time", "Std Time"]
values = ["transformer-layer", str(benchmark_case[:-2]), str(benchmark_case[-2:]),
f"{real_mem/GB:.3f}", f"{objective:.2f}",
f"{np.mean(costs):.3f}", f"{np.std(costs):.3f}"]
write_tsv(heads, values, "result_trans.tsv")
physical_mesh.shutdown()
# B = batch_size, S = seq_len, H = hidden_size, L = num_layers,
# #head = num_heads, D0 = mesh_dimension_0, D1 = mesh_dimension_1
benchmark_suite_4_gpu = [
# B, S, H, L, #head, D0, D1
(32, 1024, 1536, 2, 1536//96, 4, 1),
(32, 1024, 1536, 2, 1536//96, 2, 2),
(32, 1024, 1536, 2, 1536//96, 1, 4),
]
benchmark_suite_8_gpu = [
# B, S, H, L, #head, D0, D1
(32, 1024, 1536, 4, 1536//96, 8, 1),
(32, 1024, 1536, 4, 1536//96, 4, 2),
(32, 1024, 1536, 4, 1536//96, 2, 4),
(32, 128, 5120, 3, 5120//128, 8, 1),
(32, 128, 5120, 3, 5120//128, 4, 2),
(32, 128, 5120, 3, 5120//128, 2, 4),
]
def benchmark_all(use_profiling):
if args.local:
num_gpus = list_gpu_info().count("UUID")
else:
num_gpus = int(ray.cluster_resources()["GPU"])
benchmark_suites = {
4: benchmark_suite_4_gpu,
8: benchmark_suite_8_gpu,
}
for case in benchmark_suites[num_gpus]:
# Backup global config
old_global_config = copy.deepcopy(global_config.__dict__)
benchmark_transformer_one_case(case, use_profiling)
# Restore global config
global_config.__dict__ = old_global_config
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--use-profiling", action="store_true")
parser.add_argument("--niter", type=int, default=10)
parser.add_argument("--local", action="store_true",
help="Run on local GPUs. Do not use ray actors.")
args = parser.parse_args()
if not args.local:
ray.init(address="auto")
jax.config.update('jax_platform_name', 'cpu')
global_config.use_dummy_value_for_benchmarking = True
benchmark_all(args.use_profiling)
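# Example invocations (the script filename is hypothetical):
#   python benchmark_transformer_layer.py --local --niter 10
#   python benchmark_transformer_layer.py --use-profiling   # on a Ray cluster started with `ray start`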
|
# Custom model builders
from core.misc import MODELS
@MODELS.register_func('UNet_model')
def build_unet_model(C):
from models.unet import UNet
return UNet(6, 2)
@MODELS.register_func('UNet_OSCD_model')
def build_unet_oscd_model(C):
from models.unet import UNet
return UNet(26, 2)
@MODELS.register_func('SiamUNet-diff_model')
def build_siamunet_diff_model(C):
from models.siamunet_diff import SiamUNet_diff
return SiamUNet_diff(3, 2)
@MODELS.register_func('SiamUNet-diff_OSCD_model')
def build_siamunet_diff_oscd_model(C):
from models.siamunet_diff import SiamUNet_diff
return SiamUNet_diff(13, 2)
@MODELS.register_func('SiamUNet-conc_model')
def build_siamunet_conc_model(C):
from models.siamunet_conc import SiamUNet_conc
return SiamUNet_conc(3, 2)
@MODELS.register_func('SiamUNet-conc_OSCD_model')
def build_siamunet_conc_oscd_model(C):
from models.siamunet_conc import SiamUNet_conc
return SiamUNet_conc(13, 2)
@MODELS.register_func('CDNet_model')
def build_cdnet_model(C):
from models.cdnet import CDNet
return CDNet(6, 2)
@MODELS.register_func('IFN_model')
def build_ifn_model(C):
from models.ifn import DSIFN
return DSIFN()
@MODELS.register_func('SNUNet_model')
def build_snunet_model(C):
from models.snunet import SNUNet
return SNUNet(3, 2, 32)
@MODELS.register_func('STANet_model')
def build_stanet_model(C):
from models.stanet import STANet
return STANet(**C['stanet_model'])
@MODELS.register_func('LUNet_model')
def build_lunet_model(C):
from models.lunet import LUNet
return LUNet(3, 2)
@MODELS.register_func('P2V_model')
def build_p2v_model(C):
from models.p2v import P2VNet
return P2VNet(**C['p2v_model'])
@MODELS.register_func('DSAMNet_model')
def build_dsamnet_model(C):
from models.dsamnet import DSAMNet
return DSAMNet(**C['dsamnet_model'])
@MODELS.register_func('BIT_model')
def build_bit_model(C):
from models.bit import BIT
return BIT(**C['bit_model'])
@MODELS.register_func('CDP_model')
def build_cdp_model(C):
try:
import change_detection_pytorch as cdp
except ModuleNotFoundError:
raise ModuleNotFoundError("The change_detection.pytorch library is not available!")
cdp_model_cfg = C['cdp_model'].copy()
arch = cdp_model_cfg.pop('arch')
encoder_name = cdp_model_cfg.pop('encoder_name')
encoder_weights = cdp_model_cfg.pop('encoder_weights')
in_channels = cdp_model_cfg.pop('in_channels')
classes = cdp_model_cfg.pop('classes')
model = cdp.create_model(
arch=arch,
encoder_name=encoder_name,
encoder_weights=encoder_weights,
in_channels=in_channels,
classes=classes,
**cdp_model_cfg
)
return model
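# Rough usage sketch: a builder is looked up in the MODELS registry by name and
# called with the config dict C. The dict-style lookup below is an assumption
# about the registry's API; only register_func is visible in this module, and
# the config values are illustrative.
#
# C = {'model': 'BIT_model', 'bit_model': {'in_ch': 3}}
# model = MODELS[C['model']](C)   # assumed lookup style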
|
"""
The configure.mccs module contains Python classes that represent the various
aspects of MCCS configuration that may be specified in a SubArray.configure
command.
"""
from typing import List
__all__ = [
"MCCSConfiguration",
"StnConfiguration",
"SubarrayBeamConfiguration",
"SubarrayBeamTarget"
]
class SubarrayBeamTarget:
"""
Target encapsulates source coordinates and source metadata.
The SubArrayNode ICD specifies that az and el must be provided
"""
# pylint: disable=too-many-arguments
def __init__(self, az: float, el: float, target_name: str, reference_frame: str):
"""
        Initialise the subarray beam target.
:param az: Az specification with rates
:param el: El specification with rates
:param target_name: target name
:param reference_frame: Target coordinate reference frame
"""
self.az = az
self.el = el
self.target_name = target_name
self.reference_frame = reference_frame
def __eq__(self, other):
"""
Check for equality between two SubarrayBeamTarget objects
:param other: the object to check against this object
:type other: SubarrayBeamTarget object
:return: returns True if the objects are the same, else False
:rtype: boolean
"""
if not isinstance(other, SubarrayBeamTarget):
return False
return (self.az == other.az
and self.el == other.el
and self.target_name == other.target_name
and self.reference_frame == other.reference_frame)
class StnConfiguration:
    """A class to hold station configuration attributes"""
def __init__(self, station_id: int):
"""
Initialise the station configuration.
        :param station_id: the station's id
:type station_id: int
"""
self.station_id = station_id
def __eq__(self, other):
"""
Check for equality between two station configuration objects
:param other: the object to check against this object
:type other: station configuration object
:return: returns True if the objects are the same, else False
:rtype: boolean
"""
if not isinstance(other, StnConfiguration):
return False
return self.station_id == other.station_id
class SubarrayBeamConfiguration:
"""A class to hold subarray_beam configuration attributes"""
def __init__(
self,
subarray_beam_id: int,
station_ids: List[int],
channels: List[List[int]],
update_rate: float,
target: SubarrayBeamTarget,
antenna_weights: List[float],
phase_centre: List[float]
):
"""
Initialise the station beam configuration.
        :param subarray_beam_id: the subarray beam's id
:type subarray_beam_id: int
:param station_ids: station id's
:type station_ids: List[int]
:param channels: channels to form station beam
:type channels: List[Tuple]
:param update_rate: frequency of new Az/El during scan
:type update_rate: float
:param target: Az/El specification with target source
:type target: SubarrayBeamTarget
:param antenna_weights: antenna_weights
:type antenna_weights: List[float]
:param phase_centre: phase_centre
:type phase_centre: List[float]
"""
self.subarray_beam_id = subarray_beam_id
self.station_ids = station_ids
self.channels = channels
self.update_rate = update_rate
self.target = target
self.antenna_weights = antenna_weights
self.phase_centre = phase_centre
def __eq__(self, other):
"""
Check for equality between two station beam configuration objects
:param other: the object to check against this object
:type other: station beam configuration object
:return: returns True if the objects are the same, else False
:rtype: boolean
"""
if not isinstance(other, SubarrayBeamConfiguration):
return False
return (
self.subarray_beam_id == other.subarray_beam_id
and self.station_ids == other.station_ids
and self.channels == other.channels
and self.update_rate == other.update_rate
and self.target == other.target
and self.antenna_weights == other.antenna_weights
and self.phase_centre == other.phase_centre
)
class MCCSConfiguration:
"""
Class to hold all subarray configuration.
"""
def __init__(
self,
*_, # force kwargs
station_configs: List[StnConfiguration],
subarray_beam_configs: List[SubarrayBeamConfiguration],
):
"""
Create a new MCCSConfiguration.
:param station_configs: a list of station configurations
:type station_configs: List[StnConfiguration]
:param subarray_beam_configs: a list of subarray beam configurations
:type subarray_beam_configs: List[SubarrayBeamConfiguration]
"""
self.station_configs = station_configs
self.subarray_beam_configs = subarray_beam_configs
def __eq__(self, other):
"""
Check for equality between two mccs configuration objects
:param other: the object to check against this object
:type other: mccs configuration object
:return: returns True if the objects are the same, else False
:rtype: boolean
"""
if not isinstance(other, MCCSConfiguration):
return False
return (
self.station_configs == other.station_configs
and self.subarray_beam_configs == other.subarray_beam_configs
)
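# Illustrative construction of an MCCSConfiguration; every value below is an
# arbitrary example, not taken from a real subarray configuration.
#
# target = SubarrayBeamTarget(az=180.0, el=45.0, target_name="DriftScan",
#                             reference_frame="HORIZON")
# beam = SubarrayBeamConfiguration(
#     subarray_beam_id=1,
#     station_ids=[1, 2],
#     channels=[[0, 8, 1, 1]],
#     update_rate=0.0,
#     target=target,
#     antenna_weights=[1.0, 1.0],
#     phase_centre=[0.0, 0.0],
# )
# config = MCCSConfiguration(
#     station_configs=[StnConfiguration(1), StnConfiguration(2)],
#     subarray_beam_configs=[beam],
# )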
|
# Write a program that opens and plays an MP3 audio file
#pygame.init()
#pygame.mixer.music.load()
#pygame.mixer.music.play()
#pygame.event.wait()
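# A minimal working sketch following the hints above; 'song.mp3' is a
# placeholder filename. The busy-wait loop stands in for pygame.event.wait(),
# which needs an event source before it returns.
#
# import pygame
#
# pygame.init()
# pygame.mixer.music.load('song.mp3')
# pygame.mixer.music.play()
# while pygame.mixer.music.get_busy():
#     pygame.time.Clock().tick(10)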
|
import pandas as pd
import re
from yahooquery.utils import (
_convert_to_list, _convert_to_timestamp, _history_dataframe, _flatten_list)
from yahooquery.base import _YahooFinance
class Ticker(_YahooFinance):
"""
Base class for interacting with Yahoo Finance API
Attributes
----------
symbols: str or list
Symbol or list collection of symbols
Keyword Arguments
-----------------
formatted: bool, default False, optional
Quantitative values are given as dictionaries with at least two
keys: 'raw' and 'fmt'. The 'raw' key expresses value numerically
and the 'fmt' key expresses the value as a string. See Notes for more
detail
asynchronous: bool, default False, optional
Defines whether the requests are made synchronously or asynchronously.
max_workers: int, default 8, optional
Defines the number of workers used to make asynchronous requests.
This only matters when asynchronous=True
proxies: dict, default None, optional
Allows for the session to use a proxy when making requests
Notes
-----
When formatted is set to True, all quote_summary modules will return as
dictionaries. There are two reasons for this:
1. Quantitative values are expressed as dictionaries. For example:
"totalPay": {
"raw": 115554666,
"fmt": "11.56M",
"longFmt": "11,555,466"
}
When formatted is set to False, the _format_data method will return
the value in the "raw" key.
2. Dates are either expressed as timestamps:
"governanceEpochDate": 1570147200
Or as dictionaries:
"exDividendDate": {
"raw": 1573084800,
"fmt": "2019-11-07"
}
When formatted is set to False, the _format_data method will return the
date expressed in the format YYYY-MM-DD by either converting from the
timestamp or retrieving the "fmt" key.
"""
def __init__(self, symbols, **kwargs):
self._symbols = _convert_to_list(symbols)
super(Ticker, self).__init__(**kwargs)
def _quote_summary(self, modules):
kwargs = {}
params = {'modules': ','.join(modules)}
if len(modules) == 1:
kwargs.update({'addl_key': modules[0]})
data = self._get_data(key='quoteSummary', params=params, **kwargs)
dates = _flatten_list(
[self._MODULES_DICT[module]['convert_dates']
for module in modules])
return data if self.formatted else self._format_data(data, dates)
def _quote_summary_dataframe(self, module, **kwargs):
data = self._quote_summary([module])
if not kwargs.get('data_filter'):
data_filter = self._MODULES_DICT[module]['filter']
kwargs.update({'data_filter': data_filter})
return self._to_dataframe(data, **kwargs)
@property
def symbols(self):
return self._symbols
@symbols.setter
def symbols(self, symbols):
self._symbols = _convert_to_list(symbols)
def _to_dataframe(self, data, **kwargs):
if not self.formatted:
dataframes = []
try:
for symbol in self.symbols:
final_data = data[symbol][kwargs.get('data_filter')] if \
kwargs.get('data_filter') else data[symbol]
if kwargs.get('from_dict', False):
df = pd.DataFrame(
[(k, v) for d in final_data for k, v in d.items()])
df.set_index(0, inplace=True)
df.columns = [symbol]
else:
df = pd.DataFrame(final_data)
dataframes.append(df)
if kwargs.get('from_dict', False):
df = pd.concat(dataframes, axis=1)
else:
df = pd.concat(
dataframes, keys=self.symbols, names=['symbol', 'row'],
sort=False)
return df
except TypeError:
return data
else:
return data
@property
def all_modules(self):
"""
Returns all quoteSummary modules, indexed by module title
for each symbol
Notes
-----
Only returns JSON
"""
return self._quote_summary(
self._CONFIG['quoteSummary']['query']['modules']['options'])
def get_modules(self, modules):
"""
Obtain specific quoteSummary modules for given symbol(s)
Parameters
----------
modules: list or str
Desired modules for retrieval
Notes
-----
Only returns JSON
Raises
------
ValueError
If invalid module is specified
"""
all_modules = \
self._CONFIG['quoteSummary']['query']['modules']['options']
if not isinstance(modules, list):
modules = re.findall(r"[a-zA-Z]+", modules)
if any(elem not in all_modules for elem in modules):
raise ValueError("""
One of {} is not a valid value. Valid values are {}.
""".format(
', '.join(modules),
', '.join(all_modules)
))
return self._quote_summary(modules)
@property
def asset_profile(self):
"""Asset Profile
Geographical and business summary data for given symbol(s).
Returns
-------
dict
assetProfile module data
"""
return self._quote_summary(['assetProfile'])
@property
def calendar_events(self):
"""Calendar Events
Earnings and Revenue expectations for upcoming earnings date for given
symbol(s)
Returns
-------
dict
calendarEvents module data
"""
return self._quote_summary(['calendarEvents'])
@property
def earnings(self):
"""Earnings
Historical earnings data for given symbol(s)
Returns
-------
dict
earnings module data
"""
return self._quote_summary(['earnings'])
@property
def earnings_trend(self):
"""Earnings Trend
Historical trend data for earnings and revenue estimations for given
symbol(s)
Returns
-------
dict
earningsTrend module data
"""
return self._quote_summary(['earningsTrend'])
@property
def esg_scores(self):
"""ESG Scores
Data related to a given symbol(s) environmental, social, and
governance metrics
Returns
-------
dict
esgScores module data
"""
return self._quote_summary(['esgScores'])
@property
def financial_data(self):
"""Financial Data
Financial KPIs for given symbol(s)
Returns
-------
dict
financialData module data
"""
return self._quote_summary(['financialData'])
@property
def news(self):
"""News articles related to given symbol(s)
Obtain news articles related to a given symbol(s). Data includes
the title of the article, summary, url, author_name, publisher
Notes
-----
It's recommended to use only one symbol for this property as the data
        returned does not indicate which symbol the news stories
belong to
Returns
-------
dict
"""
return self._get_data(
'news', {}, **{'list_result': True})
@property
def index_trend(self):
"""Index Trend
        Trend data related to given symbol(s) index, specifically PE and PEG
ratios
Returns
-------
dict
indexTrend module data
"""
return self._quote_summary(['indexTrend'])
@property
def industry_trend(self):
"""Industry Trend
Seems to be deprecated
Returns
-------
dict
industryTrend module data
"""
return self._quote_summary(['industryTrend'])
@property
def key_stats(self):
"""Key Statistics
KPIs for given symbol(s) (PE, enterprise value, EPS, EBITA, and more)
Returns
-------
dict
defaultKeyStatistics module data
"""
return self._quote_summary(['defaultKeyStatistics'])
@property
def major_holders(self):
"""Major Holders
Data showing breakdown of owners of given symbol(s), insiders,
institutions, etc.
Returns
-------
dict
majorHoldersBreakdown module data
"""
return self._quote_summary(['majorHoldersBreakdown'])
@property
def page_views(self):
"""Page Views
Short, Mid, and Long-term trend data regarding a symbol(s) page views
Returns
-------
dict
pageViews module data
"""
return self._quote_summary(['pageViews'])
@property
def price(self):
"""Price
Detailed pricing data for given symbol(s), exchange, quote type,
currency, market cap, pre / post market data, etc.
Returns
-------
dict
price module data
"""
return self._quote_summary(['price'])
@property
def quote_type(self):
"""Quote Type
Stock exchange specific data for given symbol(s)
Returns
-------
dict
quoteType module data
"""
return self._quote_summary(['quoteType'])
@property
def recommendations(self):
"""Recommendations
Retrieve the top 5 symbols that are similar to a given symbol
Returns
-------
dict
"""
return self._get_data('recommendations')
@property
def share_purchase_activity(self):
"""Share Purchase Activity
High-level buy / sell data for given symbol(s) insiders
Returns
-------
dict
netSharePurchaseActivity module data
"""
return self._quote_summary(['netSharePurchaseActivity'])
@property
def summary_detail(self):
"""Summary Detail
Contains similar data to price endpoint
Returns
-------
dict
summaryDetail module data
"""
return self._quote_summary(['summaryDetail'])
@property
def summary_profile(self):
"""Summary Profile
Data related to given symbol(s) location and business summary
Returns
-------
dict
summaryProfile module data
"""
return self._quote_summary(['summaryProfile'])
@property
def technical_insights(self):
"""Technical Insights
Technical trading information as well as company metrics related
to innovativeness, sustainability, and hiring. Metrics can also
be compared against the company's sector
Returns
-------
dict
"""
return self._get_data('insights')
@property
def validation(self):
"""Symbol Validation
Validate existence of given symbol(s)
Returns
-------
dict
"""
return self._get_data('validation')
def _financials(self, financials_type, frequency, premium=False):
try:
time_dict = self.FUNDAMENTALS_TIME_ARGS[frequency[:1].lower()]
prefix = time_dict['prefix']
period_type = time_dict['period_type']
except KeyError as e:
raise(e)
key = 'fundamentals_premium' if premium else 'fundamentals'
types = self._CONFIG[key]['query']['type']['options'][financials_type]
prefixed_types = ['{}{}'.format(prefix, t) for t in types] + \
['trailing{}'.format(t) for t in types]
data = self._get_data(key, {'type': ','.join(prefixed_types)}, **{
'list_result': True})
dataframes = []
try:
for k in data.keys():
if isinstance(data[k], str) or data[k][0].get('description'):
return data
dataframes.extend([
self._financials_dataframes(data[k][i], period_type)
for i in range(len(data[k]))])
except AttributeError:
return data
try:
df = pd.concat(dataframes)
            for p in [prefix, 'trailing']:
                # Remove the period prefix ('annual', 'quarterly' or 'trailing')
                # as a whole prefix rather than lstrip'ing its characters
                df['dataType'] = df['dataType'].apply(
                    lambda x, p=p: str(x)[len(p):] if str(x).startswith(p) else str(x))
df['asOfDate'] = pd.to_datetime(df['asOfDate'], format='%Y-%m-%d')
df = df.pivot_table(
index=['symbol', 'asOfDate', 'periodType'], columns='dataType',
values='reportedValue')
return pd.DataFrame(df.to_records()).set_index('symbol')
except ValueError:
return '{} data unavailable for {}'.format(
financials_type.replace('_', ' ').title(),
', '.join(self._symbols))
def _financials_dataframes(self, data, period_type):
data_type = data['meta']['type'][0]
symbol = data['meta']['symbol'][0]
try:
df = pd.DataFrame.from_records(data[data_type])
df['reportedValue'] = \
df['reportedValue'].apply(lambda x: x.get('raw'))
df['dataType'] = data_type
df['symbol'] = symbol
df['periodType'] = data[data_type][-1].get('periodType', period_type)
return df
except KeyError:
# No data is available for that type
pass
@property
def valuation_measures(self):
"""Valuation Measures
Retrieves valuation measures for most recent four quarters as well
as the most recent date
Notes
-----
Only quarterly data is available for non-premium subscribers
"""
return self._financials('valuation', 'q')
def balance_sheet(self, frequency='a'):
"""Balance Sheet
Retrieves balance sheet data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'A', optional
Specify either annual or quarterly balance sheet. Value should
be 'a' or 'q'.
Returns
-------
pandas.DataFrame
"""
return self._financials('balance_sheet', frequency)
def cash_flow(self, frequency='a'):
"""Cash Flow
Retrieves cash flow data for most recent four quarters or most
recent four years as well as the trailing 12 months
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly cash flow statement. Value
should be 'a' or 'q'.
Returns
-------
pandas.DataFrame
"""
return self._financials('cash_flow', frequency)
@property
def company_officers(self):
"""Company Officers
Retrieves top executives for given symbol(s) and their total pay
package. Uses the assetProfile module to retrieve data
Returns
-------
pandas.DataFrame
assetProfile module data
"""
data = self._quote_summary(["assetProfile"])
return self._to_dataframe(data, data_filter="companyOfficers")
@property
def earning_history(self):
"""Earning History
Data related to historical earnings (actual vs. estimate) for given
symbol(s)
Returns
-------
pandas.DataFrame
earningsHistory module data
"""
return self._quote_summary_dataframe('earningsHistory')
@property
def fund_ownership(self):
"""Fund Ownership
Data related to top 10 owners of a given symbol(s)
Returns
-------
pandas.DataFrame
fundOwnership module data
"""
return self._quote_summary_dataframe('fundOwnership')
@property
def grading_history(self):
"""Grading History
Data related to upgrades / downgrades by companies for a given
symbol(s)
Returns
-------
pandas.DataFrame
upgradeDowngradeHistory module data
"""
return self._quote_summary_dataframe('upgradeDowngradeHistory')
def income_statement(self, frequency='a'):
"""Income Statement
Retrieves income statement data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'A', optional
Specify either annual or quarterly income statement. Value should
be 'a' or 'q'.
Returns
-------
pandas.DataFrame
"""
return self._financials('income_statement', frequency)
@property
def insider_holders(self):
"""Insider Holders
Data related to stock holdings of a given symbol(s) insiders
Returns
-------
pandas.DataFrame
insiderHolders module data
"""
return self._quote_summary_dataframe('insiderHolders')
@property
def insider_transactions(self):
"""Insider Transactions
Data related to transactions by insiders for a given symbol(s)
Returns
-------
pandas.DataFrame
insiderTransactions module data
"""
return self._quote_summary_dataframe('insiderTransactions')
@property
def institution_ownership(self):
"""Institution Ownership
Top 10 owners of a given symbol(s)
Returns
-------
pandas.DataFrame
institutionOwnership module data
"""
return self._quote_summary_dataframe('institutionOwnership')
@property
def recommendation_trend(self):
"""Recommendation Trend
Data related to historical recommendations (buy, hold, sell) for a
given symbol(s)
Returns
-------
pandas.DataFrame
recommendationTrend module data
"""
return self._quote_summary_dataframe('recommendationTrend')
@property
def sec_filings(self):
"""SEC Filings
Historical SEC filings for a given symbol(s)
Returns
-------
pandas.DataFrame
secFilings endpoint data
"""
return self._quote_summary_dataframe('secFilings')
# FUND SPECIFIC
def _fund_holdings(self, holding_type):
data = self.fund_holding_info
for symbol in self.symbols:
try:
data[symbol] = data[symbol][holding_type]
except TypeError:
pass
return data
@property
def fund_bond_holdings(self):
"""Fund Bond Holdings
Retrieves aggregated maturity and duration information for a given
symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
dict
topHoldings module data subset
"""
return self._fund_holdings("bondHoldings")
@property
def fund_category_holdings(self):
"""Fund Category Holdings
High-level holding breakdown (cash, bonds, equity, etc.) for a given
symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
data_dict = self._quote_summary(["topHoldings"])
for symbol in self.symbols:
for key in self._FUND_DETAILS:
try:
del data_dict[symbol][key]
except TypeError:
return data_dict
return pd.DataFrame(
[pd.Series(data_dict[symbol]) for symbol in self.symbols],
index=self.symbols)
@property
def fund_equity_holdings(self):
"""Fund Equity Holdings
Retrieves aggregated priceTo____ data for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
dict
topHoldings module data subset
"""
return self._fund_holdings("equityHoldings")
@property
def fund_performance(self):
"""Fund Performance
Historical return data for a given symbol(s) and symbol(s) specific
category
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
fundPerformance module data
"""
return self._quote_summary(["fundPerformance"])
@property
def fund_profile(self):
"""Fund Profile
Summary level information for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
fundProfile endpoint data
"""
return self._quote_summary(["fundProfile"])
@property
def fund_holding_info(self):
"""Fund Holding Information
Contains information for a funds top holdings, bond ratings, bond
holdings, equity holdings, sector weightings, and category breakdown
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
dict
topHoldings module data
"""
return self._quote_summary(["topHoldings"])
@property
def fund_top_holdings(self):
"""Fund Top Holdings
Retrieves Top 10 holdings for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
return self._quote_summary_dataframe(
'topHoldings', data_filter='holdings')
@property
def fund_bond_ratings(self):
"""Fund Bond Ratings
Retrieves aggregated bond rating data for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
return self._quote_summary_dataframe(
'topHoldings', data_filter='bondRatings', from_dict=True)
@property
def fund_sector_weightings(self):
"""Fund Sector Weightings
Retrieves aggregated sector weightings for a given symbol(s)
.. warning:: This endpoint will only return data for specific
securities (funds and etfs)
Returns
-------
pandas.DataFrame
topHoldings module data subset
"""
return self._quote_summary_dataframe(
'topHoldings', data_filter='sectorWeightings', from_dict=True)
# PREMIUM
def p_balance_sheet(self, frequency='a'):
"""Balance Sheet
Retrieves balance sheet data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'A', optional
Specify either annual or quarterly balance sheet. Value should
be 'a' or 'q'.
Notes
-----
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Returns
-------
pandas.DataFrame
"""
return self._financials('balance_sheet', frequency, premium=True)
def p_cash_flow(self, frequency='a'):
"""Cash Flow
Retrieves cash flow data for most recent four quarters or most
recent four years as well as the trailing 12 months
Parameters
----------
frequency: str, default 'a', optional
Specify either annual or quarterly cash flow statement. Value
should be 'a' or 'q'.
Notes
-----
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Returns
-------
pandas.DataFrame
"""
return self._financials('cash_flow', frequency, premium=True)
def p_income_statement(self, frequency='a'):
"""Income Statement
Retrieves income statement data for most recent four quarters or most
recent four years as well as trailing 12 months.
Parameters
----------
frequency: str, default 'A', optional
Specify either annual or quarterly income statement. Value should
be 'a' or 'q'.
Notes
-----
You must be subscribed to Yahoo Finance Premium and be logged in
for this method to return any data
Returns
-------
pandas.DataFrame
"""
return self._financials('income_statement', frequency, premium=True)
@property
def p_company_360(self):
return self._get_data('company360')
@property
def p_technical_insights(self):
return self._get_data('premium_insights')
@property
def p_portal(self):
return self._get_data('premium_portal')
def p_reports(self, report_id):
return self._get_data('reports', {'reportId': report_id})
def p_ideas(self, idea_id):
return self._get_data('trade_ideas', {'ideaId': idea_id})
@property
def p_technical_events(self):
return self._get_data('technical_events')
def p_valuation_measures(self, frequency='q'):
"""Valuation Measures
Retrieves valuation measures for all available dates for given
symbol(s)
"""
return self._financials('valuation', frequency, premium=True)
@property
def p_value_analyzer(self):
return self._get_data('value_analyzer')
@property
def p_value_analyzer_drilldown(self):
return self._get_data('value_analyzer_drilldown')
# HISTORICAL PRICE DATA
def history(self, period='ytd', interval='1d', start=None, end=None):
"""
Historical pricing data
Pulls historical pricing data for a given symbol(s)
Parameters
----------
period: str, default ytd, optional
Length of time
interval: str, default 1d, optional
Time between data points
start: str or datetime.datetime, default None, optional
Specify a starting point to pull data from. Can be expressed as a
string with the format YYYY-MM-DD or as a datetime object
        end: str or datetime.datetime, default None, optional
Specify a ending point to pull data from. Can be expressed as a
string with the format YYYY-MM-DD or as a datetime object.
Returns
-------
pandas.DataFrame
historical pricing data
"""
config = self._CONFIG['chart']
periods = config['query']['range']['options']
intervals = config['query']['interval']['options']
if start or period is None or period.lower() == 'max':
start = _convert_to_timestamp(start)
end = _convert_to_timestamp(end, start=False)
params = {'period1': start, 'period2': end}
else:
period = period.lower()
if period not in periods:
raise ValueError("Period values must be one of {}".format(
', '.join(periods)))
params = {'range': period}
if interval not in intervals:
raise ValueError("Interval values must be one of {}".format(
', '.join(intervals)))
params['interval'] = interval.lower()
data = self._get_data('chart', params)
df = self._historical_data_to_dataframe(data)
return df
def _historical_data_to_dataframe(self, data):
d = {}
for symbol in self._symbols:
if 'timestamp' in data[symbol]:
d[symbol] = _history_dataframe(data, symbol)
else:
d[symbol] = data[symbol]
if all(isinstance(d[key], pd.DataFrame) for key in d):
if len(d) == 1:
df = d[self._symbols[0]]
else:
df = pd.concat(list(d.values()), keys=list(d.keys()),
names=['symbol', 'date'], sort=False)
columns = list(df.columns)
if 'dividends' in columns:
df[['dividends']] = df[['dividends']].fillna(value=0)
columns.remove('dividends')
if 'splits' in columns:
df[['splits']] = df[['splits']].fillna(value=0)
columns.remove('splits')
try:
df[columns] = df.groupby(['symbol'])[columns].ffill()
except (KeyError, ValueError):
df.fillna(method='ffill', inplace=True)
return df
return d
@property
def option_chain(self):
data = self._get_data('options', {'getAllData': True})
dataframes = []
for symbol in self._symbols:
try:
if data[symbol]['options']:
dataframes.append(
self._option_dataframe(data[symbol]['options'], symbol)
)
except TypeError:
pass
if dataframes:
df = pd.concat(dataframes, sort=False)
df.set_index(
['symbol', 'expiration', 'optionType'], inplace=True)
df.rename_axis(
['symbol', 'expiration', 'optionType'], inplace=True)
df.fillna(0, inplace=True)
return df
return 'No option chain data found'
def _option_dataframe(self, data, symbol):
dataframes = []
for optionType in ['calls', 'puts']:
df = pd.concat(
[pd.DataFrame(data[i][optionType]) for i in range(len(data))])
df['optionType'] = optionType
dataframes.append(df)
df = pd.concat(dataframes, sort=False)
df['symbol'] = symbol
try:
df['expiration'] = pd.to_datetime(df['expiration'], unit='s')
df['lastTradeDate'] = pd.to_datetime(df['lastTradeDate'], unit='s')
except ValueError:
df['expiration'] = [d.get('fmt') for d in df['expiration']]
except KeyError:
pass
return df
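# Brief usage sketch (the symbols are arbitrary examples):
#
# tickers = Ticker(['AAPL', 'MSFT'])
# tickers.asset_profile                         # dict keyed by symbol
# tickers.balance_sheet(frequency='q')          # quarterly pandas.DataFrame
# tickers.history(period='1mo', interval='1d')  # historical OHLCV DataFrame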
|
# -*- coding: utf-8 -*-
# @Time    : 2019/1/27 4:09 PM
# @Author : shiwei-Du
# @Email : dusw0214@126.com
# @File : __init__.py
# @Software: PyCharm
|
# -*- coding: utf-8 -*-
from reversion import revisions as reversion
from django.db.models import Count
from optparse import make_option
from django.core.management.base import BaseCommand
from website.apps.core.models import Language
def condense_classification(classif):
condensers = {
'Austronesian': "An",
"Malayo-Polynesian": "MP",
"Central-Eastern": "CE",
"Eastern": "E",
"South Halmahera-West New Guinea": "SHWNG",
"Western Oceanic": "WOc",
"Oceanic": "Oc",
"West New Guinea": "WNG",
}
for old, new in condensers.items():
classif = classif.replace(old, new)
return classif
class Command(BaseCommand):
args = 'tally'
help = 'Tallys the counts of language data'
output_transaction = True
def handle(self, *args, **options):
tally = {}
families = {}
languages = Language.objects.annotate(count=Count('lexicon')).all()
#languages = languages.filter(count__gt=0)
languages = languages.order_by("classification")
prev_classif = None
total = 0
for count, lang in enumerate(languages, 1):
if lang.classification != prev_classif:
print(condense_classification(lang.classification))
if lang.count < 50:
strength = ' '
elif lang.count < 100:
strength = '* '
elif lang.count < 200:
strength = '** '
else:
strength = '***'
print(" ".join([
"\t",
'%3d' % count,
"%3s" % lang.isocode,
                str(lang).ljust(50),
'%5d' % lang.count,
strength
]))
tally[strength] = tally.get(strength, 0) + 1
for family in lang.family.all():
families[family] = families.get(family, 0) + 1
prev_classif = lang.classification
total += lang.count
print('-' * 76)
print('%d languages' % count)
print('%d lexical items' % total)
print('-' * 76)
families = sorted(families.items(), key=lambda x: x[1], reverse=True)
for f, n in families:
print(' %20s = %3d' % (f, n))
print('-' * 76)
print(' 0-50 = %3d' % tally[' '])
print(' 50-100 = %3d' % tally['* '])
print('100-200 = %3d' % tally['** '])
print('200-+ = %3d' % tally['***'])
print('-' * 76)
|
# Copyright (c) 2013, TeamPRO and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe,json
from frappe import msgprint, _
def execute(filters=None):
columns, data = [], []
row = []
columns = get_columns()
data = get_data(filters)
return columns, data
def get_data(filters):
row = []
data = []
resource = ''
if filters.status == 'All':
tasks = frappe.get_all('Task',{'service':'IT-SW'},['*'])
else:
tasks = frappe.get_all('Task',{'service':'IT-SW','status':filters.status},['*'])
for task in tasks:
awh = 0
project_status = frappe.get_value('Project',task.project,'status')
query = """select sum(hours) as hours from `tabTimesheet Detail` where task= '{task}' """.format(task=task.name)
        ts_dates = frappe.db.sql("""select min(date(from_time)) as tsd,max(date(to_time)) as ted from `tabTimesheet Detail` where task= '{task}' and date(from_time) between '{from_date}' and '{to_date}'""".format(task=task.name,from_date=filters.from_date,to_date=filters.to_date),as_dict=1)[0]
tsd = ts_dates.tsd
ted = ts_dates.ted
timesheet_hours = frappe.db.sql(query,as_dict=1)[0]
if timesheet_hours.hours:
awh = round(timesheet_hours.hours,2)
if task._assign:
assigned_to = json.loads(task._assign)
if assigned_to:
resource = frappe.get_value('Employee',{'user_id':assigned_to[0]},['short_code'])
row = [task.name,task.subject,task.status,task.project,project_status,task.exp_start_date,task.exp_end_date,task.expected_time,awh,resource,tsd,ted]
data.append(row)
return data
def get_columns():
return [
{
"label": _("Task ID"),
"fieldname": "task",
"fieldtype": "Link",
"options": "Task",
"width": 120
},
{
"label": _("Subject"),
"fieldname": "subject",
"fieldtype": "Data",
"width": 300
},
{
"label": _("Task Status"),
"fieldname": "task_status",
"fieldtype": "Data",
"width": 120
},
{
"label": _("Project"),
"fieldname": "project",
"fieldtype": "Link",
"options": "Project",
"width": 120
},
{
"label": _("Project Status"),
"fieldname": "project_status",
"fieldtype": "Data",
"width": 120
},
{
"label": _("ESD"),
"fieldname": "esd",
"fieldtype": "Date",
"width": 100
},
{
"label": _("EED"),
"fieldname": "eed",
"fieldtype": "Date",
"width": 100
},
{
"label": _("EWH"),
"fieldname": "ewh",
"fieldtype": "float",
"width": 80
},
{
"label": _("AWH"),
"fieldname": "awh",
"fieldtype": "float",
"width": 80
},
{
"label": _("Resource"),
"fieldname": "resource",
"fieldtype": "Data",
"width": 120
},
{
"label": _("TSD"),
"fieldname": "tsd",
"fieldtype": "Date",
"width": 100
},
{
"label": _("TED"),
"fieldname": "ted",
"fieldtype": "Date",
"width": 100
}
]
|
'''
@author: Nia Catlin
Various config file handling routines
'''
import ConfigParser, os, time
import _winreg
CONFIG_FILE = None
config = None
def writeConfig():
try:
with open(CONFIG_FILE, 'w') as configfile:
config.write(configfile)#,space_around_delimiters=False)
except: print('Failed to write config file')
def isActive(trigger):
if trigger in config.get('TRIGGERS','alwaystriggers').split(','):
return (True,'Always')
elif trigger in config.get('TRIGGERS','lockedtriggers').split(','):
return (True,'Locked')
else:
return (False,False)
def loadConfig():
global CONFIG_FILE
try:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Lockwatcher")
CONFIG_FILE = str(_winreg.QueryValueEx(key,'ConfigPath')[0])
except:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Wow6432Node\Lockwatcher")
CONFIG_FILE = str(_winreg.QueryValueEx(key,'ConfigPath')[0])
configDir = os.path.split(CONFIG_FILE)[0]
time.sleep(0.1)
if not os.path.exists(CONFIG_FILE) or os.path.getsize(CONFIG_FILE)<20:
global config
print('writing new config')
config = ConfigParser.ConfigParser()
config.add_section('TRIGGERS')
config.set('TRIGGERS','bluetooth_device_id','')
config.set('TRIGGERS','kbd_kill_combo_1','')
config.set('TRIGGERS','kbd_kill_combo_2','')
config.set('TRIGGERS','low_temp','21')
config.set('TRIGGERS','lockedtriggers','E_DEVICE,E_CHASSIS_MOTION,E_ROOM_MOTION,E_NET_CABLE_IN,E_NET_CABLE_OUT,E_KILL_SWITCH_2')
config.set('TRIGGERS','alwaystriggers','E_KILL_SWITCH_1')
config.set('TRIGGERS','dismount_tc','False')
config.set('TRIGGERS','exec_shellscript','False')
config.set('TRIGGERS','script_timeout','0')
config.set('TRIGGERS','adapterconguids','')
config.set('TRIGGERS','adapterdisconguids','')
config.set('TRIGGERS','ballistix_log_file','')
config.set('TRIGGERS','tc_path','')
config.set('TRIGGERS','ispy_path','')
config.set('TRIGGERS','room_cam_id','')
defaultLogPath = os.path.join(configDir,'lw-log.txt')
config.set('TRIGGERS','logfile',defaultLogPath)
config.set('TRIGGERS','debuglog','False')
config.set('TRIGGERS','test_mode','False')
config.add_section('EMAIL')
config.set('EMAIL','email_alert','False')
config.set('EMAIL','email_imap_host','imap.changeme.domain')
config.set('EMAIL','email_smtp_host','smtp.changeme.domain')
config.set('EMAIL','email_username','changeme')
config.set('EMAIL','email_password','changeme')
config.set('EMAIL','email_secret','changeme')
config.set('EMAIL','bad_command_limit','3')
config.set('EMAIL','enable_remote','False')
config.set('EMAIL','email_motion_picture','False')
config.set('EMAIL','alert_email_address','yourphone@mail.domain')
config.set('EMAIL','command_email_address','yourphone@mail.domain')
config.set('EMAIL','sender_email_address','yourpc@mail.domain')
writeConfig()
else:
config = ConfigParser.ConfigParser()
config.read(CONFIG_FILE)
return config
config = loadConfig()
def reloadConfig():
global config
config = loadConfig()
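# Illustrative check (hypothetical trigger name): with the default config written
# above, 'E_KILL_SWITCH_1' is listed under alwaystriggers, so:
if __name__ == '__main__':
    print(isActive('E_KILL_SWITCH_1'))  # expected: (True, 'Always') with the defaults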
|
import os
import cv2
import argparse
import numpy as np
from tqdm import tqdm
from PIL import Image
# argparse
parser = argparse.ArgumentParser()
parser.add_argument('--load_path', help='load_path', type=str)
parser.add_argument('--save_path', help='save_path', type=str)
args = parser.parse_args()
def get_path():
load_path = args.load_path
save_path = args.save_path
return load_path, save_path
def get_video_list(load_path):
video_list = os.listdir(load_path)
return video_list
def make_dir(save_path):
save_path = os.path.abspath(save_path)
if not os.path.isdir(save_path):
os.mkdir(save_path)
def get_video_frame(video, load_path, save_path):
save_path = os.path.abspath(save_path)
video_save_path = os.path.join(save_path, video[:-4])
video_save_path = os.path.abspath(video_save_path)
if not os.path.isdir(video_save_path):
os.mkdir(video_save_path)
cap = cv2.VideoCapture(os.path.join(load_path, video))
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
frame_len = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
for i in range(frame_len):
        ret, frame = cap.read()
        if not ret:
            # stop early if the decoder returns no more frames
            break
        frame = cv2.resize(frame, dsize=(256, 256), interpolation=cv2.INTER_LINEAR)
gray= cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
fgmask = fgbg.apply(frame)
os.chdir(video_save_path)
cv2.imwrite('{}.png'.format(i), gray)
cv2.imwrite('{}_fgbg.png'.format(i), fgmask)
cap.release()
def main():
load_path, save_path = get_path()
video_list = get_video_list(load_path)
make_dir(save_path)
for i in tqdm(range(len(video_list)), mininterval=1):
video = video_list[i]
get_video_frame(video, load_path, save_path)
    print('All done')
if __name__ == '__main__':
main()
|
# for item in "Python":
# print(item)
# for item in ["Mosh", "John", "Sarah"]:
# print(item)
# for item in [1, 2, 3, 4]:
# print(item)
# for item in range(10):
# print(item)
# for item in range(5, 10, 2):
# print(item)
"""
write a program to calculate all shopping item in shopping cart
"""
prices = [10, 20, 30]
for price in prices:
print(f"Total: {sum(prices)}")
break
prices = [10, 20, 30]
total = 0
for price in prices:
total += price
print(f"Total: {total}")
|
# Generated by Django 2.0.8 on 2019-06-17 08:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('app_challenges_sections_units', '0010_auto_20190617_0808'),
]
operations = [
migrations.AddField(
model_name='challengesectionunit',
name='thumbnail',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
]
|
import pytest # Pytest library for Python unit testing
from ukpostcodes.postcode import Postcode
# List of all correct and valid UK post codes
formatting_positive_value = [
"EC1A 1BB", # already formatted
"EC1A1BB", # uppercase but no space
"ec1a1bb", # lower case with no spaces
# Other combinations with varied length string
"w1a0ax",
"m11ae",
"b338th",
"cr26xh",
"dn551pt",
"BL01AA"
]
# List of invalid postcodes with wrong length
formatting_negative_value_length = [
"EC1A1BBasasas", # more than 8 characters
"EC", # less than 5 characters
]
# List of invalid postcodes with special characters
formatting_negative_value_special_char = [
"ec1#bb", # special symbols #
"m1$1ae" # special symbols $
]
# List of all invalid UK post codes
validate_negative_values = [
'QC1A 1BB', # Invalid: The letters QVX are not used in the first position
'VC1A 1BB', # Invalid: The letters QVX are not used in the first position
'XC1A 1BB', # Invalid: The letters QVX are not used in the first position
'AI1A 1BB', # Invalid: The letters IJZ are not used in the second position
'AJ1A 1BB', # Invalid: The letters IJZ are not used in the second position
'A9X 0AX', # Invalid: only letters to appear in the third position are ABCDEFGHJKPSTUW
'AA9C 0AX', # Invalid: only letters to appear in the fourth position are ABEHMNPRVWXY
'AA9A 9CZ', # Invalid: inward code do not use CIKMOV
'BBB 1AA', # Invalid: all letters in outward code
'1111 1AA', # Invalid: all digits in Outward code
'99AA 1AA', # Invalid: all Digits post code area
'AA1 AA', # Invalid: all letters in inward code,
'AB1 1AA', # Special Case
'BR11 1AA', # Special case
'BL1 1AA'
]
@pytest.fixture(params=formatting_positive_value)
def formatting_positive_value(request):
return request.param
@pytest.fixture(params=formatting_negative_value_length)
def formatting_negative_value_length(request):
return request.param
@pytest.fixture(params=formatting_negative_value_special_char)
def formatting_negative_value_special_char(request):
return request.param
def test_positive_format_postcode1(formatting_positive_value):
"""Function to test format_postcode method for positive cases
arguments:
formatting_positive_value -- list of valid uk post codes
"""
postcode = Postcode()
print(postcode.format_postcode(formatting_positive_value))
# Message is "Formatted" if formatting of post code is successful
assert postcode.message == "Formatted"
def test_positive_format_postcode2():
"""Function to test format_postcode method for positive cases
arguments:
formatting_positive_value -- list of valid uk post codes
"""
postcode = Postcode()
print(postcode.format_postcode("w1a0ax"))
assert postcode.message == "Formatted" and postcode.formatted_postcode == "W1A 0AX"
def test_negative_format_postcode1(formatting_negative_value_length):
"""Function to test format_postcode method for negative cases(invalid length)
arguments:
formatting_positive_value -- list of post codes with invalid length
"""
postcode = Postcode()
print(postcode.format_postcode(formatting_negative_value_length))
assert postcode.valid == False and postcode.message == "ERROR: 5 to 8 characters only"
def test_negative_format_postcode2(formatting_negative_value_special_char):
"""Function to test format_postcode method for negative cases(Special chars)
arguments:
formatting_positive_value -- list of post codes with special char
"""
postcode = Postcode()
print(postcode.format_postcode(formatting_negative_value_special_char))
assert postcode.valid == False and postcode.message == "ERROR: No special Characters allowed"
@pytest.fixture(params=validate_negative_values)
def validate_negative_values(request):
return request.param
def test_positive_validate_postcode1(formatting_positive_value):
"""Function to test validate_postcode method for positive cases
arguments:
formatting_positive_value -- list of valid uk post codes
"""
postcode = Postcode()
postcode.validate_postcode(formatting_positive_value)
assert postcode.valid == True and postcode.message == "VALID: the post code is valid"
def test_negative_validate_postcode1(validate_negative_values):
"""Function to test validate_postcode method for negative cases
arguments:
validate_negative_values -- invalid uk post codes
"""
postcode = Postcode()
postcode.split_validate_postcode(validate_negative_values)
assert postcode.valid == False and postcode.message == "INVALID: the post code is invalid"
def test_positive_split_postcode1():
"""Function to test split_validate_postcode method for positive cases;
negative cases are handled by the methods this function call
"""
postcode = Postcode()
postcode.split_validate_postcode("SW1W 0NY")
    # Valid is True for valid UK post codes, which are split into their respective components
assert postcode.valid == True and postcode.postcode_area == "SW" and postcode.postcode_district == "1W" and\
postcode.postcode_sector == "0" and postcode.postcode_unit == "NY"
def test_positive_split_postcode2():
"""Function to test split_validate_postcode method for positive cases;
negative cases are handled by the methods this function call
"""
postcode = Postcode()
postcode.split_validate_postcode("M1 1AE")
    # Valid is True for valid UK post codes, which are split into their respective components
assert postcode.valid == True and postcode.postcode_area == "M" and postcode.postcode_district == "1" and\
postcode.postcode_sector == "1" and postcode.postcode_unit == "AE"
|
import os
import time
"""
NOTE: at the time of crawling, the domain link should be given as http://cse.nitk.ac.in and not as http://cse.nitk.ac.in/ (no trailing slash)
TODO: remove repeated lines in the levels array
"""
fp = open("sitemap_links.txt", "r")
site_links = []
n = 10
levels = [[] for x in range(n)]
link = fp.readline()
# creating array of links by reading sitemap_links.txt file
while link:
site_links.append(link[:-1])
link = fp.readline()
# print(site_links)
cur_link = site_links[0]
# print(levels)
# for 1st iteration
for i in range(len(site_links)):
part_of_site = site_links[i][:len(cur_link)]
if part_of_site == cur_link:
site_links[i] = site_links[i][len(cur_link) + 1:]
levels[0].append(cur_link)
print(site_links)
"""
for i in range(len(site_links)):
if site_links[i] == '':
pass
else:
print(site_links[i])
"""
set_new_cur_link = 1
k = 1
this_level_arr = []
relation = [[[] for y in range(len(site_links))] for x in range(n)]
# remaining iterations
# k = level
while k < n:
# creating this level headers array
this_level_arr = []
for j in range(len(site_links)):
if site_links[j] == '':
pass
elif '/' in site_links[j]:
slash_ind = site_links[j].index('/')
link = site_links[j][:slash_ind]
this_level_arr.append(link)
else:
this_level_arr.append(site_links[j])
# print(this_level_arr)
# copy this_level_arr into the correct level
for j in range(len(this_level_arr)):
levels[k].append(this_level_arr[j])
for i in range(len(site_links)):
for j in range(len(this_level_arr)):
if set_new_cur_link:
cur_link = this_level_arr[j]
set_new_cur_link = 0
if '/' not in site_links[i]:
pass
else:
# remove current headers from the site_links and add relation from this headers to next level children
# e.g. for level 1 parent relation 1 contains its children (obv which lies in level 2)
part_of_site = site_links[i][:len(cur_link)]
if part_of_site == cur_link:
site_links[i] = site_links[i][len(cur_link) + 1:]
if '/' in site_links[i]:
slash_ind = site_links[i].index('/')
link = site_links[i][:slash_ind]
relation[k][j].append(link)
else:
relation[k][j].append(site_links[i])
set_new_cur_link = 1
# print(site_links)
k += 1
print(site_links)
print("-"*30 + "Levels" + "-"*30)
for i in range(len(levels)):
print(i, levels[i])
print()
print("-"*30 + "Relations" + "-"*30)
for i in range(len(relation)):
print(i, relation[i])
|
import asyncio
import dataclasses
from datetime import datetime, timedelta
import json
import logging
import re
from typing import Any, Dict, Tuple, List, Iterable, Optional
import aiohttp
import pandas as pd
import elasticsearch
from bs4 import BeautifulSoup
from newspaper import Article
from omegaconf import DictConfig
from fake_useragent import UserAgent
from lxml import etree
from .base import BaseScraper, BasePageScraper
from ..store import es, model
log = logging.getLogger(__name__)
logging.getLogger("urllib3").setLevel(logging.WARNING)
class CnyesApiScraper(BaseScraper):
def scraper_cfg_to_kwargs(self) -> Dict[str, Any]:
return {
"start": datetime.fromisoformat(self.scraper_cfg.start),
"until": datetime.now()
if self.scraper_cfg.until == None
else datetime.fromisoformat(self.scraper_cfg.until),
}
def startpoints(self, start: datetime, until: datetime) -> List[str]:
urls = []
while start < until:
_until = start + timedelta(days=30)
if _until > until:
_until = until
urls.append(
"https://news.cnyes.com/api/v3/news/category/headline"
+ f"?startAt={int(start.timestamp())}"
+ f"&endAt={int(_until.timestamp())}"
+ "&limit=100"
)
start = _until
return urls
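    # Illustrative example: start=2021-01-01 and until=2021-02-15 yields two URLs,
    # one covering 2021-01-01 -> 2021-01-31 and one covering 2021-01-31 -> 2021-02-15,
    # with the bounds passed as unix timestamps in the startAt/endAt query parameters.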
def parse(
self, resp: aiohttp.ClientResponse, jdata: dict
) -> Tuple[List[es.Page], List[str]]:
        # TODO: this class does not use BaseScraper.parse(); it overrides worker() instead of BaseScraper.worker(), which is confusing
pages, new_urls = [], []
if jdata["items"]["next_page_url"] is not None:
new_urls.append(f'https://news.cnyes.com{jdata["items"]["next_page_url"]}')
for e in jdata["items"]["data"]:
url = f"https://news.cnyes.com/news/id/{e['newsId']}"
try:
es.Page.get(id=url)
except elasticsearch.NotFoundError:
p = es.Page(
from_url=url,
entry_title=e["title"],
entry_summary=e["summary"],
entry_published_at=datetime.fromtimestamp(e["publishAt"]),
entry_tickers=[x for x in map(lambda x: x["symbol"], e["market"])],
entry_meta=json.dumps(e),
)
pages.append(p)
return pages, new_urls
async def worker(self, queue: asyncio.Queue):
ua = UserAgent(verify_ssl=False, use_cache_server=False).random
async with aiohttp.ClientSession(
raise_for_status=True, headers=[("User-Agent", ua)]
) as sess:
while True:
url = await queue.get()
try:
async with sess.get(url) as resp:
pages, new_urls = self.parse(resp, await resp.json())
for p in pages:
p.save()
for u in new_urls:
queue.put_nowait(u)
log.info(f"scraped: {url}")
await asyncio.sleep(30)
except aiohttp.ClientError as e:
log.error(e)
self.error_urls.append(url)
except Exception as e:
log.error(f"Error on: {url}")
log.error(e)
finally:
queue.task_done()
def _parse_tickers(node: etree.Element) -> Optional[List[model.TickerText]]:
if node is None:
return
tickers = []
# find all <a> and de-duplicate their parents
for p in set([e.getparent() for e in node.cssselect("a")]):
text = (
etree.tostring(p, method="text", encoding="utf-8").decode("utf-8").strip()
)
tt = model.TickerText(text)
for a in p.cssselect("a"):
href = a.get("href")
if "invest.cnyes.com" in href:
tt.labels.append(("", a.text))
if len(tt.labels) > 0:
tickers.append(tt)
return tickers
def _parse_keywords(html: str) -> List[str]:
soup = BeautifulSoup(html, "html.parser")
e = soup.select_one('meta[itemprop="keywords"]')
if e is not None:
return e["content"].split(",")
else:
return []
class CnyesPageScraper(BasePageScraper):
domain = "cnyes.com"
kw_regex = re.compile(r"^\/tag\/(\w+)")
@classmethod
def parse(
cls, from_url: str, resolved_url: str, http_status: int, html: str
) -> List[model.Page]:
article = Article(resolved_url)
article.set_html(html)
article.parse()
parsed = model.Parsed(
keywords=_parse_keywords(html),
tickers=_parse_tickers(article.clean_top_node),
)
return [
model.Page(
from_url=from_url,
resolved_url=resolved_url,
http_status=http_status,
article_metadata=dict(article.meta_data),
article_published_at=article.publish_date,
article_title=article.title,
article_text=article.text,
# article_html=etree.tostring(
# article.clean_top_node, encoding='utf-8').decode('utf-8'),
parsed=parsed,
fetched_at=datetime.now(),
)
]
|
import os
import sys
# Windows only
# Check that git is installed
if not os.path.isfile(r"C:\Program Files\Git\git-cmd.exe"):
print("Git not installed. Install to continue, or simply change\npath in code to point to proper exe.")
sys.exit(0)
# Check proper message arguments, define commit message.
try:
msg = "git commit -m " + "\"" + sys.argv[1] + "\""
except IndexError:
print("Run as: git-push.py \"commit message\"\nFor Windows only.")
sys.exit(0)
# prewrite git commands
def gitwork():
os.system("git add .")
os.system(msg)
os.system("git push")
# review changes
path = os.getcwd()
print("Push changes in: " + path)
print("with message: " + sys.argv[1])
check = input("[y,n]: ")
# Ask user to continue.
if check == "y":
gitwork()
elif check == "n":
print("Cancelled.")
sys.exit(0)
else:
print("Unknown response, aborted.")
# Exit
print("Done.")
sys.exit(0)
|
"""
Create a mesh from a KnitNetwork by finding the cycles (faces) of the
network.
---
[NOTE] This algorithm relies on finding cycles (quads and triangles) for the
supplied network. This is not a trivial task in 3d space - at least to my
knowledge. Assigning a geometrybase to the KnitNetwork on initialization
and choosing cyclesmode 1 or 2 greatly improves reliability!
Nonetheless, success depends strongly on the curvature of the original
surface or mesh used.
---
[IMPLEMENTATION DETAIL] N-Gons are deliberately deactivated but can be activated
when editing the function call inside the scripting component and increasing
the max_valence value.
Inputs:
Toggle: Set to True to activate the component.
{item, boolean}
KnitNetwork: The KnitNetwork to create the mesh from.
{item, KnitNetwork}
CyclesMode: Determines how the neighbors of each node are sorted when
finding the cycles of the network.
[-1] equals to using the World XY plane
[0] equals to using a plane normal to the origin nodes
closest point on the geometrybase
[1] equals to using a plane normal to the average of the
origin and neighbor nodes' closest points on the
geometrybase
[2] equals to using an average plane between a plane fit to
the origin and its neighbor nodes and a plane normal to
the origin nodes closest point on the geometrybase.
Defaults to -1.
{item, int}
Outputs:
Mesh: The Rhino mesh. {item, mesh}
Remarks:
Author: Max Eschenbach
License: MIT License
Version: 200705
"""
# PYTHON STANDARD LIBRARY IMPORTS
from __future__ import division
# GHPYTHON SDK IMPORTS
from ghpythonlib.componentbase import executingcomponent as component
import Grasshopper, GhPython
import System
import Rhino
import rhinoscriptsyntax as rs
# GHENV COMPONENT SETTINGS
ghenv.Component.Name = "CreateMeshFromKnitNetwork"
ghenv.Component.NickName ="CMFKN"
ghenv.Component.Category = "Cockatoo"
ghenv.Component.SubCategory = "06 KnitNetwork"
# LOCAL MODULE IMPORTS
try:
from cockatoo import KnitNetwork
from cockatoo import KnitDiNetwork
except ImportError:
errMsg = "The Cockatoo python module seems to be not correctly " + \
"installed! Please make sure the module is in you search " + \
"path, see README for instructions!."
raise ImportError(errMsg)
class CreateMeshFromKnitNetwork(component):
def RunScript(self, Toggle, KN, CyclesMode):
# sanitize inputs
if CyclesMode == None:
CyclesMode = -1
elif CyclesMode < 0:
CyclesMode = -1
elif CyclesMode > 2:
CyclesMode = 2
# initialize Mesh
Mesh = Grasshopper.DataTree[Rhino.Geometry.Mesh]()
if Toggle and KN:
if CyclesMode != -1 and not KN.graph["reference_geometry"]:
errMsg = "KnitNetwork has no reference geometry " + \
"attached! Will fall back to World XY plane."
rml = self.RuntimeMessageLevel.Warning
self.AddRuntimeMessage(rml, errMsg)
# create mesh from knitnetwork
if isinstance(KN, KnitNetwork):
Mesh = KN.create_mesh(mode=CyclesMode,
max_valence=4)
elif isinstance(KN, KnitDiNetwork):
if KN.verify_dual_form():
Mesh = KnitNetwork(KN).create_mesh(mode=CyclesMode,
max_valence=4)
elif Toggle and not KN:
rml = self.RuntimeMessageLevel.Warning
rMsg = "No KnitNetwork input!"
self.AddRuntimeMessage(rml, rMsg)
return Mesh
|
#!/usr/bin/python
"""
Plot functions of redshift for RSDs.
"""
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
import os
cosmo = rf.experiments.cosmo
fname = 'spherex-fsig8-scaledep.pdf'
names = [ 'gSPHEREx1_mgscaledep', 'gSPHEREx2_mgscaledep', 'EuclidRef_mg' ]
#labels = [ 'SPHEREx 0.003', 'SPHEREx 0.008', 'Euclid spectro-z', 'DESI spectro-z']
labels = [ '$\sigma(z)/(1+z) < 0.003$', '$\sigma(z)/(1+z) < 0.008$',
r'${\rm Euclid}$ ${\rm spectro-z}$', 'DESI spectro-z']
#colours = [ '#80B6D6', '#93C993', '#757575', '#a8a8a8'] # '#c8c8c8',
colours = [ '#F82E1E', '#1F7CC0', '#333333', '#a8a8a8']
# Dashes for different k bins
dashes = [[], [6,4], [4,2], [4,2,4,4]]
lws = [2.8, 2.8, 1.5, 1.]
alphas = [1., 1., 0.5, 0.2]
# Fiducial value and plotting
P.subplot(111)
for k in range(len(names)):
root = "output/" + names[k]
# Load cosmo fns.
dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
zc, Hc, dAc, Dc, fc = dat
z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T
kc = np.genfromtxt(root+"-fisher-kc.dat").T
# Load Fisher matrices as fn. of z
Nbins = zc.size
F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]
# EOS FISHER MATRIX
# Actually, (aperp, apar) are (D_A, H)
pnames = rf.load_param_names(root+"-fisher-full-0.dat")
print pnames
#zfns = ['A', 'b_HI', 'f', 'H', 'DA', 'aperp', 'apar']
zfns = ['H', 'DA', 'aperp', 'apar']
for j in range(4): zfns += ['k%dbs8' % j, 'k%dfs8' % j]
excl = ['A', 'Tb', 'n_s', 'sigma8', 'omegak', 'omegaDE', 'w0', 'wa', 'h',
'gamma', 'N_eff', 'pk*', 'f', 'b_HI',
'gamma0', 'gamma1', 'eta0', 'eta1', 'A_xi', 'logkmg',
'fs8*', 'bs8*', 'sigma8tot', 'sigma_8',]
# Combine Fisher matrices
F, lbls = rf.combined_fisher_matrix( F_list,
expand=zfns, names=pnames,
exclude=excl )
print lbls
# Look for unconstrained bins and remove them
excl = []
for j in range(4):
for n in range(zc.size):
pbs8 = 'k%dbs8%d' % (j, n)
pfs8 = 'k%dfs8%d' % (j, n)
Fbs8 = np.abs(np.diag(F)[lbls.index(pbs8)])
Ffs8 = np.abs(np.diag(F)[lbls.index(pfs8)])
if (Fbs8 < 1e-8) or (Ffs8 < 1e-8):
excl += [pbs8, pfs8]
F, lbls = rf.combined_fisher_matrix( [F,], expand=[],
names=lbls, exclude=excl )
cov = np.linalg.inv(F)
errs = np.sqrt(np.diag(cov))
# Identify scale-dependent functions
for j in range(2):
l = labels[k] if j == 1 else None
try:
pfs8 = rf.indices_for_param_names(lbls, 'k%dfs8*' % j)
pbs8 = rf.indices_for_param_names(lbls, 'k%dbs8*' % j)
# Plot errors as fn. of redshift
err = errs[pfs8] / (cosmo['sigma_8']*fc*Dc)
if 'Euclid' in labels[k]:
idxs = np.where(np.logical_and(zc >= 1.1, zc <= 1.9))
zc = zc[idxs]
err = err[idxs]
P.plot( zc, err, color=colours[j], lw=lws[j],
alpha=alphas[j], dashes=dashes[k] )
# Dummy, for legend
P.plot( zc, -err, color='k', label=l, lw=lws[j],
alpha=alphas[j], dashes=dashes[k] )
# marker=marker[k], markersize=ms[k], markeredgecolor=colours[k],
except:
pass
P.tick_params(axis='both', which='major', labelsize=20, width=1.5, size=8., pad=10)
P.tick_params(axis='both', which='minor', labelsize=20, width=1.5, size=5.)
# Set axis limits
P.xlim((-0.001, 2.4))
P.ylim((8e-3, 5.))
#P.xscale('log')
# fontdict={'size':'x-large'}) #, bbox=dict(ec='k', fc='none', lw=1.2))
P.xlabel('$z$', labelpad=10., fontdict={'fontsize':'xx-large'})
P.ylabel('$\sigma(f \sigma_8) / (f \sigma_8)$', labelpad=15., fontdict={'fontsize':'xx-large'})
# Set tick locations
#P.gca().yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.02))
#P.gca().yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.01))
P.gca().xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.5))
P.yscale('log')
leg = P.legend(prop={'size':17}, loc='upper left', frameon=False, ncol=2)
#leg.get_frame().set_edgecolor('w')
#leg.get_frame().set_alpha(0.5)
# Scale-dependence labels
P.figtext(0.25, 0.26, "0.01 < k < 0.1 Mpc$^{-1}$", fontsize=16, backgroundcolor='w', color=colours[1])
P.figtext(0.38, 0.705, "0 < k < 0.01 Mpc$^{-1}$", fontsize=16, backgroundcolor='w', color=colours[0])
# Set size
P.tight_layout()
#P.gcf().set_size_inches(8.4, 7.8)
#P.gcf().set_size_inches(9.5, 6.8)
P.savefig(fname, transparent=True)
P.show()
|
import os
import numpy as np
class DeepDIVADatasetAdapter(object):
"""
Creates a directory & file based training environment that natively works with DeepDIVA CNN implementation.
Symlinks are used to reference files in self.root directory.
"""
def __init__(self, input_dir):
self.root = input_dir
    # returns [[path, label],
    #          [path2, label], ...]
def read_folder_dataset(self, subfolder="train"):
"""
:param subfolder: string. subfolder to scan for files/images
:return: 2D ndarray. [[file_path, label]...]
"""
dataset_root = os.path.join(self.root, subfolder)
dataset = []
for label in os.listdir(dataset_root):
label_path = os.path.join(dataset_root, label)
files = os.listdir(label_path)
for picture in files:
dataset.append(os.path.join(label_path, picture))
dataset.append(label)
return np.array(dataset).reshape(len(dataset) // 2, 2)
def create_symlink_dataset(self, dataset, output_dir, subfolder='train'):
"""
:param dataset: 2D ndarray. [[file_path, label]...]
:param output_dir: string, root path for symlinks
:param subfolder: string: train, val, test
"""
for picture_path, label in dataset:
label_dir = os.path.join(output_dir, subfolder, label)
filename = os.path.basename(picture_path)
os.makedirs(label_dir, exist_ok=True)
os.symlink(picture_path, os.path.join(label_dir, filename))
def copy_symlink(self, output_dir, subfolder='train'):
ds = self.read_folder_dataset(subfolder)
self.create_symlink_dataset(ds, output_dir, subfolder)
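# Minimal usage sketch (hypothetical paths): mirror the 'train' split of a source
# dataset into a new DeepDIVA-style training environment via symlinks.
if __name__ == "__main__":
    adapter = DeepDIVADatasetAdapter("/data/source_dataset")
    adapter.copy_symlink("/data/deepdiva_env", subfolder="train")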
|
#py_socket_client_pig.py
import socket
from py_socket_connector_info import\
ip_address, port, protocol_family_name, socket_type
#create a socket:
with socket.socket(protocol_family_name, socket_type) as soc:
soc.connect( (ip_address, port) )
print("Yea! We're talking to {}\n".format(ip_address))
while True:
msg=input("Pig Latin translator (or 'stop'): ")
        soc.send(bytes(msg, 'utf-8'))
print(soc.recv(64))
if 'stop' in msg:
break
|
from django.shortcuts import render
import os
from mimetypes import guess_type
from django.conf import settings
from django.utils.encoding import force_bytes
from wsgiref.util import FileWrapper
from django.http import Http404, HttpResponse
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import render, get_object_or_404
from .models import RegHeaderImage, RegCategory, RegForm
def reg_list(request):
header = RegHeaderImage.objects.all()
cat = RegCategory.objects.all()
template = 'registry/regforms_list.html'
context = {
'header': header,
'cat': cat
}
return render(request, template, context)
def reg_detail(request, slug=None):
header = RegHeaderImage.objects.all()
try:
cat_q = get_object_or_404(RegCategory, slug=slug)
except RegCategory.MultipleObjectsReturned:
cat_q = RegCategory.objects.filter(slug=slug).order_by('title').first()
reg_f = cat_q.regform_set.all()
# reg1 = RegForm.objects.filter(section__title='section1')
# reg2 = RegForm.objects.filter(section__title='section2')
template = 'registry/regforms_detail.html'
context = {
'header': header,
'cat_q': cat_q,
'reg_f': reg_f
}
return render(request, template, context)
def reg_download(request):
pass
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@AUTHOR:Joselyn Zhao
@CONTACT:zhaojing17@foxmail.com
@HOME_PAGE:joselynzhao.top
@SOFTWERE:PyCharm
@FILE:20052901.py
@TIME:2020/5/29 19:24
@DES: LeetCode 3. Longest Substring Without Repeating Characters
'''
class Solution:
def __init__(self,word):
self.lengthOfLongestSubstring(word)
def lengthOfLongestSubstring(self, s):
max_len = 1
for i in range(len(s)):
# print(s[i])
exsit_list = [s[i]]
current_len = 1
for j in range(i+1,len(s)):
# print(s[j])
if s[j] not in exsit_list:
exsit_list.append(s[j])
current_len +=1
else:
break
if current_len>max_len:
max_len = current_len
print(max_len)
    def lengthOfLongestSubstring02(self, s):
        # Sliding-window variant: 'box' holds the characters of the current
        # duplicate-free window; on a repeat, drop everything up to and
        # including the earlier occurrence of that character.
        if len(s) == 0:
            return 0
        max_len = 1
        box = []
        for ch in s:
            if ch in box:
                index = box.index(ch)
                box = box[index + 1:]
            box.append(ch)
            max_len = max(max_len, len(box))
        return max_len
s = Solution('abcabcbb')
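# Quick check of the sliding-window variant above (illustrative inputs only):
print(s.lengthOfLongestSubstring02('pwwkew'))  # expected: 3
print(s.lengthOfLongestSubstring02(''))        # expected: 0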
|
#!/usr/bin/env python
from time import sleep
from sys import exit
try:
from pyfiglet import figlet_format
except ImportError:
exit("This script requires the pyfiglet module\nInstall with: sudo pip install pyfiglet")
import unicornhat as unicorn
print("""Figlet
You should see scrolling text that is defined in the TXT variable.
If the text moves in the wrong direction, change the rotation from 0 to 180.
Text output is kind of limited on a pHAT of course because most letters don't
fit on the small display of 4x8.
""")
unicorn.set_layout(unicorn.AUTO)
unicorn.rotation(0)
unicorn.brightness(0.5)
width,height=unicorn.get_shape()
TXT = "HELLO"
figletText = figlet_format(TXT+' ', "banner", width=1000) # banner font generates text with height 7
textMatrix = figletText.split("\n")[:width] # width should be 8 on both HAT and pHAT!
textWidth = len(textMatrix[0]) # the total length of the result from figlet
i = -1
def step():
global i
i = 0 if i>=100*textWidth else i+1 # avoid overflow
for h in range(height):
for w in range(width):
hPos = (i+h) % textWidth
chr = textMatrix[w][hPos]
if chr == ' ':
unicorn.set_pixel(width-w-1, h, 0, 0, 0)
else:
unicorn.set_pixel(width-w-1, h, 255, 0, 0)
unicorn.show()
while True:
step()
sleep(0.2)
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
if False:
from serve_wsgi_basic import application
else:
from serve_wsgi_map import application
httpd = make_server('162.242.218.22', 8081, application)
httpd.serve_forever()
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution0437:
def pathSum(self, root, target):
# define global result and path
self.result = 0
cache = {0:1}
# recursive to get result
self.dfs(root, target, 0, cache)
# return result
return self.result
def dfs(self, root, target, currPathSum, cache):
# exit condition
if root is None:
return
# calculate currPathSum and required oldPathSum
currPathSum += root.val
oldPathSum = currPathSum - target
# update result and cache
self.result += cache.get(oldPathSum, 0)
cache[currPathSum] = cache.get(currPathSum, 0) + 1
# dfs breakdown
self.dfs(root.left, target, currPathSum, cache)
self.dfs(root.right, target, currPathSum, cache)
# when move to a different branch, the currPathSum is no longer available, hence remove one.
cache[currPathSum] -= 1
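# Minimal usage sketch (assuming the TreeNode definition commented above):
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right
    # Tree [10, 5, -3, 3, 2, None, 11, 3, -2, None, 1]; the paths summing to 8
    # are [5, 3], [5, 2, 1] and [-3, 11], so the expected answer is 3.
    root = TreeNode(10,
                    TreeNode(5,
                             TreeNode(3, TreeNode(3), TreeNode(-2)),
                             TreeNode(2, None, TreeNode(1))),
                    TreeNode(-3, None, TreeNode(11)))
    print(Solution0437().pathSum(root, 8))  # expected: 3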
|
from __future__ import absolute_import
# Interface to the Salesforce BULK API
import os
from collections import namedtuple
from httplib2 import Http
import requests
import urllib2
import urlparse
import xml.etree.ElementTree as ET
from tempfile import TemporaryFile, NamedTemporaryFile
import StringIO
import re
import time
import csv
from . import bulk_states
from . import csv_adapter
UploadResult = namedtuple('UploadResult', 'id success created error')
class BulkApiError(Exception):
def __init__(self, message, status_code=None):
super(BulkApiError, self).__init__(message)
self.status_code = status_code
class BulkJobAborted(BulkApiError):
def __init__(self, job_id):
self.job_id = job_id
message = 'Job {0} aborted'.format(job_id)
super(BulkJobAborted, self).__init__(message)
class BulkBatchFailed(BulkApiError):
def __init__(self, job_id, batch_id, state_message):
self.job_id = job_id
self.batch_id = batch_id
self.state_message = state_message
message = 'Batch {0} of job {1} failed: {2}'.format(batch_id, job_id,
state_message)
super(BulkBatchFailed, self).__init__(message)
class SalesforceBulk(object):
def __init__(self, sessionId=None, host=None, username=None, password=None,
exception_class=BulkApiError):
if not sessionId and not username:
raise RuntimeError(
"Must supply either sessionId/instance_url or username/password")
if not sessionId:
sessionId, endpoint = SalesforceBulk.login_to_salesforce(
username, password)
host = urlparse.urlparse(endpoint)
host = host.hostname.replace("-api", "")
if host[0:4] == 'http':
self.endpoint = host
else:
self.endpoint = "https://" + host
self.sessionId = sessionId
self.jobNS = 'http://www.force.com/2009/06/asyncapi/dataload'
self.jobs = {} # dict of job_id => job_id
self.batches = {} # dict of batch_id => job_id
self.batch_statuses = {}
self.exception_class = exception_class
@staticmethod
def login_to_salesforce(username, password):
env_vars = (
'SALESFORCE_CLIENT_ID',
'SALESFORCE_CLIENT_SECRET',
'SALESFORCE_REDIRECT_URI',
)
missing_env_vars = [e for e in env_vars if e not in os.environ]
if missing_env_vars:
raise RuntimeError(
"You must set {0} to use username/pass login".format(
', '.join(missing_env_vars)))
try:
import salesforce_oauth_request
except ImportError:
raise ImportError(
"You must install salesforce-oauth-request to use username/password")
packet = salesforce_oauth_request.login(
username=username, password=password)
return packet['access_token'], packet['instance_url']
def headers(self, values={}):
default = {"X-SFDC-Session": self.sessionId,
"Content-Type": "application/xml; charset=UTF-8"}
for k, val in values.iteritems():
default[k] = val
return default
# Register a new Bulk API job - returns the job id
def create_query_job(self, object_name, **kwargs):
return self.create_job(object_name, "query", **kwargs)
def create_insert_job(self, object_name, **kwargs):
return self.create_job(object_name, "insert", **kwargs)
def create_update_job(self, object_name, **kwargs):
return self.create_job(object_name, "update", **kwargs)
def create_upsert_job(self, object_name, external_id, **kwargs):
return self.create_job(
object_name, "upsert", external_id=external_id, **kwargs)
def create_delete_job(self, object_name, **kwargs):
return self.create_job(object_name, "delete", **kwargs)
def create_job(self, object_name=None, operation=None, contentType='CSV',
concurrency=None, external_id=None):
assert(object_name is not None)
assert(operation is not None)
assert(operation)
doc = self.create_job_doc(object_name=object_name,
operation=operation,
contentType=contentType,
concurrency=concurrency,
external_id=external_id)
http = Http()
resp, content = http.request(self.endpoint + "/services/async/29.0/job",
"POST",
headers=self.headers(),
body=doc)
self.check_status(resp, content)
tree = ET.fromstring(content)
job_id = tree.findtext("{%s}id" % self.jobNS)
self.jobs[job_id] = job_id
return job_id
def check_status(self, resp, content):
if resp.status >= 400:
msg = "Bulk API HTTP Error result: {0}".format(content)
self.raise_error(msg, resp.status)
def close_job(self, job_id):
doc = self.create_close_job_doc()
http = Http()
url = self.endpoint + "/services/async/29.0/job/%s" % job_id
resp, content = http.request(url, "POST", headers=self.headers(),
body=doc)
self.check_status(resp, content)
def create_job_doc(self, object_name=None, operation=None,
contentType='CSV', concurrency=None, external_id=None):
root = ET.Element("jobInfo")
root.set("xmlns", self.jobNS)
op = ET.SubElement(root, "operation")
op.text = operation
obj = ET.SubElement(root, "object")
obj.text = object_name
if external_id is not None:
ext_id = ET.SubElement(root, "externalIdFieldName")
ext_id.text = external_id
if concurrency:
con = ET.SubElement(root, "concurrencyMode")
con.text = concurrency
ct = ET.SubElement(root, "contentType")
ct.text = contentType
buf = StringIO.StringIO()
tree = ET.ElementTree(root)
tree.write(buf, encoding="UTF-8")
return buf.getvalue()
def create_close_job_doc(self, object_name=None, operation=None,
contentType='CSV'):
root = ET.Element("jobInfo")
root.set("xmlns", self.jobNS)
state = ET.SubElement(root, "state")
state.text = "Closed"
buf = StringIO.StringIO()
tree = ET.ElementTree(root)
tree.write(buf, encoding="UTF-8")
return buf.getvalue()
# Add a BulkQuery to the job - returns the batch id
def query(self, job_id, soql):
if job_id is None:
job_id = self.create_job(
re.search(re.compile("from (\w+)", re.I), soql).group(1),
"query")
http = Http()
uri = self.endpoint + "/services/async/29.0/job/%s/batch" % job_id
headers = self.headers({"Content-Type": "text/csv"})
resp, content = http.request(uri, method="POST", body=soql,
headers=headers)
self.check_status(resp, content)
tree = ET.fromstring(content)
batch_id = tree.findtext("{%s}id" % self.jobNS)
self.batches[batch_id] = job_id
return batch_id
def split_csv(self, csv, batch_size):
csv_io = StringIO.StringIO(csv)
batches = []
for i, line in enumerate(csv_io):
if not i:
headers = line
batch = headers
continue
if not i % batch_size:
batches.append(batch)
batch = headers
batch += line
batches.append(batch)
return batches
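    # Illustrative behaviour (hypothetical CSV): split_csv("Id,Name\n1,a\n2,b\n3,c\n", 2)
    # returns ["Id,Name\n1,a\n", "Id,Name\n2,b\n3,c\n"]; note that the header line
    # is repeated at the start of every batch.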
# Add a BulkUpload to the job - returns the batch id
def bulk_csv_upload(self, job_id, csv, batch_size=2500):
# Split a large CSV into manageable batches
batches = self.split_csv(csv, batch_size)
batch_ids = []
uri = self.endpoint + "/services/async/29.0/job/%s/batch" % job_id
headers = self.headers({"Content-Type": "text/csv"})
for batch in batches:
resp = requests.post(uri, data=batch, headers=headers)
content = resp.content
if resp.status_code >= 400:
                self.raise_error(content, resp.status_code)
tree = ET.fromstring(content)
batch_id = tree.findtext("{%s}id" % self.jobNS)
self.batches[batch_id] = job_id
batch_ids.append(batch_id)
return batch_ids
def raise_error(self, message, status_code=None):
if status_code:
message = "[{0}] {1}".format(status_code, message)
if self.exception_class == BulkApiError:
raise self.exception_class(message, status_code=status_code)
else:
raise self.exception_class(message)
def post_bulk_batch(self, job_id, csv_generator):
uri = self.endpoint + "/services/async/29.0/job/%s/batch" % job_id
headers = self.headers({"Content-Type": "text/csv"})
resp = requests.post(uri, data=csv_generator, headers=headers)
content = resp.content
if resp.status_code >= 400:
self.raise_error(content, resp.status_code)
tree = ET.fromstring(content)
batch_id = tree.findtext("{%s}id" % self.jobNS)
return batch_id
# Add a BulkDelete to the job - returns the batch id
def bulk_delete(self, job_id, object_type,
where, batch_size=2500, limit=1000):
from time import sleep
query_job_id = self.create_query_job(object_type)
soql = "Select Id from %s where %s Limit %d" % (object_type, where, limit)
query_batch_id = self.query(query_job_id, soql)
while not self.is_batch_done(query_job_id, query_batch_id):
sleep(10)
self.close_job(query_job_id)
res = self.get_batch_result_ids(query_batch_id, query_job_id)
q = self.get_batch_results(query_batch_id, res[0], query_job_id)
q.next()
results = []
for row in q:
results.append({'Id': unicode(row.strip('"'))})
if not len(results):
if job_id:
self.close_job(job_id)
return (None, None,)
if job_id is None:
job_id = self.create_job(object_type, "delete")
batch = self.post_bulk_batch(
job_id, csv_adapter.CsvDictsAdapter(iter(results)))
return (job_id, batch)
def lookup_job_id(self, batch_id):
try:
return self.batches[batch_id]
except KeyError:
raise Exception(
"Batch id '%s' is uknown, can't retrieve job_id" % batch_id)
    def job_status(self, job_id=None, batch_id=None):
job_id = job_id or self.lookup_job_id(batch_id)
uri = urlparse.urljoin(self.endpoint,
'/services/async/29.0/job/{0}'.format(job_id))
response = requests.get(uri, headers=self.headers())
if response.status_code != 200:
self.raise_error(response.content, response.status_code)
tree = ET.fromstring(response.content)
result = {}
for child in tree:
result[re.sub("{.*?}", "", child.tag)] = child.text
return result
def job_state(self, job_id):
status = self.job_status(job_id)
if 'state' in status:
return status['state']
else:
return None
def batch_status(self, job_id=None, batch_id=None, reload=False):
if not reload and batch_id in self.batch_statuses:
return self.batch_statuses[batch_id]
job_id = job_id or self.lookup_job_id(batch_id)
http = Http()
uri = self.endpoint + \
"/services/async/29.0/job/%s/batch/%s" % (job_id, batch_id)
resp, content = http.request(uri, headers=self.headers())
self.check_status(resp, content)
tree = ET.fromstring(content)
result = {}
for child in tree:
result[re.sub("{.*?}", "", child.tag)] = child.text
self.batch_statuses[batch_id] = result
return result
def batch_state(self, job_id, batch_id, reload=False):
status = self.batch_status(job_id, batch_id, reload=reload)
if 'state' in status:
return status['state']
else:
return None
def is_batch_done(self, job_id, batch_id):
batch_state = self.batch_state(job_id, batch_id, reload=True)
if batch_state in bulk_states.ERROR_STATES:
status = self.batch_status(job_id, batch_id)
raise BulkBatchFailed(job_id, batch_id, status['stateMessage'])
return batch_state == bulk_states.COMPLETED
# Wait for the given batch to complete, waiting at most timeout seconds
# (defaults to 10 minutes).
def wait_for_batch(self, job_id, batch_id, timeout=60 * 10,
sleep_interval=10):
waited = 0
while not self.is_batch_done(job_id, batch_id) and waited < timeout:
time.sleep(sleep_interval)
waited += sleep_interval
def get_batch_result_ids(self, batch_id, job_id=None):
job_id = job_id or self.lookup_job_id(batch_id)
if not self.is_batch_done(job_id, batch_id):
return False
uri = urlparse.urljoin(
self.endpoint,
"services/async/29.0/job/{0}/batch/{1}/result".format(
job_id, batch_id),
)
resp = requests.get(uri, headers=self.headers())
if resp.status_code != 200:
return False
tree = ET.fromstring(resp.content)
return [str(r.text) for r in
tree.iterfind("{{{0}}}result".format(self.jobNS))]
def get_batch_results(self, batch_id, result_id, job_id=None,
parse_csv=False, logger=None):
job_id = job_id or self.lookup_job_id(batch_id)
logger = logger or (lambda message: None)
uri = urlparse.urljoin(
self.endpoint,
"services/async/29.0/job/{0}/batch/{1}/result/{2}".format(
job_id, batch_id, result_id),
)
logger('Downloading bulk result file id=#{0}'.format(result_id))
resp = requests.get(uri, headers=self.headers(), stream=True)
if not parse_csv:
iterator = resp.iter_lines()
else:
iterator = csv.reader(resp.iter_lines(), delimiter=',',
quotechar='"')
BATCH_SIZE = 5000
for i, line in enumerate(iterator):
if i % BATCH_SIZE == 0:
logger('Loading bulk result #{0}'.format(i))
yield line
def get_batch_result_iter(self, job_id, batch_id, parse_csv=False,
logger=None):
"""
        Return a line iterator over the contents of a batch result document. If
        parse_csv=True then the first line is parsed as the CSV header and the
        iterator returns dicts.
"""
status = self.batch_status(job_id, batch_id)
if status['state'] != 'Completed':
return None
elif logger:
if 'numberRecordsProcessed' in status:
logger("Bulk batch %d processed %s records" %
(batch_id, status['numberRecordsProcessed']))
if 'numberRecordsFailed' in status:
failed = int(status['numberRecordsFailed'])
if failed > 0:
logger("Bulk batch %d had %d failed records" %
(batch_id, failed))
uri = self.endpoint + \
"/services/async/29.0/job/%s/batch/%s/result" % (job_id, batch_id)
r = requests.get(uri, headers=self.headers(), stream=True)
if parse_csv:
return csv.DictReader(r.iter_lines(chunk_size=2048), delimiter=",",
quotechar='"')
else:
return r.iter_lines(chunk_size=2048)
def get_upload_results(self, job_id, batch_id,
callback=(lambda *args, **kwargs: None),
batch_size=0, logger=None):
job_id = job_id or self.lookup_job_id(batch_id)
if not self.is_batch_done(job_id, batch_id):
return False
http = Http()
uri = self.endpoint + \
"/services/async/29.0/job/%s/batch/%s/result" % (job_id, batch_id)
resp, content = http.request(uri, method="GET", headers=self.headers())
tf = TemporaryFile()
tf.write(content)
total_remaining = self.count_file_lines(tf)
if logger:
logger("Total records: %d" % total_remaining)
tf.seek(0)
records = []
line_number = 0
col_names = []
reader = csv.reader(tf, delimiter=",", quotechar='"')
for row in reader:
line_number += 1
records.append(UploadResult(*row))
if len(records) == 1:
col_names = records[0]
if batch_size > 0 and len(records) >= (batch_size + 1):
callback(records, total_remaining, line_number)
total_remaining -= (len(records) - 1)
records = [col_names]
callback(records, total_remaining, line_number)
tf.close()
return True
def parse_csv(self, tf, callback, batch_size, total_remaining):
records = []
line_number = 0
col_names = []
reader = csv.reader(tf, delimiter=",", quotechar='"')
for row in reader:
line_number += 1
records.append(row)
if len(records) == 1:
col_names = records[0]
if batch_size > 0 and len(records) >= (batch_size + 1):
callback(records, total_remaining, line_number)
total_remaining -= (len(records) - 1)
records = [col_names]
return records, total_remaining
    def count_file_lines(self, tf):
        tf.seek(0)
        buffer = bytearray(2048)
        lines = 0
        quotes = 0
        quoteChar = ord('"')
        newline = ord('\n')
        while True:
            n = tf.readinto(buffer)
            if n <= 0:
                break
            # only inspect the bytes actually read on this pass, not stale data
            # left over from a previous, longer read
            for c in buffer[:n]:
                if c == quoteChar:
                    quotes += 1
                elif c == newline:
                    if (quotes % 2) == 0:
                        lines += 1
                        quotes = 0
        return lines
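# Illustrative end-to-end sketch (hypothetical session/credentials and object
# names), showing how the methods above are typically chained for a bulk SOQL
# export:
#
#   bulk = SalesforceBulk(sessionId=session_id, host=instance_hostname)
#   job = bulk.create_query_job("Contact", contentType="CSV")
#   batch = bulk.query(job, "SELECT Id, LastName FROM Contact")
#   bulk.wait_for_batch(job, batch)
#   bulk.close_job(job)
#   for result_id in bulk.get_batch_result_ids(batch, job_id=job):
#       for row in bulk.get_batch_results(batch, result_id, job_id=job, parse_csv=True):
#           process(row)   # 'process' is a placeholder for user code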
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
"""
This module exposes dynamic sparse layers through simple classes with a consistent API.
There are also dense layers wrapped in the same interface in order to make it easier
to build models where the layers can be switched from sparse to dense easily.
The sparse layers store a host side representation of the sparsity pattern
and utilities for manipulating and syncing this representation to and from TensorFlow
variables (on the device). It is the user's responsibility to manage the synchronisation of the
host and TensorFlow (device side) representation. This is achieved by calling the appropriate
methods in these sparse layer classes and arranging for the TensorFlow ops constructed by the layer
to be called at the right time in the TF compute graph. Careful orchestration of these can be used
to implement dynamic sparse optimisers such as RigL:
https://github.com/graphcore/examples/tree/master/applications/tensorflow/dynamic_sparsity/mnist_rigl
"""
import time
import os
import numpy as np
import tensorflow.compat.v1 as tf
from ipu_sparse_ops import sparse
from logging import getLogger
from typing import (
Callable,
List,
Mapping,
NamedTuple,
Union
)
logger = getLogger(os.path.basename(__file__))
class SparseMatrix:
"""
Utility class to manage a sparse weight matrix in triplet (COO) format.
:param spec: The parameters that specify the multiplication of inputs with this weight matrix.
:param triplets: The sparse weight matrix specified in triplet/COO format.
E.g. as returned from 'ipu_sparse_ops.sparse.triplets_from_dense'.
:param matmul_options: Poplib's matmul options specific for these weights. See popsparse
docs in Poplibs.
"""
def __init__(self, spec: sparse.MatmulSpec, matmul_options: Mapping[str, str], triplets: sparse.Triplets, name: str = ""):
self.spec = spec
self.name = name
self.matmul_options = matmul_options.copy()
if 'partialsType' in matmul_options:
if matmul_options['partialsType'] == "half" and spec.block_size == 1:
raise ValueError("Half partials are not supported for 1x1 blocks.")
self.dense_grad_matmul_options = self.matmul_options.pop('dense_grad', {})
# Initialised by update_from_triplets/update_from_values
self.representation: sparse.SparseRepresentation = None
self.triplets: sparse.Triplets = None
self.update_from_triplets(triplets=triplets)
def update_from_triplets(self, triplets: sparse.Triplets):
self.triplets = triplets
self.representation = sparse.representation_from_triplets(
self.spec, *self.triplets, self.matmul_options, debug_name=self.name)
def update_from_values(self, values: List[float], metainfo: List[float] = None):
np.copyto(self.representation.nz_values, values)
if metainfo is not None:
# Reinterpret cast the metainfo as uint16 rather than float16.
metainfo_as_uint16 = np.frombuffer(metainfo.tobytes(),
dtype=np.uint16)
np.copyto(self.representation.metainfo_state, metainfo_as_uint16)
self.triplets = sparse.triplets_from_representation(
self.spec, self.representation, self.matmul_options, debug_name=self.name)
def extract_dense(self) -> np.ndarray:
return sparse.dense_from_triplets(self.spec, *self.triplets)
def extract_mask(self) -> np.ndarray:
return sparse.mask_from_triplets(self.spec, *self.triplets)
def get_metainfo(self) -> List[np.uint16]:
return self.representation.metainfo_state
def get_triplets(self) -> sparse.Triplets:
return self.triplets
def get_values(self) -> np.ndarray:
return self.representation.nz_values
def get_matmul_shape(self) -> List[int]:
return (self.spec.input_size, self.spec.output_size)
def get_data_type(self):
return self.spec.data_type
class SparseSlot(NamedTuple):
np_variable: np.ndarray
tf_variable: tf.Variable
placeholder: tf.Tensor = None
class SparseFcLayer:
def __init__(
self,
weights: SparseMatrix,
name: str,
use_bias: bool = False,
relu: bool = False,
disable_updating: bool = False,
pooling_type: str = "NONE"):
"""
Construct a new 'SparseFcLayer' object. It is recommended to create these layers
using the factory functions e.g.: 'from_random_generator' or 'from_triplets'.
This layer is for fully connected layers that are sparse and can have the sparsity pattern
updated during training.
:param weights: A SparseMatrix object describing the sparse weight matrix for this layer.
:param name: Name string for the layer (used as a variable scope).
        :param use_bias: Flag to say whether a bias should be added to the layer.
        :param relu: Flag to say whether a relu activation should be added to the layer.
        :param disable_updating: A flag to disable updates of the sparsity pattern for this
            layer. Non-zero values can still change.
"""
self.weights = weights
self.name = name
# Each layer needs slot values that share the same sparse representation
# as the non-zero value data (initialise slots to zero):
self.sparse_slots: Mapping[str, SparseSlot] = {} # Initialised by repeated calls to record_slot_var
logger.debug(f"Created sparse values with shape {self.weights.get_values().shape}")
logger.debug(f"Created sparse metainfo with shape {self.weights.get_metainfo().shape}")
self.use_bias = use_bias
self.relu = relu
self.bias_init = tf.zeros_initializer()
self.disable_updating = disable_updating
self.pooling_type = pooling_type
# Initialised by build
self.built = False
self.dense_dummy_var: tf.Variable = None
self.metainfo_var: tf.Variable = None
self.values_var: tf.Variable = None
@classmethod
def from_random_generator(
cls, hidden_size: int, input_shape: List[int],
density: float,
block_size: int,
values_initialiser_gen: Callable[..., np.ndarray],
indices_initialiser_gen: Callable,
matmul_options: Mapping[str, str],
name: str,
dtype: tf.DType = tf.float32,
use_bias: bool = False,
relu: bool = False,
disable_updating: bool = False,
pooling_type: str = 'NONE'):
"""
Utility factory function to build a 'SparseFcLayer' from a random sparsity
pattern and random non zero values.
"""
spec = sparse.matmul_spec_from_density(hidden_size, input_shape, density,
block_size, dtype, pooling_type)
ns = tf.get_default_graph().get_name_scope()
qualified_name = ns + "/" + name if ns else name
logger.debug(f"Creating random sparse FC {qualified_name} with spec: {spec}")
t0 = time.perf_counter()
triplets = sparse.random_triplets(spec, indices_initialiser_gen, values_initialiser_gen)
t1 = time.perf_counter()
logger.debug(f"Generated triplets in {t1-t0:0.03f} seconds")
weights = SparseMatrix(spec, matmul_options, triplets, name=qualified_name)
logger.debug(f"Triplet stats for {qualified_name}: {sparse.triplet_stats(*triplets)}")
return cls(weights, name, use_bias, relu, disable_updating, pooling_type=pooling_type)
@classmethod
def from_triplets(cls, hidden_size: int, input_shape: List[int],
row_indices: List[int], col_indices: List[int],
values: List[float], matmul_options: Mapping[str, str], name: str,
dtype: tf.DType = tf.float32,
use_bias: bool = False,
relu: bool = False,
disable_updating: bool = False,
pooling_type: str = 'NONE'):
"""
Utility factory function to build a 'SparseFcLayer' from a set of triplets (COO format).
E.g. as returned from 'ipu_sparse_ops.sparse.triplets_from_dense'
"""
block_size = sparse.block_size_from_list(values)
spec = sparse.matmul_spec_from_max(hidden_size, input_shape, len(values), block_size, dtype, pooling_type)
ns = tf.get_default_graph().get_name_scope()
qualified_name = ns + "/" + name if ns else name
logger.debug(f"Creating random sparse FC {qualified_name} with spec: {spec}")
triplets = sparse.Triplets(row_indices, col_indices, values)
weights = SparseMatrix(spec, matmul_options, triplets, name=qualified_name)
return cls(weights, name, use_bias, relu, disable_updating, pooling_type=pooling_type)
def get_nonzero_blocks_shape(self) -> List[int]:
return (self.weights.spec.block_size, self.weights.spec.block_size)
def get_shape(self) -> List[int]:
return self.weights.get_matmul_shape()
def get_data_type(self):
return self.weights.get_data_type()
def create_placeholders(self):
self.metainfo_ph, self.nz_ph = self.weights.representation.makePlaceHolders(self.weights.spec.data_type)
def feed_dict(self) -> Mapping[tf.Tensor, np.ndarray]:
"""
Return a feed that can be used to initialise and update the sparsity
pattern, values, and sparse slots.
"""
if self.disable_updating:
return {}
feeds = {
self.metainfo_ph: self.weights.representation.metaInfoFeed(),
self.nz_ph: self.weights.representation.valuesFeed()
}
feeds.update({
slot.placeholder: slot.np_variable
for slot in self.sparse_slots.values()
})
return feeds
def is_sparse(self) -> bool:
"""
Used to distinguish sparse and dense layer objects within this module.
"""
return True
def update_sparsity_op(self) -> tf.Operation:
"""
Return a TensorFlow op that can be used to update the sparsity pattern and values from a feed.
This op must be fed from the result of calling this layer's feed_dict() method.
If self.disable_updating is set then this will return a no-op.
"""
if self.disable_updating:
return tf.no_op()
elif not self.built:
raise AttributeError(
f"This sparse layer '{self.name}' is being asked to create an "
"update op to change the underlying sparsity pattern but has "
"not yet been built. Either call the layer before using this "
"method or build it explicitly by calling build().")
else:
return sparse.update_metainfo_op_with_vars(
self.metainfo_ph, self.nz_ph,
self.metainfo_var, self.values_var)
def update_slots_op(self) -> tf.Operation:
"""
Return a TensorFlow op that can be used to update the sparse slot (values only not pattern) from a feed.
This op must be fed from the result of calling this layer's feed_dict() method. If you have not built the
training op then this variable will not exist and an exception will be raised.
"""
if not self.sparse_slots:
logger.warning("update_slots_op called with no slots registered.")
return tf.no_op()
update_ops = [
slot.tf_variable.assign(slot.placeholder)
for slot in self.sparse_slots.values()
]
return tf.group(update_ops)
def get_values_var(self) -> tf.Variable:
"""
Return the TensorFlow variable that is holding the non zero values for this layer.
"""
if self.values_var is None:
raise AttributeError(
f"This sparse layer '{self.name}' is being asked to return the nonzero sparse "
"values variable but has not yet been built. Call this layer or explicitly build it.")
return self.values_var
def get_metainfo_var(self) -> tf.Variable:
"""
Return the TensorFlow variable that is holding the sparse metainfo values for this layer.
"""
if self.metainfo_var is None:
raise AttributeError(
f"This sparse layer '{self.name}' is being asked to return the sparse "
"metainfo variable but has not yet been built. Call this layer or explicitly build it.")
return self.metainfo_var
def get_dense_dummy_var(self) -> tf.Variable:
"""
Return the TensorFlow dummy variable that is used to reference the dense gradient for this layer.
"""
if self.dense_dummy_var is None:
raise AttributeError(
f"This sparse layer '{self.name}' is being asked to return the dense dummy "
"variable but has not yet been built. Call this layer or explicitly build it.")
return self.dense_dummy_var
def get_dense_grad_w(self, loss: tf.Tensor) -> tf.Tensor:
"""
Access the TensorFlow variable that is holding the dense gradient for this layer.
The dense gradient is conditionally computed so may be stale.
"""
dummy_var = self.get_dense_dummy_var()
logger.debug(f"Layer '{self.name}' grad dummy var: '{dummy_var}'")
dense_grad = tf.gradients(loss, dummy_var)[0]
if dense_grad is None:
raise ValueError(
f"This sparse layer '{self.name}' is being asked to return a dense gradient "
"but the loss op does not depend on it. Make sure the loss op is dependent "
"on the output of this layer.")
return dense_grad
def get_max_non_zeros(self) -> int:
"""
The maximum number of non-zeros allowed in this layer.
"""
return self.weights.spec.max_non_zero_blocks
def record_slot_var(self, slot_name: str, optimizer: tf.train.Optimizer):
"""
Used by the optimiser to record a slot with this layer.
Returns the Tensorflow slot_variable that was recorded.
"""
if self.values_var is None:
raise AttributeError(
f"This sparse layer '{self.name}' is being asked to record a "
"slot variable but it has not yet been called! "
"Make sure you call this layer upstream of the loss op or "
"remove it from the sparse_layers list.")
        slot_var = optimizer.get_slot(self.values_var, slot_name)
        if slot_var is None:
            raise ValueError(
                f"This sparse layer '{self.name}' is being asked to record "
                f"a slot variable for '{self.values_var.name}' but no such "
                "slot exists! Make sure the loss op is actually dependent "
                "on this layer or remove it from the sparse_layers list.")
        internal_name = slot_var.name
        logger.debug(f"Recording slot variable {slot_var.name} as {internal_name}")
if slot_var.shape != self.weights.get_values().shape:
raise ValueError(
f"Shape mismatch between variable {slot_var.shape} "
f"and slot {self.weights.get_values().shape}")
with tf.init_scope(): # escapes XLA, so placeholders can be created
with tf.device("cpu"):
placeholder = tf.placeholder(dtype=slot_var.dtype, shape=slot_var.shape)
self.sparse_slots[internal_name] = SparseSlot(
placeholder=placeholder,
tf_variable=slot_var,
np_variable=np.zeros_like(self.weights.get_values())
)
return slot_var
def update_triplets(self, new_triplets: sparse.Triplets):
"""
Update the host side representation of the sparsity pattern with a new set of triplets.
The on device representation will not be updated until you run the op returned from the
layer's 'update_sparsity_op()' method.
"""
self.weights.update_from_triplets(new_triplets)
def extract_dense(self) -> np.ndarray:
"""
Return a dense version of this layer's sparse weight matrix.
"""
return self.weights.extract_dense()
def extract_mask(self) -> np.ndarray:
"""
Return a dense mask representation of this layer's weight matrix,
"""
return self.weights.extract_mask()
def get_triplets(self) -> sparse.Triplets:
"""
Return a triplet version of this layer's sparse weight matrix.
"""
return self.weights.get_triplets()
def get_slot_var_dict(self):
"""
Return the dict holding the slots.
"""
return self.sparse_slots
def extract_slot_triplets(self) -> Mapping[str, sparse.Triplets]:
slot_representations = {
name: sparse.SparseRepresentation(self.weights.get_metainfo(), slot.np_variable)
for name, slot in self.get_slot_var_dict().items()
}
return {
name: sparse.triplets_from_representation(
self.weights.spec, representation, self.weights.matmul_options, debug_name=name + "(slot)")
for name, representation in slot_representations.items()
}
def update_slots_from_triplets(self, slot_triplets: Mapping[str, sparse.Triplets]):
"""
Update the host side representation of the sparse slot with a new set of triplets.
The row and column indices must be identical to those for the sparse weights.
The on device representation will not be updated until you run the op returned from the
layer's 'update_sparsity_op()' method.
"""
slot_representations = {
name: sparse.representation_from_triplets(
self.weights.spec,
*triplet,
self.weights.matmul_options, debug_name=name + "(slot)")
for name, triplet in slot_triplets.items()
}
for name, representation in slot_representations.items():
current_slot = self.sparse_slots[name]
if current_slot.np_variable.shape != representation.nz_values.shape:
raise RuntimeError(
"New slot shape is not compatible. "
f"Slot {name}: New: {representation.nz_values.shape} != old: {current_slot.shape}")
self.sparse_slots[name] = SparseSlot(
np_variable=representation.nz_values,
tf_variable=current_slot.tf_variable,
placeholder=current_slot.placeholder)
    def sync_internal_representation(self, values: Mapping[str, List[float]], slots: Mapping[str, List[float]],
                                     metainfo: Mapping[str, List[int]] = None):
"""
Used to store the values and slots returned from the device into the internal
SparseRepresentation object (self.weights.representation). This will typically be called after each
training step that you run on the device.
"""
values = [value for value in values.values()]
if len(values) > 1:
raise Exception("sync_internal_representation expects a single array of non-zero values")
values = values[0]
if metainfo is not None:
metainfo = [metainf for metainf in metainfo.values()]
if len(metainfo) > 1:
raise Exception("sync_internal_representation expects a single set of metainfo or None")
metainfo = metainfo[0]
self.weights.update_from_values(values=values, metainfo=metainfo)
if not self.disable_updating:
for name, values in slots.items():
np.copyto(self.sparse_slots[name].np_variable, values)
def build(self):
"""Generates the underlying variables once."""
if self.built:
return
self.values_var, self.metainfo_var, self.dense_dummy_var = \
sparse.get_or_create_matmul_vars(
self.weights.spec,
self.weights.representation,
self.weights.matmul_options,
constant_metainfo=self.disable_updating)
if self.use_bias:
self.bias = tf.get_variable(
"bias", shape=[self.weights.spec.output_size],
initializer=self.bias_init,
dtype=tf.dtypes.as_dtype(self.weights.get_values().dtype)
)
self.built = True
return
def __call__(
self,
inputs: tf.Tensor,
compute_dense_grad_w: Union[bool, tf.Tensor] = False) -> tf.Tensor:
"""
Build and return the op to execute the layer. It will
compute the matrix multiplication of input with the
        sparse weight matrix, then add bias and activation ops
if these are enabled for this layer.
"""
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE, use_resource=True):
self.build()
z = sparse.matmul_with_vars(
self.weights.spec,
inputs,
compute_dense_grad_w,
self.weights.matmul_options,
self.values_var,
self.metainfo_var,
self.dense_dummy_var,
self.weights.dense_grad_matmul_options)
logger.debug(f"Layer '{self.name}' non-zeros var: '{self.values_var.name}'")
if self.use_bias:
z = z + self.bias
# Reshape z to remove group size of 1 (no other group sizes are supported
# at the moment):
z = tf.reshape(z, [self.weights.spec.batch_size, self.weights.spec.output_size])
if self.relu:
return tf.nn.relu(z)
else:
return z
class DenseFcLayer:
"""
This is a dense FC layer with the same call, placeholder and feed interface as SparseFcLayer.
"""
def __init__(self, hidden_size, name, dtype=tf.float32, use_bias=False, relu=False):
"""
:param hidden_size: Output size for the hidden layer.
:param name: Name string for the layer. This is not optional as it
sets the variable namespace used to access internal variables.
        :param use_bias: Flag to say whether a bias should be added to the layer.
        :param relu: Flag to say whether a relu activation should be added to the layer.
"""
self.hidden_size = hidden_size
self.name = name
self.weight_init = tf.glorot_uniform_initializer()
self.bias_init = tf.zeros_initializer()
self.relu = relu
self.use_bias = use_bias
self.dtype = dtype
def __call__(self, input, ignored=None):
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE, use_resource=True):
self.w = tf.get_variable("weight", shape=[input.shape[-1], self.hidden_size],
dtype=self.dtype, initializer=self.weight_init)
if self.use_bias:
self.bias = tf.get_variable("bias", shape=[self.hidden_size],
dtype=self.dtype, initializer=self.bias_init)
if self.use_bias and self.relu:
return tf.nn.relu_layer(input, self.w, self.bias)
else:
z = tf.matmul(input, self.w)
if self.use_bias:
z = z + self.bias
if self.relu:
return tf.nn.relu(z)
else:
return z
def feed_dict(self):
return {}
    def create_placeholders(self):
        pass
def is_sparse(self):
return False
def get_data_type(self):
return self.dtype
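# Illustrative end-to-end usage sketch for the layers above (assumes an IPU-enabled
# TensorFlow 1.x session and the ipu_sparse_ops package; the feeds/ops referenced here
# come from the methods defined on SparseFcLayer, the argument values are invented):
#
#   fc = SparseFcLayer.from_triplets(hidden_size, [batch_size, input_size],
#                                    rows, cols, values, matmul_options={},
#                                    name="sparse_fc", relu=True)
#   fc.create_placeholders()
#   y = fc(x)                             # builds the variables and sparse matmul
#   update_op = fc.update_sparsity_op()   # fed from fc.feed_dict()
#   session.run(update_op, feed_dict=fc.feed_dict())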
|
"""add config table
Revision ID: 088598e662be
Revises: 6a55d8748a29
Create Date: 2017-07-17 18:21:14.762529
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '088598e662be'
down_revision = '6a55d8748a29'
branch_labels = None
depends_on = None
configs = sa.table('configs',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('round', sa.Integer(), server_default='0'),
sa.Column('submitions_over', sa.Boolean(), server_default='false'),
)
def upgrade():
op.create_table('configs',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('round', sa.Integer(), server_default='0'),
sa.Column('submitions_over', sa.Boolean(), server_default='false'),
)
op.bulk_insert(configs,
[
            {'id': 0, 'round': 0, 'submitions_over': False}
]
)
def downgrade():
op.drop_table('configs')
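# Typical invocation via the standard Alembic CLI (shown here only for context):
#   alembic upgrade head     # creates the 'configs' table and inserts the default row
#   alembic downgrade -1     # drops the table again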
|
#!/usr/bin/env python3
from SPARQLWrapper import SPARQLWrapper, JSON
import validators
import requests
import json
from pprint import pprint
#############
# Constants
__author__ = "Sylvain Boissel <sylvain@ashtree.eu>"
SPARQL_TIMEFORMAT = "%Y-%m-%dT%H:%M:%SZ"
WD_ROOT_URL = 'https://www.wikidata.org'
WD_BASE_URL = WD_ROOT_URL + '/wiki/'
WD_API_URL = WD_ROOT_URL + '/w/api.php'
###########
# Classes
class Item:
"""
A Wikidata item
"""
def __init__(self, identifier=""):
if is_item_uri(identifier):
self.entiry_uri = identifier
self.qid = self.uri_to_qid()
self.base_uri = self.get_base_uri()
elif is_qid(identifier):
self.qid = identifier.upper()
self.entiry_uri = self.get_entity_uri()
self.base_uri = self.get_base_uri()
else:
self.qid = ""
self.entiry_uri = ""
self.base_uri = ""
def __str__(self):
return self.qid
def __repr__(self):
return "wikidata.item({})".format(self.qid)
def uri_to_qid(self):
        return self.entiry_uri.split('/')[-1].upper()
def get_base_uri(self):
return "{}{}".format(WD_BASE_URL, self.qid)
def get_entity_uri(self):
return "http://www.wikidata.org/entity/{}".format(self.qid)
def getWikidataContent(self):
"""
Retrieves the item content from Wikidata
"""
##################
# API calls
#
def getentities(qids, props='labels|aliases|descriptions|claims'):
payload = {
'action': 'wbgetentities',
'format': 'json',
'ids': '|'.join(qids),
'props': props
}
response = requests.get(WD_API_URL, params=payload)
return response
##################
# SPARQL Queries
#
def query_raw(query):
"""
Queries WDQS and returns the result as a dictionary
"""
endpoint = "https://query.wikidata.org/bigdata/namespace/wdq/sparql"
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
return results
def query_items(query, main_key="item"):
"""
Makes a WDQS query and sorts the results.
The "main_key" variable is expected to be a Wikidata item.
"""
results = query_raw(query)
all_items = []
keys = results["head"]["vars"]
if main_key not in keys:
print("Error: Unkown key")
else:
keys.remove(main_key)
for r in results["results"]["bindings"]:
item = Item(r[main_key]['value'])
item.query_results = []
print(keys)
for k in keys:
if k in r:
value = r[k]['value']
if is_item_uri(value):
value = Item(value).uri_to_qid()
out = (k, value)
elif 'xml:lang' in r[k]:
out = (k, value, r[k]['xml:lang'])
else:
out = (k, value)
item.query_results.append(out)
all_items.append(item)
print(r[main_key]['value'])
print("item: {}".format(str(item)))
print("item: {}".format(item.query_results))
pprint(r)
print("==========")
return all_items
##################
# Custom validators
#
@validators.validator
def is_qid(value):
    if not isinstance(value, str):
print(type(value))
return False
elif value[0].lower() != 'q':
return False
else:
return value[1:].isdigit()
@validators.validator
def is_item_uri(value):
if validators.url(value):
parts = value.split('/')
if parts[2] == "www.wikidata.org" and is_qid(parts[-1]):
return True
return False
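# Illustrative checks (note: the validators decorator typically wraps a False return
# value in a falsy ValidationFailure object rather than returning False itself):
#   is_qid("Q42")                                      -> truthy
#   is_qid("42")                                       -> falsy
#   is_item_uri("https://www.wikidata.org/wiki/Q42")   -> truthy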
"""""""""""""""""
from pprint import pprint
myquery = #Cats
#Cats
SELECT DISTINCT ?item ?item_label ?birthDate ?birthPlace ?image ?coords
WHERE {
?item wdt:P31 wd:Q146 .
?item wdt:P569 ?birthDate .
?item wdt:P19 ?birthPlace .
OPTIONAL {
?item rdfs:label ?item_label .
FILTER(LANG(?item_label) IN ("fr", "en")) .
}
OPTIONAL { ?item wdt:P18 ?image . }
OPTIONAL { ?birthPlace wdt:P625 ?coords . }
}
results = query_raw(myquery)
pprint(results)
results = query_items(myquery)
pprint(results)
"""
|
from django.test import TestCase
from .models import Image, Comment, Profile
import datetime as dt
from django.contrib.auth.models import User
# Create your tests here.
|
from checkov.terraform.context_parsers.base_parser import BaseContextParser
class DataContextParser(BaseContextParser):
def __init__(self):
definition_type = "data"
super().__init__(definition_type=definition_type)
def get_entity_context_path(self, entity_block):
entity_type = next(iter(entity_block.keys()))
entity_name = next(iter(entity_block[entity_type]))
return [entity_type, entity_name]
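# Illustrative example (hypothetical entity block, mirroring the parsed Terraform
# structure this parser expects):
#   {"aws_ami": {"ubuntu": {"most_recent": [True]}}}
#   -> get_entity_context_path(...) returns ["aws_ami", "ubuntu"]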
parser = DataContextParser()
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Ubuntu System Tests
# Copyright (C) 2014-2016 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import configparser
import logging
import os
from telegram.common import options
logger = logging.getLogger(__name__)
config_stack = None
DEFAULT_CONF = 'ubuntu-system-tests.conf'
KEY_DEFAULT = 'default'
CONFIG_OPTIONS = [
options.Option(
'device_serial', help_string='Device serial'),
options.Option(
'device_password', help_string='Device password', mandatory=True),
options.Option(
'output_dir', mandatory=True,
        help_string='Directory to store the test results and artifacts.\n'
'WARNING: This directory will have all existing content '
'deleted!'),
options.Option(
'sim_0_pin', mandatory=True,
help_string='SIM pin for the card in the first slot'),
options.Option(
'sim_1_pin',
help_string='SIM pin for the card in the second slot'),
options.Option(
'wifi_ssid', mandatory=True,
help_string='Wi-Fi SSID used to connect during setup wizard'),
options.Option(
'wifi_password', mandatory=True,
help_string='Wi-Fi password used to connect during setup wizard'),
options.Option(
'device_security', default='Passcode',
help_string='The security method to select during setup wizard'),
options.Option(
'bluetooth_device_name',
help_string='Name of a nearby bluetooth pairing device'),
options.Option(
'device_phone_number',
help_string="The phone number of the SIM card inserted in the first "
"SIM slot. This must be formatted exactly as you would "
"dial it."),
options.Option(
'device_phone_number2',
help_string="The phone number of the SIM card inserted in the second "
"SIM slot. If the device has only one SIM slot, you can "
"leave this blank. This must be formatted exactly as you "
"would dial it."),
options.Option(
'telephony_service_number1',
help_string="The phone number to call and message from "
"in the telephony service. In Twilio go to "
"https://www.twilio.com/user/account/"
"phone-numbers/incoming and click on the number "
"you want to use, then copy the 'Phone Number' "
"field exactly."),
options.Option(
'telephony_service_number2',
help_string="The second phone number to call and message from "
"in the telephony service. In Twilio go to "
"https://www.twilio.com/user/account/"
"phone-numbers/incoming and click on the number "
"you want to use, then copy the 'Phone Number' "
"field exactly."),
options.Option(
# this is a private key, that will not be exported to
# autopilot directly
'_twilio_account_sid', mandatory=True,
help_string='Account SID for Twilio. Please log into '
'https://www.twilio.com/login '
                    'and select \'Dashboard -> Show API Credentials\'. '
'Then copy and paste the Account SID value here.'),
options.Option(
# this is a private key, that will not be exported to
# autopilot directly
'_twilio_auth_token', mandatory=True,
help_string='Auth token for Twilio account. Please log into '
'https://www.twilio.com/login '
                    'and select \'Dashboard -> Show API Credentials\'. '
'Then copy and paste the Auth token value here.'),
options.Option(
# this is a private key, that will not be exported to
# autopilot directly
'max_unity8_retry_delay', default=30000,
        help_string='The upper bound on how long we should keep retrying until '
                    'unity8 has started'),
options.Option(
'app_startup_cold_runs', default=0,
help_string='How many iterations to run for the cold start app '
'performance tests?'),
options.Option(
'app_startup_hot_runs', default=0,
help_string='How many iterations to run for the hot start app '
'performance tests?'),
options.Option(
'email_outlook', default='platform_qa_test@outlook.com',
help_string='@outlook.com email address to use for email tests'),
options.Option(
'password_outlook', mandatory=True,
help_string='@outlook.com password to use for email tests'),
options.Option(
'email_yahoo', default='platform_qa_test_account@yahoo.com',
help_string='@yahoo.com email address to use for email tests'),
options.Option(
'password_yahoo', mandatory=True,
help_string='@yahoo.com password to use for email tests'),
options.Option(
'username_imap', default='platform-qa-test-account',
help_string='IMAP username to use for email tests'),
options.Option(
'password_imap', mandatory=True,
help_string='IMAP password to use for email tests'),
options.Option(
'pictures_scalability_runs',
help_string='The different loads used for scalability pictures tests '
'separated by comma'),
options.Option(
'_practitest_project_id',
help_string='Practitest project ID to use for retrieving test lists.'),
options.Option(
'_practitest_api_token',
help_string='Practitest api token used for authentication.'),
options.Option(
'country_sim', default='United States',
help_string='The name of the country of the SIM card to use for '
'telegram login'),
]
def get_user_config_dir():
"""Return the path to the user configuration directory."""
conf_dir = os.environ.get(
'XDG_CONFIG_HOME', os.path.expanduser('~/.config'))
return conf_dir
def get_device_config_file_path():
"""Return the path of the config file copied to the device."""
return '/tmp/ubuntu-system-tests.conf'
def get_device_config_section():
"""Return the name of config section to use."""
return os.environ.get('CONFIG_SECTION')
def get_config_stack_from_file(file_path, config_section=None):
"""Return config stack from specified file path."""
config = UbuntuSystemTestsConfig(file_path, section=config_section)
config.get_config_from_file()
return config
def get_device_config_stack():
"""Return the config stack variable"""
global config_stack
if not config_stack:
config_stack = get_config_stack_from_file(
get_device_config_file_path(), get_device_config_section())
return config_stack
def get_test_config_values(config_stack):
""" Return a string containing the comma separated key=value options.
    The list includes the stored config values as well as any temporary ones.
"""
return config_stack.get_autopilot_config_string()
class UbuntuSystemTestsConfig():
"""Class to get and save test configuration data."""
def __init__(self, file_name=None, section=None, options=None):
"""
Construct the config and parser object.
:param file_name: Name of config file to load. Default file is used if
value is not specified.
:param section: Name of section to use in config file. Default section
is used if not specified.
:param options: List of options that should be specified in config.
Default list of options is used if not specified.
"""
user_config_dir = get_user_config_dir()
if not os.path.exists(user_config_dir):
os.mkdir(user_config_dir)
self.file_path = os.path.join(
user_config_dir, file_name or DEFAULT_CONF)
self.section = section or KEY_DEFAULT
self.parser = configparser.ConfigParser(
allow_no_value=True, empty_lines_in_values=False,
default_section=KEY_DEFAULT)
self.config = {}
self.options = options or CONFIG_OPTIONS
def set(self, option_name, value):
"""Set a config value.
:param option_name: Name of config option.
:param value: Value to set.
"""
self.config[option_name] = value
self.save()
def get(self, option_name, default=''):
"""Get a config value.
:param option_name: Name of config option to get.
:param default: Value to return as default if item doesn't exist.
        :return: The config option value if it exists, otherwise the supplied
        default value."""
return self.config.get(option_name, default)
def save(self):
"""Save the config to file."""
        # Open with 'w' so the file is truncated: 'r+' would leave stale trailing
        # content behind when the new config is shorter than the old one.
        with open(self.file_path, 'w') as f:
self.parser.write(f)
def get_config_from_string(self, string):
"""Load config from a string. This is used for selftests only.
:param string: Config string to load from.
"""
self.parser.read_string(string)
self._save_config_for_section()
def get_config_from_file(self):
"""Load config from file."""
self.parser.read(self.file_path)
self._save_config_for_section()
def _save_config_for_section(self):
"""Save the current config for the specified section only."""
self.config = self.parser[self.section]
def get_missing_options(self, silent=False):
"""Check through each required option and get a value for each one
that is not present.
:param silent: Whether to query stdin for an input value or take a
default value where it exists.
"""
save_required = False
for option in self.options:
if option.name in self.config:
# name exists, check if its mandatory and if a value exists
if option.mandatory and not self.config.get(option.name):
# mandatory and no value exists, so get one
self._get_option_value(option, silent)
save_required = True
else:
# option name not in config, get a value
self._get_option_value(option, silent)
save_required = True
self._save_if_required(save_required)
def _save_if_required(self, save_required):
"""Save config if it is required.
:param save_required: Whether to save or not.
"""
if save_required:
self.save()
def _get_option_value(self, option, silent):
"""Request a value for specified option.
:param option: Required option.
:param silent: Whether to query stdin or take default value.
"""
if silent:
default = option.default
if default:
self.config[option.name] = option.default
logger.warning(
'In silent mode no value specified for option: "{o}". '
'Using default value "{v}".'.format(
o=option.name, v=default))
else:
logger.warning(
'In silent mode no default value specified for option: '
'"{o}". Continuing with value unset.'.format(
o=option.name))
else:
self._get_option_value_from_stdin(option)
def _get_option_value_from_stdin(self, option):
"""Request option value from stdin.
:param option: Required option.
"""
help_msg = self._get_option_help_text(option)
print(help_msg, end='')
value = self._get_value_from_user_or_default(option)
self.config[option.name] = value
def _get_option_help_text(self, option):
"""Return help message for specified option.
:param option: Required option.
"""
help_msg = option.help_string
default = option.default
if default:
help_msg += ' (leave blank to use default value: {})'.format(
default)
elif not option.mandatory:
help_msg += ' (leave blank if not required)'
help_msg += ': '
return help_msg
def _get_value_from_user_or_default(self, option):
"""Return value from stdin or default value if no stdin value."""
return input() or option.default
def get_autopilot_config_string(self):
"""Return the configuration in a string usable by Autopilot.
        That is, a list of key=value pairs separated by comma.
Any keys starting with _ will be ignored.
"""
config = ''
for key, value in self.config.items():
if not key.startswith('_'):
config += '{k}={v},'.format(k=key, v=value)
# remove trailing ',' if it exists
if config.endswith(','):
config = config[:-1]
return config
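# Illustrative example of the Autopilot string built above (values are invented):
# a config of {'wifi_ssid': 'lab', 'device_serial': '1234', '_twilio_auth_token': 's3cret'}
# serialises to "wifi_ssid=lab,device_serial=1234" -- underscore-prefixed (private)
# keys are skipped and the trailing comma is stripped.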
|
__author__ = 'alanseciwa'
'''
This code removes b' prefixes from utf-8 formatted tweets so that the hex values of any
emojis used in the tweets are preserved. After stripping away the unwanted characters, the
result is written to a new csv file.
'''
import sys
import csv
def clean(t):
# set csv file to variable
csv_file = open('/Users/alanseciwa/Desktop/cleaned.csv', 'w')
writer = csv.writer(csv_file, quotechar='', quoting=csv.QUOTE_NONE)
# Open csv file
with open(t, 'r') as tweet:
for i in tweet:
# Use .replace func to filter out all b' chars
j = i.replace("b'", "").strip() # need strip() func at end to escape char correctly
# write j to csv writer
writer.writerow([j])
print(j)
csv_file.close()
def main():
# Tweet location
tweets = "/Users/alanseciwa/Desktop/clean_data-TWEETONLY-2.csv"
clean(tweets)
if __name__ == '__main__':
main()
sys.exit()
|
# Reverse the order of words
def rev_words(sentence):
words = sentence.split()
return ' '.join(words[::-1])
sentence = 'perfect makes practice'
res = rev_words(sentence)
print(len(res))
for c in sentence:
print(f"'{c}', ", end='')
|
import pytest
class TestDocTrackingPandasDataframe:
@pytest.mark.skip()
def test_doc(self):
# Python 3.6.8
from dbnd import log_metric, log_dataframe
import pandas as pd
transactions_df = pd.read_csv("data/example.csv")
# log dataframe
log_dataframe(
"Full table", transactions_df, with_histograms=False, with_preview=False
)
# log_metrics
minimum_amount = 5000
num_large_transactions = transactions_df[
transactions_df["transaction_amt"] >= minimum_amount
].shape[0]
avg_large_transaction = transactions_df[
transactions_df["transaction_amt"] >= minimum_amount
].mean()["transaction_amt"]
large_transactions_df = transactions_df[
transactions_df["transaction_amt"] >= minimum_amount
]
log_metric("Number of large transactions(>= 5000)", num_large_transactions)
log_metric("mean large transactions", avg_large_transaction)
log_dataframe(
"Large transactions (>= 5000)", large_transactions_df, with_preview=False
)
|
from django.shortcuts import render
from .datatables import BookDataTables
from sspdatatables.utils.decorator import ensure_ajax, dt_json_response
from collections import OrderedDict
def overview(request):
book_datatables = BookDataTables()
context = book_datatables.get_table_frame()
context.update({
"title": "Books",
})
return render(request, 'overview.html', context)
@ensure_ajax(['POST'])
def get_book_api(request):
pre_search_condition = OrderedDict([('select_related', 'author')])
book_datatables = BookDataTables()
result = book_datatables.process(pre_search_condition=pre_search_condition,
**request.POST)
return dt_json_response(result)
|
import pandas as pd
from time import sleep
from airbnb_pricer.utils.async_run import async_run
from airbnb_pricer.airbnb.skipped_columns import skipped_columns
def load_airbnb_links(cur_links_table):
cur_urls = cur_links_table.url.tolist()
cur_tables = async_run(_load_airbnb_link, cur_urls)
cur_links_table["table"] = cur_tables
return cur_links_table
def _load_airbnb_link(cur_url):
tmp_data = pd.read_csv(cur_url, low_memory=False)
tmp_data = tmp_data.drop(
columns=[
tmp_column for tmp_column in tmp_data.columns if tmp_column in skipped_columns
]
)
sleep(1/3)
return tmp_data
|
"""
Summary: Contains classes for the database.
Classes: BaseModel with subclasses Program
"""
from peewee import *
import json
with open("config.json", "r") as f:
config = json.load(f)
print(config)
db = SqliteDatabase(config["dbname"])
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
id = PrimaryKeyField()
username = TextField(unique=True)
class Program(BaseModel):
id = PrimaryKeyField()
length = IntegerField(default=0)
date = DateField(default="2011-05-11")
class Set(BaseModel):
id = PrimaryKeyField()
class Leg(BaseModel):
id = PrimaryKeyField()
if __name__ == "__main__":
try:
db.create_tables([Program, Set, Leg])
    except Exception:
        print("Tables for this database already exist!")
|
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error
from estimate_start_times.config import DEFAULT_CSV_IDS, EventLogIDs
logs = [
"insurance",
"BPI_Challenge_2012_W_Two_TS",
"BPI_Challenge_2017_W_Two_TS",
"Application_to_Approval_Government_Agency",
"callcentre",
"ConsultaDataMining201618",
"poc_processmining",
"Production",
"confidential",
"Loan_Application",
"cvs_pharmacy",
"Procure_to_Pay",
]
raw_path = "../event_logs/{}.csv.gz"
def read_and_preprocess_log(event_log_path: str, log_ids: EventLogIDs) -> pd.DataFrame:
# Read from CSV
event_log = pd.read_csv(event_log_path)
    # Transform both the start and end columns to Timestamp
event_log[log_ids.start_time] = pd.to_datetime(event_log[log_ids.start_time], utc=True)
event_log[log_ids.end_time] = pd.to_datetime(event_log[log_ids.end_time], utc=True)
if log_ids.enabled_time in event_log:
event_log[log_ids.enabled_time] = pd.to_datetime(event_log[log_ids.enabled_time], utc=True)
if log_ids.available_time in event_log:
event_log[log_ids.available_time] = pd.to_datetime(event_log[log_ids.available_time], utc=True)
    # Sort by end timestamp, then by activity name, case id, and resource
event_log = event_log.sort_values(
[log_ids.end_time, log_ids.activity, log_ids.case, log_ids.resource]
)
# Reset the index
event_log.reset_index(drop=True, inplace=True)
return event_log
def measure_estimation():
techniques = ["heur_median", "heur_median_2", "heur_median_5",
"heur_mode", "heur_mode_2", "heur_mode_5",
"df_median", "df_median_2", "df_median_5",
"df_mode", "df_mode_2", "df_mode_5",
"only_resource_median", "only_resource_median_2", "only_resource_median_5",
"only_resource_mode", "only_resource_mode_2", "only_resource_mode_5"]
print("log_technique,"
"smape_proc_times,"
"mape_proc_times,"
"mae_proc_times (s),"
"total_activity_instances,"
"num_selected_enabled_time,"
"num_selected_available_time,"
"num_re_estimated,"
"num_estimated_after_real,"
"num_estimated_before_real,"
"num_exact_estimation")
for log_name in logs:
raw_event_log = read_and_preprocess_log(raw_path.format(log_name), DEFAULT_CSV_IDS)
for technique in techniques:
calculate_estimation_stats(log_name, technique, raw_event_log, DEFAULT_CSV_IDS)
def calculate_estimation_stats(log_name: str, method: str, raw_event_log: pd.DataFrame, log_ids: EventLogIDs):
# Measure stats for estimated log
estimated_event_log = read_and_preprocess_log(raw_path.format(method + "/" + log_name + "_estimated"), log_ids)
# Check sorting similarity
if not raw_event_log[log_ids.end_time].equals(estimated_event_log[log_ids.end_time]):
print("Different 'end_timestamp' order!!")
if not raw_event_log[log_ids.activity].equals(estimated_event_log[log_ids.activity]):
print("Different 'activity' order!!")
if not raw_event_log[log_ids.case].equals(estimated_event_log[log_ids.case]):
print("Different 'case' order!!")
# Print stats
raw_processing_times = (
raw_event_log[log_ids.end_time] - raw_event_log[log_ids.start_time]
).astype(np.int64) / 1000000000
estimated_processing_times = (
estimated_event_log[log_ids.end_time] - estimated_event_log[log_ids.start_time]
).astype(np.int64) / 1000000000
raw_minus_estimated = raw_processing_times - estimated_processing_times
print("{}_{},{},{},{},{},{},{},{},{},{},{}".format(
log_name,
method,
symmetric_mean_absolute_percentage_error(raw_processing_times, estimated_processing_times),
mean_absolute_percentage_error(raw_processing_times, estimated_processing_times),
mean_absolute_error(raw_processing_times, estimated_processing_times),
len(estimated_event_log),
((estimated_event_log[log_ids.start_time] == estimated_event_log[log_ids.enabled_time]) &
(estimated_event_log[log_ids.start_time] != estimated_event_log[log_ids.available_time])).sum(),
((estimated_event_log[log_ids.start_time] == estimated_event_log[log_ids.available_time]) &
(estimated_event_log[log_ids.start_time] != estimated_event_log[log_ids.enabled_time])).sum(),
((estimated_event_log[log_ids.start_time] != estimated_event_log[log_ids.available_time]) &
(estimated_event_log[log_ids.start_time] != estimated_event_log[log_ids.enabled_time])).sum(),
(raw_minus_estimated > 0).sum(),
(raw_minus_estimated < 0).sum(),
(raw_minus_estimated == 0).sum()
))
def symmetric_mean_absolute_percentage_error(actual, forecast) -> float:
return np.sum(2 * np.abs(forecast - actual) / (np.abs(actual) + np.abs(forecast))) / len(actual)
def mean_absolute_percentage_error(actual, forecast) -> float:
return np.sum(np.abs(forecast - actual) / np.abs(actual)) / len(actual)
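# Worked example with illustrative numbers: for actual processing times [10, 20] and
# estimates [12, 18] (seconds), MAE = (2 + 2) / 2 = 2 s,
# MAPE = (2/10 + 2/20) / 2 = 0.15 and
# SMAPE = (2*2/(10+12) + 2*2/(20+18)) / 2 ~= 0.144.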
if __name__ == '__main__':
measure_estimation()
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
carry = 0
start, prev = l1, None
# both lists haven't been exhausted
while l1 and l2:
carry, digit = divmod(l1.val + l2.val + carry, 10)
l1.val = digit
prev = l1
l1, l2 = l1.next, l2.next
# continue with the rest, keep reference to previous valid
# node in order to add carry if it exists.
final = None
if l1:
while l1:
carry, digit = divmod(l1.val + carry, 10)
l1.val = digit
final = l1
l1 = l1.next
if carry:
final.next = ListNode(1)
elif l2:
prev.next = l2
while l2:
carry, digit = divmod(l2.val + carry, 10)
l2.val = digit
final = l2
l2 = l2.next
if carry:
final.next = ListNode(1)
else:
if carry:
prev.next = ListNode(1)
return start
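# Minimal usage sketch (illustrative, not part of the original solution): build two
# numbers as reverse-digit linked lists and add them, e.g. 342 + 465 = 807.
if __name__ == "__main__":
    def build_list(digits):
        # digits are least-significant first, e.g. [2, 4, 3] represents 342
        head = ListNode(digits[0])
        node = head
        for d in digits[1:]:
            node.next = ListNode(d)
            node = node.next
        return head

    result = Solution().addTwoNumbers(build_list([2, 4, 3]), build_list([5, 6, 4]))
    out = []
    while result:
        out.append(str(result.val))
        result = result.next
    print(" -> ".join(out))  # expected: 7 -> 0 -> 8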
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 09:33:07 2020
@author: HI
"""
import xlwings as xw
import math
from scipy.stats import norm
@xw.func
def GBlackScholesNGreeek(S,X,T,r,q,sigma,corp):
    data=[S,X,T,r,q,sigma,corp]
d1=(math.log(data[0]/data[1])+(data[4]+data[5]**2/2)*data[2]/365)/((data[5])*math.sqrt(data[2]/365))
d2=d1-data[5]*math.sqrt(data[2]/365)
c=data[0]*math.exp((data[4]-data[3])*data[2]/365)*norm.cdf(d1)-data[1]*math.exp(-data[3]*data[2]/365)*norm.cdf(d2)
p=data[1]*math.exp(-data[3]*data[2]/365)*norm.cdf(-d2)-data[0]*math.exp((data[4]-data[3])*data[2]/365)*norm.cdf(-d1)
if data[6]=='c':
return c
else:
return p
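# Quick sanity check when run as a script (illustrative parameters only; note that T
# is expressed in days because the function divides it by 365, and q acts as the
# cost-of-carry term in the formulas above). The @xw.func decorator leaves the
# function callable from plain Python.
if __name__ == "__main__":
    S, X, T, r, q, sigma = 100.0, 100.0, 180, 0.05, 0.05, 0.2
    print("call:", GBlackScholesNGreeek(S, X, T, r, q, sigma, 'c'))
    print("put :", GBlackScholesNGreeek(S, X, T, r, q, sigma, 'p'))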
|
from .migration_target import MigrationTarget
from .mount_point import MountPoint
from .workload import Workload
from .serializable import Serializable
from enum import Enum
from typing import List
from time import sleep
X = 5
class MigrationState(Enum):
NOT_STARTED = 1
RUNNING = 2
ERROR = 3
SUCCESS = 4
class Migration(Serializable):
    def __init__(self, mount_points: List[MountPoint], source: Workload, migration_target: MigrationTarget):
self.mount_points = mount_points
self.source = source
self.migration_target = migration_target
self.migration_state = MigrationState.NOT_STARTED
def run(self):
self.migration_state = MigrationState.RUNNING
selected_mount_points_names = [str(mp.mount_point_name) for mp in self.mount_points]
if "C:\\" not in selected_mount_points_names:
raise Exception("Migration is now allowed when C:\\ is not selected")
source_mount_point_names = [str(mp.mount_point_name) for mp in self.source.storage]
storage = []
for mount_point in self.mount_points:
if mount_point.mount_point_name in source_mount_point_names:
storage.append(mount_point)
if len(storage) == 0:
raise Exception("There are no selected mount points in source storage list")
ip = self.source.ip
credentials = self.source.credentials
self.migration_target.target_vm = Workload(ip, credentials, storage)
sleep(X * 60)
self.migration_state = MigrationState.SUCCESS
def repr_json(self):
return dict(mount_points=self.mount_points, source=self.source, migration_target=self.migration_target,
migration_state=self.migration_state.name)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('vendors', '0020_auto_20140930_1435'),
]
operations = [
migrations.RunSQL(
"""ALTER TABLE "vendors_vendor" ALTER COLUMN "annual_revenue" DROP DEFAULT; """
),
migrations.RunSQL("""ALTER TABLE "vendors_vendor" ALTER COLUMN "annual_revenue" TYPE BIGINT USING annual_revenue::bigint; """
)
]
|
#!/usr/bin/env python3
import isce
import numpy as np
import shelve
import os
import logging
import argparse
from isceobj.Constants import SPEED_OF_LIGHT
import datetime
from isceobj.Util.Poly2D import Poly2D
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description='Plot corner reflectors in SLC')
parser.add_argument('-i', '--input', dest='indir', type=str, required=True,
help='Input SLC directory')
parser.add_argument('-c', '--crs', dest='posfile', type=str, required=True,
help='Input text file with CR positions')
parser.add_argument('-p', '--plot', dest='plot', action='store_true', default=False,
help='Plot')
return parser.parse_args()
def makePlot(filename, pos):
'''
Make plots.
'''
import matplotlib.pyplot as plt
from imageMath import IML
win = 8
mm = IML.mmapFromISCE(filename, logging)
data = mm.bands[0]
plt.figure('CR analysis')
for index, (num, line, pixel) in enumerate(pos):
print(line, pixel)
        xx = int(pixel)
        yy = int(line)
        box = 10 * np.log10(np.abs(data[yy-win:yy+win, xx-win:xx+win]))
plt.subplot(7,3,index+1)
plt.imshow(box, cmap=plt.cm.gray)
plt.colorbar()
plt.scatter(pixel-xx+win, line-yy+win, marker='+', c='b')
plt.show()
def makeOnePlot(filename, pos):
'''
Make plots.
'''
import matplotlib.pyplot as plt
from imageMath import IML
win = 100
mm = IML.mmapFromISCE(filename, logging)
data = mm.bands[0]
nl, npix = data.shape
pos = np.array(pos)
miny = np.clip(np.min(pos[:,1])-win, 0 , nl-1)
maxy = np.clip(np.max(pos[:,1])+win, 0 , nl-1)
minx = np.clip(np.min(pos[:,2])-win, 0, npix-1)
maxx = np.clip(np.max(pos[:,2])+win, 0, npix-1)
box = np.power(np.abs(data[int(miny):int(maxy), int(minx):int(maxx)]), 0.4)
plt.figure('CR analysis')
plt.imshow(box, cmap=plt.cm.gray)
plt.colorbar()
# plt.scatter(pos[:,2]-minx, pos[:,1]-miny, marker='+', c='b', s=200)
plt.scatter(pos[:,2]-minx, pos[:,1]-miny, marker='o',
facecolors='none', edgecolors='b', s=100)
plt.title(os.path.basename(os.path.dirname(filename)))
plt.show()
def getAzRg(frame,llh):
'''
Return line pixel position.
'''
nl = frame.getImage().getLength() - 1
    npix = frame.getImage().getWidth() - 1
coeffs = frame._dopplerVsPixel
if coeffs is None:
coeffs = [0.]
pol = Poly2D()
pol._meanRange = frame.startingRange
pol._normRange = frame.instrument.rangePixelSize
pol.initPoly(azimuthOrder=0, rangeOrder=len(coeffs)-1, coeffs=[coeffs])
taz, rgm = frame.orbit.geo2rdr(list(llh)[1:], side=frame.instrument.platform.pointingDirection,
doppler=pol, wvl=frame.instrument.getRadarWavelength())
line = (taz - frame.sensingStart).total_seconds() * frame.PRF
pixel = (rgm - frame.startingRange) / frame.getInstrument().getRangePixelSize()
if (line < 0) or (line > nl):
return None
    if (pixel < 0) or (pixel > npix):
        return None
return (line, pixel)
if __name__ == '__main__':
'''
Main driver.
'''
#Command line parse
inps = cmdLineParse()
#Load shelve
with shelve.open(os.path.join(inps.indir, 'data'), 'r') as db:
frame = db['frame']
####Adjust azimuth for bias
bias = 0.5 * (frame.getStartingRange() + frame.getFarRange()) / SPEED_OF_LIGHT
print('One way bias: ', bias)
delta = datetime.timedelta(seconds = bias) #-0.009)
frame.sensingStart = frame.sensingStart - delta
####Adjust range for bias
# frame.startingRange = frame.startingRange + 100.0
###Load CRS positions
llhs = np.loadtxt(inps.posfile, delimiter=',')
crs = []
for ind, llh in enumerate(llhs):
pos = getAzRg(frame, llh)
if pos is not None:
crs.append([ind, pos[0], pos[1]])
print('Number of CRS in the scene: {0}'.format(len(crs)))
if inps.plot and len(crs) > 0:
makeOnePlot(frame.image.filename, crs)
if False:
'''
Work on the grid file.
'''
import matplotlib.pyplot as plt
fname = '154283811/154283811_RH_L1_SlantRange_grid.txt'
grid = np.loadtxt(fname)
ht = np.linspace(600.0, 900.0, num=150)
lonref = grid[0][1]
latref = grid[0][0]
rngref = grid[0][2]
r0 = frame.startingRange
t0 = frame.sensingStart
orb = frame.orbit
tdiff = []
rdiff = []
for h in ht:
tt,rr = orb.geo2rdr([latref, lonref, h])
tdiff.append( (tt-t0).total_seconds())
rdiff.append( rr - r0)
plt.figure()
plt.subplot(2,1,1)
plt.plot(ht, tdiff)
plt.ylabel('Az diff')
plt.subplot(2,1,2)
plt.plot(ht, rdiff)
        plt.ylabel('Rg diff')
plt.show()
|
#! python3
# messenger.py - Weather API that sends me text messages
from twilio.rest import Client
import requests, bs4
# rest API from twilio
# Your Account SID from twilio.com/console
account_sid = "AC7226356ef66d2de53d535237cba9e2dc"
# Your Auth Token from twilio.com/console
auth_token = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
myNumber = '+1469XXXXXXX'
client = Client(account_sid, auth_token)
# create a message to myself
def textmyself():
res = requests.get('https://weather.com/weather/today/l/c1535f42ba5fc52449e416514aca69b3b2a16aae4b89abd6c92e662f7a89c02f').text
soup = bs4.BeautifulSoup(res, features='html.parser')
state = soup.find('header', class_='loc-container').text
fullforcast = soup.find('div', class_='today_nowcard-section today_nowcard-condition').text
message = client.messages.create(
to="+1469XXXXXXX",
from_="+17033489722",
body= fullforcast)
return message.sid
|
import os
os.fsformat('/flash')
|
from malcolm.core import Part, method_takes, REQUIRED
from malcolm.modules.builtin.vmetas import StringMeta
@method_takes(
"name", StringMeta("Name of the Part within the controller"), REQUIRED)
class DummyPart(Part):
"""Defines a dummy part"""
def __init__(self, params):
super(DummyPart, self).__init__(params.name)
|
"""3D mesh manipulation utilities."""
from builtins import str
from collections import OrderedDict
import numpy as np
from transforms3d import quaternions
from transforms3d.quaternions import axangle2quat, mat2quat
import os.path as osp
import pyassimp
import pprint
import hashlib
import mmcv
cur_dir = osp.dirname(osp.abspath(__file__))
from lib.utils import logger
from lib.pysixd import inout
def get_vertices_extent(vertices):
xmin, xmax = np.amin(vertices[:, 0]), np.amax(vertices[:, 0])
ymin, ymax = np.amin(vertices[:, 1]), np.amax(vertices[:, 1])
zmin, zmax = np.amin(vertices[:, 2]), np.amax(vertices[:, 2])
xsize = xmax - xmin
ysize = ymax - ymin
zsize = zmax - zmin
return xsize, ysize, zsize
def frustum(left, right, bottom, top, znear, zfar):
"""Create view frustum matrix."""
assert right != left
assert bottom != top
assert znear != zfar
M = np.zeros((4, 4), dtype=np.float32)
M[0, 0] = +2.0 * znear / (right - left)
M[2, 0] = (right + left) / (right - left)
M[1, 1] = +2.0 * znear / (top - bottom)
M[3, 1] = (top + bottom) / (top - bottom)
M[2, 2] = -(zfar + znear) / (zfar - znear)
M[3, 2] = -2.0 * znear * zfar / (zfar - znear)
M[2, 3] = -1.0
return M
def perspective(fovy, aspect, znear, zfar):
"""Create perspective projection matrix.
fovy: deg
"""
assert znear != zfar
fovy_rad = fovy / 180.0 * np.pi
h = np.tan(fovy_rad / 2.0) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar)
def anorm(x, axis=None, keepdims=False):
"""Compute L2 norms alogn specified axes."""
return np.sqrt((x * x).sum(axis=axis, keepdims=keepdims))
def normalize(v, axis=None, eps=1e-10):
"""L2 Normalize along specified axes."""
    return v / np.maximum(anorm(v, axis=axis, keepdims=True), eps)
def lookat(eye, target=[0, 0, 0], up=[0, 1, 0]):
"""Generate LookAt modelview matrix."""
eye = np.float32(eye)
forward = normalize(target - eye)
side = normalize(np.cross(forward, up))
up = np.cross(side, forward)
M = np.eye(4, dtype=np.float32)
R = M[:3, :3]
R[:] = [side, up, -forward]
M[:3, 3] = -(R.dot(eye))
return M
def sample_view(min_dist, max_dist=None):
"""Sample random camera position.
Sample origin directed camera position in given distance range from
the origin. ModelView matrix is returned.
"""
if max_dist is None:
max_dist = min_dist
dist = np.random.uniform(min_dist, max_dist)
eye = np.random.normal(size=3)
eye = normalize(eye) * dist
return lookat(eye)
def homotrans(M, p):
p = np.asarray(p)
if p.shape[-1] == M.shape[1] - 1:
p = np.append(p, np.ones_like(p[..., :1]), -1)
p = np.dot(p, M.T)
return p[..., :-1] / p[..., -1:]
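# Example: for a 4x4 modelview matrix M (e.g. from lookat()) and an (n, 3) array of
# points p, homotrans(M, p) appends homogeneous 1s, applies M and divides out the
# w component, returning the transformed (n, 3) points.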
def _parse_vertex_tuple(s):
"""Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k'.
...).
"""
vt = [0, 0, 0]
for i, c in enumerate(s.split("/")):
if c:
vt[i] = int(c)
return tuple(vt)
def _unify_rows(a):
"""Unify lengths of each row of a."""
lens = np.fromiter(map(len, a), np.int32)
if not (lens[0] == lens).all():
out = np.zeros((len(a), lens.max()), np.float32)
for i, row in enumerate(a):
out[i, : lens[i]] = row
else:
out = np.float32(a)
return out
def loadTexture(path):
from PIL import Image
import OpenGL.GL as GL
img = Image.open(path).transpose(Image.FLIP_TOP_BOTTOM)
if img.mode != "RGB":
# print('convert {} to RGB'.format(img.mode))
img = img.convert("RGB")
img_data = np.fromstring(img.tobytes(), np.uint8)
# print(img_data.shape)
width, height = img.size
# glTexImage2D expects the first element of the image data to be the
# bottom-left corner of the image. Subsequent elements go left to right,
# with subsequent lines going from bottom to top.
# However, the image data was created with PIL Image tostring and numpy's
# fromstring, which means we have to do a bit of reorganization. The first
# element in the data output by tostring() will be the top-left corner of
# the image, with following values going left-to-right and lines going
# top-to-bottom. So, we need to flip the vertical coordinate (y).
texture_id = GL.glGenTextures(1)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
GL.glBindTexture(GL.GL_TEXTURE_2D, texture_id) # bind texture
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGB,
width,
height,
0,
GL.GL_RGB,
GL.GL_UNSIGNED_BYTE,
img_data,
)
GL.glGenerateMipmap(GL.GL_TEXTURE_2D)
return texture_id
def im2Texture(im, flip_v=False):
from PIL import Image
import OpenGL.GL as GL
im_pil = Image.fromarray(im)
if flip_v:
im_pil = im_pil.transpose(Image.FLIP_TOP_BOTTOM)
if im_pil.mode != "RGB":
print("convert {} to RGB".format(im_pil.mode))
im_pil = im_pil.convert("RGB")
img_data = np.fromstring(im_pil.tobytes(), np.uint8)
# print(img_data.shape)
width, height = im_pil.size
# glTexImage2D expects the first element of the image data to be the
# bottom-left corner of the image. Subsequent elements go left to right,
# with subsequent lines going from bottom to top.
# However, the image data was created with PIL Image tostring and numpy's
# fromstring, which means we have to do a bit of reorganization. The first
# element in the data output by tostring() will be the top-left corner of
# the image, with following values going left-to-right and lines going
# top-to-bottom. So, we need to flip the vertical coordinate (y).
texture_id = GL.glGenTextures(1)
GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)
GL.glBindTexture(GL.GL_TEXTURE_2D, texture_id) # bind texture
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_LINEAR)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_LINEAR_MIPMAP_LINEAR)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S, GL.GL_CLAMP_TO_EDGE)
GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T, GL.GL_CLAMP_TO_EDGE)
GL.glTexImage2D(
GL.GL_TEXTURE_2D,
0,
GL.GL_RGB,
width,
height,
0,
GL.GL_RGB,
GL.GL_UNSIGNED_BYTE,
img_data,
)
GL.glGenerateMipmap(GL.GL_TEXTURE_2D)
return texture_id
def shader_from_path(shader_filename):
shader_path = osp.join(cur_dir, "../shader", shader_filename)
assert osp.exists(shader_path)
with open(shader_path, "r") as f:
return f.read()
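# Note: calc_normals below assumes a flat "triangle soup" layout (every consecutive
# group of three vertices forms one face) and assigns the same face normal to all
# three vertices of each triangle.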
def calc_normals(vertices):
normals = np.empty_like(vertices)
N = vertices.shape[0]
for i in range(0, N - 1, 3):
v1 = vertices[i]
v2 = vertices[i + 1]
v3 = vertices[i + 2]
normal = np.cross(v2 - v1, v3 - v1)
norm = np.linalg.norm(normal)
normal = np.zeros(3) if norm == 0 else normal / norm
normals[i] = normal
normals[i + 1] = normal
normals[i + 2] = normal
return normals
def load_mesh_pyassimp(
model_path,
recalculate_normals=False,
vertex_scale=1.0,
is_textured=False,
cad_model_color=None,
use_cache=True,
cache_dir=".cache",
verbose=True,
):
hashed_file_name = (
hashlib.md5(
(
"{}_{}_{}_{}_{}_{}".format(
model_path,
"load_meshe_pyassimp",
recalculate_normals,
vertex_scale,
is_textured,
cad_model_color,
)
).encode("utf-8")
).hexdigest()
+ "_load_mesh_pyassimp.pkl"
)
mmcv.mkdir_or_exist(cache_dir)
cache_file = osp.join(cache_dir, hashed_file_name)
if use_cache and osp.exists(cache_file):
logger.info("{} loaded cache file: {}".format(model_path, cache_file))
return mmcv.load(cache_file)
scene = pyassimp.load(
model_path
) # ,processing=pyassimp.postprocess.aiProcess_GenUVCoords | pyassimp.postprocess.aiProcess_Triangulate) # load collada
mesh = scene.meshes[0]
# pprint(vars(mesh))
print(mesh.__dict__.keys())
# check materials
mat = mesh.material
# pprint(vars(mat))
print(mat.__dict__.keys())
# default values in pyassimp, ambient:0.05, diffuse: 0.6, specular: 0.6
if "diffuse" in mat.properties.keys() and mat.properties["diffuse"] != 0:
uMatDiffuse = np.array(mat.properties["diffuse"])[:3]
else:
uMatDiffuse = [0.8, 0.8, 0.8]
if "specular" in mat.properties.keys() and mat.properties["specular"] != 0:
uMatSpecular = np.array(mat.properties["specular"])[:3]
else:
uMatSpecular = [0.5, 0.5, 0.5]
if "ambient" in mat.properties.keys() and mat.properties["ambient"] != 0:
uMatAmbient = np.array(mat.properties["ambient"])[:3] # phong shader
else:
uMatAmbient = [0, 0, 0]
if "shininess" in mat.properties.keys() and mat.properties["shininess"] != 0:
uMatShininess = max(mat.properties["shininess"], 1) # avoid the 0 shininess
else:
uMatShininess = 1
vertices = mesh.vertices * vertex_scale
if recalculate_normals:
normals = calc_normals(vertices)
else:
normals = mesh.normals
if sum(normals.shape) == 0:
normals = calc_normals(vertices)
# import pdb; pdb.set_trace();
result = dict(
vertices=vertices,
normals=normals,
faces=mesh.faces,
uMatDiffuse=uMatDiffuse,
uMatSpecular=uMatSpecular,
uMatAmbient=uMatAmbient,
uMatShininess=uMatShininess,
)
if is_textured:
result["colors"] = np.zeros((vertices.shape[0], 3), np.float32)
if sum(mesh.texturecoords.shape) > 0:
result["texturecoords"] = mesh.texturecoords[0, :, :2]
else:
logger.warn("can not load texturecoords with pyassimp") # pyassimp does not load ply texture_uv
else:
result["texturecoords"] = np.zeros((vertices.shape[0], 2), np.float32)
if sum(mesh.colors.shape) > 0:
result["colors"] = mesh.colors[0, :, :3]
else:
if verbose:
logger.warn("can not load colors with pyassimp. (ignore this if the model is textured)")
if not is_textured and "colors" not in result:
# no vert color and texture
is_cad = True
colors = np.zeros((vertices.shape[0], 3), np.float32) # dummy colors
if cad_model_color is None:
colors[:, 0] = 223.0 / 255
colors[:, 1] = 214.0 / 255
colors[:, 2] = 205.0 / 255
else:
colors[:, 0] = cad_model_color[0] / 255
colors[:, 1] = cad_model_color[1] / 255
colors[:, 2] = cad_model_color[2] / 255
result["colors"] = colors
else:
is_cad = False
result["is_cad"] = is_cad
pyassimp.release(scene)
# if model_path.endswith('.obj'):
# ply_path = model_path.replace('.obj', '.ply')
# if osp.exists(ply_path):
# for key in ['uMatDiffuse', 'uMatSpecular', 'uMatAmbient', 'uMatShininess']:
# print('before: ', key, result[key])
# logger.info('assign light properties by loading {}'.format(ply_path))
# _res = load_mesh_pyassimp(ply_path)
# for key in ['uMatDiffuse', 'uMatSpecular', 'uMatAmbient', 'uMatShininess']:
# result[key] = _res[key]
# for key in ['uMatDiffuse', 'uMatSpecular', 'uMatAmbient', 'uMatShininess']:
# print('after: ', key, result[key])
mmcv.dump(result, cache_file)
return result
def load_mesh_sixd(
model_path,
recalculate_normals=False,
vertex_scale=1.0,
is_textured=False,
render_uv=False,
render_normalized_coords=False,
render_nocs=False,
model_info=None,
cad_model_color=None,
use_cache=True,
cache_dir=".cache",
):
mmcv.mkdir_or_exist(cache_dir)
if model_path.endswith(".obj"):
logger.warn(".obj file, load with pyassimp")
return load_mesh_pyassimp(
model_path,
recalculate_normals=recalculate_normals,
vertex_scale=vertex_scale,
is_textured=is_textured,
use_cache=use_cache,
cache_dir=cache_dir,
)
#####################################
hashed_file_name = (
hashlib.md5(
(
"{}_{}_{}_{}_{}".format(
model_path,
"load_mesh_pysixd",
recalculate_normals,
vertex_scale,
is_textured,
)
).encode("utf-8")
).hexdigest()
+ "_load_mesh_pysixd.pkl"
)
cache_file = osp.join(cache_dir, hashed_file_name)
if use_cache and osp.exists(cache_file):
logger.info("{} loaded cache file: {}".format(model_path, cache_file))
return mmcv.load(cache_file)
attributes = {}
logger.info("loading {}".format(model_path))
model = inout.load_ply(model_path)
vertices = np.array(model["pts"]).astype(np.float32) * vertex_scale
# import pdb; pdb.set_trace();
num_pts = vertices.shape[0]
if recalculate_normals or "normals" not in model:
normals = calc_normals(vertices)
else:
normals = np.array(model["normals"]).astype(np.float32)
assert (
int(render_uv + render_normalized_coords + render_nocs) <= 1
), "render_uv, render_normalized_coords, render_nocs can not be True the same time"
if render_uv:
logger.info("render uv")
model["colors"] = np.zeros((num_pts, 3), np.float32)
model["colors"][:, 1:] = model["texture_uv"]
if render_normalized_coords: # each axis normalized within [0, 1]
logger.info("render normalized coords")
# assert model_info is not None
normalizedCoords = np.copy(vertices)
if model_info is None:
xmin, xmax = vertices[:, 0].min(), vertices[:, 0].max()
ymin, ymax = vertices[:, 1].min(), vertices[:, 1].max()
zmin, zmax = vertices[:, 2].min(), vertices[:, 2].max()
else:
xmin, xmax = model_info["xmin"], model_info["xmax"]
ymin, ymax = model_info["ymin"], model_info["ymax"]
zmin, zmax = model_info["zmin"], model_info["zmax"]
# normalize every axis to [0, 1]
normalizedCoords[:, 0] = (normalizedCoords[:, 0] - xmin) / (xmax - xmin)
normalizedCoords[:, 1] = (normalizedCoords[:, 1] - ymin) / (ymax - ymin)
normalizedCoords[:, 2] = (normalizedCoords[:, 2] - zmin) / (zmax - zmin)
model["colors"] = normalizedCoords
if render_nocs: # diagnal normalized to 1, and min corner moved to (0,0,0)
logger.info("render nocs")
# Centering and scaling to fit the unit box
nocs = np.copy(vertices)
if model_info is None:
xmin, xmax = nocs[:, 0].min(), nocs[:, 0].max()
ymin, ymax = nocs[:, 1].min(), nocs[:, 1].max()
zmin, zmax = nocs[:, 2].min(), nocs[:, 2].max()
diagonal = np.sqrt((xmax - xmin) ** 2 + (ymax - ymin) ** 2 + (zmax - zmin) ** 2)
else:
xmin, xmax = model_info["xmin"], model_info["xmax"]
ymin, ymax = model_info["ymin"], model_info["ymax"]
zmin, zmax = model_info["zmin"], model_info["zmax"]
diagonal = model_info["diagonal"]
        # move (xmin, ymin, zmin) to origin, model centered at the 3d bbox center
nocs[:, 0] -= xmin
nocs[:, 1] -= ymin
nocs[:, 2] -= zmin
# scale = max(max(xmax - xmin, ymax - ymin), zmax - zmin)
# unit diagonal
nocs /= diagonal
model["colors"] = nocs
faces = np.array(model["faces"]).astype(np.uint32)
if "colors" in model:
# NOTE: hack
logger.info("colors max: {}".format(model["colors"].max()))
if model["colors"].max() > 1.1:
logger.info("make colors in [0, 1]")
colors = np.array(model["colors"]).astype(np.float32) / 255.0
else: # in range [0, 1]
colors = np.array(model["colors"]).astype(np.float32)
attributes.update(vertices=vertices, normals=normals, colors=colors, faces=faces)
else:
attributes.update(vertices=vertices, normals=normals, faces=faces)
if "texture_uv" in model and is_textured:
attributes["texturecoords"] = model["texture_uv"]
attributes["colors"] = np.zeros((vertices.shape[0], 3), np.float32)
else:
attributes["texturecoords"] = np.zeros((vertices.shape[0], 2), np.float32)
if not is_textured and "colors" not in model:
# no vert color and texture
is_cad = True
colors = np.zeros((vertices.shape[0], 3), np.float32) # dummy colors
if cad_model_color is None:
colors[:, 0] = 223.0 / 255
colors[:, 1] = 214.0 / 255
colors[:, 2] = 205.0 / 255
else:
colors[:, 0] = cad_model_color[0] / 255
colors[:, 1] = cad_model_color[1] / 255
colors[:, 2] = cad_model_color[2] / 255
attributes["colors"] = colors
else:
is_cad = False
attributes["is_cad"] = is_cad
result = load_mesh_pyassimp(
model_path,
recalculate_normals=recalculate_normals,
vertex_scale=vertex_scale,
is_textured=False,
use_cache=False,
verbose=False,
)
attributes.update(
uMatDiffuse=result["uMatDiffuse"],
uMatSpecular=result["uMatSpecular"],
uMatAmbient=result["uMatAmbient"],
uMatShininess=result["uMatShininess"],
)
mmcv.dump(attributes, cache_file)
return attributes
def load_obj(fn):
"""Load 3d mesh form .obj' file.
Args:
fn: Input file name or file-like object.
Returns:
dictionary with the following keys (some of which may be missing):
position: np.float32, (n, 3) array, vertex positions
uv: np.float32, (n, 2) array, vertex uv coordinates
    normal: np.float32, (n, 3) array, vertex normals
    face: np.int32, (k*3,) triangular face indices
"""
position = [np.zeros(3, dtype=np.float32)]
normal = [np.zeros(3, dtype=np.float32)]
uv = [np.zeros(2, dtype=np.float32)]
tuple2idx = OrderedDict()
    triangle_indices = []
input_file = open(fn) if isinstance(fn, str) else fn
for line in input_file:
line = line.strip()
if not line or line[0] == "#":
continue
line = line.split(" ", 1)
tag = line[0]
if len(line) > 1:
line = line[1]
else:
line = ""
if tag == "v":
position.append(np.fromstring(line, sep=" "))
elif tag == "vt":
uv.append(np.fromstring(line, sep=" "))
elif tag == "vn":
normal.append(np.fromstring(line, sep=" "))
elif tag == "f":
output_face_indices = []
for chunk in line.split():
# tuple order: pos_idx, uv_idx, normal_idx
vt = _parse_vertex_tuple(chunk)
if vt not in tuple2idx: # create a new output vertex?
tuple2idx[vt] = len(tuple2idx)
output_face_indices.append(tuple2idx[vt])
# generate face triangles
for i in range(1, len(output_face_indices) - 1):
for vi in [0, i, i + 1]:
                    triangle_indices.append(output_face_indices[vi])
outputs = {}
outputs["face"] = np.int32(trinagle_indices)
pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T
if np.any(pos_idx):
outputs["position"] = _unify_rows(position)[pos_idx]
if np.any(uv_idx):
outputs["uv"] = _unify_rows(uv)[uv_idx]
if np.any(normal_idx):
outputs["normal"] = _unify_rows(normal)[normal_idx]
return outputs
def normalize_mesh(mesh):
"""Scale mesh to fit into -1..1 cube."""
mesh = dict(mesh)
pos = mesh["position"][:, :3].copy()
pos -= (pos.max(0) + pos.min(0)) / 2.0
pos /= np.abs(pos).max()
mesh["position"] = pos
return mesh
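# Usage sketch for the two helpers above (an added example; the function name
# and the obj_path argument are illustrative, not part of the original module).
def _demo_load_and_normalize_obj(obj_path):
    """Load an .obj file with load_obj and rescale it into the -1..1 cube."""
    mesh = load_obj(obj_path)        # parses v/vt/vn/f records into arrays
    mesh = normalize_mesh(mesh)      # recenters and scales the positions
    return mesh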
def quat2rotmat(quat):
quat_mat = np.eye(4)
quat_mat[:3, :3] = quaternions.quat2mat(quat)
return quat_mat
def mat2rotmat(mat):
quat_mat = np.eye(4)
quat_mat[:3, :3] = mat
return quat_mat
def xyz2mat(xyz):
trans_mat = np.eye(4)
trans_mat[-1, :3] = xyz
return trans_mat
def mat2xyz(mat):
xyz = mat[-1, :3]
xyz[np.isnan(xyz)] = 0
return xyz
def safemat2quat(mat):
quat = np.array([1, 0, 0, 0])
try:
quat = mat2quat(mat)
    except Exception:  # fall back to the identity quaternion if mat is not a valid rotation
        pass
quat[np.isnan(quat)] = 0
return quat
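# Small sanity sketch for the helpers above (an added example): this code keeps
# the translation in the last row of the 4x4 matrix (row-vector convention), so
# mat2xyz(xyz2mat(t)) returns t.
def _demo_xyz_mat_roundtrip():
    t = np.array([0.1, -0.2, 0.3])
    assert np.allclose(mat2xyz(xyz2mat(t)), t)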
|
def ackermann(m, n):
if m == 0:
return n+1
elif m > 0:
if n == 0:
return ackermann(m-1, 1)
elif n > 0:
return ackermann(m-1, ackermann(m, n-1))
    print('invalid input')
if __name__ == '__main__':
    print(ackermann(3, 4))
    print(ackermann(-1, 2))
|
import time
import numpy as np
import tensorflow as tf
def average_completion(exp):
completion_time = 0
number_task = 0
for job in exp.simulation.cluster.jobs:
for task in job.tasks:
number_task += 1
            # completion_time += (task.finished_timestamp - task.started_timestamp)  # this looks wrong: it should be measured from the job's submit time
completion_time += (task.finished_timestamp - task.task_config.submit_time)
return completion_time / number_task
def average_slowdown(exp):
slowdown = 0
number_task = 0
for job in exp.simulation.cluster.jobs:
for task in job.tasks:
number_task += 1
# slowdown += (task.finished_timestamp - task.started_timestamp) / task.task_config.duration
slowdown += (task.finished_timestamp - task.task_config.submit_time)
return slowdown / number_task
def multiprocessing_run(episode, trajectories, makespans, average_completions, average_slowdowns):
np.random.seed(int(time.time()))
    tf.random.set_random_seed(int(time.time()))
episode.run()
trajectories.append(episode.simulation.scheduler.algorithm.current_trajectory)
makespans.append(episode.simulation.env.now)
# print(episode.simulation.env.now)
average_completions.append(average_completion(episode))
average_slowdowns.append(average_slowdown(episode))
def debugPrinter(file, lineno, data=None):
"""
    Print debug info prefixed with the file name and line number.
    :param file: the caller's __file__ variable
    :param lineno: the caller's frame object, e.g. sys._getframe()
    :param data: the message to print
:return:
"""
fileName = file.split('/')[-1]
lineno = lineno.f_lineno
print(f'[{fileName} {lineno}] {data}')
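# Usage sketch for debugPrinter (an added example; the message is made up):
# pass the module's __file__ and the current frame so the printed location
# points at the call site.
def _demo_debug_printer():
    import sys
    debugPrinter(__file__, sys._getframe(), 'episode finished')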
|
# coding: utf-8
import sublime
st_version = int(sublime.version())
if st_version > 3000:
from JoomlaPack.lib.file import File
from JoomlaPack.lib.folder import Folder
from JoomlaPack.lib.helper import Helper
from JoomlaPack.lib.json import Json
from JoomlaPack.lib.manifest import Manifest
from JoomlaPack.lib.project import Project
else:
from lib.file import File
from lib.folder import Folder
from lib.helper import Helper
from lib.json import Json
from lib.manifest import Manifest
from lib.project import Project
__all__ = [
'File',
'Folder',
'Helper',
'Json',
'Manifest',
'Project'
]
|
import pytest
import socket
# import main.client
# from main.client import prompt
# import main.server
from main.util import Hall, Room, User, create_socket
from main.banner import Ascii_Banner, Text
def test_can_successfully_create_to_a_hall():
hall = Hall()
assert hall
def test_Ascii_Banner():
msg = 'msg'
banner = Ascii_Banner(msg)
assert banner
def test_colored_banner():
msg = 'hello'
banner= Ascii_Banner.colored_banner(msg, 'red')
assert banner
def test_can_successfully_print_text():
msg = "In a good progress"
text = Text(msg)
assert text
def test_colored_text():
msg = 'hello'
text = Text.colored_text(msg, 'blue')
assert text
def test_can_successfully_connected_to_a_chat_room_server():
name = "test room"
room = Room(name)
assert room.room == "test room"
def test_create_socket():
address = ('127.0.0.1', 22222)
s = create_socket(address)
assert s
def test_create_user():
user = 'new'
name = User(user)
assert name.name == 'new'
# def test_welcome_new(fix_user):
# user = fix_user
# out = Hall()
# assert out.welcome_new(user) == "Error"
# @pytest.fixture
# def fix_user():
# user = 'new'
# name = User(user)
# return name
|
import threading
import taco.constants
import logging
import os
import uuid
import Queue
settings_lock = threading.Lock()
settings = {}
chat_log = []
chat_log_lock = threading.Lock()
chat_uuid = uuid.uuid4().hex
chat_uuid_lock = threading.Lock()
stop = threading.Event()
public_keys_lock = threading.Lock()
public_keys = {}
share_listings_i_care_about = {}
share_listings_i_care_about_lock = threading.Lock()
share_listing_requests_lock = threading.Lock()
share_listing_requests = {}
share_listings = {}
share_listings_lock = threading.Lock()
download_q = {}
download_q_lock = threading.Lock()
completed_q = []
completed_q_lock = threading.Lock()
upload_q = {}
upload_q_lock = threading.Lock()
upload_limiter_lock = threading.Lock()
download_limiter_lock = threading.Lock()
high_priority_output_queue_lock = threading.Lock()
medium_priority_output_queue_lock = threading.Lock()
low_priority_output_queue_lock = threading.Lock()
file_request_output_queue_lock = threading.Lock()
high_priority_output_queue = {}
medium_priority_output_queue = {}
low_priority_output_queue = {}
file_request_output_queue = {}
def Add_To_Output_Queue(peer_uuid,msg,priority=3):
logging.debug("Add to "+ peer_uuid+" output q @ " + str(priority))
if priority==1:
with high_priority_output_queue_lock:
if peer_uuid in high_priority_output_queue:
high_priority_output_queue[peer_uuid].put(msg)
taco.globals.clients.sleep.set()
return 1
elif priority==2:
with medium_priority_output_queue_lock:
if peer_uuid in medium_priority_output_queue:
medium_priority_output_queue[peer_uuid].put(msg)
taco.globals.clients.sleep.set()
return 1
elif priority==3:
with low_priority_output_queue_lock:
if peer_uuid in low_priority_output_queue:
low_priority_output_queue[peer_uuid].put(msg)
taco.globals.clients.sleep.set()
return 1
else:
with file_request_output_queue_lock:
if peer_uuid in file_request_output_queue:
file_request_output_queue[peer_uuid].put(msg)
taco.globals.clients.sleep.set()
return 1
return 0
def Add_To_All_Output_Queues(msg,priority=3):
logging.debug("Add to ALL output q @ " + str(priority))
if priority==1:
with high_priority_output_queue_lock:
for keyname in high_priority_output_queue:
high_priority_output_queue[keyname].put(msg)
taco.globals.clients.sleep.set()
return 1
elif priority==2:
with medium_priority_output_queue_lock:
for keyname in medium_priority_output_queue:
medium_priority_output_queue[keyname].put(msg)
taco.globals.clients.sleep.set()
return 1
elif priority==3:
with low_priority_output_queue_lock:
for keyname in low_priority_output_queue:
low_priority_output_queue[keyname].put(msg)
taco.globals.clients.sleep.set()
return 1
else:
with file_request_output_queue_lock:
for keyname in file_request_output_queue:
file_request_output_queue[keyname].put(msg)
taco.globals.clients.sleep.set()
return 1
return 0
def properexit(signum, frame):
logging.warning("SIGINT Detected, stopping " + taco.constants.APP_NAME)
stop.set()
logging.info("Stopping Server")
server.stop.set()
logging.info("Stopping Clients")
clients.stop.set()
clients.sleep.set()
logging.info("Stopping Filesystem Workers")
filesys.stop.set()
filesys.sleep.set()
server.join()
clients.join()
filesys.join()
logging.info("Dispatcher Stopped Successfully")
logging.info("Clean Exit")
os._exit(3)
|
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from unittest import TestCase
from packagedcode.utils import parse_repo_url
class TestPackageUtils(TestCase):
def test_parse_repo_url_0(self):
test = 'npm/npm'
expected = 'https://github.com/npm/npm'
assert expected == parse_repo_url(test)
def test_parse_repo_url_1(self):
test = 'gist:11081aaa281'
expected = 'https://gist.github.com/11081aaa281'
assert expected == parse_repo_url(test)
def test_parse_repo_url_2(self):
test = 'bitbucket:example/repo'
expected = 'https://bitbucket.org/example/repo'
assert expected == parse_repo_url(test)
def test_parse_repo_url_3(self):
test = 'gitlab:another/repo'
expected = 'https://gitlab.com/another/repo'
assert expected == parse_repo_url(test)
def test_parse_repo_url_4(self):
test = 'expressjs/serve-static'
expected = 'https://github.com/expressjs/serve-static'
assert expected == parse_repo_url(test)
def test_parse_repo_url_5(self):
test = 'git://github.com/angular/di.js.git'
expected = 'git://github.com/angular/di.js.git'
assert expected == parse_repo_url(test)
def test_parse_repo_url_6(self):
test = 'git://github.com/hapijs/boom'
expected = 'git://github.com/hapijs/boom'
assert expected == parse_repo_url(test)
def test_parse_repo_url_7(self):
test = 'git@github.com:balderdashy/waterline-criteria.git'
expected = 'https://github.com/balderdashy/waterline-criteria.git'
assert expected == parse_repo_url(test)
def test_parse_repo_url_8(self):
test = 'http://github.com/ariya/esprima.git'
expected = 'http://github.com/ariya/esprima.git'
assert expected == parse_repo_url(test)
def test_parse_repo_url_9(self):
test = 'http://github.com/isaacs/nopt'
expected = 'http://github.com/isaacs/nopt'
assert expected == parse_repo_url(test)
def test_parse_repo_url_10(self):
test = 'https://github.com/chaijs/chai'
expected = 'https://github.com/chaijs/chai'
assert expected == parse_repo_url(test)
def test_parse_repo_url_11(self):
test = 'https://github.com/christkv/kerberos.git'
expected = 'https://github.com/christkv/kerberos.git'
assert expected == parse_repo_url(test)
def test_parse_repo_url_12(self):
test = 'https://gitlab.com/foo/private.git'
expected = 'https://gitlab.com/foo/private.git'
assert expected == parse_repo_url(test)
def test_parse_repo_url_13(self):
test = 'git@gitlab.com:foo/private.git'
expected = 'https://gitlab.com/foo/private.git'
assert expected == parse_repo_url(test)
def test_parse_git_repo_url_without_slash_slash(self):
test = 'git@github.com/Filirom1/npm2aur.git'
expected = 'https://github.com/Filirom1/npm2aur.git'
assert expected == parse_repo_url(test)
def test_parse_repo_url_does_not_fail_on_empty(self):
assert None == parse_repo_url(None)
assert None == parse_repo_url('')
assert None == parse_repo_url(' ')
|
# -*- Mode: Python; tab-width: 2; indent-tabs-mode:nil; -*-
# vim: set ts=2 et sw=2 tw=80:
#
# Copyright (c) 2013 The MathJax Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This table must be sorted.
#
# fontUtil::computeNormalSizeSplitting assumes the table to be sorted. If it
# does not find a character, it will assume it is "NONUNICODE".
#
COPYRIGHT = "Copyright (c) 2013 The MathJax Consortium"
FONTSPLITTING = [
["Main",
(0x0000, 0x007F), # Basic Latin
# MathJax_Main, Latin-1 Supplement
0x00A0,
0x00A3, # pound sign
0x00A5, # MathJax_AMS
0x00A7, # section symbol
0x00A8,
0x00AC,
0x00AE, # MathJax_AMS
(0x00AF,0x00B1),
0x00B4,
0x00B5, # mu
0x00B7, # middle dot
0x00D7,
0x00F0, # MathJax_AMS
0x00F7,
# MathJax_Main, Latin Extended-A
0x0127, # MathJax_AMS
0x0131,
# MathJax_Main, Latin Extended-B
0x0237,
# MathJax_Main, Spacing Modifier Letters,
(0x02C6,0x02C7),
(0x02C9,0x02CB),
(0x02D8,0x02DA),
0x02DC,
# MathJax_Main, Combining Diacritical Marks
(0x0300,0x0304),
(0x0306,0x0308),
(0x030A,0x030C),
0x0338,
(0x0370, 0x037D), # Greek and Coptic
(0x037F, 0x0383), # Greek and Coptic
(0x0391, 0x03A9), # Greek and Coptic
(0x03B1, 0x03C9), # Greek and Coptic
(0x03CF, 0x03FF), # Greek and Coptic
# MathJax_Main, General Punctuation
(0x2002,0x2006),
(0x2009,0x200A),
(0x2013,0x2014),
0x2016,
(0x2018,0x2019),
(0x201C,0x201D),
(0x2020,0x2021),
0x2026,
0x2032,
0x2033, # double prime
0x2034, # triple prime
0x2035, # MathJax_AMS
0x203E, # overline
0x2044,
0x2057, # quadruple prime
# MathJax_Main, Combining Diacritical Marks for Symbols
0x20D7,
# MathJax_Main, Letterlike Symbols
0x210F,
0x2111,
0x2113,
0x2118,
0x211C,
0x2127, # MathJax_AMS
0x2132, # MathJax_AMS
0x2135,
(0x2136,0x2138), # MathJax_AMS
0x2141, # MathJax_AMS
# MathJax_Main, Arrows
(0x2190,0x2199),
(0x219A,0x219B), # MathJax_AMS
0x219E, # MathJax_AMS
0x21A0, # MathJax_AMS
(0x21A2,0x21A3), # MathJax_AMS
0x21A6,
(0x21A9,0x21AA),
(0x21AB,0x21AE), # MathJax_AMS
(0x21B0,0x21B1), # MathJax_AMS
(0x21B6,0x21B7), # MathJax_AMS
(0x21BA,0x21BB), # MathJax_AMS
(0x21BC,0x21BD),
(0x21BE,0x21BF), # MathJax_AMS
(0x21C0,0x21C1),
(0x21C2,0x21C4), # MathJax_AMS
(0x21C6,0x21CF), # MathJax_AMS
(0x21D0,0x21D5),
(0x21DA,0x21DB), # MathJax_AMS
0x21DD, # MathJax_AMS
0x21E0, # MathJax_AMS
0x21E2, # MathJax_AMS
# MathJax_Main, Mathematical Operators
0x2200,
0x2201, # MathJax_Main
(0x2202,0x2203),
(0x2204,0x2205), # MathJax_Main
(0x2207,0x2209),
0x220B,
0x220D, # MathJax_AMS
(0x2212,0x2213),
0x2214, # MathJax_AMS
(0x2215,0x221A),
(0x221D,0x221E),
(0x2220,0x2226), # MathJax_AMS ; MathJax_Main: 0x2220, 0x2223, 0x2225,
(0x2227,0x222B),
(0x2234,0x2235), # MathJax_AMS
(0x223C,0x223D), # MathJax_AMS ; MathJax_Main: 0x223C
0x2240,
(0x2241,0x2242), # MathJax_AMS
0x2243,
0x2245,
0x2246, # MathJax_AMS
0x2248,
0x224A, # MathJax_AMS
0x224D,
(0x224E,0x224F), # MathJax_AMS
0x2250,
(0x2251,0x2253), # MathJax_AMS
(0x2256,0x2257), # MathJax_AMS
0x225C, # MathJax_AMS
(0x2260,0x2261),
(0x2264,0x2265),
(0x2266,0x2269), # MathJax_AMS
(0x226A,0x226B),
0x226C, # MathJax_AMS
(0x226E,0x2273), # MathJax_AMS
(0x2276,0x2277), # MathJax_AMS
(0x227A,0x227B),
(0x227C,0x2281), # MathJax_AMS
(0x2282,0x2283),
(0x2286,0x2287),
(0x2288,0x228B), # MathJax_AMS
0x228E,
(0x228F,0x2290), # MathJax_AMS
(0x2291,0x2299),
(0x229A,0x229B), # MathJax_AMS
(0x229D,0x22A1), # MathJax_AMS
(0x22A2,0x22A5),
(0x22A8,0x22AA), # MathJax_AMS ; MathJax_Main: 0x22A8
(0x22AC,0x22AF), # MathJax_AMS
(0x22B2,0x22B5), # MathJax_AMS
0x22B8, # MathJax_AMS
(0x22BA,0x22BC), # MathJax_AMS
(0x22C4,0x22C6),
0x22C8,
(0x22C9,0x22D4), # MathJax_AMS
(0x22D6,0x22DB), # MathJax_AMS
(0x22DE,0x22E1), # MathJax_AMS
(0x22E6,0x22ED), # MathJax_AMS
(0x22EE,0x22EF),
0x22F1,
# MathJax_Main, Miscellaneous Technical
(0x2308,0x230B),
(0x2322,0x2323),
(0x23B0,0x23B1),
0x23B4,
0x23B5,
0x23D0,
(0x23DC, 0x23E1),
# MathJax_AMS, Enclosed Alphanumerics
0x24C8,
# MathJax_AMS, Box Drawing
0x250C,
0x2510,
0x2514,
0x2518,
(0x2571,0x2572),
# MathJax_Main, Geometric Shapes
(0x25A0,0x25A1), # MathJax_AMS
0x25B2, # MathJax_AMS
0x25B3,
0x25B6, # MathJax_AMS
0x25B8,
0x25B9,
0x25BC, # MathJax_AMS
0x25BD,
0x25C0, # MathJax_AMS
0x25C2,
0x25C3,
0x25CA, # MathJax_AMS
0x25EF,
# MathJax_Main, Miscellaneous Symbols
(0x2660,0x2663),
(0x266D,0x266F),
# MathJax_AMS, Dingbats
0x2713,
0x2720,
# MathJax_Main, Miscellaneous Mathematical
(0x27E8,0x27E9),
(0x27EE,0x27EF),
# MathJax_Main, Supplemental Arrows
(0x27F5,0x27FA),
0x27FC,
# Miscellaneous Mathematical Symbols-B
0x2997,
0x2998,
0x29EB,
0x29F5,
0x29F8,
0x29F9,
# MathJax_Main, Supplemental Mathematical
0x2A3F,
0x2A5E,
(0x2A7D,0x2A7E),
(0x2A85,0x2A8C),
(0x2A95,0x2A96),
(0x2AAF,0x2AB0),
(0x2AB5,0x2ABA),
(0x2AC5,0x2AC6),
(0x2ACB,0x2ACC),
0xFFFD # Specials
],
["Normal",
0x210E, # Planck Constant
(0x1D400, 0x1D433), # Bold
(0x1D434, 0x1D467), # Italic
(0x1D468, 0x1D49B), # Bold Italic
(0x1D6A4, 0x1D6A5), # dotless i j
(0x1D6A8, 0x1D6E1), # Greek Bold
(0x1D6E2, 0x1D71B), # Greek Italic
(0x1D71C, 0x1D755), # Greek BoldItalic
(0x1D7CE, 0x1D7D7) # Bold digits
],
["Script",
0x210A, # Script g
0x210B, # Script H
0x2110, # Script I
0x2112, # Script L
0x211B, # Script R
0x212C, # Script B
0x212F, # Script e
0x2130, # Script E
0x2131, # Script F
0x2133, # Script M
0x2134, # Script o
(0x1D49C, 0x1D4CF), # Script
(0x1D4D0, 0x1D503) # Bold Script
],
["Fraktur",
0x210C, # Script H
0x2111, # Script I
0x211C, # Script R
0x2128, # Script z
0x212D, # Script C
(0x1D504, 0x1D537), # Fraktur
(0x1D56C, 0x1D59F) # Bold Fraktur
],
["DoubleStruck",
0x2102, # DoubleStruck C
0x210D, # DoubleStruck H
0x2115, # DoubleStruck N
0x2119, # DoubleStruck P
0x211A, # DoubleStruck Q
0x211D, # DoubleStruck R
0x2124, # DoubleStruck Z
(0x213C, 0x2140), # DoubleStruck pi, gamma, Gamma, Sigma
(0x2145, 0x2149), # DoubleStruck Italic D, d, e, i, j
(0x1D538, 0x1D56B), # DoubleStruck
(0x1D7D8, 0x1D7E1) # DoubleStruck digits
],
["SansSerif",
(0x1D5A0, 0x1D5D3), # Sans-Serif
(0x1D5D4, 0x1D607), # Sans-Serif Bold
(0x1D608, 0x1D63B), # Sans-Serif Italic
(0x1D63C, 0x1D66F), # Sans-Serif BoldItalic
(0x1D756, 0x1D7CB), # Greek Sans-Serif Bold
(0x1D7E2, 0x1D7EB), # Sans-Serif digits
(0x1D7EC, 0x1D7F5) # Sans-Serif Bold digits
],
["Monospace",
(0x1D670,0x1D6A3), # Monospace
(0x1D7F6,0x1D7FF), # Monospace digits
],
["Latin",
(0x0080, 0x00FF), # Latin-1 Supplement
(0x0100, 0x017F), # Latin Extended-A
(0x0180, 0x024F), # Latin Extended-B
(0x1E00, 0x1EFF), # Latin Extended Additional
(0xA720, 0xA7FF), # Latin Extended-D
(0xFB00, 0xFB4F) # Alphabetic Presentation Forms
],
["Alphabets",
(0x0384, 0x0390), # Greek and Coptic
(0x03AA, 0x03B0), # Greek and Coptic
(0x03CA, 0x03CE), # Greek and Coptic
(0x0400, 0x04FF), # Cyrillic
0x0E3F, # thai currency symbol baht
(0x13A0, 0x13FF), # Cherokee
(0x2100, 0x214F), # Letterlike Symbols
(0x3040, 0x309F), # Hiragana
(0xFE70, 0xFFEF), # Arabic Presentation Forms-B
(0x10140, 0x1018F) # Ancient Greek Numbers
],
["Marks",
(0x02B0, 0x02FF), # Spacing Modifier Letters
(0x0300, 0x036F), # Combining Diacritical Marks
(0x2000, 0x206F), # General Punctuation
(0x20D0, 0x20FF), # Combining Diacritical Marks for Symbols
(0x2E00, 0x2E7F), # Supplemental Punctuation
(0x3000, 0x303F) # CJK Symbols and Punctuation
],
["Arrows",
(0x2190, 0x21FF), # Arrows
(0x27F0, 0x27FF), # Supplemental Arrows-A
(0x2900, 0x297F) # Supplemental Arrows-B
],
["Operators",
(0x2200, 0x22FF), # Mathematical Operators
(0x2A00, 0x2AFF) # Supplemental Mathematical Operators
],
["Symbols",
(0x2300, 0x23FF), # Miscellaneous Technical
(0x27C0, 0x27EF), # Miscellaneous Mathematical Symbols-A
(0x2980, 0x29FF) # Miscellaneous Mathematical Symbols-B
],
["Shapes",
(0x2400, 0x243F), # Control Pictures
(0x2500, 0x257F), # Box Drawing
(0x2580, 0x259F), # Block Elements
(0x25A0, 0x25FF), # Geometric Shapes
(0x2600, 0x26FF), # Miscellaneous Symbols
(0x2B00, 0x2BFF) # Miscellaneous Symbols and Arrows
],
["Misc",
(0x0250, 0x02AF), # IPA Extensions
(0x1D00, 0x1D7F), # Phonetic Extensions
(0x1D80, 0x1DBF), # Phonetic Extensions Supplement
(0x2070, 0x209F), # Superscripts and Subscripts
(0x20A0, 0x20CF), # Currency Symbols
(0x2150, 0x218F), # Number Forms
(0x2460, 0x24FF), # Enclosed Alphanumerics
(0x2700, 0x27BF) # Dingbats
],
["Variants"], # Used for oldstyle numbers, caligraphic and glyph variants
["NonUnicode"] # Font for remaining non-Unicode glyphs
]
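# The comment at the top of this table notes that
# fontUtil::computeNormalSizeSplitting relies on the entries being sorted.
# The helper below is only an illustrative sketch (not part of the MathJax
# tooling) of how a codepoint could be mapped to a splitting name under that
# assumption.
def _lookup_splitting(codepoint):
    for entry in FONTSPLITTING:
        name, ranges = entry[0], entry[1:]
        for r in ranges:
            lo, hi = (r, r) if isinstance(r, int) else r
            if lo <= codepoint <= hi:
                return name
            if codepoint < lo:
                break  # ranges within a block are sorted, so no later range can match
    return "NonUnicode"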
|
"""
Suppose Andy and Doris want to choose a restaurant for dinner, and they both have a list of favorite restaurants represented by strings.
You need to help them find out their common interest with the least list index sum. If there is a choice tie between answers, output all of them with no order requirement. You could assume there always exists an answer.
Example 1:
Input: list1 = ["Shogun","Tapioca Express","Burger King","KFC"], list2 = ["Piatti","The Grill at Torrey Pines","Hungry Hunter Steakhouse","Shogun"]
Output: ["Shogun"]
Explanation: The only restaurant they both like is "Shogun".
Example 2:
Input: list1 = ["Shogun","Tapioca Express","Burger King","KFC"], list2 = ["KFC","Shogun","Burger King"]
Output: ["Shogun"]
Explanation: The restaurant they both like and have the least index sum is "Shogun" with index sum 1 (0+1).
Example 3:
Input: list1 = ["Shogun","Tapioca Express","Burger King","KFC"], list2 = ["KFC","Burger King","Tapioca Express","Shogun"]
Output: ["KFC","Burger King","Tapioca Express","Shogun"]
Example 4:
Input: list1 = ["Shogun","Tapioca Express","Burger King","KFC"], list2 = ["KNN","KFC","Burger King","Tapioca Express","Shogun"]
Output: ["KFC","Burger King","Tapioca Express","Shogun"]
Example 5:
Input: list1 = ["KFC"], list2 = ["KFC"]
Output: ["KFC"]
Constraints:
1 <= list1.length, list2.length <= 1000
1 <= list1[i].length, list2[i].length <= 30
list1[i] and list2[i] consist of spaces ' ' and English letters.
All the strings of list1 are unique.
All the strings of list2 are unique.
"""
class Solution(object):
def findRestaurant(self, list1, list2):
"""
:type list1: List[str]
:type list2: List[str]
:rtype: List[str]
"""
tr = {}
for i, w in enumerate(list1):
tr[w] = [i]
for i, w in enumerate(list2):
if w not in tr:
continue
tr[w].append(i)
m = len(list1) + len(list2)
        for w, ns in tr.items():
if len(ns) < 2:
continue
m = min(m, ns[0] + ns[1])
ret = []
        for w, ns in tr.items():
if len(ns) < 2:
continue
if ns[0] + ns[1] == m:
ret.append(w)
return ret
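# Quick usage check for the solution above (an added example, not part of the
# original snippet); expected output: ['Shogun'].
if __name__ == "__main__":
    print(Solution().findRestaurant(
        ["Shogun", "Tapioca Express", "Burger King", "KFC"],
        ["KFC", "Shogun", "Burger King"]))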
|
import os
import re
import socket
import sys
from enum import Enum
import click
import hjson as json
import inotify.adapters
import inotify.constants
from prometheus_client import Gauge
class Module(Enum):
DHCP4 = 1
DHCP6 = 2
class KeaExporter:
subnet_pattern = re.compile(
r"subnet\[(?P<subnet_idx>[\d]+)\]\.(?P<metric>[\w-]+)")
msg_statistics_all = bytes(
json.dumpsJSON({'command': 'statistic-get-all'}), 'utf-8')
def __init__(self, config_path):
# kea control socket
self.sock_dhcp6 = None
self.sock_dhcp6_path = None
self.sock_dhcp4 = None
self.sock_dhcp4_path = None
# prometheus
self.prefix = 'kea'
self.prefix_dhcp4 = '{0}_dhcp4'.format(self.prefix)
self.prefix_dhcp6 = '{0}_dhcp6'.format(self.prefix)
self.metrics_dhcp4 = None
self.metrics_dhcp4_map = None
self.metrics_dhcp4_ignore = None
self.setup_dhcp4_metrics()
self.metrics_dhcp6 = None
self.metrics_dhcp6_map = None
self.metrics_dhcp6_ignore = None
self.setup_dhcp6_metrics()
# kea config
self.config_path = config_path
self.config = None
self.inotify = inotify.adapters.Inotify()
self.inotify.add_watch(
config_path, mask=inotify.constants.IN_MODIFY
)
self.load_config()
def load_config(self):
with open(self.config_path, 'r') as handle:
self.config = json.load(handle)
try:
sock_path = self.config['Dhcp4']['control-socket']['socket-name']
if not os.access(sock_path, os.F_OK):
raise FileNotFoundError()
if not os.access(sock_path, os.R_OK | os.W_OK):
raise PermissionError()
self.sock_dhcp4_path = sock_path
except KeyError:
click.echo('Dhcp4.control-socket.socket-name not configured, '
'will not be exporting Dhcp4 metrics', file=sys.stderr)
except FileNotFoundError:
click.echo('Dhcp4 control-socket configured, but it does not '
'exist. Is Kea running?', file=sys.stderr)
sys.exit(1)
except PermissionError:
click.echo('Dhcp4 control-socket is not read-/writeable.',
file=sys.stderr)
sys.exit(1)
try:
sock_path = self.config['Dhcp6']['control-socket']['socket-name']
if not os.access(sock_path, os.F_OK):
raise FileNotFoundError()
if not os.access(sock_path, os.R_OK | os.W_OK):
raise PermissionError()
self.sock_dhcp6_path = sock_path
except KeyError:
click.echo('Dhcp6.control-socket.socket-name not configured, '
'will not be exporting Dhcp6 metrics', file=sys.stderr)
except FileNotFoundError:
click.echo('Dhcp6 control-socket configured, but it does not '
'exist. Is Kea running?', file=sys.stderr)
sys.exit(1)
except PermissionError:
click.echo('Dhcp6 control-socket is not read-/writeable.',
file=sys.stderr)
sys.exit(1)
def setup_dhcp4_metrics(self):
self.metrics_dhcp4 = {
# Packets
'sent_packets': Gauge(
'{0}_packets_sent_total'.format(self.prefix_dhcp4),
'Packets sent',
['operation']),
'received_packets': Gauge(
'{0}_packets_received_total'.format(self.prefix_dhcp4),
'Packets received',
['operation']),
# per Subnet
'addresses_assigned_total': Gauge(
'{0}_addresses_assigned_total'.format(self.prefix_dhcp4),
'Assigned addresses',
['subnet']),
'addresses_declined_total': Gauge(
'{0}_addresses_declined_total'.format(self.prefix_dhcp4),
'Declined counts',
['subnet']),
'addresses_declined_reclaimed_total': Gauge(
'{0}_addresses_declined_reclaimed_total'.format(
self.prefix_dhcp4),
'Declined addresses that were reclaimed',
['subnet']),
'addresses_reclaimed_total': Gauge(
'{0}_addresses_reclaimed_total'.format(self.prefix_dhcp4),
'Expired addresses that were reclaimed',
['subnet']),
'addresses_total': Gauge(
'{0}_addresses_total'.format(self.prefix_dhcp4),
'Size of subnet address pool',
['subnet']
)
}
self.metrics_dhcp4_map = {
# sent_packets
'pkt4-ack-sent': {
'metric': 'sent_packets',
'labels': {
'operation': 'ack'
},
},
'pkt4-nak-sent': {
'metric': 'sent_packets',
'labels': {
'operation': 'nak'
},
},
'pkt4-offer-sent': {
'metric': 'sent_packets',
'labels': {
'operation': 'offer'
},
},
# received_packets
'pkt4-discover-received': {
'metric': 'received_packets',
'labels': {
'operation': 'discover'
}
},
'pkt4-offer-received': {
'metric': 'received_packets',
'labels': {
'operation': 'offer'
}
},
'pkt4-request-received': {
'metric': 'received_packets',
'labels': {
'operation': 'request'
}
},
'pkt4-ack-received': {
'metric': 'received_packets',
'labels': {
'operation': 'ack'
}
},
'pkt4-nak-received': {
'metric': 'received_packets',
'labels': {
'operation': 'nak'
}
},
'pkt4-release-received': {
'metric': 'received_packets',
'labels': {
'operation': 'release'
}
},
'pkt4-decline-received': {
'metric': 'received_packets',
'labels': {
'operation': 'decline'
}
},
'pkt4-inform-received': {
'metric': 'received_packets',
'labels': {
'operation': 'inform'
}
},
'pkt4-unknown-received': {
'metric': 'received_packets',
'labels': {
'operation': 'unknown'
}
},
'pkt4-parse-failed': {
'metric': 'received_packets',
'labels': {
'operation': 'parse-failed'
}
},
'pkt4-receive-drop': {
'metric': 'received_packets',
'labels': {
'operation': 'drop'
}
},
# per Subnet
'assigned-addresses': {
'metric': 'addresses_assigned_total',
},
'declined-addresses': {
'metric': 'addresses_declined_total',
},
'declined-reclaimed-addresses': {
'metric': 'addresses_declined_reclaimed_total',
},
'reclaimed-declined-addresses': {
'metric': 'addresses_declined_reclaimed_total',
},
'reclaimed-leases': {
'metric': 'addresses_reclaimed_total',
},
'total-addresses': {
'metric': 'addresses_total',
}
}
self.metrics_dhcp4_ignore = [
# sums of different packet types
'pkt4-sent',
'pkt4-received',
# sums of subnet values
'declined-addresses',
'declined-reclaimed-addresses',
'reclaimed-declined-addresses',
'reclaimed-leases'
]
def setup_dhcp6_metrics(self):
self.metrics_dhcp6 = {
# Packets sent/received
'sent_packets': Gauge(
'{0}_packets_sent_total'.format(self.prefix_dhcp6),
'Packets sent',
['operation']),
'received_packets': Gauge(
'{0}_packets_received_total'.format(self.prefix_dhcp6),
'Packets received',
['operation']),
# DHCPv4-over-DHCPv6
'sent_dhcp4_packets': Gauge(
'{0}_packets_sent_dhcp4_total'.format(self.prefix_dhcp6),
                'DHCPv4-over-DHCPv6 Packets sent',
['operation']
),
'received_dhcp4_packets': Gauge(
'{0}_packets_received_dhcp4_total'.format(self.prefix_dhcp6),
'DHCPv4-over-DHCPv6 Packets received',
['operation']
),
# per Subnet
'addresses_declined_total': Gauge(
'{0}_addresses_declined_total'.format(self.prefix_dhcp6),
'Declined addresses',
['subnet']),
'addresses_declined_reclaimed_total': Gauge(
'{0}_addresses_declined_reclaimed_total'.format(
self.prefix_dhcp6),
'Declined addresses that were reclaimed',
['subnet']),
'addresses_reclaimed_total': Gauge(
'{0}_addresses_reclaimed_total'.format(self.prefix_dhcp6),
'Expired addresses that were reclaimed',
['subnet']),
# IA_NA
'na_assigned_total': Gauge(
'{0}_na_assigned_total'.format(self.prefix_dhcp6),
'Assigned non-temporary addresses (IA_NA)',
['subnet']),
'na_total': Gauge(
'{0}_na_total'.format(self.prefix_dhcp6),
'Size of non-temporary address pool',
['subnet']
),
# IA_PD
'pd_assigned_total': Gauge(
'{0}_pd_assigned_total'.format(self.prefix_dhcp6),
'Assigned prefix delegations (IA_PD)',
['subnet']),
'pd_total': Gauge(
'{0}_pd_total'.format(self.prefix_dhcp6),
'Size of prefix delegation pool',
['subnet']
),
}
self.metrics_dhcp6_map = {
# sent_packets
'pkt6-advertise-sent': {
'metric': 'sent_packets',
'labels': {
'operation': 'advertise'
},
},
'pkt6-reply-sent': {
'metric': 'sent_packets',
'labels': {
'operation': 'reply'
},
},
# received_packets
'pkt6-receive-drop': {
'metric': 'received_packets',
'labels': {
'operation': 'drop'
},
},
'pkt6-parse-failed': {
                'metric': 'received_packets',
'labels': {
'operation': 'parse-failed'
},
},
'pkt6-solicit-received': {
'metric': 'received_packets',
'labels': {
'operation': 'solicit'
},
},
'pkt6-advertise-received': {
'metric': 'received_packets',
'labels': {
'operation': 'advertise'
}
},
'pkt6-request-received': {
'metric': 'received_packets',
'labels': {
'operation': 'request'
}
},
'pkt6-reply-received': {
'metric': 'received_packets',
'labels': {
'operation': 'reply'
}
},
'pkt6-renew-received': {
'metric': 'received_packets',
'labels': {
'operation': 'renew'
}
},
'pkt6-rebind-received': {
'metric': 'received_packets',
'labels': {
'operation': 'rebind'
}
},
'pkt6-release-received': {
'metric': 'received_packets',
'labels': {
'operation': 'release'
}
},
'pkt6-decline-received': {
'metric': 'received_packets',
'labels': {
'operation': 'decline'
}
},
'pkt6-infrequest-received': {
'metric': 'received_packets',
'labels': {
'operation': 'infrequest'
}
},
'pkt6-unknown-received': {
'metric': 'received_packets',
'labels': {
'operation': 'unknown'
}
},
# DHCPv4-over-DHCPv6
'pkt6-dhcpv4-response-sent': {
'metric': 'sent_dhcp4_packets',
'labels': {
'operation': 'response'
}
},
'pkt6-dhcpv4-query-received': {
'metric': 'received_dhcp4_packets',
'labels': {
'operation': 'query'
}
},
'pkt6-dhcpv4-response-received': {
'metric': 'received_dhcp4_packets',
'labels': {
'operation': 'response'
}
},
# per Subnet
'assigned-nas': {
'metric': 'na_assigned_total',
},
'assigned-pds': {
'metric': 'pd_assigned_total',
},
'declined-addresses': {
'metric': 'addresses_declined_total',
},
'declined-reclaimed-addresses': {
'metric': 'addresses_declined_reclaimed_total',
},
'reclaimed-declined-addresses': {
'metric': 'addresses_declined_reclaimed_total',
},
'reclaimed-leases': {
'metric': 'addresses_reclaimed_total',
},
'total-nas': {
'metric': 'na_total',
},
'total-pds': {
'metric': 'pd_total',
}
}
self.metrics_dhcp6_ignore = [
# sums of different packet types
'pkt6-sent',
'pkt6-received',
# sums of subnet values
'declined-addresses',
'declined-reclaimed-addresses',
'reclaimed-declined-addresses',
'reclaimed-leases'
]
def update(self):
reload_config = False
for event in self.inotify.event_gen():
if not event:
break
reload_config = True
if reload_config:
click.echo('Config was modified, reloading...', file=sys.stderr)
self.load_config()
for sock_path, module in [(self.sock_dhcp4_path, Module.DHCP4),
(self.sock_dhcp6_path, Module.DHCP6)]:
if sock_path is None:
continue
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
sock.connect(sock_path)
sock.send(KeaExporter.msg_statistics_all)
response = sock.recv(8192).decode()
self.parse_metrics(json.loads(response), module)
def parse_metrics(self, response, module):
for key, data in response['arguments'].items():
if module is Module.DHCP4:
if key in self.metrics_dhcp4_ignore:
continue
else:
if key in self.metrics_dhcp6_ignore:
continue
value, timestamp = data[0]
labels = {}
# lookup subnet
if key.startswith('subnet['):
match = self.subnet_pattern.match(key)
if match:
subnet_idx = int(match.group('subnet_idx')) - 1
key = match.group('metric')
if module is Module.DHCP4:
subnet = self.config['Dhcp4']['subnet4'][subnet_idx]
else:
subnet = self.config['Dhcp6']['subnet6'][subnet_idx]
labels['subnet'] = subnet['subnet']
else:
click.echo('subnet pattern failed for metric: {0}'.format(
key), file=sys.stderr)
if module is Module.DHCP4:
metric_info = self.metrics_dhcp4_map[key]
metric = self.metrics_dhcp4[metric_info['metric']]
else:
metric_info = self.metrics_dhcp6_map[key]
metric = self.metrics_dhcp6[metric_info['metric']]
# merge static and dynamic labels
labels.update(metric_info.get('labels', {}))
# export labels and value
metric.labels(**labels).set(value)
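# Minimal serving sketch (an assumption about how the exporter might be driven;
# the function name, port and poll interval are illustrative, not part of the
# original project).
def _demo_serve(config_path, port=9547, interval=7.5):
    import time
    from prometheus_client import start_http_server
    exporter = KeaExporter(config_path)
    start_http_server(port)   # expose /metrics for Prometheus to scrape
    while True:
        exporter.update()     # refresh every gauge from the Kea control sockets
        time.sleep(interval)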
|
import FWCore.ParameterSet.Config as cms
from Calibration.TkAlCaRecoProducers.ALCARECOSiStripCalCosmics_cff import ALCARECOSiStripCalCosmics
from CalibTracker.SiStripCommon.prescaleEvent_cfi import prescaleEvent
from HLTrigger.HLTfilters.triggerResultsFilter_cfi import triggerResultsFilter
ALCARECOSiStripCalCosmicsNanoPrescale = prescaleEvent.clone(prescale=1)
ALCARECOSiStripCalCosmicsNanoHLT = triggerResultsFilter.clone(
triggerConditions=cms.vstring("HLT_L1SingleMuCosmics_v*"),
hltResults=cms.InputTag("TriggerResults", "", "HLT"),
l1tResults=cms.InputTag(""),
throw=cms.bool(False)
)
# Select only events where tracker had HV on (according to DCS bit information)
# AND respective partition is in the run (according to FED information)
import CalibTracker.SiStripCommon.SiStripDCSFilter_cfi
DCSStatusForSiStripCalCosmicsNano = CalibTracker.SiStripCommon.SiStripDCSFilter_cfi.siStripDCSFilter.clone(
DetectorType = cms.vstring('TIBTID','TOB','TECp','TECm'),
ApplyFilter = cms.bool(True),
AndOr = cms.bool(True),
DebugOn = cms.untracked.bool(False)
)
from CalibTracker.Configuration.Filter_Refit_cff import CalibrationTracks, CalibrationTracksRefit, MeasurementTrackerEvent, offlineBeamSpot
ALCARECOSiStripCalCosmicsNanoCalibTracks = CalibrationTracks.clone(src=cms.InputTag("ALCARECOSiStripCalCosmics"))
ALCARECOSiStripCalCosmicsNanoCalibTracksRefit = CalibrationTracksRefit.clone(
src=cms.InputTag("ALCARECOSiStripCalCosmicsNanoCalibTracks")
)
ALCARECOSiStripCalCosmicsNanoTkCalSeq = cms.Sequence(
ALCARECOSiStripCalCosmicsNanoPrescale*
ALCARECOSiStripCalCosmicsNanoHLT*
DCSStatusForSiStripCalCosmicsNano*
ALCARECOSiStripCalCosmicsNanoCalibTracks,
cms.Task(MeasurementTrackerEvent),
cms.Task(offlineBeamSpot),
cms.Task(ALCARECOSiStripCalCosmicsNanoCalibTracksRefit)
)
from PhysicsTools.NanoAOD.common_cff import *
from PhysicsTools.NanoAOD.nano_cff import nanoMetadata
from CalibTracker.SiStripCommon.siStripPositionCorrectionsTable_cfi import siStripPositionCorrectionsTable
from CalibTracker.SiStripCommon.siStripLorentzAngleRunInfoTable_cfi import siStripLorentzAngleRunInfoTable
ALCARECOSiStripCalCosmicsNanoTracksTable = cms.EDProducer("SimpleTrackFlatTableProducer",
src=cms.InputTag("ALCARECOSiStripCalCosmicsNanoCalibTracksRefit"),
cut=cms.string(""),
name=cms.string("track"),
doc=cms.string("SiStripCalCosmics ALCARECO tracks"),
singleton=cms.bool(False),
extension=cms.bool(False),
variables=cms.PSet(
chi2ndof=Var("chi2()/ndof", float),
pt=Var("pt()", float),
hitsvalid=Var("numberOfValidHits()", int), ## unsigned?
phi=Var("phi()", float),
eta=Var("eta()", float),
)
)
ALCARECOSiStripCalCosmicsNanoMeasTable = siStripPositionCorrectionsTable.clone(
Tracks=cms.InputTag("ALCARECOSiStripCalCosmicsNanoCalibTracksRefit"))
ALCARECOSiStripCalCosmicsNanoTables = cms.Task(
nanoMetadata,
ALCARECOSiStripCalCosmicsNanoTracksTable,
ALCARECOSiStripCalCosmicsNanoMeasTable,
siStripLorentzAngleRunInfoTable
)
seqALCARECOSiStripCalCosmicsNano = cms.Sequence(ALCARECOSiStripCalCosmicsNanoTkCalSeq, ALCARECOSiStripCalCosmicsNanoTables)
|
from __future__ import print_function
import os
import numpy as np
import pickle as pk
from glob import glob as glob
from .imdb import IMDB
from common.utility.utils import calc_total_skeleton_length, calc_kpt_bound_pad, \
compute_similarity_transform, calc_total_skeleton_length_bone
from common.utility.visualization import debug_vis
s_hm36_subject_num = 7
HM_subject_idx = [ 1, 5, 6, 7, 8, 9, 11 ]
HM_subject_idx_inv = [ -1, 0, -1, -1, -1, 1, 2, 3, 4, 5, -1, 6 ]
s_hm36_act_num = 15
HM_act_idx = [ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 ]
HM_act_idx_inv = [ -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 ]
s_hm36_subact_num = 2
HM_subact_idx = [ 1, 2 ]
HM_subact_idx_inv = [ -1, 0, 1 ]
s_hm36_camera_num = 4
HM_camera_idx = [ 1, 2, 3, 4 ]
HM_camera_idx_inv = [ -1, 0, 1, 2, 3 ]
# 17 joints of Human3.6M:
# 'root', 'Rleg0', 'Rleg1', 'Rleg2', 'Lleg0', 'Lleg1', 'Lleg2', 'torso', 'neck', 'nose', 'head', 'Larm0', 'Larm1', 'Larm2', 'Rarm0', 'Rarm1', 'Rarm2'
# 'root', 'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle', 'torso', 'neck', 'nose', 'head', 'LShoulder', 'LElbow', 'LWrist', 'RShoulder', 'RElbow', 'RWrist'
# 18 joints with Thorax:
# 'root', 'Rleg0', 'Rleg1', 'Rleg2', 'Lleg0', 'Lleg1', 'Lleg2', 'torso', 'neck', 'nose', 'head', 'Larm0', 'Larm1', 'Larm2', 'Rarm0', 'Rarm1', 'Rarm2', 'Thorax'
# 'root', 'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle', 'torso', 'neck', 'nose', 'head', 'LShoulder', 'LElbow', 'LWrist', 'RShoulder', 'RElbow', 'RWrist, 'Thorax''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
# [ 0, 0, 1, 2, 0, 4, 5, 0, 17, 17, 8, 17, 11, 12, 17, 14, 15, 0]
# 16 joints of MPII
# 0-R_Ankle, 1-R_Knee, 2-R_Hip, 3-L_Hip, 4-L_Knee, 5-L_Ankle, 6-Pelvis, 7-Thorax,
# 8-Neck, 9-Head, 10-R_Wrist, 11-R_Elbow, 12-R_Shoulder, 13-L_Shoulder, 14-L_Elbow, 15-L_Wrist
s_org_36_jt_num = 32
s_36_root_jt_idx = 0
s_36_lsh_jt_idx = 11
s_36_rsh_jt_idx = 14
s_36_jt_num = 18
s_36_flip_pairs = np.array([[1, 4], [2, 5], [3, 6], [14, 11], [15, 12], [16, 13]], dtype=np.int)
s_36_parent_ids = np.array([0, 0, 1, 2, 0, 4, 5, 0, 17, 17, 8, 17, 11, 12, 17, 14, 15, 0], dtype=np.int)
s_36_bone_jts = np.array([[0, 7], [7, 8], [8, 9], [9, 10], [8, 11], [11, 12], [12, 13],
[8, 14], [14, 15], [15, 16], [0, 1], [1, 2], [2, 3], [0, 4], [4, 5], [5, 6]])
s_mpii_2_hm36_jt = [6, 2, 1, 0, 3, 4, 5, -1, 8, -1, 9, 13, 14, 15, 12, 11, 10, 7]
s_hm36_2_mpii_jt = [3, 2, 1, 4, 5, 6, 0, 17, 8, 10, 16, 15, 14, 11, 12, 13]
s_coco_2_hm36_jt = [-1, 12, 14, 16, 11, 13, 15, -1, -1, 0, -1, 5, 7, 9, 6, 8, 10, -1]
s_posetrack_2_hm36_jt = [-1, 2, 1, 0, 3, 4, 5, -1, 12, 13, 14, 9, 10, 11, 8, 7, 6, -1]
def from_coco_to_hm36_single(pose, pose_vis):
res_jts = np.zeros((s_36_jt_num, 3), dtype=np.float)
res_vis = np.zeros((s_36_jt_num, 3), dtype=np.float)
for i in range(0, s_36_jt_num):
id1 = i
id2 = s_coco_2_hm36_jt[i]
if id2 >= 0:
res_jts[id1] = pose[id2].copy()
res_vis[id1] = pose_vis[id2].copy()
return res_jts.copy(), res_vis.copy()
def from_coco_to_hm36(db):
for n_sample in range(0, len(db)):
res_jts, res_vis = from_coco_to_hm36_single(db[n_sample]['joints_3d'], db[n_sample]['joints_3d_vis'])
db[n_sample]['joints_3d'] = res_jts
db[n_sample]['joints_3d_vis'] = res_vis
def from_mpii_to_hm36_single(pose, pose_vis):
res_jts = np.zeros((s_36_jt_num, 3), dtype=np.float)
res_vis = np.zeros((s_36_jt_num, 3), dtype=np.float)
for i in range(0, s_36_jt_num):
id1 = i
id2 = s_mpii_2_hm36_jt[i]
if id2 >= 0:
res_jts[id1] = pose[id2].copy()
res_vis[id1] = pose_vis[id2].copy()
return res_jts.copy(), res_vis.copy()
def from_mpii_to_hm36(db):
for n_sample in range(0, len(db)):
res_jts, res_vis = from_mpii_to_hm36_single(db[n_sample]['joints_3d'], db[n_sample]['joints_3d_vis'])
db[n_sample]['joints_3d'] = res_jts
db[n_sample]['joints_3d_vis'] = res_vis
def from_posetrack_to_hm36(db):
for n_sample in range(0, len(db)):
res_jts = np.zeros((s_36_jt_num, 3), dtype=np.float)
res_vis = np.zeros((s_36_jt_num, 3), dtype=np.float)
res_jts_nxt = np.zeros((s_36_jt_num, 3), dtype=np.float)
res_vis_nxt = np.zeros((s_36_jt_num, 3), dtype=np.float)
for i in range(0, s_36_jt_num):
id1 = i
id2 = s_posetrack_2_hm36_jt[i]
if id2 >= 0:
res_jts[id1] = db[n_sample]['joints_3d'][id2].copy()
res_vis[id1] = db[n_sample]['joints_3d_vis'][id2].copy()
res_jts_nxt[id1] = db[n_sample]['joints_3d_next'][id2].copy()
res_vis_nxt[id1] = db[n_sample]['joints_3d_vis_next'][id2].copy()
res_jts[0] = (res_jts[1] + res_jts[4]) * 0.5
res_vis[0] = res_vis[1] * res_vis[4]
res_jts[17] = (res_jts[11] + res_jts[14]) * 0.5
res_vis[17] = res_vis[11] * res_vis[14]
res_jts[7] = (res_jts[0] + res_jts[8]) * 0.5
res_vis[7] = res_vis[0] * res_vis[8]
db[n_sample]['joints_3d'] = res_jts.copy()
db[n_sample]['joints_3d_vis'] = res_vis.copy()
res_jts_nxt[0] = (res_jts_nxt[1] + res_jts_nxt[4]) * 0.5
res_vis_nxt[0] = res_vis_nxt[1] * res_vis_nxt[4]
res_jts_nxt[17] = (res_jts_nxt[11] + res_jts_nxt[14]) * 0.5
res_vis_nxt[17] = res_vis_nxt[11] * res_vis_nxt[14]
res_jts_nxt[7] = (res_jts_nxt[0] + res_jts_nxt[8]) * 0.5
res_vis_nxt[7] = res_vis_nxt[0] * res_vis_nxt[8]
db[n_sample]['joints_3d_next'] = res_jts_nxt.copy()
db[n_sample]['joints_3d_vis_next'] = res_vis_nxt.copy()
def parsing_hm36_gt_file(gt_file):
keypoints = []
with open(gt_file, 'r') as f:
content = f.read()
content = content.split('\n')
image_num = int(float(content[0]))
img_width = content[1].split(' ')[1]
img_height = content[1].split(' ')[2]
rot = content[2].split(' ')[1:10]
trans = content[3].split(' ')[1:4]
fl = content[4].split(' ')[1:3]
c_p = content[5].split(' ')[1:3]
k_p = content[6].split(' ')[1:4]
p_p = content[7].split(' ')[1:3]
jt_list = content[8].split(' ')[1:18]
for i in range(0, image_num):
keypoints.append(content[9 + i].split(' ')[1:97])
keypoints = np.asarray([[float(y) for y in x] for x in keypoints])
keypoints = keypoints.reshape(keypoints.shape[0], keypoints.shape[1] // 3, 3)
trans = np.asarray([float(y) for y in trans])
jt_list = np.asarray([int(y) for y in jt_list])
keypoints = keypoints[:, jt_list - 1, :]
# add thorax
thorax = (keypoints[:, s_36_lsh_jt_idx, :] + keypoints[:, s_36_rsh_jt_idx, :]) * 0.5
thorax = thorax.reshape((thorax.shape[0], 1, thorax.shape[1]))
keypoints = np.concatenate((keypoints, thorax), axis=1)
rot = np.asarray([float(y) for y in rot]).reshape((3,3))
rot = np.transpose(rot)
fl = np.asarray([float(y) for y in fl])
c_p = np.asarray([float(y) for y in c_p])
img_width = np.asarray(float(img_width))
img_height = np.asarray(float(img_height))
return keypoints, trans, jt_list, rot, fl, c_p, img_width, img_height
def CamProj(x, y, z, fx, fy, u, v):
cam_x = x / z * fx
cam_x = cam_x + u
cam_y = y / z * fy
cam_y = cam_y + v
return cam_x, cam_y
def CamBackProj(cam_x, cam_y, depth, fx, fy, u, v):
x = (cam_x - u) / fx * depth
y = (cam_y - v) / fy * depth
z = depth
return x, y, z
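# Tiny sanity sketch for the projection helpers above (an added example; the
# intrinsics and the point are made-up values): projecting with CamProj and
# back-projecting with CamBackProj at the same depth recovers the point.
def _demo_cam_projection_roundtrip():
    fx, fy, u, v = 1145.0, 1144.0, 512.0, 515.0
    x, y, z = 100.0, -50.0, 4000.0
    px, py = CamProj(x, y, z, fx, fy, u, v)
    assert np.allclose(CamBackProj(px, py, z, fx, fy, u, v), (x, y, z))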
def joint_to_bone_mat(parent_ids):
joint_num = len(parent_ids)
mat = np.zeros((joint_num, joint_num), dtype=int)
for i in range(0, joint_num):
p_i = parent_ids[i]
if p_i != i:
mat[i][p_i] = -1
mat[i][i] = 1
else:
mat[i][i] = 1
return np.transpose(mat)
def joint_to_full_pair_mat(joint_num):
    mat = np.zeros((joint_num * (joint_num - 1) // 2, joint_num), dtype=int)  # integer division keeps the shape an int
idx = 0
for i in range(0, joint_num):
for j in range(0, joint_num):
if j > i:
mat[idx][i] = 1
mat[idx][j] = -1
idx = idx + 1
return np.transpose(mat)
def convert_joint(jts, vis, mat):
cvt_jts = np.zeros((mat.shape[1]) * 3, dtype = float)
cvt_jts[0::3] = np.dot(jts[0::3], mat)
cvt_jts[1::3] = np.dot(jts[1::3], mat)
cvt_jts[2::3] = np.dot(jts[2::3], mat)
vis_mat = mat.copy()
vis_mat[vis_mat!=0] = 1
cvt_vis = np.zeros((mat.shape[1]) * 3, dtype = float)
s = np.sum(vis_mat, axis=0)
cvt_vis[0::3] = np.dot(vis[0::3], vis_mat) == s
cvt_vis[1::3] = np.dot(vis[1::3], vis_mat) == s
cvt_vis[2::3] = np.dot(vis[2::3], vis_mat) == s
return cvt_jts, cvt_vis
def rigid_transform_3D(A, B):
centroid_A = np.mean(A, axis = 0)
centroid_B = np.mean(B, axis = 0)
H = np.dot(np.transpose(A - centroid_A), B - centroid_B)
U, s, V = np.linalg.svd(H)
R = np.dot(np.transpose(V), np.transpose(U))
if np.linalg.det(R) < 0:
V[2] = -V[2]
R = np.dot(np.transpose(V), np.transpose(U))
t = -np.dot(R, np.transpose(centroid_A)) + np.transpose(centroid_B)
return R, t
def rigid_align(A, B):
R, t = rigid_transform_3D(A, B)
A2 = np.transpose(np.dot(R, np.transpose(A))) + t
return A2
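# Quick sanity sketch for rigid_align (an added example; the rotation,
# translation and points below are made up): aligning A to a rotated and
# translated copy of itself recovers that copy.
def _demo_rigid_align():
    A = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
    Q = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])  # 90 degrees about z
    B = A.dot(Q.T) + np.array([1., 2., 3.])
    assert np.allclose(rigid_align(A, B), B)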
def from_worldjt_to_imagejt(n_img, joint_num, rot, keypoints, trans, fl, c_p, rect_3d_width, rect_3d_height):
# project to image space
pt_3d = np.zeros((joint_num, 3), dtype=np.float)
pt_2d = np.zeros((joint_num, 3), dtype=np.float)
for n_jt in range(0, joint_num):
pt_3d[n_jt] = np.dot(rot, keypoints[n_img, n_jt] - trans)
pt_2d[n_jt, 0], pt_2d[n_jt, 1] = CamProj(pt_3d[n_jt, 0], pt_3d[n_jt, 1], pt_3d[n_jt, 2], fl[0], fl[1],
c_p[0], c_p[1])
pt_2d[n_jt, 2] = pt_3d[n_jt, 2]
pelvis3d = pt_3d[s_36_root_jt_idx]
# build 3D bounding box centered on pelvis, size 2000^2
rect3d_lt = pelvis3d - [rect_3d_width / 2, rect_3d_height / 2, 0]
rect3d_rb = pelvis3d + [rect_3d_width / 2, rect_3d_height / 2, 0]
# back-project 3D BBox to 2D image
rect2d_l, rect2d_t = CamProj(rect3d_lt[0], rect3d_lt[1], rect3d_lt[2], fl[0], fl[1], c_p[0], c_p[1])
rect2d_r, rect2d_b = CamProj(rect3d_rb[0], rect3d_rb[1], rect3d_rb[2], fl[0], fl[1], c_p[0], c_p[1])
# Subtract pelvis depth
pt_2d[:, 2] = pt_2d[:, 2] - pelvis3d[2]
pt_2d = pt_2d.reshape((joint_num, 3))
vis = np.ones((joint_num, 3), dtype=np.float)
return rect2d_l, rect2d_r, rect2d_t, rect2d_b, pt_2d, pt_3d, vis, pelvis3d
class hm36(IMDB):
def __init__(self, image_set_name, dataset_path, patch_width, patch_height, rect_3d_width, rect_3d_height):
super(hm36, self).__init__('HM36', image_set_name, dataset_path, patch_width, patch_height)
self.joint_num = s_36_jt_num
self.flip_pairs = s_36_flip_pairs
self.parent_ids = s_36_parent_ids
self.idx2name = ['root', 'R-hip', 'R-knee', 'R-ankle', 'L-hip', 'L-knee', 'L-ankle', 'torso', 'neck', 'nose',
'head', 'L-shoulder', 'L-elbow', 'L-wrist', 'R-shoulder', 'R-elbow', 'R-wrist', 'thorax']
self.rect_3d_width = rect_3d_width
self.rect_3d_height = rect_3d_height
self.aspect_ratio = 1.0 * patch_width / patch_height
self.mean_bone_length = 0
def _H36FolderName(self, subject_id, act_id, subact_id, camera_id):
return "s_%02d_act_%02d_subact_%02d_ca_%02d" % \
(HM_subject_idx[subject_id], HM_act_idx[act_id], HM_subact_idx[subact_id], HM_camera_idx[camera_id])
def _H36ImageName(self, folder_name, frame_id):
return "%s_%06d.jpg" % (folder_name, frame_id + 1)
def _AllHuman36Folders(self, subject_list_):
subject_list = subject_list_[:]
if len(subject_list) == 0:
for i in range(0, s_hm36_subject_num):
subject_list.append(i)
folders = []
for i in range(0, len(subject_list)):
for j in range(0, s_hm36_act_num):
for m in range(0, s_hm36_subact_num):
                    # for each folder, check that the number of images matches the count in the annotation meta file
# /home/abhijat/gitstuff/integral-human-pose/data/hm36/images/temp_folder
temp_folder = os.path.join('../../data/hm36/images/'+ self._H36FolderName(subject_list[i], j, m, 1))
temp_txtfile = os.path.join('../../data/hm36/annot/'+ self._H36FolderName(subject_list[i], j, m, 1)\
+ '/matlab_meta.txt')
                    with open(temp_txtfile) as fp:  # read the expected image count from the meta file
                        lines = fp.read().split("\n")
                    num_expected = int(lines[0])
if len(glob(temp_folder+'/*.jpg')) != num_expected:
# print(temp_folder)
continue
for n in range(0, s_hm36_camera_num):
folders.append(self._H36FolderName(subject_list[i], j, m, n))
return folders
def _sample_dataset(self, image_set_name):
if image_set_name == 'train':
sample_num = 200
step = -1
folder_start = 0
folder_end = 600
folders = self._AllHuman36Folders([0, 1, 2, 3, 4])
elif image_set_name == 'trainfull':
sample_num = -1
step = 1
folder_start = 0
folder_end = 600
folders = self._AllHuman36Folders([0, 1, 2, 3, 4])
elif image_set_name == 'trainsample2':
sample_num = -1
step = 2
folder_start = 0
folder_end = 600
folders = self._AllHuman36Folders([0, 1, 2, 3, 4])
elif image_set_name == 'trainsample10':
sample_num = -1
step = 10
folder_start = 0
folder_end = 600
folders = self._AllHuman36Folders([0, 1, 2, 3, 4])
elif image_set_name == 'valid':
sample_num = 40
step = -1
folder_start = 0
folder_end = 240
folders = self._AllHuman36Folders([5, 6])
elif image_set_name == 'validmin':
sample_num = 10
step = -1
folder_start = 0
            folder_end = 224  # 240
folders = self._AllHuman36Folders([5, 6])
elif image_set_name == 'validfull':
sample_num = -1
step = 1
folder_start = 0
folder_end = 240
folders = self._AllHuman36Folders([5, 6])
elif image_set_name == 'validsample2':
sample_num = -1
step = 2
folder_start = 0
folder_end = 240
folders = self._AllHuman36Folders([5, 6])
elif image_set_name == 'validsample10':
sample_num = -1
step = 10
folder_start = 0
folder_end = 240
folders = self._AllHuman36Folders([5, 6])
elif 'act' in image_set_name:
act_id = int(image_set_name[-2:])
sample_num = 40
step = -1
folder_start = 0
folders = []
s_list = [5, 6]
for i in range(0, len(s_list)):
for m in range(0, s_hm36_subact_num):
for n in range(0, s_hm36_camera_num):
folders.append(self._H36FolderName(s_list[i], act_id, m, n))
folder_end = len(folders)
else:
print("Error!!!!!!!!! Unknown hm36 sub set!")
assert 0
return folders, sample_num, step, folder_start, folder_end
def gt_db(self):
folders, sample_num, step, folder_start, folder_end = self._sample_dataset(self.image_set_name)
# print(folders)
# import ipdb; ipdb.set_trace();
db = None
cache_file = os.path.join(self.cache_path, self.name + '_keypoint_db_sample' + str(sample_num) + '.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
db = pk.load(fid)
print('{} gt db loaded from {}, {} samples are loaded'.format(self.name, cache_file, len(db)))
        if db is not None:
self.num_sample_single = len(db)
return db
gt_db = []
for n_folder in range(folder_start, folder_end):
print('Loading folder ', n_folder, ' in ', len(folders))
# load ground truth
keypoints, trans, jt_list, rot, fl, c_p, img_width, img_height = parsing_hm36_gt_file(
os.path.join(self.dataset_path, "annot", folders[n_folder], 'matlab_meta.txt'))
# random sample redundant video sequence
if sample_num > 0:
img_index = np.random.choice(keypoints.shape[0], sample_num, replace=False)
else:
img_index = np.arange(keypoints.shape[0])
img_index = img_index[0:keypoints.shape[0]:step]
for n_img_ in range(0, img_index.shape[0]):
n_img = img_index[n_img_]
image_name = os.path.join(folders[n_folder], self._H36ImageName(folders[n_folder], n_img))
assert keypoints.shape[1] == self.joint_num
rect2d_l, rect2d_r, rect2d_t, rect2d_b, pt_2d, pt_3d, vis, pelvis3d = \
from_worldjt_to_imagejt(n_img, self.joint_num, rot, keypoints, trans, fl, c_p, self.rect_3d_width, self.rect_3d_height)
skeleton_length = calc_total_skeleton_length_bone(pt_3d, s_36_bone_jts)
gt_db.append({
'image': os.path.join(self.dataset_path, '', 'images', image_name),
'center_x': (rect2d_l + rect2d_r) * 0.5,
'center_y': (rect2d_t + rect2d_b) * 0.5,
'width': (rect2d_r - rect2d_l),
'height': (rect2d_b - rect2d_t),
'flip_pairs': self.flip_pairs,
'parent_ids': self.parent_ids,
'joints_3d': pt_2d, # [org_img_x, org_img_y, depth - root_depth]
'joints_3d_vis': vis,
'joints_3d_cam': pt_3d, # [X, Y, Z] in camera coordinate
'pelvis': pelvis3d,
'fl': fl,
'c_p': c_p,
'bone_len': skeleton_length
})
with open(cache_file, 'wb') as fid:
pk.dump(gt_db, fid, pk.HIGHEST_PROTOCOL)
        print('{} samples were written to {}'.format(len(gt_db), cache_file))
self.num_sample_single = len(gt_db)
return gt_db
def dt_db(self, det_bbox_src):
print("Using Detector:", det_bbox_src)
self.detector = det_bbox_src
folders, sample_num, step, folder_start, folder_end = self._sample_dataset(self.image_set_name)
dt_cache_file = os.path.join(self.cache_path, self.name + '_keypoint_dt_db_sample' + str(sample_num) + '.pkl')
if os.path.exists(dt_cache_file):
with open(dt_cache_file, 'rb') as fid:
dt_db = pk.load(fid)
print('{} gt db loaded from {}, {} samples are loaded'.format(self.name, dt_cache_file, len(dt_db)))
return dt_db
gt_cache_file = os.path.join(self.cache_path, self.name + '_keypoint_db_sample' + str(sample_num) + '.pkl')
if os.path.exists(gt_cache_file):
with open(gt_cache_file, 'rb') as fid:
gt_db = pk.load(fid)
print('{} gt db loaded from {}, {} samples are loaded'.format(self.name, gt_cache_file, len(gt_db)))
else:
assert 0, gt_cache_file + ' not exist...'
self.num_sample_single = len(gt_db)
self.mean_bone_length = np.asarray([item['bone_len'] for item in gt_db]).mean()
# update bbox using detection result
print("Updating BBox from detector")
bbox_file = os.path.join(self.cache_path, 'detection', det_bbox_src, 'kpts_bbox.pkl')
with open(bbox_file, 'rb') as fid:
bbox_list = pk.load(fid)
assert len(bbox_list) == len(gt_db)
for idx in range(len(gt_db)):
box = bbox_list[idx]
center_x = (box[0] + box[2]) * 0.5
center_y = (box[1] + box[3]) * 0.5
width = box[2] - box[0]
height = box[3] - box[1]
if width > self.aspect_ratio * height:
height = width * 1.0 / self.aspect_ratio
elif width < self.aspect_ratio * height:
width = height * self.aspect_ratio
width = width * 1.25
height = height * 1.25
gt_db[idx]['center_x'] = center_x
gt_db[idx]['center_y'] = center_y
gt_db[idx]['width'] = width
gt_db[idx]['height'] = height
DEBUG = False
if DEBUG:
box = [center_x, center_y, width, height]
pose = []
debug_vis(gt_db[idx]['image'], box, pose)
self.num_sample_single = len(gt_db)
return gt_db
def jnt_bbox_db(self):
db = None
folders, sample_num, step, folder_start, folder_end = self._sample_dataset(self.image_set_name)
cache_file = os.path.join(self.cache_path, self.name + '_keypoint_jntBBox_db_sample' + str(sample_num) + '.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
db = pk.load(fid)
print('{} gt db loaded from {}, {} samples are loaded'.format(self.name, cache_file, len(db)))
if db is not None:
self.num_sample_single = len(db)
self.mean_bone_length = np.asarray([item['bone_len'] for item in db]).mean()
return db
jnt_bbox_db = []
for n_folder in range(folder_start, folder_end):
print('Loading folder ', n_folder, ' in ', len(folders))
# load ground truth
keypoints, trans, jt_list, rot, fl, c_p, img_width, img_height = parsing_hm36_gt_file(
os.path.join(self.dataset_path, "annot", folders[n_folder], 'matlab_meta.txt'))
# random sample redundant video sequence
if sample_num > 0:
img_index = np.random.choice(keypoints.shape[0], sample_num, replace=False)
else:
img_index = np.arange(keypoints.shape[0])
img_index = img_index[0:keypoints.shape[0]:step]
for n_img_ in range(0, img_index.shape[0]):
n_img = img_index[n_img_]
image_name = os.path.join(folders[n_folder], self._H36ImageName(folders[n_folder], n_img))
assert keypoints.shape[1] == self.joint_num
_, _, _, _, pt_2d, pt_3d, vis, pelvis3d = \
from_worldjt_to_imagejt(n_img, self.joint_num, rot, keypoints, trans, fl, c_p, self.rect_3d_width,
self.rect_3d_height)
c_x, c_y, w, h = calc_kpt_bound_pad(pt_2d, vis, self.aspect_ratio)
pt_3d_relative = pt_3d - pt_3d[0]
skeleton_length = calc_total_skeleton_length(pt_3d_relative, s_36_parent_ids)
jnt_bbox_db.append({
'image': os.path.join(self.dataset_path, '', 'images', image_name),
'center_x': c_x,
'center_y': c_y,
'width': w,
'height': h,
'flip_pairs': self.flip_pairs,
'parent_ids': self.parent_ids,
'joints_3d': pt_2d, # [org_img_x, org_img_y, depth - root_depth]
'joints_3d_vis': vis,
'joints_3d_cam': pt_3d, # [X, Y, Z] in camera coordinate
'pelvis': pelvis3d,
'fl': fl,
'c_p': c_p,
'joints_3d_relative': pt_3d_relative, # [X-root, Y-root, Z-root] in camera coordinate
'bone_len': skeleton_length
})
self.mean_bone_length = np.asarray([item['bone_len'] for item in jnt_bbox_db]).mean()
with open(cache_file, 'wb') as fid:
pk.dump(jnt_bbox_db, fid, pk.HIGHEST_PROTOCOL)
print('{} samples are written to {}'.format(len(jnt_bbox_db), cache_file))
self.num_sample_single = len(jnt_bbox_db)
return jnt_bbox_db
def get_mean_bone_length(self):
return self.mean_bone_length
def evaluate(self, preds, save_path):
preds = preds[:, :, 0:3]
gts = self.gt_db()
sample_num = preds.shape[0]
joint_num = self.joint_num
# flip_pair = self.flip_pairs
parent_ids = self.parent_ids
# 18 joints:
# 'root', 'Rleg0', 'Rleg1', 'Rleg2', 'Lleg0', 'Lleg1', 'Lleg2', 'torso', 'neck', 'nose', 'head', 'Larm0', 'Larm1', 'Larm2', 'Rarm0', 'Rarm1', 'Rarm2', 'Thorax'
# 'root', 'RHip', 'RKnee', 'RAnkle', 'LHip', 'LKnee', 'LAnkle', 'torso', 'neck', 'nose', 'head', 'LShoulder', 'LElbow', 'LWrist', 'RShoulder', 'RElbow', 'RWrist, 'Thorax''
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
# joint_names = ['root', 'Hip', 'Knee', 'Ankle', 'Torso', 'Neck', 'Nose', 'Head', 'Shoulder', 'Elbow', 'Wrist', '17j', '16j', '14j', '13j']
eval_jt = [[0], [1, 4], [2, 5], [3, 6], [7], [8], [9], [10], [11, 14], [12, 15], [13, 16],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16, 17],
[1, 2, 3, 4, 5, 6, 8, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 10, 11, 12, 13, 14, 15, 16]]
metrics_num = 3
t_start = 5
t_step = 100
t_end = 205
coord_num = int(4 + (t_end - t_start) / t_step) # 4 for all, x, y, z
avg_errors = [] # [metrics_num, len(eval_jt), coord_num]
for n_m in range(0, metrics_num):
avg_errors.append(np.zeros((len(eval_jt), coord_num), dtype=float))
pred_to_save = []
for n_sample in range(0, sample_num):
gt = gts[n_sample]
# Org image info
fl = gt['fl'][0:2]
c_p = gt['c_p'][0:2]
gt_3d_root = np.reshape(gt['pelvis'], (1, 3))
gt_2d_kpt = gt['joints_3d'].copy()
gt_vis = gt['joints_3d_vis'].copy()
# get camera depth from root joint
pre_2d_kpt = preds[n_sample].copy()
pre_2d_kpt[:, 2] = pre_2d_kpt[:, 2] + gt_3d_root[0, 2]
gt_2d_kpt[:, 2] = gt_2d_kpt[:, 2] + gt_3d_root[0, 2]
# back project
pre_3d_kpt = np.zeros((joint_num, 3), dtype=float)
gt_3d_kpt = np.zeros((joint_num, 3), dtype=float)
for n_jt in range(0, joint_num):
pre_3d_kpt[n_jt, 0], pre_3d_kpt[n_jt, 1], pre_3d_kpt[n_jt, 2] = \
CamBackProj(pre_2d_kpt[n_jt, 0], pre_2d_kpt[n_jt, 1], pre_2d_kpt[n_jt, 2], fl[0], fl[1], c_p[0],
c_p[1])
gt_3d_kpt[n_jt, 0], gt_3d_kpt[n_jt, 1], gt_3d_kpt[n_jt, 2] = \
CamBackProj(gt_2d_kpt[n_jt, 0], gt_2d_kpt[n_jt, 1], gt_2d_kpt[n_jt, 2], fl[0], fl[1], c_p[0],
c_p[1])
# bone
j2b_mat = joint_to_bone_mat(parent_ids)
pre_3d_bone, bone_vis = convert_joint(np.reshape(pre_3d_kpt, joint_num * 3),
np.reshape(gt_vis, joint_num * 3), j2b_mat)
gt_3d_bone, bone_vis = convert_joint(np.reshape(gt_3d_kpt, joint_num * 3),
np.reshape(gt_vis, joint_num * 3), j2b_mat)
pre_3d_bone = np.reshape(pre_3d_bone, (joint_num, 3))
gt_3d_bone = np.reshape(gt_3d_bone, (joint_num, 3))
# align
pre_3d_kpt_align = rigid_align(pre_3d_kpt, gt_3d_kpt)
diffs = [] # [metrics_num, joint_num * 3]
# should align root, required by protocol #1
pre_3d_kpt = pre_3d_kpt - pre_3d_kpt[0]
gt_3d_kpt = gt_3d_kpt - gt_3d_kpt[0]
pre_3d_kpt_align = pre_3d_kpt_align - pre_3d_kpt_align[0]
diffs.append((pre_3d_kpt - gt_3d_kpt)) # Avg joint error
diffs.append((pre_3d_bone - gt_3d_bone))
diffs.append((pre_3d_kpt_align - gt_3d_kpt))
pred_to_save.append({'pred': pre_3d_kpt,
'align_pred': pre_3d_kpt_align,
'gt': gt_3d_kpt})
for n_m in range(0, metrics_num):
e_jt = []
e_jt_x = []
e_jt_y = []
e_jt_z = []
e_jt_pck = [[] for i in range(t_start, t_end, t_step)]
for n_jt in range(0, joint_num):
t_dis = np.linalg.norm(diffs[n_m][n_jt])
e_jt.append(t_dis)
e_jt_x.append(abs(diffs[n_m][n_jt][0]))
e_jt_y.append(abs(diffs[n_m][n_jt][1]))
e_jt_z.append(abs(diffs[n_m][n_jt][2]))
for i in range(t_start, t_end, t_step):
e_jt_pck[int((i - t_start) / t_step)].append(int(t_dis < i))
for n_eval_jt in range(0, len(eval_jt)):
e = 0
e_x = 0
e_y = 0
e_z = 0
e_pck = [0 for i in range(t_start, t_end, t_step)]
for n_jt in range(0, len(eval_jt[n_eval_jt])):
e = e + e_jt[eval_jt[n_eval_jt][n_jt]]
e_x = e_x + e_jt_x[eval_jt[n_eval_jt][n_jt]]
e_y = e_y + e_jt_y[eval_jt[n_eval_jt][n_jt]]
e_z = e_z + e_jt_z[eval_jt[n_eval_jt][n_jt]]
for i in range(t_start, t_end, t_step):
e_pck[int((i - t_start) / t_step)] = \
e_pck[int((i - t_start) / t_step)] + \
e_jt_pck[int((i - t_start) / t_step)][eval_jt[n_eval_jt][n_jt]]
avg_errors[n_m][n_eval_jt][0] = avg_errors[n_m][n_eval_jt][0] + e / float(len(eval_jt[n_eval_jt]))
avg_errors[n_m][n_eval_jt][1] = avg_errors[n_m][n_eval_jt][1] + e_x / float(len(eval_jt[n_eval_jt]))
avg_errors[n_m][n_eval_jt][2] = avg_errors[n_m][n_eval_jt][2] + e_y / float(len(eval_jt[n_eval_jt]))
avg_errors[n_m][n_eval_jt][3] = avg_errors[n_m][n_eval_jt][3] + e_z / float(len(eval_jt[n_eval_jt]))
for i in range(t_start, t_end, t_step):
avg_errors[n_m][n_eval_jt][4 + int((i - t_start) / t_step)] = \
avg_errors[n_m][n_eval_jt][4 + int((i - t_start) / t_step)] + \
e_pck[int((i - t_start) / t_step)] / float(len(eval_jt[n_eval_jt]))
for n_m in range(0, metrics_num):
avg_errors[n_m] = avg_errors[n_m] / sample_num
name_value = [
('hm36_root :', avg_errors[0][0][0]),
('hm36_Hip :', avg_errors[0][1][0]),
('hm36_Knee :', avg_errors[0][2][0]),
('hm36_Ankle :', avg_errors[0][3][0]),
('hm36_Torso :', avg_errors[0][4][0]),
('hm36_Neck :', avg_errors[0][5][0]),
('hm36_Nose :', avg_errors[0][6][0]),
('hm36_Head :', avg_errors[0][7][0]),
('hm36_Shoulder :', avg_errors[0][8][0]),
('hm36_Elbow :', avg_errors[0][9][0]),
('hm36_Wrist :', avg_errors[0][10][0]),
('hm36_17j :', avg_errors[0][11][0]),
('hm36_17j_align:', avg_errors[2][11][0]),
('hm36_17j_x :', avg_errors[0][11][1]),
('hm36_17j_y :', avg_errors[0][11][2]),
('hm36_17j_z :', avg_errors[0][11][3]),
]
return name_value
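# Illustrative sketch (not part of the original file): the evaluation above relies on an
# external CamBackProj helper to back-project image-space keypoints (u, v, depth) into
# camera coordinates using the focal lengths and principal point. Assuming a standard
# pinhole camera model, a minimal standalone stand-in could look like the function below;
# the real CamBackProj may differ in details such as argument order or distortion handling.
def pinhole_back_project(u, v, z, fx, fy, cx, cy):
    """Back-project a pixel (u, v) at depth z into camera coordinates (X, Y, Z)."""
    x_cam = (u - cx) / fx * z
    y_cam = (v - cy) / fy * z
    return x_cam, y_cam, z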
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the RC4 decrypter object."""
import unittest
from cryptography.hazmat.primitives.ciphers import algorithms
from dfvfs.encryption import decrypter
from dfvfs.lib import definitions
from tests.encryption import test_lib
class DecrypterTestCase(test_lib.DecrypterTestCase):
"""Tests for the decrypter interface."""
def testInitialize(self):
"""Tests the __init__ method."""
test_decrypter = decrypter.Decrypter()
self.assertIsNotNone(test_decrypter)
with self.assertRaises(ValueError):
decrypter.Decrypter(key=b'test1')
class CryptographyBlockCipherDecrypterTest(test_lib.DecrypterTestCase):
"""Tests for the block cipher decrypter using Cryptography."""
_DES3_INITIALIZATION_VECTOR = b'This IV!'
_DES3_KEY = b'This is a key123'
def testInitialize(self):
"""Tests the __init__ method."""
algorithm = algorithms.TripleDES(self._DES3_KEY)
test_decrypter = decrypter.CryptographyBlockCipherDecrypter(
algorithm=algorithm, cipher_mode=definitions.ENCRYPTION_MODE_CBC,
initialization_vector=self._DES3_INITIALIZATION_VECTOR)
self.assertIsNotNone(test_decrypter)
test_decrypter = decrypter.CryptographyBlockCipherDecrypter(
algorithm=algorithm, cipher_mode=definitions.ENCRYPTION_MODE_CFB,
initialization_vector=self._DES3_INITIALIZATION_VECTOR)
self.assertIsNotNone(test_decrypter)
test_decrypter = decrypter.CryptographyBlockCipherDecrypter(
algorithm=algorithm, cipher_mode=definitions.ENCRYPTION_MODE_ECB)
self.assertIsNotNone(test_decrypter)
test_decrypter = decrypter.CryptographyBlockCipherDecrypter(
algorithm=algorithm, cipher_mode=definitions.ENCRYPTION_MODE_OFB,
initialization_vector=self._DES3_INITIALIZATION_VECTOR)
self.assertIsNotNone(test_decrypter)
with self.assertRaises(ValueError):
decrypter.CryptographyBlockCipherDecrypter(
algorithm=algorithm, cipher_mode=definitions.ENCRYPTION_MODE_CBC)
if __name__ == '__main__':
unittest.main()
|
__version__ = '0.1.0'
default_app_config = 'telegrambot.apps.TelegrambotConfig'
|
import os
here = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
USE_SAML2 = False
ADMINS = (
('Gijs Molenaar', 'gijs@pythonic.nl'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(here, '../../database/sqlite3.db'),
}
}
TIME_ZONE = 'Europe/Amsterdam'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = os.path.join(here, '../../static/')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/admin-media/'
SECRET_KEY = '&bhy2&*zy4wr%!yk7qv(yzb6*5s$h!mgs_nos!+@3^y0li31sb'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'filebender.urls'
TEMPLATE_DIRS = (
os.path.join(here, '../../templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'bigfiles',
#'debug_toolbar',
)
if USE_SAML2:
INSTALLED_APPS += ('djangosaml2',)
LOGIN_URL = '/saml2/login/'
AUTHENTICATION_BACKENDS = (
'djangosaml2.backends.Saml2Backend',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.contrib.messages.context_processors.messages",
'bigfiles.context_processors.auth_urls',
'bigfiles.context_processors.storage',
)
SAML_CONFIG = {
'xmlsec_binary' : '/opt/local/bin/xmlsec1',
"sp": {
"name" : "Gijs SP",
"url" : "http://www.example.com:8087/",
"idp": {
"urn:mace:localhost:saml:gijs:idp": {
"single_signon_service": "http://localhost:8000/idp/"},
},
},
"entityid" : "urn:mace:localhost:saml:gijs:sp",
"service": {
"sp":{
"name" : "Gijs SP",
"url" : "http://localhost:8002/simplesaml",
"idp": {
"urn:mace:localhost:saml:gijs:idp": {
"single_signon_service": "http://localhost:8000/sp/"},
},
"endpoints": "",
}
},
"key_file" : os.path.join(here, '../../keys/private-key.pem'),
"cert_file" : os.path.join(here, '../../keys/certificate.pem'),
"attribute_map_dir": "./attributemaps",
"organization": {
"display_name":["Rolands identities"]
},
"contact_person": [{
"givenname": "Roland",
"surname": "Hedberg",
"phone": "+46 90510",
"mail": "roland@example.com",
"type": "technical",
}]
}
SAML_USERNAME_ATTRIBUTE = 'uid'
INTERNAL_IPS = ('127.0.0.1',)
# where to store large upload
FILE_UPLOAD_TEMP_DIR='/tmp'
# length of file secret. Don't change this after database creation
FILE_SECRET_LENGTH=50
# Where to store the large files
STORAGE_ROOT = os.path.join(here, '../../storage/')
# How are the files accessed from outside
STORAGE_URL = '/storage/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
# -*- coding: utf-8 -*-
"""
@file
@brief Abstract class to connect to a SQL server using various way.
It will be used to implement magic functions
"""
import re
import sqlite3
class InterfaceSQLException(BaseException):
"""
a specific exception
"""
def __init__(self, message):
"""
@param message exception message
"""
self.message = message
def __str__(self):
"""
usual
"""
return self.message
class AutoCompletionSQLObject:
"""
a simple class which allows auto completion
for tables, columns...
"""
def __init__(self, name):
"""
creates an instance with a given name
"""
self._true_name = name
self._filt_name = AutoCompletionSQLObject._filter_name(name)
@staticmethod
def _filter_name(name):
"""
removes unavailable characters
"""
return name.replace(".", "_").replace(" ", "_")
@property
def _(self):
"""
returns the true name of the object
"""
return self._true_name
@property
def _f(self):
"""
returns the filtered name
"""
return self._filt_name
def _add(self, name):
"""
add a subname to the class
@param name string
@return an AutoCompletionSQLObject
the filtered name (``_f``) of the new object will
be added to ``self.__dict__``, if an object
already exists with the same name, it will raise an exception
"""
au = AutoCompletionSQLObject(name)
af = au._f
if af in self.__dict__:
raise KeyError(
"the object %s was already added to %s" %
(af, self._f))
self.__dict__[af] = au
return au
class InterfaceSQL:
"""
Abstract class to connect to a SQL server using various way.
It will be used to implement magic functions
"""
@staticmethod
def create(obj):
"""
@param obj a filename, a connection string, ...
``obj`` can be a:
* file --> the class :class:`Database <pyensae.sql.database_main.Database>` will be used, we assume this file
is a sqlite database; the file does not have to exist, in that case it will be created
* sqlite3.Connection --> the object will be wrapped into a :class:`Database <pyensae.sql.database_main.Database>`
* InterfaceSQL --> returns the object itself
.. versionchanged:: 1.1
Parameter *dbfile* can be of type `sqlite3.Connection <https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection>`_.
"""
if isinstance(obj, str):
from .sql_interface_database import InterfaceSQLDatabase
return InterfaceSQLDatabase(obj)
elif isinstance(obj, sqlite3.Connection):
from .sql_interface_database import InterfaceSQLDatabase
return InterfaceSQLDatabase(obj)
elif isinstance(obj, InterfaceSQL):
return obj
else:
raise NotImplementedError(
"nothing is implemented for type: %s" % str(
type(obj)))
def populate_completion(self):
"""
Creates an object the user can use to speed up typing
SQL queries and functions in a notebook.
This object is added to the instance under the name ``CC``
and is returned by the function.
@return @see cl AutoCompletionSQLObject
The method should be called when the method @see me connect
is called.
"""
self.CC = AutoCompletionSQLObject("TBL")
tbls = self.get_table_list()
for tb in tbls:
compl = self.CC._add(tb)
cols = self.get_table_columns(tb)
for k, v in cols.items():
compl._add(v[0])
return self.CC
def __init__(self, obj):
"""
Initializes the object.
@param obj anything, see below
``obj`` can be a:
* file --> the class :class:`Database <pyensae.sql.database_main.Database>` will be used, we assume this file
is a sqlite database; the file does not have to exist, in that case it will be created
"""
raise NotImplementedError()
def connect(self):
"""
connection to the database
"""
raise NotImplementedError()
def close(self):
"""
close the connection to the database
"""
raise NotImplementedError()
def get_table_list(self):
"""
returns the list of tables in the database
@return list of strings
"""
raise NotImplementedError()
def get_table_columns(self, table_name, as_dict=True):
"""
returns the list of columns in a table
@param table_name table name
@param as_dict True, as dictionary, as a list otherwise
@return dictionary { "column": (position, type) } or a list
"""
raise NotImplementedError()
def execute(self, sql_query):
"""
execute a SQL query
@param sql_query query to execute
@return pandas DataFrame
The function takes care of the syntax introduced
by the autocompletion object: it replaces
``DB.CC.<name>`` by its true name before running the query.
"""
sql_query = self.process_query(sql_query)
return self.execute_clean_query(sql_query)
def execute_clean_query(self, sql_query):
"""
The function does the same thing as @see me execute
but it does not replace autocompletion object.
It is this function which should be overloaded by
classes inheriting from this one.
@param sql_query query to execute
@return pandas DataFrame
"""
raise NotImplementedError()
_exp = re.compile("(DB[.]CC[.][a-zA-Z0-9_]+([.][a-zA-Z0-9_]+)*)")
def process_query(self, sql_query):
"""
replaces autocompletion object by their real names
@param sql_query SQL query
@return clean sql_query
"""
# TODO: the replacement below should be done with a regular expression
# rather than a plain string replace
fi = InterfaceSQL._exp.findall(sql_query)
if len(fi) > 0:
only = [_[0] for _ in fi]
only.sort(reverse=True)
for o in only:
co = "self." + o[3:]
ev = eval(co)
sql_query = sql_query.replace(o, ev._)
return sql_query
def import_flat_file(self, filename, table_name):
"""
imports a flat file as a table; we assume the column
separator is ``\\t`` and the file contains a header row
@param filename filename
@param table_name table name
@return the number of added rows
"""
raise NotImplementedError()
def drop_table(self, table_name):
"""
drops a table
@param table_name table name
"""
raise NotImplementedError()
def refresh_completion(self):
"""
refresh the auto completion
@return completion object
"""
return self.populate_completion()
def add_function(self, code_function):
"""
add a function to the database which can be called in a SELECT statement
@param code_function pointer to the function
"""
raise NotImplementedError()
def import_dataframe(self, tablename, df):
"""
import a dataframe into the database
@param tablename name of the table
@param df dataframe
@return the number of added rows
"""
raise NotImplementedError()
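# Illustrative usage sketch (not part of the original module): it shows how the
# auto-completion objects filter names and how the DB.CC regular expression used by
# process_query matches them. It only exercises code defined above in this module;
# the table name is made up purely for illustration.
if __name__ == "__main__":
    cc = AutoCompletionSQLObject("TBL")
    table = cc._add("my table.2020")
    print(table._f, "->", table._)            # my_table_2020 -> my table.2020
    query = "SELECT * FROM DB.CC.my_table_2020"
    print(InterfaceSQL._exp.findall(query))   # [('DB.CC.my_table_2020', '')]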
|
from . import backend
from . import datasets
from . import losses
from . import metrics
from . import model
from .optim import *
from .utils import *
from .testing import *
|
# -*- coding: utf-8 -*-
import os
from flask import Blueprint, render_template, send_from_directory, abort, redirect, url_for, flash
from flask import current_app as APP
from flask.ext.login import login_user, login_required, current_user
from fbone.extensions import db, login_manager
from fbone.core.oauth import OAuthSignIn
from .models import User, UsersSocialAccount
user = Blueprint('user', __name__, url_prefix='/user')
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
@user.route('/')
@login_required
def index(offset=0):
if not current_user.is_authenticated():
abort(403)
return render_template('user/index.html', user=current_user)
@user.route('/authorize/<provider>')
def oauth_authorize(provider):
if not current_user.is_anonymous():
return redirect(url_for('user.index'))
oauth = OAuthSignIn.get_provider(provider)
return oauth.authorize()
@user.route('/callback/<provider>')
def oauth_callback(provider, user=None):
if not current_user.is_anonymous():
return redirect(url_for('user.index'))
oauth = OAuthSignIn.get_provider(provider)
social_id, username, email = oauth.callback()
if social_id is None:
flash('Authentication failed.')
return redirect(url_for('frontend.index'))
# update to foreign key later
# user = User.query.filter_by(social_id=social_id).first()
if not user:
user = User().create(nickname=username, email=email)
social_id = UsersSocialAccount().create(social_id=social_id, provider=provider)
user.social_ids.append(social_id)
db.session.commit()
login_user(user, True)
return redirect(url_for('user.index'))
@user.route('/<int:user_id>/profile')
@login_required
def profile(user_id):
user = User.get_by_id(user_id)
return render_template('user/profile.html', user=user, current_user=current_user,
followed=current_user.is_following(user))
@user.route('/<int:user_id>/avatar/<path:filename>')
@login_required
def avatar(user_id, filename):
dir_path = os.path.join(APP.config['UPLOAD_FOLDER'], 'user_%s' % user_id)
return send_from_directory(dir_path, filename, as_attachment=True)
@user.route('/follow_user/<int:user_id>')
@login_required
def follow_user(user_id):
user = User.get_by_id(user_id)
current_user.follow(user)
flash("You are now following" + " %s" % user.name, 'success')
return render_template('user/profile.html', user=user, current_user=current_user,
followed=current_user.is_following(user))
@user.route('/unfollow_user/<int:user_id>')
@login_required
def unfollow_user(user_id):
user = User.get_by_id(user_id)
current_user.unfollow(user)
flash("You are now not following" + " %s" % user.name, 'success')
return render_template('user/profile.html', user=user, current_user=current_user,
followed=current_user.is_following(user))
|
import argparse
import json
import yaml
import os
from os.path import join, basename, splitext
def pp_json(json_thing, sort=True, indents=4):
if isinstance(json_thing, str):
print(json.dumps(json.loads(json_thing), sort_keys=sort, indent=indents))
else:
print(json.dumps(json_thing, sort_keys=sort, indent=indents))
def load_json(file_name):
with open(file_name, "r") as file_stream:
return json.load(file_stream)
def load_yaml(file_name):
with open(file_name, "r") as file_stream:
return yaml.load(file_stream, Loader=yaml.FullLoader)
def get_params():
parser = argparse.ArgumentParser(description="Parameters")
parser.add_argument("meta_model")
parser.add_argument("--config", default='./up-zonk.yaml', required=False)
args = parser.parse_args()
config = load_yaml(args.config)
# meta model load, can be without .yaml ext --
filename_w_ext = os.path.basename(args.meta_model)
filename, file_extension = os.path.splitext(filename_w_ext)
meta_model_file_name = filename_w_ext if file_extension == ".yaml" else filename + ".yaml"
meta_model_path = os.path.join(config["up"], meta_model_file_name)
meta_model = load_yaml(meta_model_path)
return config, meta_model
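# Illustrative sketch (not part of the original file): a tiny demonstration of pp_json,
# which pretty-prints either a JSON string or an already-parsed object. The sample data
# below is made up purely for illustration.
if __name__ == "__main__":
    pp_json('{"b": 2, "a": 1}')                  # parsed from a string, keys sorted
    pp_json({"model": "up-zonk", "epochs": 3})   # printed directly from a dict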
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import backtrader as bt
class MTradeObserver(bt.observer.Observer):
lines = ('Id_0', 'Id_1', 'Id_2')
plotinfo = dict(plot=True, subplot=True, plotlinelabels=True)
plotlines = dict(
Id_0=dict(marker='*', markersize=8.0, color='lime', fillstyle='full'),
Id_1=dict(marker='o', markersize=8.0, color='red', fillstyle='full'),
Id_2=dict(marker='s', markersize=8.0, color='blue', fillstyle='full')
)
def next(self):
for trade in self._owner._tradespending:
if trade.data is not self.data:
continue
if not trade.isclosed:
continue
self.lines[trade.tradeid][0] = trade.pnlcomm
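# Illustrative sketch (not part of the original module): observers such as MTradeObserver
# are typically attached to a Cerebro instance before running a strategy. The strategy
# name below is a placeholder; a complete backtest also needs a data feed and a strategy
# that assigns trade ids 0-2 so the three observer lines receive values.
#
#   cerebro = bt.Cerebro()
#   cerebro.addobserver(MTradeObserver)
#   cerebro.addstrategy(MyMultiTradeStrategy)   # hypothetical strategy
#   cerebro.run()
#   cerebro.plot()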
|
for i in range(1,100):
print(i)
|
"""Patching modules and objects"""
import contextlib
import sys
def begin_patch(module, member, new_value):
if isinstance(module, str):
if module not in sys.modules:
return None
module = sys.modules[module]
if not hasattr(module, member):
old_member = None
else:
old_member = getattr(module, member)
setattr(module, member, new_value)
return module, member, old_member
def end_patch(token):
if token is None:
return
module, member, old_member = token
if old_member is None:
delattr(module, member)
else:
setattr(module, member, old_member)
@contextlib.contextmanager
def patch(*args):
"""Manager a patch in a contextmanager"""
tokens = []
for idx in range(0, len(args), 3):
module, member, new_value = args[idx : idx + 3]
tokens.append(begin_patch(module, member, new_value))
try:
yield
finally:
for token in tokens[::-1]:
end_patch(token)
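# Illustrative usage sketch (not part of the original module): patch() temporarily swaps
# module attributes and restores them on exit, taking (module, member, new_value) triples.
# The targets here (math.pi and math.tau) are chosen purely for illustration.
if __name__ == "__main__":
    import math

    with patch(math, "pi", 3.0, "math", "tau", 6.0):
        print(math.pi, math.tau)   # 3.0 6.0 while the patch is active
    print(math.pi, math.tau)       # original values restored afterwards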
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Score'
db.create_table(u'ratings_score', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('key', self.gf('django.db.models.fields.CharField')(max_length=16)),
('average', self.gf('django.db.models.fields.FloatField')(default=0)),
('total', self.gf('django.db.models.fields.IntegerField')(default=0)),
('num_votes', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal(u'ratings', ['Score'])
# Adding unique constraint on 'Score', fields ['content_type', 'object_id', 'key']
db.create_unique(u'ratings_score', ['content_type_id', 'object_id', 'key'])
# Adding model 'Vote'
db.create_table(u'ratings_vote', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('key', self.gf('django.db.models.fields.CharField')(max_length=16)),
('score', self.gf('django.db.models.fields.FloatField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='votes', null=True, to=orm['auth.User'])),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True)),
('cookie', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'ratings', ['Vote'])
# Adding unique constraint on 'Vote', fields ['content_type', 'object_id', 'key', 'user']
db.create_unique(u'ratings_vote', ['content_type_id', 'object_id', 'key', 'user_id'])
# Adding unique constraint on 'Vote', fields ['content_type', 'object_id', 'key', 'ip_address', 'cookie']
db.create_unique(u'ratings_vote', ['content_type_id', 'object_id', 'key', 'ip_address', 'cookie'])
# Adding model 'Comment'
db.create_table(u'ratings_comment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('key', self.gf('django.db.models.fields.CharField')(max_length=16)),
('comment', self.gf('django.db.models.fields.TextField')(max_length=3000)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='comments', null=True, to=orm['auth.User'])),
('user_name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True)),
('cookie', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal(u'ratings', ['Comment'])
def backwards(self, orm):
# Removing unique constraint on 'Vote', fields ['content_type', 'object_id', 'key', 'ip_address', 'cookie']
db.delete_unique(u'ratings_vote', ['content_type_id', 'object_id', 'key', 'ip_address', 'cookie'])
# Removing unique constraint on 'Vote', fields ['content_type', 'object_id', 'key', 'user']
db.delete_unique(u'ratings_vote', ['content_type_id', 'object_id', 'key', 'user_id'])
# Removing unique constraint on 'Score', fields ['content_type', 'object_id', 'key']
db.delete_unique(u'ratings_score', ['content_type_id', 'object_id', 'key'])
# Deleting model 'Score'
db.delete_table(u'ratings_score')
# Deleting model 'Vote'
db.delete_table(u'ratings_vote')
# Deleting model 'Comment'
db.delete_table(u'ratings_comment')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ratings.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'cookie': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': u"orm['auth.User']"}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'ratings.score': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'key'),)", 'object_name': 'Score'},
'average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'num_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'ratings.vote': {
'Meta': {'unique_together': "(('content_type', 'object_id', 'key', 'user'), ('content_type', 'object_id', 'key', 'ip_address', 'cookie'))", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'cookie': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'score': ('django.db.models.fields.FloatField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'votes'", 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['ratings']
|
"""
Sample definitions.
"""
from pymontecarlo.options.sample.base import *
from pymontecarlo.options.sample.substrate import *
from pymontecarlo.options.sample.inclusion import *
from pymontecarlo.options.sample.horizontallayers import *
from pymontecarlo.options.sample.verticallayers import *
from pymontecarlo.options.sample.sphere import *
|
from gym_kuka_mujoco.utils.insertion import hole_insertion_samples
import os
import mujoco_py
# Get the model path
model_filename = 'full_peg_insertion_experiment.xml'
model_path = os.path.join('..','..', 'gym_kuka_mujoco', 'envs', 'assets',
model_filename)
# Construct the model and simulation objects.
model = mujoco_py.load_model_from_path(model_path)
sim = mujoco_py.MjSim(model)
viewer = mujoco_py.MjViewer(sim)
q_sol = hole_insertion_samples(sim, nsamples=20)
while True:
# Iterate through all of the solutions
for q in q_sol:
sim.data.qpos[:] = q
sim.forward()
viewer.render()
for q in q_sol[::-1]:
sim.data.qpos[:] = q
sim.forward()
viewer.render()
|
# Generated by Django 3.0.10 on 2020-11-29 18:26
from django.db import migrations
import kaffepause.accounts.models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.AlterModelManagers(
name='account',
managers=[
('objects', kaffepause.accounts.models.AccountManager()),
],
),
]
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) - Damian Avila
# pylint: disable = C0103
"""
Packaging
"""
# inspired from
# http://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Distributing%20Jupyter%20Extensions%20as%20Python%20Packages.html#Example---Server-extension-and-nbextension
import os
from setuptools import setup, find_packages
NAME = "rise"
INSTALL_REQUIRES = [
'notebook>=5.5.0',
]
with open('README.md') as readme:
README = readme.read()
# Enable the nbextension (like jupyter nbextension enable --sys-prefix)
DATA_FILES = [
("etc/jupyter/nbconfig/notebook.d", [
"jupyter-config/nbconfig/notebook.d/rise.json"
]),
]
# Install the nbextension (like jupyter nbextension install --sys-prefix).
# More precisely, everything in the rise/static directory and its
# subdirectories should be installed
nbext = ["share", "jupyter", "nbextensions", NAME]
for (path, dirs, files) in os.walk(os.path.join("rise", "static")):
# Files to install
srcfiles = [os.path.join(path, f) for f in files]
# Installation path components, removing rise/static from "path"
dst = nbext + path.split(os.sep)[2:]
DATA_FILES.append((os.path.join(*dst), srcfiles))
# version string is extracted from toplevel package.json
import json
with open('package.json') as package_json:
content = package_json.read()
version = json.loads(content)['version']
# convert npm-style prerelease versions into python-style dev versions
if "-dev." in version:
version = version.replace("-dev.", ".dev")
setup_args = dict(
name=NAME,
version=version,
packages=find_packages(),
data_files=DATA_FILES,
include_package_data=True,
install_requires=INSTALL_REQUIRES,
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
description="Reveal.js - Jupyter/IPython Slideshow Extension",
long_description=README,
author="Damián Avila",
author_email="damianavila82@yahoo.com.ar",
project_urls={
'source': "http://github.com/damianavila/RISE",
'documentation': "http://rise.readthedocs.io",
'gitter': "https://gitter.im/damianavila/RISE",
},
license="BSD-3-Clause",
platforms="Linux, Mac OS X, Windows",
keywords=["jupyter", "ipython", "presentation", "slides", "revealjs"],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
zip_safe=False,
)
if __name__ == '__main__':
setup(**setup_args)
|
"""
DIA-PreResNet for CIFAR/SVHN, implemented in Gluon.
Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['CIFARDIAPreResNet', 'diapreresnet20_cifar10', 'diapreresnet20_cifar100', 'diapreresnet20_svhn',
'diapreresnet56_cifar10', 'diapreresnet56_cifar100', 'diapreresnet56_svhn', 'diapreresnet110_cifar10',
'diapreresnet110_cifar100', 'diapreresnet110_svhn', 'diapreresnet164bn_cifar10',
'diapreresnet164bn_cifar100', 'diapreresnet164bn_svhn', 'diapreresnet1001_cifar10',
'diapreresnet1001_cifar100', 'diapreresnet1001_svhn', 'diapreresnet1202_cifar10',
'diapreresnet1202_cifar100', 'diapreresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3, DualPathSequential
from .preresnet import PreResActivation
from .diaresnet import DIAAttention
from .diapreresnet import DIAPreResUnit
class CIFARDIAPreResNet(HybridBlock):
"""
DIA-PreResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARDIAPreResNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = DualPathSequential(
return_two=False,
prefix="stage{}_".format(i + 1))
attention = DIAAttention(
in_x_features=channels_per_stage[0],
in_h_features=channels_per_stage[0])
with stage.name_scope():
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(DIAPreResUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
bn_use_global_stats=bn_use_global_stats,
bottleneck=bottleneck,
conv1_stride=False,
attention=attention))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_diapreresnet_cifar(classes,
blocks,
bottleneck,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create DIA-PreResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
channels_per_layers = [16, 32, 64]
init_block_channels = 16
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
net = CIFARDIAPreResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def diapreresnet20_cifar10(classes=10, **kwargs):
"""
DIA-PreResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diapreresnet20_cifar10",
**kwargs)
def diapreresnet20_cifar100(classes=100, **kwargs):
"""
DIA-PreResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diapreresnet20_cifar100",
**kwargs)
def diapreresnet20_svhn(classes=10, **kwargs):
"""
DIA-PreResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name="diapreresnet20_svhn",
**kwargs)
def diapreresnet56_cifar10(classes=10, **kwargs):
"""
DIA-PreResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diapreresnet56_cifar10",
**kwargs)
def diapreresnet56_cifar100(classes=100, **kwargs):
"""
DIA-PreResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diapreresnet56_cifar100",
**kwargs)
def diapreresnet56_svhn(classes=10, **kwargs):
"""
DIA-PreResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name="diapreresnet56_svhn",
**kwargs)
def diapreresnet110_cifar10(classes=10, **kwargs):
"""
DIA-PreResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diapreresnet110_cifar10",
**kwargs)
def diapreresnet110_cifar100(classes=100, **kwargs):
"""
DIA-PreResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diapreresnet110_cifar100",
**kwargs)
def diapreresnet110_svhn(classes=10, **kwargs):
"""
DIA-PreResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name="diapreresnet110_svhn",
**kwargs)
def diapreresnet164bn_cifar10(classes=10, **kwargs):
"""
DIA-PreResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_cifar10",
**kwargs)
def diapreresnet164bn_cifar100(classes=100, **kwargs):
"""
DIA-PreResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_cifar100",
**kwargs)
def diapreresnet164bn_svhn(classes=10, **kwargs):
"""
DIA-PreResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name="diapreresnet164bn_svhn",
**kwargs)
def diapreresnet1001_cifar10(classes=10, **kwargs):
"""
DIA-PreResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_cifar10",
**kwargs)
def diapreresnet1001_cifar100(classes=100, **kwargs):
"""
DIA-PreResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_cifar100",
**kwargs)
def diapreresnet1001_svhn(classes=10, **kwargs):
"""
DIA-PreResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name="diapreresnet1001_svhn",
**kwargs)
def diapreresnet1202_cifar10(classes=10, **kwargs):
"""
DIA-PreResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_cifar10",
**kwargs)
def diapreresnet1202_cifar100(classes=100, **kwargs):
"""
DIA-PreResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=1202, bottleneck=False,
model_name="diapreresnet1202_cifar100", **kwargs)
def diapreresnet1202_svhn(classes=10, **kwargs):
"""
DIA-PreResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
https://arxiv.org/abs/1905.10671.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_diapreresnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name="diapreresnet1202_svhn",
**kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(diapreresnet20_cifar10, 10),
(diapreresnet20_cifar100, 100),
(diapreresnet20_svhn, 10),
(diapreresnet56_cifar10, 10),
(diapreresnet56_cifar100, 100),
(diapreresnet56_svhn, 10),
(diapreresnet110_cifar10, 10),
(diapreresnet110_cifar100, 100),
(diapreresnet110_svhn, 10),
(diapreresnet164bn_cifar10, 10),
(diapreresnet164bn_cifar100, 100),
(diapreresnet164bn_svhn, 10),
(diapreresnet1001_cifar10, 10),
(diapreresnet1001_cifar100, 100),
(diapreresnet1001_svhn, 10),
(diapreresnet1202_cifar10, 10),
(diapreresnet1202_cifar100, 100),
(diapreresnet1202_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != diapreresnet20_cifar10 or weight_count == 286674)
assert (model != diapreresnet20_cifar100 or weight_count == 292524)
assert (model != diapreresnet20_svhn or weight_count == 286674)
assert (model != diapreresnet56_cifar10 or weight_count == 869970)
assert (model != diapreresnet56_cifar100 or weight_count == 875820)
assert (model != diapreresnet56_svhn or weight_count == 869970)
assert (model != diapreresnet110_cifar10 or weight_count == 1744914)
assert (model != diapreresnet110_cifar100 or weight_count == 1750764)
assert (model != diapreresnet110_svhn or weight_count == 1744914)
assert (model != diapreresnet164bn_cifar10 or weight_count == 1922106)
assert (model != diapreresnet164bn_cifar100 or weight_count == 1945236)
assert (model != diapreresnet164bn_svhn or weight_count == 1922106)
assert (model != diapreresnet1001_cifar10 or weight_count == 10546554)
assert (model != diapreresnet1001_cifar100 or weight_count == 10569684)
assert (model != diapreresnet1001_svhn or weight_count == 10546554)
assert (model != diapreresnet1202_cifar10 or weight_count == 19438226)
assert (model != diapreresnet1202_cifar100 or weight_count == 19444076)
assert (model != diapreresnet1202_svhn or weight_count == 19438226)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
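# Hedged usage sketch (comments only, not part of the test): if pretrained
# weights are published for a given model, loading them and running on GPU
# would look roughly like:
#   net = diapreresnet20_cifar10(pretrained=True, ctx=mx.gpu(0))
#   y = net(mx.nd.zeros((1, 3, 32, 32), ctx=mx.gpu(0)))  # -> shape (1, 10)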
|
from MOOSEplot import geoplot as gp
from MOOSEplot import plotCommon as pc
from datetime import datetime
from matplotlib import pyplot as plt
## First define a StandardPlot object from GeoPlot.
Gplot=gp.StandardPlot()
## Load and parse the namelist.
## Note: you will likely need to change the wrf_path in namelist_ex1.txt
## so that it points to your local WRF output.
Gplot.parse_namelist(namelist_file='namelist_ex1.txt')
## Optional, uncomment the below line to print out all of the namelist values.
#Gplot.print_namelist()
## Define the map projection based on current namelist values.
Gplot.define_projection()
#Define Figure and subplot
figure=plt.figure(figsize=(12,9))
#Note that the Cartopy framework is already imported as part of GeoPlot, and the projection is already defined as a cartopy projection
ax=figure.add_subplot(1,1,1,projection=Gplot.projection)
# Plot the model data on the defined subplot.
Gplot.makePlot(ax)
## Add the METAR observations to the plot.
## Note that several arguments are required to match the plotting conventions defined in the StandardPlot.
## METAR .csv files are located in the local SW_METARS directory; observed temperature (in C) is stored under the "TMP" column.
## Min, max, and the color map are set manually to match the values in the namelist.
## Time is set to the namelist time step and format, and the surface station coordinates are read from the "latitude" and "longitude" columns of the csv file.
## vectors is set to False, so wind barbs are NOT plotted.
pc.add_metars(ax,path='SW_METARS/',vari='TMP',vmin=10.,vmax=45.,cmap='jet',edgecolor='k',marker='o',time=Gplot.namelist_dictionary['time_step'],
tfmt=Gplot.namelist_dictionary['time_format'],lon='longitude',lat='latitude',zorder=8,vectors=False)
## Finally, define a time-string using the datetime function and print out a title.
timestr=datetime.strptime(Gplot.namelist_dictionary['time_step'],Gplot.namelist_dictionary['time_format']).strftime('%Y%m%d %H UTC')
plt.title(timestr,loc='right')
Gplot.show() ## Show the figure on the screen
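## Hedged alternative (plain matplotlib, not a MOOSEplot-specific call): save the
## figure to disk instead of displaying it; the file name below is illustrative.
# figure.savefig('ex1_metar_overlay.png', dpi=150, bbox_inches='tight')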
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# See the picamera recipes: http://picamera.readthedocs.io/en/release-1.2/recipes1.html
import io
import os
import sys
import time
import struct
import socket
import picamera
from PIL import Image
import cv2
import numpy as np
def photographToFile():
# Explicitly open a new file called my_image.jpg
my_file = open('photo.jpg', 'wb')
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture(my_file)
        # Note that at this point the data is in the OS file cache, but may
        # not actually have been written to disk yet (see the synced variant below)
my_file.close()
    # Now that the file has been closed, other processes should be able to
    # read the image successfully
return True
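# Hedged variant (not one of the original picamera recipes): if the capture must
# be durably on disk before returning, flush the Python buffer and fsync the file
# descriptor before closing. The file name used here is illustrative.
def photographToFileSynced(path='photo_synced.jpg'):
    with picamera.PiCamera() as camera:
        camera.start_preview()
        time.sleep(2)
        with open(path, 'wb') as f:
            camera.capture(f)
            f.flush()
            os.fsync(f.fileno())  # push the data out of the OS page cache
    return True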
def photographToBytesIO():
    # Write the capture to a BytesIO stream (Python's in-memory stream class)
stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture(stream, format='jpeg')
# "Rewind" the stream to the beginning so we can read its content
stream.seek(0)
image = Image.open(stream)
img = image.copy()
return img
def photographToCV():
# Create the in-memory stream
stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture(stream, format='jpeg')
    # Construct a numpy array from the stream (np.frombuffer replaces the
    # deprecated np.fromstring for binary data)
    data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
    # "Decode" the image from the array, preserving colour
    image = cv2.imdecode(data, 1)
    # OpenCV decodes to BGR channel order; reverse the last axis
    # (equivalently cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) to get RGB
    image = image[:, :, ::-1]
return image
def photographSeq():
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
for filename in camera.capture_continuous('img{counter:03d}.jpg'):
print('Captured %s' % filename)
time.sleep(300) # wait 5 minutes
def photographToServerSocket():
    # NOTICE:
    # The server script should be run first, on the receiving machine (not on the Pi),
    # so that a listening socket is ready to accept a connection from the client script
# Start a socket listening for connections on 0.0.0.0:8000 (0.0.0.0 means
# all interfaces)
server_socket = socket.socket()
server_socket.bind(('0.0.0.0', 8000))
server_socket.listen(0)
# Accept a single connection and make a file-like object out of it
    # @TODO: handle many (10k+) concurrent connections with select/poll/epoll,
    # e.g. via the selectors module; a hedged sketch follows this function
connection = server_socket.accept()[0].makefile('rb')
try:
while True:
# Read the length of the image as a 32-bit unsigned int. If the
# length is zero, quit the loop
image_len = struct.unpack('<L', connection.read(4))[0]
if not image_len:
break
# Construct a stream to hold the image data and read the image
# data from the connection
image_stream = io.BytesIO()
image_stream.write(connection.read(image_len))
# Rewind the stream, open it as an image with PIL and do some
# processing on it
image_stream.seek(0)
image = Image.open(image_stream)
print('Image is %dx%d' % image.size)
image.verify()
print('Image is verified')
finally:
connection.close()
server_socket.close()
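# Hedged sketch addressing the TODO above: one way to watch several client
# sockets at once with the standard-library selectors module. Each client is
# expected to speak the same "<L length prefix + JPEG bytes" protocol as above.
# This is an illustration only; the buffering is deliberately simple and the
# function name is made up for this example.
def photographToSelectorServerSocket():
    import selectors
    sel = selectors.DefaultSelector()
    server_socket = socket.socket()
    server_socket.bind(('0.0.0.0', 8000))
    server_socket.listen()
    server_socket.setblocking(False)
    sel.register(server_socket, selectors.EVENT_READ, data=None)
    buffers = {}  # per-connection receive buffers
    try:
        while True:
            for key, _ in sel.select(timeout=1):
                if key.data is None:
                    # New client: accept it and start watching its socket
                    conn, _ = key.fileobj.accept()
                    conn.setblocking(False)
                    sel.register(conn, selectors.EVENT_READ, data='client')
                    buffers[conn] = b''
                    continue
                conn = key.fileobj
                chunk = conn.recv(4096)
                if not chunk:
                    sel.unregister(conn)
                    conn.close()
                    del buffers[conn]
                    continue
                buffers[conn] += chunk
                # Drain every complete length-prefixed frame in the buffer
                while len(buffers[conn]) >= 4:
                    image_len = struct.unpack('<L', buffers[conn][:4])[0]
                    if image_len == 0:
                        # A zero length means this client is finished
                        sel.unregister(conn)
                        conn.close()
                        del buffers[conn]
                        break
                    if len(buffers[conn]) < 4 + image_len:
                        break  # wait for the rest of this frame
                    frame = buffers[conn][4:4 + image_len]
                    buffers[conn] = buffers[conn][4 + image_len:]
                    image = Image.open(io.BytesIO(frame))
                    print('Image is %dx%d' % image.size)
    finally:
        sel.close()
        server_socket.close()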
def photographToClientSocket():
# Connect a client socket to my_server:8000 (change my_server to the
# hostname of your server)
client_socket = socket.socket()
#client_socket.connect(('my_server', 8000))
client_socket.connect(('192.168.1.102', 8000))
# Make a file-like object out of the connection
connection = client_socket.makefile('wb')
try:
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
# Start a preview and let the camera warm up for 2 seconds
camera.start_preview()
time.sleep(2)
# Note the start time and construct a stream to hold image data
# temporarily (we could write it directly to connection but in this
# case we want to find out the size of each capture first to keep
# our protocol simple)
start = time.time()
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg'):
# Write the length of the capture to the stream and flush to
# ensure it actually gets sent
connection.write(struct.pack('<L', stream.tell()))
connection.flush()
# Rewind the stream and send the image data over the wire
stream.seek(0)
connection.write(stream.read())
# If we've been capturing for more than 30 seconds, quit
if time.time() - start > 30:
break
# Reset the stream for the next capture
stream.seek(0)
stream.truncate()
# Write a length of zero to the stream to signal we're done
connection.write(struct.pack('<L', 0))
finally:
connection.close()
client_socket.close()
if __name__ == '__main__':
photographToFile()
image = photographToBytesIO()
print(image)
time.sleep(3)
image = photographToCV()
print(image)
time.sleep(3)
photographSeq()
photographToClientSocket()
|
import os, random, sys  # sys is needed by resource_path() (_MEIPASS) below
from threading import Thread
from time import sleep
import playsound
from termcolor import colored
from config import *
import numpy as np
from PIL import Image
def get_ansi_color_code(r, g, b):
if r == g and g == b:
if r < 8:
return 16
if r > 248:
return 231
return round(((r - 8) / 247) * 24) + 232
return 16 + (36 * round(r / 255 * 5)) + (6 * round(g / 255 * 5)) + round(b / 255 * 5)
def get_color(r, g, b):
return "\x1b[48;5;{}m \x1b[0m".format(int(get_ansi_color_code(r,g,b)))
def show_image(img_path):
try:
img = Image.open(img_path)
    except FileNotFoundError:
        sys.exit('Image not found.')
h = 50
w = 120
    img = img.resize((w, h), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is its replacement
img_arr = np.asarray(img)
h,w,c = img_arr.shape
for x in range(h):
print(" "*12,end='')
for y in range(w):
pix = img_arr[x][y]
print(get_color(pix[0], pix[1], pix[2]), sep='', end='')
print()
sleep(0.15)
# Importing module specified in the config file
art = __import__(f'arts.{artFile}', globals(), locals(), ['*'])
def replaceMultiple(mainString, toBeReplace, newString):
# Iterate over the list to be replaced
for elem in toBeReplace :
# Check if the element is in the main string
if elem in mainString :
# Replace the string
mainString = mainString.replace(elem, newString)
return mainString
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
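# Usage example (hedged): resource_path(audio), with `audio` taken from config,
# resolves the file next to this script during development and inside PyInstaller's
# _MEIPASS temp directory when running as a frozen executable.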
def pprint(art,time):
color_used = [random.choice(color)]
colorAttribute = []
for i in range(len(art)):
if art[i] in colorCodes:
# Color attr set to blink if 9
if art[i] == '⑨':
colorAttribute = [colorCodes[art[i]]]
# color attr none if 10
elif art[i] == '⑩':
colorAttribute = []
# Random color if R
elif art[i] == '®':
color_used = color
else:
color_used = [colorCodes[art[i]]]
        print(colored(replaceMultiple(art[i], colorCodes, ''), random.choice(color_used), attrs=colorAttribute), sep='', end='', flush=True)
        sleep(time)
show_image('./pic/km.jpg')
def pAudio():
if playAudio:
playsound.playsound(resource_path(audio), True)
# Read this script's own source so pcode() can re-type it on screen
with open(resource_path(__file__)) as f_in:
code = f_in.read()
def pcode():
# Print the code before wishing
if codePrint:
for i in range(len(code)):
            print(colored(code[i], codeColor), sep='', end='', flush=True)
            sleep(codingSpeed)
input('\n\n'+colored('python3','blue')+colored(' PyBirthdayWish.py','yellow'))
os.system('cls' if os.name == 'nt' else 'clear')
else:
input(colored('press F11 and hit {Enter}...','blue'))
os.system('cls' if os.name == 'nt' else 'clear')
# Clearing terminal
os.system('cls' if os.name == 'nt' else 'clear')
try:
pcode()
Thread(target = pAudio).start()
Thread(target = pprint, args=(art.mainArt,speed)).start()
input()
except KeyboardInterrupt:
print(colored('\n[-] Thanks!!','red'))
os._exit(0)
|