#
# gtksel.py -- select version of Gtk to use
#
# Eric Jeschke (eric@naoj.org)
#
import ginga.toolkit
toolkit = ginga.toolkit.toolkit
have_gtk3 = False
have_gtk2 = False
# For now, Gtk 2 has preference
if toolkit in ('gtk2', 'choose'):
try:
import pygtk
pygtk.require('2.0')
have_gtk2 = True
except ImportError:
pass
if toolkit in ('gtk3', 'choose') and (not have_gtk2):
try:
# Try to import Gtk 2->3 compatibility layer
from gi import pygtkcompat
from gi.repository import GdkPixbuf
pygtkcompat.enable()
pygtkcompat.enable_gtk(version='3.0')
have_gtk3 = True
except ImportError:
pass
import gtk
import gobject
if have_gtk3:
    # TEMP: until this is fixed or some other acceptable workaround is found,
    # there is no good way to run on Gtk 3
raise Exception("Cairo.ImageSurface.create_for_data is not yet implemented in Gtk3")
ginga.toolkit.use('gtk3')
def pixbuf_new_from_xpm_data(xpm_data):
xpm_data = bytes('\n'.join(xpm_data))
return GdkPixbuf.Pixbuf.new_from_xpm_data(xpm_data)
def pixbuf_new_from_array(data, rgbtype, bpp):
# Seems Gtk3 Pixbufs do not have the new_from_array() method!
#return GdkPixbuf.Pixbuf.new_from_array(data, rgbtype, bpp)
daht, dawd, depth = data.shape
stride = dawd * 4 * bpp
rgb_buf = data.tostring(order='C')
hasAlpha = False
rgbtype = GdkPixbuf.Colorspace.RGB
return GdkPixbuf.Pixbuf.new_from_data(rgb_buf, rgbtype, hasAlpha, 8,
dawd, daht, stride, None, None)
def pixbuf_new_from_data(rgb_buf, rgbtype, hasAlpha, bpp, dawd, daht, stride):
return GdkPixbuf.Pixbuf.new_from_data(rgb_buf, rgbtype, hasAlpha, bpp,
dawd, daht, stride, None, None)
def pixbuf_new_from_file_at_size(foldericon, width, height):
return GdkPixbuf.Pixbuf.new_from_file_at_size(foldericon,
width, height)
def make_cursor(widget, iconpath, x, y):
image = gtk.Image()
image.set_from_file(iconpath)
pixbuf = image.get_pixbuf()
screen = widget.get_screen()
display = screen.get_display()
return gtk.gdk.Cursor(display, pixbuf, x, y)
elif have_gtk2:
ginga.toolkit.use('gtk2')
def pixbuf_new_from_xpm_data(xpm_data):
return gtk.gdk.pixbuf_new_from_xpm_data(xpm_data)
def pixbuf_new_from_array(data, rgbtype, bpp):
return gtk.gdk.pixbuf_new_from_array(data, rgbtype, bpp)
def pixbuf_new_from_data(rgb_buf, rgbtype, hasAlpha, bpp, dawd, daht, stride):
return gtk.gdk.pixbuf_new_from_data(rgb_buf, rgbtype, hasAlpha, bpp,
dawd, daht, stride)
def pixbuf_new_from_file_at_size(foldericon, width, height):
return gtk.gdk.pixbuf_new_from_file_at_size(foldericon,
width, height)
def make_cursor(widget, iconpath, x, y):
pixbuf = gtk.gdk.pixbuf_new_from_file(iconpath)
screen = widget.get_screen()
display = screen.get_display()
return gtk.gdk.Cursor(display, pixbuf, x, y)
else:
raise ImportError("Failed to import gtk. There may be an issue with the toolkit module or it is not installed")
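# Usage sketch, assuming one of the Gtk backends above imported successfully
# (the file names below are illustrative only):
#
#   pixbuf = pixbuf_new_from_file_at_size('folder.png', 24, 24)
#   cursor = make_cursor(some_gtk_widget, 'cursor.png', 8, 8)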
#END
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-22 18:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('topics', '0007_auto_20170322_1054'),
('topics', '0006_auto_20170319_1524'),
]
operations = [
]
|
from .initialize import *
from scipy import stats
from ..utils.tools import drop_nan, splat_teff_to_spt,kernel_density
from tqdm import tqdm
import splat.simulate as spsim
import splat.evolve as spev
import splat.empirical as spe
import wisps
#import pymc3 as pm
from scipy.interpolate import griddata
#import theano.tensor as tt
#from theano.compile.ops import as_op
import astropy.units as u
import numba
BINARY_TABLE=pd.read_pickle(DATA_FOLDER+'/binary_lookup_table.pkl.gz')
BINARY_TABLE_SYS=(BINARY_TABLE['sys']).values
BINARY_TABLE_PRIM=(BINARY_TABLE['prim']).values
BINARY_TABLE_SEC=(BINARY_TABLE['sec']).values
def log_mass_function(m, alpha):
"""
Power law mass function
"""
return np.log(m**-alpha)
def log_mass_ratio(q, gamma):
"""
    Power-law mass-ratio distribution: q = m2 / m1,
    where m1 is the primary mass and m2 the secondary mass
    """
return np.log(q**gamma)
def total_likelihood(m1, q, alpha, gamma):
return log_mass_function(m1, alpha)+log_mass_ratio(q, gamma)
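# Worked example (natural logs): total_likelihood(m1=0.05, q=0.5, alpha=0.6, gamma=4)
#   = -0.6*ln(0.05) + 4*ln(0.5) ~= 1.80 - 2.77 ~= -0.98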
def simulate_binary(nstuff, mass_range, age_range):
"""
    Simulate a distribution of binaries from simple assumptions.
    This is much faster than splat.
    Note: requires pymc3 (imported as pm; see the commented-out import above).
"""
gamma=4
with pm.Model() as model:
alpha=0.6
prim=pm.Uniform('m1', lower=mass_range[0], upper=mass_range[1]) #primaries
q=pm.Uniform('q', lower=.1, upper=1.)
sec=pm.Deterministic('m2', prim*q)
age=pm.Uniform('t', lower=age_range[0], upper=age_range[-1]) #system age
like = pm.Potential('likelihood', total_likelihood(prim, q, alpha, gamma))
trace = pm.sample(draws=nstuff, cores=4, tune=int(nstuff/20), init='advi')
return [trace.m1, trace.m2, trace.t]
def get_system_type(pr, sc):
"""
use the lookup table to get a spectral type for the binary
using a linear interpolation to avoid nans
"""
    #where secondaries are nan, set them to their primaries
sc[np.isnan(sc)]=pr[np.isnan(sc)]
interpoints=np.array([BINARY_TABLE_PRIM, BINARY_TABLE_SEC ]).T
comb=griddata(interpoints, BINARY_TABLE_SYS , (pr, sc), method='linear')
return comb
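# Usage sketch (hypothetical spectral-type codes; returned values depend on the lookup table):
#   get_system_type(np.array([20., 25.]), np.array([22., np.nan]))
#   -> the NaN secondary is first replaced by its primary (25.), then each (prim, sec) pair is
#      linearly interpolated onto the precomputed (prim, sec) -> system grid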
def evolutionary_model_interpolator(mass, age, model):
"""
My own evolutionary model interpolator,
Hoping to make it faster than splat because I'm impatient
input: mass, age
model: model name
"""
model_filename=EVOL_MODELS_FOLDER+'//'+model.lower()+'.csv'
evolutiomodel=pd.read_csv( model_filename)
#use the full cloud treatment for saumon models
if model=='saumon2008':
evolutiomodel=evolutiomodel[evolutiomodel.cloud=='hybrid']
    #put age, teff and mass on a logarithmic scale
valuest=np.log10(evolutiomodel.temperature.values)
valueslogg=evolutiomodel.gravity.values
valueslumn=evolutiomodel.luminosity.values
valuesm=np.log10(evolutiomodel.mass.values)
valuesag=np.log10(evolutiomodel.age.values)
evolpoints=np.array([valuesm, valuesag ]).T
teffs=griddata(evolpoints, valuest , (np.log10(mass), np.log10(age)), method='linear')
lumn=griddata(evolpoints, valueslumn , (np.log10(mass), np.log10(age)), method='linear')
return {'mass': mass*u.Msun, 'age': age*u.Gyr, 'temperature': 10**teffs*u.Kelvin,
'luminosity': lumn*u.Lsun}
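# Usage sketch (assumes '<EVOL_MODELS_FOLDER>/baraffe2003.csv' exists, as used elsewhere in this module):
#   out = evolutionary_model_interpolator(np.array([0.05, 0.08]), np.array([1.0, 5.0]), 'baraffe2003')
#   out['temperature']   # interpolated effective temperatures (K) for each (mass, age) pair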
def simulate_spts(**kwargs):
"""
Simulate parameters from mass function,
mass ratio distribution and age distribution
"""
recompute=kwargs.get('recompute', False)
model_name=kwargs.get('name','baraffe2003')
    #use hybrid models that predict the T dwarf bump for Saumon models
if model_name=='saumon2008':
cloud='hybrid'
else:
cloud=False
    #automatically set maxima and minima to avoid having too many nans
    #values are [mass_min, mass_max, age_min, age_max]; the minimum mass is 0.01 for all models
acceptable_values={'baraffe2003': [0.01, 0.1, 0.001, 8.0],
'marley2019': [0.01, 0.08, 0.001, 8.0], 'saumon2008':[0.01, 0.09, 0.003, 8.0],
'phillips2020':[0.01, 0.075, 0.001, 8.0 ], 'burrows2001':[0.01, 0.1, 0.001, 8.0]}
if recompute:
nsim = kwargs.get('nsample', 1e5)
ranges=acceptable_values[model_name]
# masses for singles [this can be done with pymc but nvm]
m_singles = spsim.simulateMasses(nsim,range=[ranges[0], ranges[1]],distribution='power-law',alpha=0.6)
#ages for singles
ages_singles= spsim.simulateAges(nsim,range=[ranges[2], ranges[3]], distribution='uniform')
#parameters for binaries
#binrs=simulate_binary(int(nsim), [ranges[0], ranges[1]], [ranges[2], ranges[3]])
qs=spsim.simulateMassRatios(nsim,distribution='power-law',q_range=[0.1,1.0],gamma=4)
m_prims = spsim.simulateMasses(nsim,range=[ranges[0], ranges[1]],distribution='power-law',alpha=0.6)
m_sec=m_prims*qs
ages_bin= spsim.simulateAges(nsim,range=[ranges[2], ranges[3]], distribution='uniform')
#single_evol=spev.modelParameters(mass=m_singles,age=ages_singles, set=model_name, cloud=cloud)
single_evol=evolutionary_model_interpolator(m_singles, ages_singles, model_name)
#primary_evol=spev.modelParameters(mass=binrs[0],age=binrs[-1], set=model_name, cloud=cloud)
primary_evol=evolutionary_model_interpolator(m_prims,ages_bin, model_name)
#secondary_evol=spev.modelParameters(mass=binrs[1],age=binrs[-1], set=model_name, cloud=cloud)
secondary_evol=evolutionary_model_interpolator(m_sec,ages_bin, model_name)
#save luminosities
#temperatures
teffs_singl =single_evol['temperature'].value
teffs_primar=primary_evol['temperature'].value
teffs_second=secondary_evol['temperature'].value
#spectraltypes
spts_singl =splat_teff_to_spt(teffs_singl)
#the singles will be fine, remove nans from systems
spt_primar=splat_teff_to_spt(teffs_primar)
spt_second=splat_teff_to_spt(teffs_second)
#remove nans
print ('MAX AGES', np.nanmax(ages_singles))
#print ('MAX AGES', np.nanmax())
xy=np.vstack([np.round(np.array(spt_primar), decimals=0), np.round(np.array(spt_second), decimals=0)]).T
spt_binr=get_system_type(xy[:,0], xy[:,1])
values={ 'sing_evol': single_evol, 'sing_spt':spts_singl,
'prim_evol': primary_evol, 'prim_spt':spt_primar,
'sec_evol': secondary_evol, 'sec_spt': spt_second,
'binary_spt': spt_binr }
import pickle
with open(wisps.OUTPUT_FILES+'/mass_age_spcts_with_bin{}.pkl'.format(model_name), 'wb') as file:
pickle.dump(values,file)
else:
values=pd.read_pickle(wisps.OUTPUT_FILES+'/mass_age_spcts_with_bin{}.pkl'.format(model_name))
return values
def make_systems(**kwargs):
"""
    choose random sets of primaries and secondaries,
    plus a sample of single systems, based on a precomputed evolutionary model grid
    and an unresolved binary fraction
"""
#recompute for different evolutionary models
model=kwargs.get('model_name', 'baraffe2003')
binary_fraction=kwargs.get('bfraction', 0.2)
model_vals=simulate_spts(name=model, **kwargs)
#nbin= int(len(model_vals['sing_spt'])*binary_fraction) #number of binaries
ndraw= int(len(model_vals['sing_spt'])/(1-binary_fraction))-int(len(model_vals['sing_spt']))
nans=np.isnan(model_vals['binary_spt'])
    #draws for unresolved binaries (used as `choices` below)
    choices={'spt': np.random.choice(model_vals['binary_spt'][~nans], ndraw),
             'teff': np.random.choice(model_vals['prim_evol']['temperature'].value[~nans], ndraw),
             'age': np.random.choice(model_vals['prim_evol']['age'].value[~nans], ndraw),
             #system mass = primary mass + secondary mass
             'mass': np.random.choice(model_vals['prim_evol']['mass'].value[~nans]+model_vals['sec_evol']['mass'].value[~nans], ndraw)}
vs={'system_spts': np.concatenate([model_vals['sing_spt'], choices['spt']]),
'system_teff': np.concatenate([(model_vals['sing_evol']['temperature']).value, choices['teff']]),
'system_age': np.concatenate([(model_vals['sing_evol']['age']).value, choices['age']]),
'system_mass': np.concatenate([(model_vals['sing_evol']['mass']).value, choices['mass']])}
return vs
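# Usage sketch (reads the cached pickle unless recompute=True is passed through):
#   systems = make_systems(model_name='baraffe2003', bfraction=0.2)
#   systems['system_spts']   # spectral types of the singles plus the unresolved binaries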
|
from sqlalchemy import Column, String, Integer
from .. import Base
from .base_model import BaseModel
class Item(Base, BaseModel):
__tablename__ = 'items'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String, nullable=False)
category = Column(String, nullable=True)
count = Column(Integer, default=0)
def __init__(self, name: str, category: str, count=0):
self.name = name.strip()
self.category = category.strip()
self.count = count
def __repr__(self):
return f"<Item(id={self.id}, name={self.name})>"
|
class Results(object):
"""
MIOSQP Results
"""
def __init__(self, x, upper_glob, run_time, status,
osqp_solve_time, osqp_iter_avg):
self.x = x
self.upper_glob = upper_glob
self.run_time = run_time
self.status = status
self.osqp_solve_time = osqp_solve_time
self.osqp_iter_avg = osqp_iter_avg
|
import struct
from enum import IntEnum
from spherov2.commands import Commands
from spherov2.helper import to_bytes, to_int
from spherov2.listeners.system_info import Version, LastErrorInfo, ConfigBlock, ManufacturingDate, EventLogStatus
class SosMessages(IntEnum):
UNKNOWN = 0
SUBPROCESSOR_CRASHED = 1
class BootReasons(IntEnum):
COLD_BOOT = 0
UNEXPECTED_RESET = 1
APPLICATION_RESET_DUE_TO_ERROR = 2
APPLICATION_RESET_FOR_A_FIRMWARE_UPDATE = 3
PROCESSOR_IS_BOOTING_FROM_SLEEP = 4
PROCESSOR_IS_RESETTING_FOR_SOME_NON_ERROR_REASON = 5
class SystemInfo(Commands):
_did = 17
@staticmethod
def get_main_app_version(toy, proc=None):
return Version(*struct.unpack('>3H', toy._execute(SystemInfo._encode(toy, 0, proc)).data))
@staticmethod
def get_bootloader_version(toy, proc=None):
return Version(*struct.unpack('>3H', toy._execute(SystemInfo._encode(toy, 1, proc)).data))
@staticmethod
def get_board_revision(toy, proc=None):
return toy._execute(SystemInfo._encode(toy, 3, proc)).data[0]
@staticmethod
def get_mac_address(toy, proc=None):
return toy._execute(SystemInfo._encode(toy, 6, proc)).data
@staticmethod
def get_stats_id(toy, proc=None):
return toy._execute(SystemInfo._encode(toy, 19, proc)).data
@staticmethod
def get_secondary_main_app_version(toy, proc=None):
toy._execute(SystemInfo._encode(toy, 23, proc))
return Version(
*struct.unpack('>3H', toy._wait_packet(SystemInfo.secondary_main_app_version_notify).data))
secondary_main_app_version_notify = (17, 24, 0xff)
@staticmethod
def get_processor_name(toy, proc=None):
return toy._execute(SystemInfo._encode(toy, 31, proc)).data.rstrip(b'\0')
@staticmethod
def get_boot_reason(toy, proc=None):
return BootReasons(toy._execute(SystemInfo._encode(toy, 32, proc)).data[0])
@staticmethod
def get_last_error_info(toy, proc=None):
return LastErrorInfo(
*struct.unpack('>32sH12s', toy._execute(SystemInfo._encode(toy, 33, proc)).data))
@staticmethod
def get_secondary_mcu_bootloader_version(toy, proc=None):
toy._execute(SystemInfo._encode(toy, 36, proc))
return Version(*struct.unpack('>3H', toy._wait_packet(SystemInfo.secondary_mcu_bootloader_version_notify).data))
secondary_mcu_bootloader_version_notify = (17, 37, 0xff)
@staticmethod
def get_three_character_sku(toy, proc=None):
return toy._execute(SystemInfo._encode(toy, 40, proc)).data
@staticmethod
def write_config_block(toy, proc=None):
toy._execute(SystemInfo._encode(toy, 43, proc))
@staticmethod
def get_config_block(toy, proc=None):
        data = toy._execute(SystemInfo._encode(toy, 44, proc)).data
return ConfigBlock(*struct.unpack('>2I', data[:8]), data[8:])
@staticmethod
def set_config_block(toy, metadata_version, config_block_version, application_data, proc=None):
        toy._execute(SystemInfo._encode(
            toy, 45, proc,
            [*to_bytes(metadata_version, 4), *to_bytes(config_block_version, 4), *application_data]))
@staticmethod
def erase_config_block(toy, j, proc=None):
toy._execute(SystemInfo._encode(toy, 46, proc, to_bytes(j, 4)))
@staticmethod
def get_swd_locking_status(toy, proc=None):
return bool(toy._execute(SystemInfo._encode(toy, 48, proc)).data[0])
@staticmethod
def get_manufacturing_date(toy, proc=None):
return ManufacturingDate(
*struct.unpack('>HBB', toy._execute(SystemInfo._encode(toy, 51, proc)).data))
@staticmethod
def get_sku(toy, proc=None):
return toy._execute(SystemInfo._encode(toy, 56, proc)).data.rstrip(b'\0')
@staticmethod
def get_core_up_time_in_milliseconds(toy, proc=None):
return to_int(toy._execute(SystemInfo._encode(toy, 57, proc)).data)
@staticmethod
def get_event_log_status(toy, proc=None):
return EventLogStatus(*struct.unpack('>3I', toy._execute(SystemInfo._encode(toy, 58, proc)).data))
@staticmethod
def get_event_log_data(toy, j, j2, proc=None): # unknown name
return toy._execute(SystemInfo._encode(toy, 59, proc, to_bytes(j, 4) + to_bytes(j2, 4))).data
@staticmethod
def clear_event_log(toy, proc=None):
toy._execute(SystemInfo._encode(toy, 60, proc))
@staticmethod
def enable_sos_message_notify(toy, enable: bool, proc=None):
toy._execute(SystemInfo._encode(toy, 61, proc, [int(enable)]))
sos_message_notify = (17, 62, 0xff), lambda listener, p: listener(SosMessages(p.data[0]))
@staticmethod
def get_sos_message(toy, proc=None):
toy._execute(SystemInfo._encode(toy, 63, proc))
@staticmethod
def clear_sos_message(toy, proc=None):
toy._execute(SystemInfo._encode(toy, 68, proc))
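# Usage sketch (assumes `toy` is a connected spherov2 toy object):
#   SystemInfo.get_main_app_version(toy)   # -> Version parsed from three 16-bit fields
#   SystemInfo.get_sku(toy)                # -> SKU bytes with trailing NULs stripped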
|
# ------------------------------
# 154. Find Minimum in Rotated Sorted Array II
#
# Description:
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
#
# (i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).
#
# Find the minimum element.
#
# The array may contain duplicates.
#
# Example 1:
# Input: [1,3,5]
# Output: 1
#
# Example 2:
# Input: [2,2,2,0,1]
# Output: 0
#
# Note:
# This is a follow up problem to Find Minimum in Rotated Sorted Array.
# Would allow duplicates affect the run-time complexity? How and why?
#
# Version: 1.0
# 10/30/19 by Jianfa
# ------------------------------
from typing import List


class Solution:
def findMin(self, nums: List[int]) -> int:
low = 0
high = len(nums) - 1
while low < high:
mid = (low + high) // 2
if nums[mid] > nums[high]:
low = mid + 1
elif nums[mid] < nums[high]:
high = mid
else:
# nums[mid] == nums[high]
# not sure where the minimum is
high -= 1
return nums[low]
# Used for testing
if __name__ == "__main__":
    test = Solution()
    print(test.findMin([1, 3, 5]))        # expected 1
    print(test.findMin([2, 2, 2, 0, 1]))  # expected 0
    print(test.findMin([3, 1, 3]))        # corner case, expected 1
# ------------------------------
# Summary:
# Idea from: https://leetcode.com/problems/find-minimum-in-rotated-sorted-array-ii/discuss/48808/My-pretty-simple-code-to-solve-it
# With duplicates the worst case degrades from O(log n) to O(n): when
# nums[mid] == nums[high] we cannot tell which half holds the minimum,
# so the search window can only shrink by one (high -= 1).
# Corner case: [3, 1, 3]
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import sys
import os
import time
import math
class Message():
def __init__(self, user, message):
self.user = user
self.message = message
def __eq__(self, other):
return self.message == other.message
if os.name == "nt":
driverPath = "driver/chromedriver_2.24.exe"
dataPath = "Data"
else:
driverPath = "driver/chromedriver"
dataPath = "Data/ChatBot"
options = webdriver.ChromeOptions()
options.add_argument("--user-data-dir=" + dataPath)
driver = webdriver.Chrome(chrome_options=options, executable_path=driverPath)
driver.get('https://web.whatsapp.com')
driver.execute_script("window.open('','_blank');")
driver.switch_to_window(driver.window_handles[0])
driver.switch_to_window(driver.window_handles[1])
driver.get('http://www.square-bear.co.uk/mitsuku/nfchat.htm')
driver.switch_to_window(driver.window_handles[0])
input("Choose a chat on whatsapp and press enter : ")
chatHistory = []
replyQueue = []
firstRun = True
print("Starting...")
while True:
try:
driver.switch_to_window(driver.window_handles[0])
usersDiv = driver.find_element_by_id("side")
messageDiv = driver.find_element_by_id("main")
messageList = messageDiv.find_element_by_class_name("message-list")
messageList = messageList.find_elements_by_class_name("msg")
newMessages = []
for message in reversed(messageList):
bubbleText = None
try:
bubbleText = message.find_element_by_class_name(
"message-chat").find_element_by_class_name("bubble")
except:
pass
if bubbleText is not None:
author = "Unknown"
msgObj = None
if "has-author" in bubbleText.get_attribute("class"):
try:
author = bubbleText.find_element_by_class_name(
"message-author").find_element_by_class_name("emojitext").text
except Exception as e:
pass
elif "msg-group" in message.get_attribute("class"):
author = "Akshay Aradhya"
try:
text_message = bubbleText.find_element_by_class_name(
"message-text").find_element_by_class_name("emojitext").text
if len(text_message) > 0:
msgObj = Message(author, text_message)
except Exception as e:
pass
if len(chatHistory) > 0 and (msgObj is not None) and msgObj == chatHistory[-1]:
break
elif msgObj is not None:
newMessages.append(msgObj)
# print("New Messages : ", len(newMessages))
for message in reversed(newMessages):
chatHistory.append(message)
# Update Unknown Users
for i in range(len(chatHistory)):
if i > 0 and chatHistory[i].user == "Unknown":
chatHistory[i].user = chatHistory[i - 1].user
for message in reversed(newMessages):
if message.message[0] == "$" and firstRun == False:
replyQueue.append(message)
# print("Querries =", len(replyQueue))
firstRun = False
if len(replyQueue) == 0:
continue
# Switch tabs and get Response
driver.switch_to_window(driver.window_handles[1])
driver.switch_to_default_content()
driver.switch_to.frame('input')
textField = driver.find_elements_by_tag_name("input")[1]
responses = []
for message in replyQueue:
textField.send_keys(message.message[1:] + Keys.ENTER)
responseBody = None
fontTags = driver.find_elements_by_tag_name("font")
for tag in fontTags:
if tag.get_attribute("face") == "Trebuchet MS,Arial" and tag.get_attribute("color") == "#000000":
responseBody = tag
break
start = responseBody.text.find("Mitsuku")
end = responseBody.text.find("You", 4)
firstName = message.user.split(' ')[0]
resp = responseBody.text[start + 10:end - 2]
print(start, end, repr(resp))
responses.append("@" + firstName + " : " + resp)
replyQueue = []
# Switch tabs and reply on whatsapp
driver.switch_to_window(driver.window_handles[0])
inputMessage = messageDiv.find_element_by_class_name('input')
for response in responses:
lines = response.split('\n')
for line in lines:
inputMessage.send_keys(line)
inputMessage.send_keys(Keys.SHIFT, Keys.ENTER)
print("SE")
inputMessage.send_keys(Keys.ENTER)
print("E")
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
|
import os
class Logger(object):
def __init__(self):
pass
@staticmethod
def LogDebug(msg):
Logger._Log('[D] ' + msg)
@staticmethod
def LogInformation(msg):
Logger._Log('[I] ' + msg)
@staticmethod
def LogWarning(msg):
Logger._Log('[W] ' + msg)
@staticmethod
def LogError(msg):
Logger._Log('[E] ' + msg)
@staticmethod
def _Log(msg):
print(msg)
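# Usage sketch:
#   Logger.LogInformation('service started')    # prints "[I] service started"
#   Logger.LogError('could not open the file')  # prints "[E] could not open the file"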
|
# 779. K-th Symbol in Grammar
"""
We build a table of n rows (1-indexed). We start by writing 0 in the 1st row. Now in every subsequent row,
we look at the previous row and replace each occurrence of 0 with 01, and each occurrence of 1 with 10.
For example, for n = 3, the 1st row is 0, the 2nd row is 01, and the 3rd row is 0110.
Given two integers n and k, return the kth (1-indexed) symbol in the nth row of a table of n rows.
Input: n = 4, k = 6
Output: 0
Explanation:
row 1: 0
row 2: 01
row 3: 0110
row 4: 01101001
"""
import math
class Solution:
def kthGrammar(self, N: int, K: int) -> int:
# Base condition
if N == 1:
return 0
parent = self.kthGrammar(N - 1, math.ceil(K / 2))
isEven = K % 2 == 0
if parent == 1:
return 0 if isEven else 1
else:
return 1 if isEven else 0
# Tried generating the entire row explicitly; the row doubles each level, so this exceeds recursion/memory limits
# def kthGrammar(n, k, values):
# def insert_helper(values):
# print(len(values))
# if len(values) == 0:
# return values
# temp = values[-1]
# values.pop()
# insert_helper(values)
# if temp == 0:
# values.append(0)
# values.append(1)
# else:
# values.append(1)
# values.append(0)
# def helper(n, values):
# if n == 1:
# values.append(0)
# return values
# helper(n - 1, values)
# insert_helper(values)
# helper(n, values)
# values = []
# kthGrammar(12, 1, values)
# print(len(values))
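# Quick sanity check against the example in the problem statement above:
if __name__ == "__main__":
    sol = Solution()
    print(sol.kthGrammar(4, 6))  # expected 0 (row 4 is 01101001)
    print(sol.kthGrammar(1, 1))  # expected 0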
|
from camp.timepix.run import TimePixRun
from camp.timepix.vmi import VmiImage
run_number = 178 # short run
timepix_run = TimePixRun(run_number)
# extract data
timepix_dict = timepix_run.get_events('centroided', ['x', 'y'], fragment='fragments,test_ion')
x, y = timepix_dict['x'], timepix_dict['y']
# show VMI image
vmi_image = VmiImage(x, y)
vmi_image.show()
# zoom in VMI image
x_start, x_end = 120, 130
y_start, y_end = 60, 70
vmi_image.zoom_in(x_start, x_end, y_start, y_end)
# radial averaging
x_center, y_center = 130, 110
## optional: restrict to a circle / ring sector
start_angle, end_angle = -45, 45 # degrees
start_radius, end_radius = 20, 100 # pixel
radial_average = VmiImage(x, y).create_radial_average((x_center, y_center), angles=(start_angle, end_angle),
radii=(start_radius, end_radius))
|
from collections import defaultdict, Counter
from itertools import product, permutations
from glob import glob
import json
import os
from pathlib import Path
import pickle
import sqlite3
import string
import sys
import time
import matplotlib as mpl
from matplotlib import colors
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
import matplotlib.patches as mpatches
from multiprocessing import Pool
import numpy as np
import pandas as pd
from palettable.colorbrewer.qualitative import Paired_12
from palettable.colorbrewer.diverging import PuOr_5, RdYlGn_6, PuOr_10, RdBu_10
from palettable.scientific.diverging import Cork_10
from scipy.spatial import distance_matrix, ConvexHull, convex_hull_plot_2d
from scipy.stats import linregress, pearsonr, lognorm
import seaborn as sns
import svgutils.compose as sc
import asym_io
from asym_io import PATH_BASE, PATH_ASYM, PATH_ASYM_DATA
import asym_utils as utils
import folding_rate
import paper_figs
import structure
PATH_FIG = PATH_ASYM.joinpath("Figures")
PATH_FIG_DATA = PATH_FIG.joinpath("Data")
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
####################################################################
### SI Figures
####################################################################
### FIG 1
def fig1(df, nx=3, ny=3, N=50):
fig, ax = plt.subplots(nx,ny, figsize=(12,12))
ax = ax.reshape(ax.size)
fig.subplots_adjust(hspace=.5)
lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
cat = 'HS.D'
scop_desc = {row[1]:row[2] for row in pd.read_csv(PATH_BASE.joinpath('SCOP/scop-des-latest.txt')).itertuples()}
CF_count = sorted(df.CF.value_counts().items(), key=lambda x:x[1], reverse=True)[1:]
bold_idx = [0, 1, 2, 6, 8]
for i in range(nx*ny):
cf_id, count = CF_count[i]
countN, countC = utils.pdb_end_stats_disorder_N_C(df.loc[df.CF==cf_id], N=N, s1='SEQ_PDB2', s2='SS_PDB2')
base = np.zeros(len(countN['S']), dtype=float)
Yt = np.array([[sum(p.values()) for p in countN[s]] for s in cat]).sum(axis=0)
X = np.arange(base.size)
for j, s in enumerate(cat):
YN = np.array([sum(p.values()) for p in countN[s]])
YC = np.array([sum(p.values()) for p in countC[s]])
ax[i].plot(YN/Yt, '-', c=col[j], label=f"{s} N")
ax[i].plot(YC/Yt, ':', c=col[j], label=f"{s} C")
if i in bold_idx:
ax[i].set_title(f"{scop_desc[int(cf_id)][:40]}\nTotal sequences: {count}", fontweight='bold')
else:
ax[i].set_title(f"{scop_desc[int(cf_id)][:40]}\nTotal sequences: {count}")
ax[i].set_xlabel('Sequence distance from ends')
if not i%3:
ax[i].set_ylabel('Secondary\nstructure\nprobability')
handles = [Line2D([0], [0], ls=ls, c=c, label=l) for ls, c, l in zip(['-', '--'], ['k']*2, ['N', 'C'])] + \
[Line2D([0], [0], ls='-', c=c, label=l) for l, c in zip(lbls, col)]
ax[1].legend(handles=handles, bbox_to_anchor=(1.40, 1.45), frameon=False,
ncol=6, columnspacing=1.5, handlelength=2.0)
fig.savefig(PATH_FIG.joinpath("si1.pdf"), bbox_inches='tight')
####################################################################
### FIG 2
def fig2():
pfdb = asym_io.load_pfdb()
fig, ax = plt.subplots(1,2, figsize=(10,5))
fig.subplots_adjust(wspace=0.3)
X1 = np.log10(pfdb.loc[pfdb.use, 'L'])
X2 = np.log10(pfdb.loc[pfdb.use, 'CO'])
Y = pfdb.loc[pfdb.use, 'log_kf']
sns.regplot(X1, Y, ax=ax[0])
sns.regplot(X2, Y, ax=ax[1])
print(pearsonr(X1, Y))
print(pearsonr(X2, Y))
ax[0].set_ylabel(r'$\log_{10} k_f$')
ax[1].set_ylabel(r'$\log_{10} k_f$')
ax[0].set_xlabel(r'$\log_{10}$ Sequence Length')
ax[1].set_xlabel(r'$\log_{10}$ Contact Order')
fs = 14
for i, b in zip([0,1], list('ABCDEFGHI')):
ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si2.pdf"), bbox_inches='tight')
####################################################################
### FIG 3
def fig3(pdb, Y='S_ASYM'):
LO = folding_rate.get_folding_translation_rates(pdb.copy(), which='lo')
HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='hi')
fig, ax = plt.subplots()
lbls = ['Fit', r"$95\% CI$", r"$95\% CI$"]
for i, d in enumerate([pdb, LO, HI]):
print(f"{i}: frac R less than 0 = {utils.R_frac_1(d)}")
print(f"{i}: Euk frac (.1 < R < 10) = {utils.R_frac_2(d, k=5)}")
print(f"{i}: Prok frac (.1 < R < 10) = {utils.R_frac_2(d, k=10)}")
print(f"{i}: frac R faster than 'speed-limit' = {utils.R_frac_3(d)}")
print(f"{i}: frac R slower than 20 minutes = {utils.R_frac_4(d)}")
print()
sns.distplot(d['REL_RATE'], label=lbls[i], color=col[i])
ax.legend(loc='best', frameon=False)
ax.set_xlim(-6, 6)
ax.set_xlabel(r'$\log_{10}R$')
ax.set_ylabel('Density')
fig.savefig(PATH_FIG.joinpath("si3.pdf"), bbox_inches='tight')
####################################################################
### FIG 4
def fig4(pdb, Y='S_ASYM'):
LO = folding_rate.get_folding_translation_rates(pdb.copy(), which='lo')
HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='hi')
# For the results using only 2-state proteins...
# HI = folding_rate.get_folding_translation_rates(pdb.copy(), which='best', only2s=True)
fig = plt.figure(figsize=(8,10.5))
gs = GridSpec(5,12, wspace=0.5, hspace=0.0, height_ratios=[1,0.5,1,0.5,1.5])
ax = [fig.add_subplot(gs[i*2,j*4:(j+1)*4]) for i in [0,1] for j in [0,1,2]] + \
[fig.add_subplot(gs[4,:5]), fig.add_subplot(gs[4,7:])]
X = np.arange(10)
width = .35
ttls = [r'$\alpha$ Helix', r'$\beta$ Sheet']
lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[0]
c_sheet = custom_cmap[12]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
for k, pdb in enumerate([LO, HI]):
quantiles = pdb['REL_RATE'].quantile(np.arange(0,1.1,.1)).values
pdb['quant'] = pdb['REL_RATE'].apply(lambda x: utils.assign_quantile(x, quantiles))
enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath("fig3_enrich.pickle"), 'rb'))
for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
if i:
ax[k*3+i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[k*3+i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
else:
ax[k*3+i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
ax[k*3+i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[k*3+i].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[k*3+i].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
mean = np.mean(enrich_data[Y[0]], axis=0)
lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
ax[k*3+2].barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
ax[k*3+2].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
for i in [0,2]:
ax[k*3+i].set_yticks(np.arange(len(quantiles))*sep)
ax[k*3+i].set_yticklabels([round(x,1) for x in quantiles])
for i in range(2):
ax[k*3+i].spines['top'].set_visible(False)
ax[k*3+i].spines['right'].set_visible(False)
for i in range(1,3):
ax[k*3+i].spines['left'].set_visible(False)
ax[k*3+i].spines['top'].set_visible(False)
for i in range(3):
ax[k*3+i].set_ylim(0-sep/4, (0.5+sep/4)*1.05)
ax[k*3+1].set_yticks([])
ax[k*3+2].yaxis.set_label_position('right')
ax[k*3+2].yaxis.tick_right()
ax[k*3+0].set_xlabel(r"asym$_{\alpha}$")
ax[k*3+1].set_xlabel(r"asym$_{\beta}$")
ax[k*3+0].set_ylabel(r'$\log_{10}R$')
ax[k*3+2].set_xlabel('N terminal\nEnrichment')
plot_metric_space(fig, ax[6:])
fs = 14
for i, b in zip([0,3,6], list('ABCDEFGHI')):
ax[i].text( -0.20, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si4.pdf"), bbox_inches='tight')
def get_ci_index(X, Y):
xlo = np.quantile(X, 0.025)
xhi = np.quantile(X, 0.975)
ylo = np.quantile(Y, 0.025)
yhi = np.quantile(Y, 0.975)
return np.where((X>=xlo)&(X<=xhi)&(Y>=ylo)&(Y<=yhi))[0]
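# get_ci_index keeps only the bootstrap samples whose coordinates both fall inside their
# marginal 95% ranges; plot_hull below then outlines the convex hull of the retained points.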
def plot_hull(boot_fit, patt, ax='', c='k', lw=1):
idx = get_ci_index(*boot_fit[:,:2].T)
tmp = boot_fit[idx].copy()
hull = ConvexHull(np.array([boot_fit[idx,1], boot_fit[idx, 0]]).T)
for simplex in hull.simplices:
if not isinstance(ax, str):
ax.plot(tmp[simplex, 1], tmp[simplex, 0], patt, c=c, lw=lw)
else:
plt.plot(tmp[simplex, 1], tmp[simplex, 0], patt, c=c, lw=lw)
def plot_metric_space(fig, ax):
fit = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_met.pickle"), 'rb'))['AA']
boot_fit = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_param.pickle"), 'rb'))
boot_fit_0 = pickle.load(open(PATH_FIG_DATA.joinpath("boot_fit_param_useall.pickle"), 'rb'))
X, Y = np.meshgrid(fit["c1"], fit["c2"])
cmap = colors.ListedColormap(sns.diverging_palette(230, 22, s=100, l=47, n=8))
bounds = np.linspace(-2, 2, 9)
norm = colors.BoundaryNorm(bounds, cmap.N)
im = []
ttls = ['Helices', 'Sheets']
for i in range(2):
im = ax[i].contourf(X, Y, fit['met'][:,:,i], bounds, cmap=cmap, vmin=-2, vmax=2, norm=norm)
cbar = fig.colorbar(im, ax=ax[i], fraction=0.046, pad=0.04, norm=norm, boundaries=bounds, ticks=bounds)
cbar.set_label(r"$R_{\mathregular{max}}$", labelpad=-5)
ax[i].set_xlabel('A')
ax[i].set_xlim(X.min(), X.max())
ax[i].set_ylabel('B')
ax[i].set_ylim(Y.max(), Y.min())
ax[i].invert_yaxis()
ax[i].set_aspect((np.max(X)-np.min(X))/(np.max(Y)-np.min(Y)))
ax[i].set_title(ttls[i])
col = ['k', '#79C726']
for i, boofi in enumerate([boot_fit, boot_fit_0]):
for j in range(2):
for bf, p in zip(boofi, ['-', ':']):
plot_hull(bf, p, ax[j], c=col[i])
c1 = [13.77, -6.07]
c1a = [11.36553036, -4.87716477]
c1b = [16.17819934, -7.27168306]
patt = ['*', 'o', 'o']
lbls = ['Fit', r"$95\% CI$", r"$95\% CI$"]
col = "#CB7CE6"
for i in range(2):
for coef, p, l in zip([c1, c1a, c1b], patt, lbls):
ax[i].plot([coef[0]], [coef[1]], p, label=l, fillstyle='none', ms=10, c=col, mew=2)
ax[i].legend(loc='best', frameon=False)
####################################################################
### FIG 5
def fig5():
fig, ax = plt.subplots(2,1)
fig.subplots_adjust(hspace=0.3)
bins = np.arange(0,620,20)
X = [bins[:-1] + np.diff(bins[:2])]
bins = np.arange(0,61,2.0)
X.append(bins[:-1] + np.diff(bins[:2]))
yellows = sns.diverging_palette(5, 55, s=95, l=77, n=13)
pinks = sns.diverging_palette(5, 55, s=70, l=52, n=13)
col = [yellows[12], pinks[0]]
col2 = [yellows[10], pinks[3]]
data = [pickle.load(open(PATH_FIG_DATA.joinpath(f"dom_{x}_dist_boot.pickle"), 'rb')) for x in ['aa', 'smco']]
for j in range(2):
for i in [1,2]:
MEAN, LO, HI = [np.array(x) for x in data[j][f"pos{i}"]]
ax[j].plot(X[j], MEAN, '--', c=col[i-1], label=f'position {i}')
ax[j].fill_between(X[j], LO, HI, color=col2[i-1], alpha=0.5)
ax[0].set_xlabel('Sequence Length')
ax[1].set_xlabel('Contact Order')
ax[0].set_ylabel('Density')
ax[1].set_ylabel('Density')
ax[0].legend(loc='upper right', frameon=False)
fig.savefig(PATH_FIG.joinpath("si5.pdf"), bbox_inches='tight')
####################################################################
### FIG 6
def fig6(X='REL_RATE', Y='S_ASYM'):
fig, ax = plt.subplots(1,2, figsize=(10,4))
fig.subplots_adjust(hspace=0.7, wspace=0.3)
sep = 0.40
col = Paired_12.hex_colors[5]
ttls = [f"Position {i}" for i in range(1,3)]
dom_pos_boot = pickle.load(open(PATH_FIG_DATA.joinpath("dom_pos_boot.pickle"), 'rb'))
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[11]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
# ttls = ["Two-domain", "Three-domain"]
xlbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
for i in range(2):
for j, (pos, dat) in enumerate(dom_pos_boot[2].items()):
quantiles = dat[0].mean(axis=0)
mean = dat[1][:,i,:].mean(axis=0)
lo = np.abs(np.quantile(dat[1][:,i,:], 0.025, axis=0) - mean)
hi = np.abs(np.quantile(dat[1][:,i,:], 0.975, axis=0) - mean)
ax[j].bar(np.arange(10)+(i+1)*sep, mean, sep, yerr=(lo, hi), color=col[i], label=xlbls[i], alpha=0.7, error_kw={'lw':.8})
ax[j].set_xticks(np.arange(len(quantiles)))
ax[j].set_xticklabels(np.round(quantiles, 1), rotation=90)
ax[i].spines['top'].set_visible(False)
ax[i].spines['right'].set_visible(False)
ax[i].set_title(ttls[i], loc='left')
ax[i].set_xlabel(r'$\log_{10}R$')
# ax[i,k].set_ylabel('N terminal\nEnrichment')
ax[i].set_ylabel("N Terminal Enrichment")
ax[0].legend(bbox_to_anchor=(1.17, 1.12), frameon=False, ncol=3)
fig.savefig(PATH_FIG.joinpath("si6.pdf"), bbox_inches='tight')
####################################################################
### FIG 7
def fig7(pdb, Y='D_ASYM'):
fig, ax = plt.subplots(3,3, figsize=(12,8))
fig.subplots_adjust(hspace=0.5, wspace=0.5)
sep = 0.05
col = Paired_12.hex_colors[7]
xlbls = [r'$\log_{10} R$', 'Sequence Length', 'Contact Order']
ttls = ['Full sample', 'Eukaryotes', 'Prokaryotes']
for k, df in enumerate([pdb, pdb.loc[pdb.k_trans==5], pdb.loc[pdb.k_trans==10]]):
for i, X in enumerate(['REL_RATE', 'AA_PDB', 'CO']):
quantiles = df[X].quantile(np.arange(0,1.1,.1)).values
df['quant'] = df[X].apply(lambda x: utils.assign_quantile(x, quantiles))
ratio = []
for j in range(len(quantiles)-1):
left = len(df.loc[(df.quant==j)&(df[Y]<0)]) / max(1, len(df.loc[(df.quant==j)]))
right = len(df.loc[(df.quant==j)&(df[Y]>0)]) / max(1, len(df.loc[(df.quant==j)]))
ratio.append((right - left))
# print(ratio)
ax[i,k].bar([sep*j+sep/2 for j in range(10)], ratio, sep/2, color=[col if r > 0 else 'grey' for r in ratio], alpha=.5)
ax[i,k].set_xticks(np.arange(len(quantiles))*sep)
if i == 1:
ax[i,k].set_xticklabels([int(x) for x in quantiles], rotation=90)
else:
ax[i,k].set_xticklabels([round(x,1) for x in quantiles], rotation=90)
ax[i,k].set_xlabel(xlbls[i])
ax[i,k].set_ylabel('N terminal\nEnrichment')
ax[0,k].set_title(ttls[k])
fig.savefig(PATH_FIG.joinpath("si7.pdf"), bbox_inches='tight')
####################################################################
### FIG 8
def fig8(df_pdb):
fig = plt.figure()
gs = GridSpec(2,1, wspace=0.0, height_ratios=[.5,1])
ax = [fig.add_subplot(gs[1,0]), fig.add_subplot(gs[0,0])]
X = np.arange(-3, 3, 0.01)
Y = np.array([(10**x + 1)/max(10**x, 1) for x in X])
Y2 = (1+10**X) / np.array([max(1, 10**x+30./100.) for x in X])
ax[0].plot(X, Y, '-', label=r"$\tau_{ribo}=0$")
ax[0].plot(X, Y2, ':', label=r"$\tau_{ribo}=0.3\tau_{trans}$")
lbls = ['1ILO', '2OT2', '3BID']
patt = ['o', 's', '^']
for l, p in zip(lbls, patt):
X, Y = np.load(PATH_FIG_DATA.joinpath(f"{l}.npy"))
ax[0].plot(X, Y, p, label=l, alpha=0.5, mec='k', ms=7)
ax[0].set_xlim(-2.3, 2.3)
ax[0].set_ylim(1, 2.05)
ax[0].set_xlabel(r'$\log_{10} R$')
ax[0].set_ylabel("Speed-up")
ax[0].spines['top'].set_visible(False)
ax[0].spines['right'].set_visible(False)
ax[0].legend(loc='upper right', frameon=False, bbox_to_anchor=(1.05, 1.00), ncol=1, labelspacing=.1)
fig8a(df_pdb, ax[1])
fig.savefig(PATH_FIG.joinpath("si8.pdf"), bbox_inches='tight')
def fig8a(df_pdb, ax):
lbls = ['2OT2', '1ILO', '3BID']
idx = [98212, 19922, 127370]
SS = df_pdb.loc[idx, 'SS_PDB2'].values
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
col_key = {'.':'grey', 'D':'grey', 'H':custom_cmap[3], 'S':custom_cmap[9]}
ec_key = {'.':'grey', 'D':'grey', 'H':custom_cmap[1], 'S':custom_cmap[11]}
wid_key = {'.':0.1, 'D':0.1, 'H':0.3, 'S':0.3}
lw_key = {'.':0.7, 'D':0.7, 'H':1.5, 'S':1.5}
for i, ss in enumerate(SS):
left = 0.
for j, strand in enumerate(new_figs.generate_strand(ss)):
s = strand[0]
ax.barh([i], [len(strand)], wid_key[s], left=[left], color=col_key[s], ec=ec_key[s], linewidth=lw_key[s])
left += len(strand) + 0.20
ax.annotate("N", xy=(-0.01, 1.0), xycoords='axes fraction')
ax.annotate("C", xy=(0.59, 1.0), xycoords='axes fraction')
for pos in ['left', 'right', 'top', 'bottom']:
ax.spines[pos].set_visible(False)
col = np.array(custom_cmap)[[3,9,1,11]]
ax.legend(handles=[mpatches.Patch(fc=c1, ec=c2, label=l) for c1, c2, l in zip(col[:2], col[2:], ['Helix', 'Sheet'])],
loc='upper right', frameon=False, ncol=1, bbox_to_anchor=(0.95, 1.10))
ax.set_xticks([])
ax.set_yticks(range(3))
ax.set_yticklabels(lbls)
ax.tick_params(axis='y', which='major', length=0, pad=10)
####################################################################
### FIG 9
def fig9(pdb, s='S'):
pdb = pdb.loc[(pdb.USE_RSA)]
pdb = pdb.loc[(pdb.SS_PDB2.str.len()==pdb.RSA.apply(len))]
path = PATH_FIG_DATA.joinpath("RSA_quantiles.pickle")
if path.exists():
quantiles, euk_quantiles, prok_quantiles = pickle.load(open(path, 'rb'))
else:
quantiles = [np.quantile([x for y in pdb['RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
euk_quantiles = [np.quantile([x for y in pdb.loc[pdb.k_trans==5, 'RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
prok_quantiles = [np.quantile([x for y in pdb.loc[pdb.k_trans==10, 'RSA'] for x in y if np.isfinite(x)], x/3) for x in range(1,4)]
pickle.dump([quantiles, euk_quantiles, prok_quantiles], open(path, 'wb'))
print(quantiles)
# fig, ax = plt.subplots(4,3, figsize=(8,8))
# fig.subplots_adjust(wspace=0.5)
fig = plt.figure(figsize=(12,9))
gs = GridSpec(5,3, wspace=0.3, height_ratios=[1,1,1,1,1])
ax = [fig.add_subplot(gs[j,i]) for i in range(3) for j in [0,1]] + \
[fig.add_subplot(gs[j,i]) for i in range(3) for j in [3,4]]
print("All proteins, all SS")
fig9a(pdb['RSA'], pdb['SS_PDB2'], quantiles, ax[:2], s='SH.D')
print("euk proteins, all ss")
fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[2:4], s='SH.D')
print("Prok proteins, all SS")
fig9a(pdb.loc[pdb.k_trans==10, 'RSA'], pdb.loc[pdb.k_trans==10, 'SS_PDB2'], prok_quantiles, ax[4:6], s='SH.D')
print("Euk proteins, only SHC")
fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[6:8], s='SH.')
print("Euk proteins, only S")
fig9a(pdb.loc[pdb.k_trans==5, 'RSA'], pdb.loc[pdb.k_trans==5, 'SS_PDB2'], euk_quantiles, ax[8:10], s='S')
print("Prok proteins, only S")
fig9a(pdb.loc[pdb.k_trans==10, 'RSA'], pdb.loc[pdb.k_trans==10, 'SS_PDB2'], prok_quantiles, ax[10:12], s='S')
ttls = ['All proteins\nAll residues', 'Eukaryotic proteins\nAll residues', 'Prokaryotic proteins\nAll residues',
'Eukaryotic proteins\nHelix, sheet and coil', 'Eukaryotic proteins\nOnly Sheets', 'Prokaryotic proteins\nOnly Sheets']
col = np.array(list(Paired_12.hex_colors))[[0,2,4,6]]
lbls = ['Buried', 'Middle', 'Exposed']
ax[0].set_ylabel('Solvent accessibility\nprobability')
ax[1].set_ylabel('Solvent accessibility\nasymmetry\n$\\log_2 (N / C)$')
ax[6].set_ylabel('Solvent accessibility\nprobability')
ax[7].set_ylabel('Solvent accessibility\nasymmetry\n$\\log_2 (N / C)$')
handles = [Line2D([0], [0], ls=ls, c=c, label=l) for ls, c, l in zip(['-', '--'], ['k']*2, ['N', 'C'])] + \
[Line2D([0], [0], ls='-', c=c, label=l) for l, c in zip(lbls, col)]
ax[8].legend(handles=handles, bbox_to_anchor=(1.30, 1.85), frameon=False,
ncol=5, columnspacing=1.5, handlelength=2.0, labelspacing=2.0)
for i, a in enumerate(ax):
if i % 2:
ax[i].set_xticks(range(0, 60, 10))
ax[i].set_xlabel('Sequence distance from ends')
else:
ax[i].set_xticks([])
ax[i].set_title(ttls[i//2])
ax[i].set_xlim(0, 50)
fig.savefig(PATH_FIG.joinpath("si9.pdf"), bbox_inches='tight')
def fig9a(rsa_list, ss_list, quantiles, ax, s='S'):
cat = 'BME'
countN, countC = utils.sheets_rsa_seq_dist(rsa_list, ss_list, quantiles, ss_key=s)
col = np.array(list(Paired_12.hex_colors))[[0,2,4,6]]
base = np.zeros(len(countN[cat[0]]), dtype=float)
YtN = np.array(list(countN.values())).sum(axis=0)
YtC = np.array(list(countC.values())).sum(axis=0)
X = np.arange(base.size)
for i, s in enumerate(cat):
YN = countN[s]
YC = countC[s]
ax[0].plot(YN/YtN, '-', c=col[i], label=f"{s} N")
ax[0].plot(YC/YtC, ':', c=col[i], label=f"{s} C")
ax[1].plot(np.log2(YN/YC*YtC/YtN), '-', c=col[i], label=f"{s}")
print(s, np.round((np.sum(YN[:20]) / np.sum(YtN[:20])) / (np.sum(YC[:20]) / np.sum(YtC[:20])), 2))
ax[1].plot([0]*base.size, ':', c='k')
ax[0].set_ylim(0,1)
ax[1].set_ylim(-1,1)
for a in ax:
a.set_xlim(X[0], X[-1])
####################################################################
### FIG 10
def fig10(pdb):
pfdb = asym_io.load_pfdb()
acpro = asym_io.load_acpro()
fig = plt.figure(figsize=(12,9))
gs = GridSpec(3,7, wspace=0.0, width_ratios=[5,0.2,5,0.4,3,1.0,6], height_ratios=[1,.3,1])
ax = [fig.add_subplot(gs[2,i*2]) for i in range(4)] + \
[fig.add_subplot(gs[0,0:3]), fig.add_subplot(gs[0,5:])]
# sns.distplot(pdb.ln_kf, ax=ax[5], label='PDB - PFDB fit', hist=False)
pdb = pdb.copy()
coef = folding_rate.linear_fit(np.log10(acpro['L']), acpro['log_kf']).params
pdb['ln_kf'] = folding_rate.pred_fold(np.log10(pdb.AA_PDB), coef)
pdb = utils.get_rel_rate(pdb)
fig10a(fig, ax[4])
fig10b(fig, ax[:4], pdb)
# sns.distplot(pdb.ln_kf, ax=ax[5], label='PDB - ACPro fit', hist=False)
# sns.distplot(pfdb.log_kf, ax=ax[5], label='PFDB data', kde=False, norm_hist=True)
# sns.distplot(acpro["ln kf"], ax=ax[5], label='KDB data', kde=False, norm_hist=True)
sns.regplot(np.log10(acpro['L']), acpro['log_kf'], label='ACPro data', scatter_kws={"alpha":0.5})
sns.regplot(np.log10(pfdb.loc[pfdb.use, 'L']), pfdb.loc[pfdb.use, 'log_kf'], label='PFDB data', scatter_kws={"alpha":0.5})
ax[5].legend(loc='best', frameon=False)
ax[5].set_xlabel(r"$\log_{10}L$")
ax[5].set_ylabel(r"$\log_{10}k_f$")
fs = 14
for i, b in zip([4,5,0,2,3], list('ABCDEFGHI')):
ax[i].text( -0.20, 1.16, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si10.pdf"), bbox_inches='tight')
def fig10a(fig, ax):
Rdist_data = pickle.load(open(PATH_FIG_DATA.joinpath("R_dist_acpro.pickle"), 'rb'))
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
lbls = ['All', 'Prokaryotes', 'Eukaryotes']
for i, k in enumerate(['All', 'Prok', 'Euk']):
ax.plot(Rdist_data['grid'], Rdist_data[k][0], '-', c=col[i], label=lbls[i])
ax.fill_between(Rdist_data['grid'], Rdist_data[k][1], Rdist_data[k][2], color=col[i], alpha=0.5)
ax.plot([0,0], [0, 0.60], ':', c='k', alpha=0.7)
ax.set_xlabel(r'$\log_{10} R$')
ax.set_ylabel('Density')
ax.set_xticks(np.arange(-6, 5, 2))
ax.set_xlim(-7, 2)
ax.set_ylim(0, 0.60)
ax.legend(loc='upper center', bbox_to_anchor=(0.55, 1.17), frameon=False, ncol=3, columnspacing=2)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
def fig10b(fig, ax, pdb, Y='S_ASYM'):
ft = 12
X = np.arange(10)
width = .35
ttls = [r'$\alpha$ Helix', r'$\beta$ Sheet']
lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
# col = np.array(Paired_12.hex_colors)[[1,5]]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[0]
c_sheet = custom_cmap[12]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath("fig3_enrich_acpro.pickle"), 'rb'))
quantiles = enrich_data['edges'].mean(axis=0)
for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
# total = len(pdb)/10
# left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<0)]) / total
# right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>0)]) / total
# print(Y, j, ''.join([f"{x:6.3f}" for x in [left, right, left/right, right / left]]))
if i:
ax[i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
else:
ax[i].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color=col[i], alpha=.5)
ax[i].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[i].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[i].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
mean = np.mean(enrich_data[Y[0]], axis=0)
lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
ax[2].barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
ax[2].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[0].set_yticks(np.arange(len(quantiles))*sep)
ax[0].set_yticklabels([round(x,1) for x in quantiles])
ax[2].legend(loc='upper center', ncol=2, columnspacing=1.5, frameon=False,
bbox_to_anchor=(0.52, 1.15))
for i, t in zip([0,1], ttls):
ax[i].set_title(t)
ax[i].set_xlim(-.15, .15)
ax[i].set_xticks([-.1, 0, .1])
for i in range(3):
ax[i].spines['top'].set_visible(False)
ax[i].spines['right'].set_visible(False)
ax[i].set_ylim(0-sep/4, 0.5+sep)
for i in [1,2]:
ax[i].spines['left'].set_visible(False)
ax[i].set_yticks([])
ax[0].set_xlabel(r"asym$_{\alpha}$")
ax[1].set_xlabel(r"asym$_{\beta}$")
ax[0].set_ylabel(r'$\log_{10}R$')
ax[2].set_xlabel('N terminal\nEnrichment')
pdb = pdb.loc[pdb.OC!='Viruses']
X = np.arange(10)
X = np.array([sep*j+(i+.7)*sep/3 for j in range(10)])
width = .175
ttls = ['Eukaryote ', 'Prokaryote ']
lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
col = [custom_cmap[i] for i in [3, 9, 0, 12]]
paths = [f"fig3_enrich_{a}_acpro.pickle" for a in ['eukaryote', 'prokaryote']]
for i, path in enumerate(paths):
enrich_data = pickle.load(open(PATH_FIG_DATA.joinpath(path), 'rb'))
for j, Y in enumerate(['H_ASYM', 'S_ASYM']):
# adjust = (j - 1 + i*2)*width
adjust = (j*2 - 4.0 + i)*(sep/5)
mean = np.mean(enrich_data[Y[0]], axis=0)
lo = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.025, axis=0))
hi = np.abs(mean - np.quantile(enrich_data[Y[0]], 0.975, axis=0))
print(i, Y, max(np.abs(mean)))
ax[3].barh(X+adjust, mean, sep/5.0, ec='k', xerr=(lo, hi), color=col[i*2+j],
label=ttls[i]+lbls[j], lw=0.001, error_kw={'lw':.2})
ax[3].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[3].set_yticks(np.arange(len(quantiles))*sep)
ax[3].set_ylabel(r'$\log_{10} R$')
ax[3].set_yticklabels([round(x,1) for x in quantiles])
ax[3].set_xlabel('N terminal\nEnrichment')
ax[3].set_xlim(-.42, .42)
ax[3].set_ylim(0-sep/4, 0.5+sep)
ax[3].spines['top'].set_visible(False)
ax[3].spines['left'].set_visible(False)
handles = [mpatches.Patch([], [], color=col[j*2+i], label=ttls[j]+lbls[i]) for i in [0,1] for j in [1,0]]
ax[3].legend(handles=handles, bbox_to_anchor=(1.05, 1.25), frameon=False,
loc='upper right', ncol=2, columnspacing=1.0, handlelength=1.5)
ax[3].yaxis.set_label_position('right')
ax[3].yaxis.tick_right()
####################################################################
### FIG 11
def fig11(pdb, X='AA_PDB', Y='CO', w=.1, ax='', fig=''):
if isinstance(ax, str):
fig, ax = plt.subplots(4,2, figsize=(9,12))
fig.subplots_adjust(wspace=0.0, hspace=0.65)
# ax = ax.reshape(ax.size)
pdb_CO = np.load(PATH_FIG_DATA.joinpath("pdb_config_CO.npy"))[:,:,0]
df = pdb.copy()
q = np.arange(w,1+w,w)
lbls = ['Helix', 'Sheet']
# cb_lbl = [r"$E_{\alpha}$", r"$E_{\beta}$"]
cb_lbl = [r"$asym_{\alpha}$", r"$asym_{\beta}$"]
vmax = 0.03
vmin = -vmax
for j, co in enumerate(pdb_CO.T):
df['CO'] = co
quant1 = [df[X].min()] + list(df[X].quantile(q).values)
quant2 = [df[Y].min()] + list(df[Y].quantile(q).values)
for i, Z in enumerate(['H_ASYM', 'S_ASYM']):
mean = []
for l1, h1 in zip(quant1[:-1], quant1[1:]):
for l2, h2 in zip(quant2[:-1], quant2[1:]):
samp = df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2), Z]
mean.append(samp.mean())
# left = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]<0)])
# right = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]>0)])
# tot = max(len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)]), 1)
# mean.append((right - left)/tot)
cmap = sns.diverging_palette(230, 22, s=100, l=47, as_cmap=True)
norm = colors.BoundaryNorm([vmin, vmax], cmap.N)
bounds = np.linspace(vmin, vmax, 3)
im = ax[j,i].imshow(np.array(mean).reshape(q.size, q.size).T, cmap=cmap, vmin=vmin, vmax=vmax)
cbar = fig.colorbar(im, cmap=cmap, ticks=bounds, ax=ax[j,i], fraction=0.046, pad=0.04)
cbar.set_label(cb_lbl[i], labelpad=-5)
ax[j,i].set_title(lbls[i])
ax[j,i].set_xticks(np.arange(q.size+1)-0.5)
ax[j,i].set_yticks(np.arange(q.size+1)-0.5)
ax[j,i].set_xticklabels([int(x) for x in quant1], rotation=90)
ax[j,i].set_yticklabels([int(round(x,0)) for x in quant2])
for a in ax.ravel():
a.invert_yaxis()
a.set_xlabel('Sequence Length')
a.set_ylabel('Contact Order')
a.tick_params(axis='both', which='major', direction='in')
fs = 14
for i, b in zip(range(4), list('ABCDEFGHI')):
ax[i,0].text( -0.20, 1.16, b, transform=ax[i,0].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si11.pdf"), bbox_inches='tight')
def fig12(pdb, X='REL_RATE', Y='S_ASYM', w=0.1):
fig = plt.figure(figsize=(8,12))
gs = GridSpec(3,2, wspace=0.4, hspace=0.5, width_ratios=[1,1])
ax_all = [[fig.add_subplot(gs[j,i]) for i in [0,1]] for j in range(3)]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
mid = 39
sep = 0.05
lbls = ['Sheet', 'Helix']
quantiles = pdb[X].quantile(np.arange(0,1+w,w)).values
# print(np.round(quantiles, 2))
pdb['quant'] = pdb[X].apply(lambda x: utils.assign_quantile(x, quantiles))
# pdb['quant'] = np.random.choice(pdb['quant'], len(pdb), replace=False)
for ax, threshold in zip(ax_all, [0, 0.025, 0.05]):
print(f"threshold = {threshold}")
for i, Y in enumerate(['S_ASYM', 'H_ASYM']):
ratio1 = []
ratio2 = []
lefts = []
rights = []
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio1.append((right - left))
ratio2.append(np.log2(right / left))
print(Y, j, left, right)
xgrid = [sep*j+(i+1.0)*sep/3 for j in range(len(quantiles)-1)]
ax[0].barh(xgrid, ratio1, sep/3, color=col[i], alpha=.5)
ax[1].barh(xgrid, ratio2, sep/3, color=col[i], alpha=.5)
ax[0].set_xticks(np.arange(-0.3, 0.4, 0.1))
for a in ax:
a.set_yticks(np.arange(len(quantiles))*sep)
a.set_yticklabels([round(x,1) for x in quantiles])
a.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
a.spines['top'].set_visible(False)
a.spines['right'].set_visible(False)
a.set_ylim(0, 0.5)
a.set_ylabel(r'$\log_{10}R$')
ax[0].set_xlim(-0.35, 0.35)
ax[1].set_xlim(-1.50, 1.50)
ax[0].set_xlabel(r'$P(\mathregular{{asym}} \geq {0}) - P(\mathregular{{asym}} \leq -{0})$'.format(*[threshold]*2))
ax[1].set_xlabel(r'$\log_{{2}} \frac{{P(\mathregular{{asym}} \geq {0})}}{{P(\mathregular{{asym}} \leq -{0})}} $'.format(*[threshold]*2))
fig.savefig(PATH_FIG.joinpath("si12.pdf"), bbox_inches='tight')
def fig13(df, X='AA_PDB', Y='CO', w=.1, ax='', fig=''):
if isinstance(ax, str):
fig, ax = plt.subplots(1,3, figsize=(15,4))
fig.subplots_adjust(wspace=0.5)
q = np.arange(w,1+w,w)
quant1 = [df[X].min()] + list(df[X].quantile(q).values)
quant2 = [df[Y].min()] + list(df[Y].quantile(q).values)
lbls = ['Helix', 'Sheet']
cb_lbl = [r"$asym_{\alpha}$", r"$asym_{\beta}$"]
vmax = 0.03
vmin = -vmax
count = []
for i, Z in enumerate(['H_ASYM', 'S_ASYM']):
mean = []
for l1, h1 in zip(quant1[:-1], quant1[1:]):
for l2, h2 in zip(quant2[:-1], quant2[1:]):
samp = df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2), Z]
mean.append(samp.mean())
# left = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]<0)])
# right = len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)&(df[Z]>0)])
# tot = max(len(df.loc[(df[X]>=l1)&(df[X]<h1)&(df[Y]>=l2)&(df[Y]<h2)]), 1)
# mean.append((right - left)/tot)
if not i:
count.append(len(samp))
# print(len(samp))
mean = np.array(mean).reshape(q.size, q.size)
count = np.array(count).reshape(q.size, q.size)
cmap = sns.diverging_palette(230, 22, s=100, l=47, as_cmap=True)
norm = colors.BoundaryNorm([vmin, vmax], cmap.N)
bounds = np.linspace(vmin, vmax, 3)
im = ax[i].imshow(mean.T, cmap=cmap, vmin=vmin, vmax=vmax)
cbar = fig.colorbar(im, cmap=cmap, ticks=bounds, ax=ax[i], fraction=0.046, pad=0.04)
cbar.set_label(cb_lbl[i], labelpad=-5)
ax[i].set_title(lbls[i])
ax[i].set_xticks(np.arange(q.size+1)-0.5)
ax[i].set_yticks(np.arange(q.size+1)-0.5)
ax[i].set_xticklabels([int(x) for x in quant1], rotation=90)
ax[i].set_yticklabels([int(round(x,0)) for x in quant2])
for i in [2]:
cmap = plt.cm.Greys
# norm = colors.BoundaryNorm([-.04, .04], cmap.N)
# bounds = np.linspace(-.04, .04, 5)
im = ax[i].imshow(np.array(count).reshape(q.size, q.size).T, cmap=cmap, vmin=0)
cbar = fig.colorbar(im, cmap=cmap, ax=ax[i], fraction=0.046, pad=0.04)
cbar.set_label('Count')
ax[i].set_title('Distribution')
ax[i].set_xticks(np.arange(q.size+1)-0.5)
ax[i].set_yticks(np.arange(q.size+1)-0.5)
ax[i].set_xticklabels([int(x) for x in quant1], rotation=90)
ax[i].set_yticklabels([int(round(x,0)) for x in quant2])
for a in ax:
a.invert_yaxis()
a.set_xlabel('Sequence Length')
a.set_ylabel('Contact Order')
a.tick_params(axis='both', which='major', direction='in')
fs = 14
for i, b in zip([0,1,2], list('ABCDEFGHI')):
ax[i].text( -0.20, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si13.pdf"), bbox_inches='tight')
def scop_ss():
fig, ax = plt.subplots(2,1)
cat = 'HS.D'
N = 50
X = np.arange(50)
Nboot, Cboot, asym, enrich_edges, enrich_vals = pickle.load(open(PATH_FIG_DATA.joinpath(f"pdb_scop_indep.pickle"), 'rb'))
data = [Nboot, Cboot, asym]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
for j, s in enumerate(cat):
ax[0].plot(X, data[0][s]['mean']/4, '-', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[0][s]['hi']/4, data[0][s]['lo']/4, color="grey", label=f"{s} N", alpha=0.5)
ax[0].plot(X, data[1][s]['mean']/4, '--', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[1][s]['hi']/4, data[1][s]['lo']/4, color="grey", label=f"{s} N", alpha=0.2)
print(s, round(np.mean(data[2][s]['mean']), 2), round(np.mean(data[2][s]['mean'][:20]), 2), round(np.mean(data[2][s]['mean'][20:]), 2))
ax[1].plot(X, np.log2(data[2][s]['mean']), '-', c=col[j], label=lbls[j])
ax[1].fill_between(X, np.log2(data[2][s]['hi']), np.log2(data[2][s]['lo']), color="grey", label=f"{s} N", alpha=0.2)
ax[1].set_ylim(-1, 1.3)
ax[1].plot([0]*50, '-', c='k')
ax[1].set_yticks(np.arange(-1,1.5,0.5))
ax[0].set_ylim(0, 0.6)
ax[1].set_xlabel('Sequence distance from ends')
ax[0].set_ylabel('Secondary structure\nprobability')
ax[1].set_ylabel('Structural asymmetry\n$\\log_2 (N / C)$')
fs = 14
for i, b in zip([0,1], list('ABCDEFGHI')):
ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si14.pdf"), bbox_inches='tight')
def percentage_asym(x):
return np.sign(x) * 100*2**(abs(x)) - np.sign(x) * 100
def fig15():
fig, ax = plt.subplots(3,1, figsize=(10,10))
cat = 'HS.D'
N = 100
X = np.arange(N)
Nboot, Cboot, asym, = pickle.load(open(PATH_FIG_DATA.joinpath(f"pdb_ss_max_asym.pickle"), 'rb'))
data = [Nboot, Cboot, asym]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet, "#CB7CE6", "#79C726"]
lbls = ['Helix', 'Sheet', 'Coil', 'Disorder']
X2 = np.arange(5)
for j, s in enumerate(cat):
ax[0].plot(X, data[0][s]['mean']/2, '-', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[0][s]['hi']/2, data[0][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.5)
ax[0].plot(X, data[1][s]['mean']/2, '--', c=col[j], label=f"{s} N")
ax[0].fill_between(X, data[1][s]['hi']/2, data[1][s]['lo']/2, color="grey", label=f"{s} N", alpha=0.2)
for k in range(5):
print(s, round(np.mean(data[2][s]['mean']), 2), round(np.mean(data[2][s]['mean'][k*20:(k+1)*20]), 2))
ax[1].plot(X, np.log2(data[2][s]['mean']), '-', c=col[j], label=lbls[j])
ax[1].fill_between(X, np.log2(data[2][s]['hi']), np.log2(data[2][s]['lo']), color="grey", label=f"{s} N", alpha=0.2)
if s in 'HS':
Y2 = [percentage_asym(np.log2(data[2][s]['mean'])[k*20:(k+1)*20].mean()) for k in range(5)]
ax[2].bar(X2, Y2, 0.5, color=col[j], label=lbls[j], ec='k')
ax[1].set_ylim(-1.5, 2.0)
ax[1].plot([0]*100, '-', c='k')
ax[2].plot([0]*5, '-', c='k')
ax[1].set_yticks(np.arange(-1,2.5,0.5))
ax[0].set_ylim(0, 0.6)
ax[2].set_xticks(np.arange(5))
ax[2].set_xticklabels([f"{i*20} - {(i+1)*20}" for i in range(5)])
ax[0].set_xlabel('Sequence distance from ends')
ax[1].set_xlabel('Sequence distance from ends')
ax[2].set_xlabel('Sequence distance from ends')
ax[0].set_ylabel('Secondary structure\nprobability')
ax[1].set_ylabel('Structural asymmetry\n$\\log_2 (N / C)$')
ax[2].set_ylabel('Percentage asymmetry')
fs = 14
for i, b in zip([0,1,2], list('ABCDEFGHI')):
ax[i].text( -0.10, 1.05, b, transform=ax[i].transAxes, fontsize=fs)
fig.savefig(PATH_FIG.joinpath("si15.pdf"), bbox_inches='tight')
def oligomer(pdb, X='REL_RATE', Y='S_ASYM', w=0.1):
pdb = pdb.copy()
fig = plt.figure(figsize=(8,8))
gs = GridSpec(2,2, wspace=0.4, hspace=0.5, width_ratios=[1,1])
ax_all = [[fig.add_subplot(gs[j,i]) for i in [0,1]] for j in range(2)]
custom_cmap = sns.diverging_palette(230, 22, s=100, l=47, n=13)
c_helix = custom_cmap[2]
c_sheet = custom_cmap[10]
col = [c_helix, c_sheet]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
mid = 39
sep = 0.05
threshold = 0
lbls = [r'$E_{\beta}$', r'$E_{\alpha}$']
ttls = ['Monomers', 'Oligomers']
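# Build the same asymmetry-per-quantile panels separately for monomers (NPROT == 1)
# and oligomers (NPROT > 1).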
for ax, idx, ttl in zip(ax_all, [pdb.NPROT==1, pdb.NPROT>1], ttls):
quantiles = pdb.loc[idx, X].quantile(np.arange(0,1+w,w)).values
pdb['quant'] = pdb.loc[idx, X].apply(lambda x: utils.assign_quantile(x, quantiles))
for i, Y in enumerate(['S_ASYM', 'H_ASYM']):
ratio1 = []
ratio2 = []
lefts = []
rights = []
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[(idx)&(pdb.quant==j), Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(idx)&(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(idx)&(pdb.quant==j)]), 1)
right = len(pdb.loc[(idx)&(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(idx)&(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio1.append((right - left))
ratio2.append(np.log2(right / left))
xgrid = [sep*j+(i+1.0)*sep/3 for j in range(len(quantiles)-1)]
ax[0].barh(xgrid, ratio1, sep/3, color=col[i], alpha=.5, label=lbls[i])
ax[1].barh(xgrid, ratio2, sep/3, color=col[i], alpha=.5)
ax[0].set_xticks(np.arange(-0.3, 0.4, 0.1))
for a in ax:
a.set_yticks(np.arange(len(quantiles))*sep)
a.set_yticklabels([round(x,1) for x in quantiles])
a.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
a.spines['top'].set_visible(False)
a.spines['right'].set_visible(False)
a.set_ylim(0, 0.5)
a.set_ylabel(r'$\log_{10}R$')
a.set_title(f"{ttl}, N={np.sum(idx)}")
ax[0].set_xlim(-0.35, 0.35)
ax[1].set_xlim(-1.50, 1.50)
ax[0].set_xlabel(r'$P(\mathregular{{asym}} \geq {0}) - P(\mathregular{{asym}} \leq -{0})$'.format(*[threshold]*2))
ax[1].set_xlabel(r'$\log_{{2}} \frac{{P(\mathregular{{asym}} \geq {0})}}{{P(\mathregular{{asym}} \leq -{0})}} $'.format(*[threshold]*2))
ax[0].legend(loc='upper center', ncol=2, columnspacing=3, frameon=False,
bbox_to_anchor=(1.20, 1.20))
fig.savefig(PATH_FIG.joinpath("si16.pdf"), bbox_inches='tight')
fig.savefig(PATH_FIG.joinpath("oligomers.png"), bbox_inches='tight')
def scop2(X='REL_RATE', Y='S_ASYM', w=0.1):
fig, ax = plt.subplots(figsize=(10,6))
edges, data = pickle.load(open(PATH_FIG_DATA.joinpath("pdb_scop_indep.pickle"), 'rb'))[3:]
edges = edges[0]
sep = 0.05
lbls = [r'$E_{\alpha}$', r'$E_{\beta}$']
for i, Y in enumerate(['H_ASYM', 'S_ASYM']):
mean = np.mean(data[:,i], axis=0)
lo = np.abs(mean - np.quantile(data[:,i], 0.025, axis=0))
hi = np.abs(mean - np.quantile(data[:,i], 0.975, axis=0))
ax.barh([sep*j+(i+.7)*sep/3 for j in range(10)], mean, sep/3, xerr=(lo, hi), color=col[i], ec='k', alpha=.5, label=lbls[i], error_kw={'lw':.8})
ax.plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax.set_yticks(np.arange(len(edges))*sep)
ax.set_yticklabels([round(x,1) for x in edges])
ax.legend(loc='upper center', ncol=2, columnspacing=3, frameon=False,
bbox_to_anchor=(0.52, 1.06))
ax.set_xlim(-.38, .38)
ax.set_xticks(np.arange(-.3, .4, .1))
# To create this figure, you need to download the complete
# Human and E. coli proteomes at:
# https://alphafold.ebi.ac.uk/download
# and then change the code so that "base" points to the
# folder that contains the downloaded ".pdb" files
def disorder_proteome(N=100):
fig, ax = plt.subplots(1,2, figsize=(12,4))
lbls = ["Human", "Ecoli"]
ttls = ["Human", "E. coli"]
for i, l in enumerate(lbls):
path = PATH_FIG_DATA.joinpath(f"alphafold_{l}.npy")
if not path.exists():
base = PATH_BASE.joinpath(f"AlphaFold/{l}")
countN = np.zeros(N, float)
countC = np.zeros(N, float)
tot = np.zeros(N, float)
with Pool(50) as pool:
dis = list(pool.imap_unordered(utils.get_disorder_from_conf, base.glob("*pdb"), 10))
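# Accumulate per-position disorder counts: countN counts from the N terminus, countC from the
# C terminus (reversed), truncated at half the protein length so the two ends do not overlap.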
for d in dis:
n = min(int(len(d)/2), N)
countN[:n] = countN[:n] + d[:n]
countC[:n] = countC[:n] + d[-n:][::-1]
tot[:n] = tot[:n] + 1
fracN = countN / tot
fracC = countC / tot
np.save(path, np.array([fracN, fracC]))
else:
fracN, fracC = np.load(path)
ax[i].plot(np.arange(N)+1, fracN, '-', label='N')
ax[i].plot(np.arange(N)+1, fracC, '--', label='C')
ax[i].set_title(ttls[i])
ax[i].set_xlabel("Sequence distance from ends")
ax[i].set_ylabel("Disorder probability")
ax[i].set_ylim(0, 1)
ax[i].legend(loc='best', frameon=False)
fig.savefig(PATH_FIG.joinpath("si17.pdf"), bbox_inches='tight')
def kfold_vs_ss():
pfdb = asym_io.load_pfdb()
fig, ax = plt.subplots(figsize=(8,8))
for c in pfdb.Class.unique():
X = np.log10(pfdb.loc[pfdb.Class==c, 'L'])
Y = pfdb.loc[pfdb.Class==c, 'log_kf']
sns.regplot(X, Y, label=c)
ax.set_xlabel(r"$\log_{10}$ Sequence Length")
ax.set_ylabel(r"$\log_{10} k_f$")
ax.legend(loc='best', frameon=False)
fig.savefig(PATH_FIG.joinpath("si18.pdf"), bbox_inches='tight')
def hbond_asym(pdb, Xl='REL_RATE', Y='hb_asym', w=0.1):
fig = plt.figure(figsize=(9,6))
gs = GridSpec(1,2, wspace=0.2, hspace=0.0, width_ratios=[1,.3])
ax = [fig.add_subplot(gs[i]) for i in [0,1]]
col = np.array(Paired_12.hex_colors)[[1,3]]
bins = np.linspace(-0.20, 0.20, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
quantiles = pdb[Xl].quantile(np.arange(0,1+w,w)).values
ratio = []
lefts = []
rights = []
threshold = 0.00
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio.append((right - left))
ax[0].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[0].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[0], alpha=.5)
ax[0].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[0].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
ax[0].set_yticks(np.arange(len(quantiles))*sep)
ax[0].set_yticklabels([round(x,1) for x in quantiles])
ax[1].barh([sep*j+sep/2 for j in range(len(quantiles)-1)], ratio, sep/2, color=[col[0] if r > 0 else 'grey' for r in ratio], alpha=.5)
ax[1].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[0].spines['top'].set_visible(False)
ax[0].spines['right'].set_visible(False)
ax[1].spines['top'].set_visible(False)
ax[1].spines['right'].set_visible(False)
ax[1].spines['left'].set_visible(False)
ax[1].set_yticks([])
for a in ax:
a.set_ylim(0, 0.60)
ax[0].set_xlabel('Asymmetry in mean hydrogen bond length')
ax[0].set_ylabel(r'$\log_{10}R$')
ax[1].set_xlabel('N terminal enrichment')
fig.savefig(PATH_FIG.joinpath("si19.pdf"), bbox_inches='tight')
def hyd_asym(pdb, Xl='REL_RATE', Y='hyd_asym', w=0.1):
fig = plt.figure(figsize=(9,6))
gs = GridSpec(1,2, wspace=0.2, hspace=0.0, width_ratios=[1,.3])
ax = [fig.add_subplot(gs[i]) for i in [0,1]]
col = np.array(Paired_12.hex_colors)[[1,3]]
bins = np.linspace(-4.5, 4.5, 80)
width = np.diff(bins[:2])
X = bins[:-1] + width * 0.5
mid = 39
sep = 0.05
quantiles = pdb[Xl].quantile(np.arange(0,1+w,w)).values
ratio = []
lefts = []
rights = []
threshold = 0.00
for j in range(len(quantiles)-1):
hist, bins = np.histogram(pdb.loc[pdb.quant==j, Y], bins=bins)
hist = hist / hist.sum()
left = len(pdb.loc[(pdb.quant==j)&(pdb[Y]<-threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
right = len(pdb.loc[(pdb.quant==j)&(pdb[Y]>threshold)]) / max(len(pdb.loc[(pdb.quant==j)]), 1)
lefts.append(left)
rights.append(right)
ratio.append((right - left))
ax[0].bar(X[:mid], (hist/hist.sum())[:mid], width, bottom=[sep*j]*mid, color='grey', alpha=.5)
ax[0].bar(X[-mid:], (hist/hist.sum())[-mid:], width, bottom=[sep*j]*mid, color=col[0], alpha=.5)
ax[0].plot(X[:mid], (hist/hist.sum()+sep*j)[:mid], '-', c='k', alpha=.5)
ax[0].plot(X[-mid:], (hist/hist.sum()+sep*j)[-mid:], '-', c='k', alpha=.5)
ax[0].set_yticks(np.arange(len(quantiles))*sep)
ax[0].set_yticklabels([round(x,1) for x in quantiles])
ax[1].barh([sep*j+sep/2 for j in range(len(quantiles)-1)], ratio, sep/2, color=[col[0] if r > 0 else 'grey' for r in ratio], alpha=.5)
ax[1].plot([0,0], [-0.05, 0.5], '-', c='k', lw=.1)
ax[0].spines['top'].set_visible(False)
ax[0].spines['right'].set_visible(False)
ax[1].spines['top'].set_visible(False)
ax[1].spines['right'].set_visible(False)
ax[1].spines['left'].set_visible(False)
ax[1].set_yticks([])
for a in ax:
a.set_ylim(0, 0.60)
ax[0].set_xlabel('Asymmetry in mean hydrophobicity')
ax[0].set_ylabel(r'$\log_{10}R$')
ax[1].set_xlabel('N terminal enrichment')
fig.savefig(PATH_FIG.joinpath("si20.pdf"), bbox_inches='tight')
|
# Generated by Django 2.2.7 on 2019-12-04 20:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('post', '0002_remove_comment_liked'),
]
operations = [
migrations.AddField(
model_name='post',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='post.Post'),
),
]
|
import pandas as pd
# 1.1 Understanding the dataset
# ===================================================================================
# - Import the dataset into a pandas dataframe using the read_table method.
# Because this is a tab separated dataset we will be using '\t' as the value
# for the 'sep' argument which specifies this format.
# - Also, rename the column names by specifying a list ['label', 'sms_message']
# to the 'names' argument of read_table().
# - Print the first five values of the dataframe with the new column names.
# ===================================================================================
# Read from spam collection file
df = pd.read_table('smsspamcollection/SMSSpamCollection',
sep = '\t',
header = None,
names = ['label', 'sms_messages'])
# Take a look
# print(df.head())
# 1.2 Preprocessing
# ===================================================================================
# - Convert the values in the 'label' column to numerical values using map
# method as follows: {'ham':0, 'spam':1} This maps the 'ham' value to 0 and
# the 'spam' value to 1.
# - Also, to get an idea of the size of the dataset we are dealing with, print
# out number of rows and columns using 'shape'.
# ===================================================================================
# Convert labels to [0, 1]
# Define mapping
mapping = {'ham' : 0, 'spam': 1}
# Apply mapping
df.label = df.label.map(mapping)
# Get an understanding of the size
# print(df.shape)
# 2.1 Bag of Words (Sklearn)
# ===================================================================================
# Describes bag of words, no code
# ===================================================================================
# 2.2 Bag of Words (from scratch)
# ===================================================================================
# The example goes on a slight tangent using a smaller documents array
# I attempt this with the imported data
# ===================================================================================
# 2.2.1 Convert all strings to lower case
# ===================================================================================
# - Convert all the strings in the documents set to their lower case.
# Save them into a list called 'lower_case_documents'. You can convert
# strings to their lower case in python by using the lower() method.
# ===================================================================================
lower_case_documents = df.sms_messages.apply(str.lower)
# print(lower_case_documents)
# 2.2.2 Remove all punctuation
# ===================================================================================
# - Remove all punctuation from the strings in the document set. Save
# them into a list called 'sans_punctuation_documents'.
# ===================================================================================
# Regex function
def removePunctuation(s):
import re, string
regex = re.compile('[%s]+' % re.escape(string.punctuation))
return regex.sub("", s)
sans_punctuation_documents = lower_case_documents.apply(lambda x: removePunctuation(x))
# print(sans_punctuation_documents)
# 2.2.3 Tokenization
# ===================================================================================
# - Tokenize the strings stored in 'sans_punctuation_documents' using the split()
# method. and store the final document set in a list called
# 'preprocessed_documents'.
# ===================================================================================
preprocessed_documents = sans_punctuation_documents.apply(lambda x: str.split(x))
# print(preprocessed_documents)
# 2.2.4 Count frequency
# ===================================================================================
# - Using the Counter() method and preprocessed_documents as the input, create a
# dictionary with the keys being each word in each document and the corresponding
# values being the frequency of occurrence of that word. Save each Counter dictionary
# as an item in a list called 'frequency_list'.
# ===================================================================================
from collections import Counter
frequency_list = preprocessed_documents.apply(lambda x: Counter(x))
# print(frequency_list)
# 2.3 BoW with Sklearn
# ===================================================================================
# As in 2.2, I attempt this with the imported dataset instead of a small list
# ===================================================================================
# 2.3.1 Importing
# ===================================================================================
# - Import the sklearn.feature_extraction.text.CountVectorizer
# method and create an instance of it called 'count_vector'.
# ===================================================================================
from sklearn.feature_extraction.text import CountVectorizer
count_vector = CountVectorizer()
# 2.3.2 Using the CountVectorizer
# ===================================================================================
# - Fit your document dataset to the CountVectorizer object you have created
# using fit(), and get the list of words which have been categorized as
# features using the get_feature_names() method.
# ===================================================================================
count_vector.fit(df.sms_messages)
# print(count_vector.get_feature_names())
# 2.3.3 Create the count frequency matrix
# ===================================================================================
# Create a matrix with the rows being each of the documents, and the columns
# being each word. The corresponding (row, column) value is the frequency of
# occurrence of that word (in the column) in a particular document (in the row).
# You can do this using the transform() method and passing in the document data
# set as the argument. The transform() method returns a matrix of numpy integers,
# you can convert this to an array using toarray(). Call the array 'doc_array'
# ===================================================================================
doc_array = count_vector.transform(df.sms_messages).toarray()
# print(doc_array)
# 2.3.4 Convert Frequency Matrix to Dataframe
# ===================================================================================
# Convert the array we obtained, loaded into 'doc_array', into a dataframe and
# set the column names to the word names (which you computed earlier using
# get_feature_names()). Call the dataframe 'frequency_matrix'.
# ===================================================================================
columns = count_vector.get_feature_names()
frequency_matrix = pd.DataFrame(doc_array, columns = columns)
# print(frequency_matrix.head())
# 3.1 Training and testing sets
# ===================================================================================
# - Split the dataset into a training and testing set by using the train_test_split
# method in sklearn. Split the data using the following variables:
# X_train is our training data for the 'sms_message' column.
# y_train is our training data for the 'label' column
# X_test is our testing data for the 'sms_message' column.
# y_test is our testing data for the 'label' column
# - Print out the number of rows we have in each our training and testing data.
# ===================================================================================
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df.sms_messages, df.label)
# print("Num rows in x_train: " + str(x_train.size))
# print("Num rows in x_test: " + str(x_test.size))
# print("Num rows in y_train: " + str(y_train.size))
# print("Num rows in y_test: " + str(y_test.size))
# print("Total num rows: " + str(len(df)))
# 3.2 Applying BoW to our dataset
# ===================================================================================
# - Firstly, we have to fit our training data (X_train) into CountVectorizer()
# and return the matrix.
# - Secondly, we have to transform our testing data (X_test) to return the matrix.
# ===================================================================================
cv = CountVectorizer()
training_data = cv.fit_transform(x_train)
testing_data = cv.transform(x_test)
# 4.1 Bayes Theorem From Scratch
# ===================================================================================
# Let us implement the Bayes Theorem from scratch using a simple example.
# Let's say we are trying to find the odds of an individual having diabetes,
# given that he or she was tested for it and got a positive result.
# In the medical field, such probabilities play a very important role as it usually
# deals with life and death situations.
# P(D) is the probability of a person having Diabetes.
# Its value is 0.01, or in other words, 1% of the general population has diabetes.
# P(Pos) is the probability of getting a positive test result.
# P(Neg) is the probability of getting a negative test result.
# P(Pos|D) is the probability of getting a positive result on a test done for detecting
# diabetes, given that you have diabetes. This has a value 0.9. (Sensitivity)
# P(Neg|~D) is the probability of getting a negative result on a test done for detecting
# diabetes, given that you do not have diabetes. This also has a value of 0.9.
# (Specificity)
# Putting our values into the formula for Bayes theorem we get:
# P(D|Pos) = (P(D) * P(Pos|D)) / P(Pos)
# P(Pos) = [P(D) * Sensitivity] + [P(~D) * (1-Specificity))]
# ===================================================================================
# 4.1.1
# ===================================================================================
# Calculate probability of getting a positive test result, P(Pos)
# ===================================================================================
# P(D)
p_diabetes = 0.01
# P(~D)
p_no_diabetes = 0.99
# Sensitivity P(pos|D)
p_sens = 0.9
# Specificity P(neg|~D)
p_spec = 0.9
p_pos = (p_diabetes * p_sens) + (p_no_diabetes * (1-p_spec)) # 10.8%
# print("The probabilitiy of getting a positive test result is: " + str(p_pos * 100) + "%")
# 4.1.2
# ===================================================================================
# Compute the probability of an individual having diabetes, given that, that individual
# got a positive test result. In other words, compute P(D|Pos).
# ===================================================================================
p_diabetes_given_pos = (p_diabetes * p_sens) / p_pos # 8.3%
# print("The probability of having diabetes given a positive test result is: "
# + str(p_diabetes_given_pos * 100) + "%")
# 4.1.3
# ===================================================================================
# Compute the probability of an individual not having diabetes, given that, that individual
# got a positive test result. In other words, compute P(~D|Pos).
# P(~D|Pos) = (P(~D) * P(Pos|~D)) / P(Pos)
# ===================================================================================
p_pos_no_diabetes = 1 - p_spec # P(Pos|~D) = 1 - P(Neg|~D)
p_no_diabetes_given_pos = p_no_diabetes * p_pos_no_diabetes / p_pos # 91.67%
# print("The proability of not having diabetes given a positive test result is: "
# + str(p_no_diabetes_given_pos * 100) + "%")
# 4.2 Naive Bayes From Scratch
# ===================================================================================
# Now that you have understood the ins and outs of Bayes Theorem, we will extend it
# to consider cases where we have more than one feature.
#
# Let's say that we have two political parties' candidates, 'Jill Stein' of the
# Green Party and 'Gary Johnson' of the Libertarian Party and we have the probabilities
# of each of these candidates saying the words 'freedom', 'immigration' and 'environment'
# when they give a speech:
# Probability that Jill Stein says 'freedom': 0.1 ---------> P(F|J)
# Probability that Jill Stein says 'immigration': 0.1 -----> P(I|J)
# Probability that Jill Stein says 'environment': 0.8 -----> P(E|J)
# Probability that Gary Johnson says 'freedom': 0.7 -------> P(F|G)
# Probability that Gary Johnson says 'immigration': 0.2 ---> P(I|G)
# Probability that Gary Johnson says 'environment': 0.1 ---> P(E|G)
# And let us also assume that the probability of Jill Stein giving a speech,
# P(J) is 0.5 and the same for Gary Johnson, P(G) = 0.5.
#
# The Naive Bayes formula is P(y|x1,...,xn) = (P(Y) * P(x1,...,xn|y)) / P(x1,...,xn)
# ===================================================================================
# 4.2.1 Compute P(F,I)
# ===================================================================================
# Compute the probability of the words 'freedom' and 'immigration' being said in a
# speech, or P(F,I).
#
# The first step is multiplying the probabilities of Jill Stein giving a speech with her
# individual probabilities of saying the words 'freedom' and 'immigration'. Store this
# in a variable called p_j_text
#
# The second step is multiplying the probabilities of Gary Johnson giving a speech with
# his individual probabilities of saying the words 'freedom' and 'immigration'. Store
# this in a variable called p_g_text
#
# The third step is to add both of these probabilities and you will get P(F,I).
# ===================================================================================
# P(J) = P(G) = 0.5
p_jill = p_gary = 0.5
# P(F|J)
p_j_f = 0.1
# P(I|J)
p_j_i = 0.1
# P(F|G)
p_g_f = 0.7
# P(I|G)
p_g_i = 0.2
p_j_text = p_jill * p_j_f * p_j_i
p_g_text = p_gary * p_g_f * p_g_i
# P(F,I)
p_f_i = p_j_text + p_g_text # 7.5%
# print("P(F, I) = " + str(p_f_i))
# 4.2.2 Compute P(J|F,I)
# ===================================================================================
# Compute P(J|F,I) using the formula P(J|F,I) = (P(J) * P(F|J) * P(I|J)) / P(F,I)
# and store it in a variable p_j_fi
# ===================================================================================
p_j_fi = p_j_text / p_f_i # 6.67%
# print("The probability of the speaker being Jill, given the words 'Freedom' and 'Immigration' \
# were said is: " + str(p_j_fi * 100) + "%")
# 4.2.3 Compute P(G|F,I)
# ===================================================================================
# Compute P(G|F,I) using the formula P(G|F,I) = (P(G) * P(F|G) * P(I|G)) / P(F,I)
# and store it in a variable p_g_fi
# ===================================================================================
p_g_fi = p_g_text / p_f_i # 93.33%
# print("The probability of the speaker being Gary, given the words 'Freedom' and 'Immigration' \
# were said is: " + str(p_g_fi * 100) + "%")
# 5 Naive Bayes w/ Scikit-Learn
# ===================================================================================
# (Back to spam detection)
# We have loaded the training data into the variable 'training_data' and the testing
# data into the variable 'testing_data'.
#
# Thankfully, sklearn has several Naive Bayes implementations that we can use and so
# we do not have to do the math from scratch. We will be using sklearns
# sklearn.naive_bayes method to make predictions on our dataset.
#
# Specifically, we will be using the multinomial Naive Bayes implementation. This
# particular classifier is suitable for classification with discrete features
# (such as in our case, word counts for text classification). It takes in integer
# word counts as its input. On the other hand Gaussian Naive Bayes is better suited
# for continuous data as it assumes that the input data has a Gaussian(normal) distribution.
# ===================================================================================
# 5.1 Training
# ===================================================================================
# Import the MultinomialNB classifier and fit the training data into the classifier
# using fit(). Name your classifier 'naive_bayes'. You will be training the classifier
# using 'training_data' and y_train' from our split earlier.
# ===================================================================================
from sklearn.naive_bayes import MultinomialNB
clf = MultinomialNB()
clf.fit(training_data, y_train)
# 5.2 Predict
# ===================================================================================
# Now that our algorithm has been trained using the training data set we can now make
# some predictions on the test data stored in 'testing_data' using predict().
# Save your predictions into the 'predictions' variable.
# ===================================================================================
predictions = clf.predict(testing_data)
# 6 Evaluating Our Model
# ===================================================================================
# Now that we have made predictions on our test set, our next goal is to evaluate how
# well our model is doing. There are various mechanisms for doing so, but first let's
# do a quick recap of them.
#
# Accuracy measures how often the classifier makes the correct prediction. It’s the
# ratio of the number of correct predictions to the total number of predictions
# (the number of test data points).
#
# Precision tells us what proportion of messages we classified as spam, actually were
# spam. It is a ratio of true positives(words classified as spam, and which are
# actually spam) to all positives(all words classified as spam, irrespective of whether
# that was the correct classification), in other words it is the ratio of
# [True Positives/(True Positives + False Positives)]
#
# Recall(sensitivity) tells us what proportion of messages that actually were spam
# were classified by us as spam. It is a ratio of true positives(words classified as
# spam, and which are actually spam) to all the words that were actually spam,
# in other words it is the ratio of
# [True Positives/(True Positives + False Negatives)]
#
# The Precision and Recall can be combined to get the F1 score, which is the harmonic
# mean of the precision and recall scores. It ranges from 0 to 1, with 1 being the
# best score.
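# Worked example (hypothetical counts): with tp = 40, fp = 10 and fn = 20,
# precision = 40/50 = 0.8, recall = 40/60 ≈ 0.667, and F1 = 2*0.8*0.667/(0.8+0.667) ≈ 0.727.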
# ===================================================================================
# 6.1 Computing accuracy, precision, recall, F1
# ===================================================================================
# Compute the accuracy, precision, recall and F1 scores of your model using your
# test data 'y_test' and the predictions you made earlier stored in the 'predictions'
# variable.
# ===================================================================================
fp = tp = fn = tn = 0
for i in range(0, predictions.size):
pi = predictions[i]
yi = y_test.iloc[i] # positional lookup so labels stay aligned with the predictions array
if (pi == yi):
if (pi == 0): # True negative
tn += 1
else: # True positive
tp += 1
else:
if (pi == 0): # False negative
fn += 1
else: # False positive
fp += 1
accuracy = (tn + tp) / predictions.size
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * (precision * recall)/(precision + recall) # As stated on wikipedia
print("My attempt at manually calculating: ")
print("\tAccuracy is: " + str(accuracy))
print("\tPrecision is: " + str(precision))
print("\tRecall is: " + str(recall))
print("\tF1 score is: " + str(f1))
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print("SKlearn metrics: ")
print("\tAccuracy is: " + str(accuracy_score(y_test, predictions)))
print("\tPrecision is: " + str(precision_score(y_test, predictions)))
print("\tRecall is: " + str(recall_score(y_test, predictions)))
print("\tF1 score is: " + str(f1_score(y_test, predictions)))
|
icon_player = 0
icon_player_horseman = 1
icon_gray_knight = 2
icon_vaegir_knight = 3
icon_flagbearer_a = 4
icon_flagbearer_b = 5
icon_peasant = 6
icon_khergit = 7
icon_khergit_horseman_b = 8
icon_axeman = 9
icon_woman = 10
icon_woman_b = 11
icon_town = 12
icon_town_steppe = 13
icon_village_a = 14
icon_village_burnt_a = 15
icon_village_deserted_a = 16
icon_village_snow_a = 17
icon_village_snow_burnt_a = 18
icon_village_snow_deserted_a = 19
icon_camp = 20
icon_ship = 21
icon_ship_on_land = 22
icon_castle_a = 23
icon_mule = 24
icon_cattle = 25
icon_training_ground = 26
icon_bridge_a = 27
icon_bridge_b = 28
icon_mansion = 29
icon_temple = 30
icon_custom_banner_01 = 31
icon_custom_banner_02 = 32
icon_custom_banner_03 = 33
icon_banner_01 = 34
icon_banner_02 = 35
icon_banner_03 = 36
icon_banner_04 = 37
icon_banner_05 = 38
icon_banner_06 = 39
icon_banner_07 = 40
icon_banner_08 = 41
icon_banner_09 = 42
icon_banner_10 = 43
icon_banner_11 = 44
icon_banner_12 = 45
icon_banner_13 = 46
icon_banner_14 = 47
icon_banner_15 = 48
icon_banner_16 = 49
icon_banner_17 = 50
icon_banner_18 = 51
icon_banner_19 = 52
icon_banner_20 = 53
icon_banner_21 = 54
icon_banner_22 = 55
icon_banner_23 = 56
icon_banner_24 = 57
icon_banner_25 = 58
icon_banner_26 = 59
icon_banner_27 = 60
icon_banner_28 = 61
icon_banner_29 = 62
icon_banner_30 = 63
icon_banner_31 = 64
icon_banner_32 = 65
icon_banner_33 = 66
icon_banner_34 = 67
icon_banner_35 = 68
icon_banner_36 = 69
icon_banner_37 = 70
icon_banner_38 = 71
icon_banner_39 = 72
icon_banner_40 = 73
icon_banner_41 = 74
icon_banner_42 = 75
icon_banner_43 = 76
icon_banner_44 = 77
icon_banner_45 = 78
icon_banner_46 = 79
icon_banner_47 = 80
icon_banner_48 = 81
icon_banner_49 = 82
icon_banner_50 = 83
icon_banner_51 = 84
icon_banner_52 = 85
icon_banner_53 = 86
icon_banner_54 = 87
icon_banner_55 = 88
icon_banner_56 = 89
icon_banner_57 = 90
icon_banner_58 = 91
icon_banner_59 = 92
icon_banner_60 = 93
icon_banner_61 = 94
icon_banner_62 = 95
icon_banner_63 = 96
icon_banner_64 = 97
icon_banner_65 = 98
icon_banner_66 = 99
icon_banner_67 = 100
icon_banner_68 = 101
icon_banner_69 = 102
icon_banner_70 = 103
icon_banner_71 = 104
icon_banner_72 = 105
icon_banner_73 = 106
icon_banner_74 = 107
icon_banner_75 = 108
icon_banner_76 = 109
icon_banner_77 = 110
icon_banner_78 = 111
icon_banner_79 = 112
icon_banner_80 = 113
icon_banner_81 = 114
icon_banner_82 = 115
icon_banner_83 = 116
icon_banner_84 = 117
icon_banner_85 = 118
icon_banner_86 = 119
icon_banner_87 = 120
icon_banner_88 = 121
icon_banner_89 = 122
icon_banner_90 = 123
icon_banner_91 = 124
icon_banner_92 = 125
icon_banner_93 = 126
icon_banner_94 = 127
icon_banner_95 = 128
icon_banner_96 = 129
icon_banner_97 = 130
icon_banner_98 = 131
icon_banner_99 = 132
icon_banner_100 = 133
icon_banner_101 = 134
icon_banner_102 = 135
icon_banner_103 = 136
icon_banner_104 = 137
icon_banner_105 = 138
icon_banner_106 = 139
icon_banner_107 = 140
icon_banner_108 = 141
icon_banner_109 = 142
icon_banner_110 = 143
icon_banner_111 = 144
icon_banner_112 = 145
icon_banner_113 = 146
icon_banner_114 = 147
icon_banner_115 = 148
icon_banner_116 = 149
icon_banner_117 = 150
icon_banner_118 = 151
icon_banner_119 = 152
icon_banner_120 = 153
icon_banner_121 = 154
icon_banner_122 = 155
icon_banner_123 = 156
icon_banner_124 = 157
icon_banner_125 = 158
icon_banner_126 = 159
icon_banner_127 = 160
icon_banner_128 = 161
icon_banner_129 = 162
icon_banner_130 = 163
icon_banner_131 = 164
icon_banner_132 = 165
icon_banner_133 = 166
icon_banner_134 = 167
icon_banner_135 = 168
icon_banner_137 = 169
icon_banner_138 = 170
icon_banner_139 = 171
icon_banner_140 = 172
icon_banner_141 = 173
icon_banner_142 = 174
icon_banner_143 = 175
icon_banner_144 = 176
icon_banner_145 = 177
icon_banner_146 = 178
icon_banner_147 = 179
icon_banner_148 = 180
icon_banner_149 = 181
icon_banner_150 = 182
icon_banner_151 = 183
icon_banner_152 = 184
icon_banner_153 = 185
icon_banner_154 = 186
icon_banner_155 = 187
icon_banner_156 = 188
icon_banner_157 = 189
icon_banner_158 = 190
icon_banner_159 = 191
icon_banner_160 = 192
icon_banner_161 = 193
icon_banner_162 = 194
icon_banner_163 = 195
icon_banner_164 = 196
icon_banner_165 = 197
icon_banner_166 = 198
icon_banner_167 = 199
icon_banner_168 = 200
icon_banner_169 = 201
icon_banner_170 = 202
icon_banner_171 = 203
icon_banner_172 = 204
icon_banner_173 = 205
icon_banner_175 = 206
icon_banner_176 = 207
icon_banner_177 = 208
icon_banner_178 = 209
icon_banner_179 = 210
icon_banner_180 = 211
icon_banner_181 = 212
icon_banner_182 = 213
icon_banner_183 = 214
icon_banner_184 = 215
icon_banner_185 = 216
icon_banner_186 = 217
icon_banner_187 = 218
icon_banner_188 = 219
icon_banner_189 = 220
icon_banner_190 = 221
icon_banner_191 = 222
icon_banner_192 = 223
icon_banner_193 = 224
icon_banner_194 = 225
icon_banner_195 = 226
icon_banner_196 = 227
icon_banner_136 = 228
icon_bandit_lair = 229
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from os import (
listdir,
getcwd,
makedirs,
rename,
)
from os.path import (
isfile,
isdir,
join,
splitext,
basename,
)
from threading import Thread
from time import sleep
from urllib import error as request_error
from urllib.request import urlopen
import hashlib
from requests import Session
from argparse import ArgumentParser
_args = ArgumentParser()
_args.add_argument('-m', help='Method', type=str, required=False, default='')
_args.add_argument('--hide', help='Hidden', action='store_const', required=False, const=True, default=False)
# _args.add_argument('-f', help='File', type=str, required=False, default='tts.txt')
args = _args.parse_args()
configFile = './vk_key.json'
apiVersion = '5.65'
oauthUrl = 'https://oauth.vk.com/authorize?client_id={}&display=page&redirect_uri=https://oauth.vk.com/blank.html&response_type=token&v={}&scope={}'
apiUrl = 'https://api.vk.com/method/{}?v={}&access_token={}&{}'
access = (
#notify
1
#friends
+ 2
#protos
+ 4
#audio
+ 8
#video
+ 16
#pages
+ 128
#status
+ 1024
#notes
# -- messages
# + 4096
#offline
+ 65536
#docs
+ 131072
#groups
+ 262144
)
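# The flags above sum to the OAuth 'scope' bitmask interpolated into oauthUrl below;
# the messages permission (4096) is left commented out.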
try:
with open(configFile, 'rb') as _config:
vk_config = json.loads(_config.read().decode('utf-8'))
except Exception:
print('Error. No config file!')
exit(1)
if not (
isinstance(vk_config, object)
):
print('error parse config')
exit(1)
secretKey = vk_config['secret_key']
serviceKey = vk_config['service_key']
appId = vk_config['app_id']
token = vk_config['token']
uploadAlbumId = vk_config['album']
user = vk_config['user_id'] #int(input("Input you user id: \n"))
if not user or int(user) < 0:
print('Error!')
exit(1)
if token == '':
code = oauthUrl.format(appId, apiVersion, access,)
token = input("Please, go to {} and paste code here\n".format(code,))
if token == '':
print('token is empty!')
exit(1)
data = {
"app_id": appId,
"secret_key": secretKey,
"service_key": serviceKey,
"user_id": user,
"album": uploadAlbumId,
"token": token
}
_ = open(configFile, 'wb')
_.write(json.dumps(data).encode())
_.close()
def _safe_downloader(url, file_name):
while True:
try:
response = urlopen(url)
out_file = open(file_name, 'wb')
out_file.write(response.read())
return True
except request_error.HTTPError:
return False
except request_error.URLError:
sleep(1)
pass
def request(method: str, more: str = ""):
url = apiUrl.format(method, apiVersion, token, more)
r = urlopen(url)
return r.read().decode('utf-8')
if not args.hide:
print("User: {}\nToken: {}\nUserId: {}\n".format(id, 'secret', user,))
class MultiThreads:
threads = []
def __init__(self):
self.threads = []
def addThread(self, target: callable, args: tuple):
self.threads.append(Thread(target=target, args=args))
def startAll(self):
for t in self.threads: # starting all threads
t.start()
for t in self.threads: # joining all threads
t.join()
self.threads = []
class User:
albums = dict()
def _upload(self, url: str, files):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 5.1; rv:20.0) Gecko/20100101 Firefox/20.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive'
}
# url = 'http://httpbin.org/post';
p = Session()
q = p.request('POST', url, files=files, headers=headers)
if q.status_code == 200:
j = q.json()
server = str(j['server'])
aid = str(j['aid'])
hash = str(j['hash'])
photos_list = str(bytearray(j['photos_list'], 'utf-8').decode('unicode_escape'))
params = 'server=' + server + '&album_id=' + aid + '&hash=' + hash + '&photos_list=' + photos_list
request('photos.save', params)
def downloadPhotos(self, album: str = '', offset: int = 0):
if album == '':
album = '-1_wall'
owner_id, album_id = album.split('_') if album.find('_') > 0 else ['', '']
path = join(getcwd(), 'vk_download_files')
if not isdir(path):
return False
if album_id == '' or owner_id == '':
print('Album or Owner is empty!')
print('Please paste in the format <owner>_<album>. Example:' +
' https://vk.com/album5962770_24571412 =>' +
' (5962770_24571412 or -5962770_24571412 from groups)')
return False
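# Shorthand album codes: '000' -> 'saved', '00' -> 'wall', '0' -> 'profile' (VK service albums).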
if album_id == '000':
album_id = 'saved'
if album_id == '00':
album_id = 'wall'
if album_id == '0':
album_id = 'profile'
_ = 'owner_id={}&album_id={}&photo_sizes=1&offset={}&count=1000'
response = request('photos.get', _.format(owner_id,album_id,str(offset),))
response = json.loads(response)
if 'response' not in response or 'count' not in response.get('response'):
print('response error')
return False
response = response.get('response')
count = response.get('count')
if not args.hide:
print('Find ' + str(count) + ' photos')
if count < 1:
return False
items = response.get('items')
# images = map(lambda x: x.get('sizes')[-1], items)
i = 1
_items = [{'items': len(items)}]
threads = MultiThreads()
dn = join(path, owner_id, album_id)
if not isdir(dn) and not (makedirs(dn, 0o777, True) or isdir(dn)):
print('mkdir {} error!'.format(dn))
exit(1)
for f in items:
src = f.get('sizes')[-1].get('src')
m = hashlib.sha256()
m.update(src.encode())
h = m.hexdigest()
_items.append({'src': h})
_ = join(dn, h + '.' + src.split('.')[-1])
if isfile(_):
i += 1
if not args.hide:
print('Skip {}'.format(_))
continue
if not args.hide:
print('Downloading photo # {}/{} ({})'.format((i+offset), count, src,))
threads.addThread(_safe_downloader, (src, _))
i += 1
if i % 50 == 0:
threads.startAll()
threads.startAll()
with open('{}/_{}'.format(dn, offset), 'w') as it:
it.write(json.dumps(_items))
if len(items) > 999:
self.downloadPhotos(album, (offset + len(items)))
def photosGetAlbums(self, owner_id: str = 0):
data = request('photos.getAlbums', 'owner_id=' + owner_id)
self.albums = json.loads(data)
return data
def photos(self):
if not (isinstance(self.albums, object) and 'response' in self.albums and 'items' in self.albums.get('response')):
return False
url = ','.join(map(lambda a: str(a.get('id'))+':6000', self.albums.get('response').get('items')))
print(url)
exit()
data = request('execute.getAllUserPhotos', '')
return data
def _movePhotos(self, to, items):
def _(_ids):
if not len(_ids):
return None
return request('execute.photosMove', 'photos={}&to={}&owner_id={}'.format(','.join(_ids), to, user))
ids = []
for i, j in enumerate(items):
if i and i % 25 == 0:
sleep(2)
_(ids)
print('sleep 2sec. loop %d' % i)
ids = []
if j:
ids.append('%d' % j)
sleep(2)
_(ids)
def _deletePhotos(self, items):
def _(_ids):
if not len(_ids):
return None
return request('execute.deletePhotos', 'photos={}&owner={}'.format(','.join(_ids), user))
ids = []
for i, j in enumerate(items):
if i and i % 25 == 0:
sleep(2)
_(ids)
print('sleep 2sec. loop %d' % i)
ids = []
ids.append('%d' % j)
sleep(3)
_(ids)
def _copyPhotos(self, items, owner):
from captcha_decoder import decoder
def _(_ids):
if not len(_ids):
return []
return request('execute.photosCopy', 'photos={}&owner_id={}'.format(','.join(_ids), owner))
_items = []
ids = []
print('Count items: %d' % len(items))
_captcha_img = '/tmp/__vk_captcha_img.png'
for i, j in enumerate(items):
if i and i % 25 == 0:
print('Sleeping 10 sec')
sleep(10)
__ = json.loads(_(ids))
error = __.get('error', '')
_ERR = __.get('execute_errors', [{}])[0].get('error_msg', '')
if _ERR:
print(_ERR)
# exit()
if error:
# __ = json.loads(_(ids))
print(error['error_msg'])
if error.get('error_code') == 14:
__n = 0
while True:
print('try solving')
captcha_sid = error.get('captcha_sid')
captcha_img = error.get('captcha_img')
_safe_downloader(captcha_img, _captcha_img)
solved = decoder(_captcha_img)
print(solved)
if __n > 10 or not len(solved):
solved = input('\nNot solved. Manual input needed!\nSee {}\n'.format(_captcha_img))
__ = json.loads(request('execute.photosCopy', 'photos={}&owner_id={}&captcha_sid={}&captcha_key={}'.format(
','.join(ids),
owner,
captcha_sid,
solved
)))
__n += 1
if not __.get('error'):
break
sleep(1)
sleep(1)
_items += __.get('response', [])
print('sleep 2sec. loop %d' % i)
ids = []
ids.append('%d' % j)
sleep(2)
__ = json.loads(_(ids))
error = __.get('error', '')
if error:
print(error)
_items += __.get('response', [])
# print(_items)
# exit()
return _items
def movePhotos(self, ids=None):
to = '249795469'
data = json.loads(request('execute.getAllUserPhotos', 'user={}&albums=saved:6000'.format(user)))
if data:
self._movePhotos(to, data['response'])
def uploadPhotos(self):
if uploadAlbumId == '':
print('upload_album_id is empty')
return False
# if need delete old uploaded photos
# delete album here
data = json.loads(request('photos.getUploadServer', 'album_id=' + str(uploadAlbumId)))
if not data.get('response', False) or not data.get('response').get('upload_url', False):
return False
url = data.get('response').get('upload_url')
path = join(getcwd(), 'vk_upload_files')
if not isdir(path):
return False
uploadedPath = join(path, 'uploaded')
if not isdir(uploadedPath):
makedirs(uploadedPath, 0o777, True)
_files = [f for f in listdir(path) if isfile(join(path, f))]
files = []
for f in _files:
_, ext = splitext(f)
if ext in ['.jpeg', '.jpg', '.png']:
files.append(f)
i = 0
n = 0
countFiles = len(files)
_list = []
_move = []
if countFiles > 0:
if not args.hide:
print('uploading start')
for f in files:
if i == 5:
n += 5
self._upload(url, _list)
sleep(1) # just in case
for _ in _move:
_[1].close()
rename(_[0], join(uploadedPath, basename(_[0])))
print('uploaded ' + str(n) + '/' + str(countFiles))
i = 0
_list = []
_move = []
index = 'file' + str(i+1)
fileName = join(path, f)
d = open(fileName, 'rb')
_list.append((index, ('image.png', d,)))
_move.append((fileName, d,))
i += 1
if i != 5:
self._upload(url, _list)
for _ in _move:
_[1].close()
rename(_[0], join(uploadedPath, basename(_[0])))
if not args.hide:
print('uploaded finish')
def summary(self):
print(json.loads(request('execute.getSummaryData', '')))
def copyPhotos(self):
to = '249798346'
album_id = 'wall'
owner = '-127518015'
items = json.loads(request('execute.photosGetIds', 'owner=%s&album=%s' % (owner, album_id))).get('response', [])
# items = items[0:2]
moved_photos = self._copyPhotos(items, owner)
print(len(moved_photos))
exit()
if len(items):
self._movePhotos(to, moved_photos)
def deleteSavedPhotos(self):
items = json.loads(request('execute.photosGetIds', 'owner=%s&album=%s' % (user, 'saved'))).get('response', [])
self._deletePhotos(items)
newUser = User()
# newUser.photosGetAlbums()
# newUser.photos()
# newUser.uploadPhotos()
# newUser.downloadPhotos(input("Paste album number\n"))
# exit()
if args.m:
method = args.m
else:
method = input("Method: \n")
# moreParams = input("More params: \n")
if method == '-1':
newUser.downloadPhotos(input("Paste album number\n"))
if method == '-2':
owner_id = input("Paste owner id\n")
newUser.photosGetAlbums(owner_id)
for i in newUser.albums['response']['items']:
print(owner_id + '_' + str(i['id']))
if method == '-3':
newUser.movePhotos()
if method == '-4':
newUser.copyPhotos()
if method == '-5':
newUser.summary()
if method == '-6':
newUser.deleteSavedPhotos()
exit()
m = getattr(newUser, method)
print(json.dumps(json.loads(m()), sort_keys=True, indent=4))
|
# (C) Copyright 2019-2022 Hewlett Packard Enterprise Development LP.
# Apache License 2.0
from pyaoscx.exceptions.generic_op_error import GenericOperationError
class ListDescriptor(list):
"""
Attribute descriptor class to keep track of a list that contains
pyaoscx_module objects simulating a Reference to a resource. If the
list changes, then every pyaoscx_module object has to be changed.
"""
def __init__(
self,
name,
):
self.name = name
def __get__(self, instance, owner):
"""
Method called when current attribute is used.
:param instance: Instance of the current Object
"""
return instance.__dict__[self.name]
def __set__(self, instance, new_list):
"""
Method called when current attribute is set.
:param instance: Instance of the current Object.
:param new_list: new list being set to current attribute object.
"""
new_list = ReferenceList(new_list)
prev_list = (
instance.__dict__[self.name]
if self.name in instance.__dict__
else None
)
# Update value inside the instance dictionary
instance.__dict__[self.name] = new_list
# Check changes and delete
if prev_list is not None and prev_list != new_list:
# Reflect changes made inside the list
for element in prev_list:
if element not in new_list:
# Delete element reference
try:
element.delete()
except AttributeError:
# Ignore
pass
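# Hypothetical usage sketch (the names below are illustrative, not part of pyaoscx):
# a class declares a managed list attribute so that dropping a reference from the list
# also deletes the referenced resource, e.g.
#
#     class Vlan:
#         interfaces = ListDescriptor("interfaces")
#
#     vlan = Vlan()
#     vlan.interfaces = [iface_a, iface_b]   # stored as a ReferenceList
#     vlan.interfaces = [iface_a]            # iface_b.delete() is called on reassignment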
class ReferenceList(list):
"""
Wrapper class for a Python List object.
Modifies remove() method to use the pyaoscx.pyaoscx_module.delete()
method when using remove on this special type list.
"""
def __init__(self, value):
list.__init__(self, value)
def __setitem__(self, key, value):
"""
Intercept the l[key]=value operations.
Also covers slice assignment.
"""
try:
_ = self.__getitem__(key)
except KeyError:
list.__setitem__(self, key, value)
else:
list.__setitem__(self, key, value)
def __delitem__(self, key):
"""
Delete self.key.
"""
_ = list.__getitem__(self, key)
list.__delitem__(self, key)
def pop(self):
"""
Remove and return item at index (default last).
"""
oldvalue = list.pop(self)
return oldvalue
def extend(self, newvalue):
"""
Extend list by appending elements from iterable.
"""
list.extend(self, newvalue)
def insert(self, i, element):
"""
Insert object before index.
"""
list.insert(self, i, element)
def remove(self, element):
"""
Remove first occurrence of value.
"""
_ = list.index(self, element)
list.remove(self, element)
try:
# Delete element with a DELETE request
element.delete()
# If delete fails because table entry
# is already deleted: IGNORE
except GenericOperationError as error:
# In case error is not 404, raise
if error.response_code != 404:
raise error
def reverse(self):
"""
Reverse *IN PLACE*.
"""
list.reverse(self)
def sort(self, cmpfunc=None):
"""
Stable sort *IN PLACE*.
"""
_ = self[:]
list.sort(self, cmpfunc)
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda mods with yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as sco
try:
import talib
except:
print('PLEASE run "pip install TALIB" to call these modules')
pass
import QUANTAXIS as QA
try:
import QUANTAXIS as QA
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
from QUANTAXIS.QAData.QADataStruct import (
QA_DataStruct_Index_min,
QA_DataStruct_Index_day,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min,
QA_DataStruct_CryptoCurrency_day,
QA_DataStruct_CryptoCurrency_min,
)
from QUANTAXIS.QAIndicator.talib_numpy import *
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
from QUANTAXIS.QAUtil.QALogs import (
QA_util_log_info,
QA_util_log_debug,
QA_util_log_expection
)
except:
print('PLEASE run "pip install QUANTAXIS" before call GolemQ.cli.portfolio modules')
pass
from GolemQ.indices.indices import *
from GolemQ.cli.show_number import (
position,
)
def show_verbose(opt_res, obj, rm, no_print=False):
"""
Interpret the asset-allocation optimization results.
"""
if (obj == 'Sharpe'):
obj_name = 'Sharpe ratio'
elif (obj == 'Sortino'):
obj_name = 'Sortino Ratio'
elif (obj == 'MinRisk'):
obj_name = 'minimum risk'
if (rm == 'MV'):
rm_name = 'mean-variance'
elif (rm == 'WR'):
rm_name = 'worst-case'
else:
rm_name = 'minimum CVaR'
res_weights = pd.DataFrame(opt_res, columns=['code', 'name', 'weights'])
ret_verbose = 'Stocks with recommended positions under {} / {} optimization:\n{}'.format(obj_name, rm_name, res_weights[res_weights['weights'].gt(0.001)])
ret_verbose = '{}\n{}'.format(ret_verbose,
'Remaining stocks with no recommended position:\n{}'.format(res_weights.loc[res_weights['weights'].lt(0.001), ['code', 'name']].values))
if (no_print == False):
print(ret_verbose)
return ret_verbose
def portfolio_optimizer(rm='CVaR',
alpha=0.1,
risk_free=0.02,
strategy='sharpe_scale_patterns_day',):
pd.options.display.float_format = '{:.1%}'.format
# Stock codes: the candidate list comes directly from my stock-selection program.
position_signals = position(portfolio=strategy,
frequency='day',
market_type=QA.MARKET_TYPE.STOCK_CN,
verbose=False)
if (position_signals is not None) and \
(len(position_signals) > 0):
datestamp = position_signals.index[0][0]
position_signals_best = position_signals.loc[position_signals[FLD.LEVERAGE_ONHOLD].gt(0.99), :]
if (len(position_signals_best) > 20):
position_signals = position_signals_best
else:
pass
codelist = position_signals.index.get_level_values(level=1).to_list()
# Fetch the Chinese stock names purely for readability; the trading strategy itself does not need them.
stock_names = QA.QA_fetch_stock_name(codelist)
codename = [stock_names.at[code, 'name'] for code in codelist]
codename_T = {codename[i]:codelist[i] for i in range(len(codelist))}
data_day = QA.QA_fetch_stock_day_adv(codelist,
start='2014-01-01',
end='{}'.format(datetime.date.today())).to_qfq()
# Return series
rets_jotion = data_day.add_func(kline_returns_func)
returns = pd.DataFrame(columns=codelist,
index=sorted(data_day.data.index.get_level_values(level=0).unique()))
for code in codelist:
returns[code] = rets_jotion.loc[(slice(None), code), :].reset_index(level=[1], drop=True)
returns = returns.fillna(0)
returns = returns.rename(columns={codelist[i]:codename[i] for i in range(len(codelist))})
import riskfolio.Portfolio as pf
# Building the portfolio object
port = pf.Portfolio(returns=returns)
# Calculating optimum portfolio
# Select method and estimate input parameters:
method_mu = 'hist' # Method to estimate expected returns based on historical data.
method_cov = 'hist' # Method to estimate covariance matrix based on historical data.
port.assets_stats(method_mu=method_mu, method_cov=method_cov, d=0.94)
## Estimate optimal portfolio:
model = 'Classic' # Could be Classic (historical), BL (Black Litterman) or FM (Factor Model)
obj = 'Sharpe' # Objective function, could be MinRisk, MaxRet, Utility or Sharpe
hist = True # Use historical scenarios for risk measures that depend on scenarios
rf = risk_free / 365 # Risk free rate
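# The annual risk-free rate is converted to a per-day rate (assuming 365 calendar days),
# presumably to match the daily return series passed to the optimizer.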
l = 0 # Risk aversion factor, only useful when obj is 'Utility'
port.alpha = alpha
# Plot style theme
plt.style.use('Solarize_Light2')
# Make sure Chinese characters render correctly in matplotlib
plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
import riskfolio.PlotFunctions as plf
# Plotting the composition of the portfolio
w = port.optimization(model=model, rm=rm, obj=obj, rf=rf, l=l, hist=hist)
opt_weights = w.copy()
opt_weights['code'] = opt_weights.apply(lambda x: codename_T[x.name], axis=1)
opt_weights['name'] = opt_weights.apply(lambda x: x.name, axis=1)
opt_weights = opt_weights.set_index(['code'], drop=False)
print('Trading date:', datestamp)
show_verbose(opt_weights, obj, rm)
if (rm == 'CVaR'):
# Risk measure CVaR
title = 'Sharpe Mean CVaR'
elif (rm == 'MV'):
# Risk measure used, this time will be variance
title = 'Sharpe Mean Variance'
elif (rm == 'WR'):
title = 'Sharpe Mean WR'
elif (rm == 'Sortino'):
title = 'Sortino Mean WR'
else:
rm = 'CVaR'
title = 'Sharpe Mean CVaR'
ax = plf.plot_pie(w=w, title=title, others=0.05, nrow=25, cmap = "tab20",
height=6, width=10, ax=None)
plt.show()
## Plotting efficient frontier composition
#points = 10 # Number of points of the frontier
#frontier = port.efficient_frontier(model=model, rm=rm, points=points,
#rf=rf, hist=hist)
##print(frontier.T.head())
#ax = plf.plot_frontier_area(w_frontier=frontier, cmap="tab20",
#height=6,
#width=10, ax=None)
#plt.show()
else:
        print(u'No stock-selection data available.')
if __name__ == '__main__':
pass
|
from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from .. import exceptions
from ..models import Expense, Project, Timesheet
class ProjectTestCase(TestCase):
def test_str(self):
project = Project(name="test")
self.assertEqual(str(project), "test")
class ExpenseTestCase(TestCase):
def test_str(self):
expense = Expense(name="test")
self.assertEqual(str(expense), "test")
class TimesheetTestCase(TestCase):
def test_str(self):
timesheet = Timesheet(label="test")
self.assertEqual(str(timesheet), "test")
def test_diff(self):
date_start = timezone.now()
date_end = date_start + timedelta(hours=1)
timesheet = Timesheet(date_start=date_start, date_end=date_end)
self.assertEqual(timesheet.diff, date_end - date_start)
def test_diff_return_none_with_no_date_end(self):
date_start = timezone.now()
timesheet = Timesheet(date_start=date_start, date_end=None)
self.assertEqual(timesheet.diff, None)
def test_diff_humanize(self):
date_start = timezone.now()
date_end = date_start + timedelta(hours=1)
timesheet = Timesheet(date_start=date_start, date_end=date_end)
self.assertEqual(timesheet.diff_humanize, "1 hour")
def test_diff_humanize_return_empty_string_with_no_date_end(self):
date_start = timezone.now()
timesheet = Timesheet(date_start=date_start, date_end=None)
self.assertEqual(timesheet.diff_humanize, "")
def test_finish(self):
timesheet = Timesheet.objects.create(is_active=True)
self.assertIsNone(timesheet.date_end)
timesheet.finish()
self.assertIsNotNone(timesheet.date_end)
self.assertFalse(timesheet.is_active)
def test_clock_in(self):
qs = Timesheet.objects.filter(is_active=True)
self.assertEqual(qs.count(), 0)
Timesheet.clock_in()
self.assertEqual(qs.count(), 1)
def test_clock_in_raise_error_when_an_active_timesheet_exists(self):
qs = Timesheet.objects.filter(is_active=True)
Timesheet.objects.create(is_active=True)
self.assertEqual(qs.count(), 1)
with self.assertRaises(exceptions.ActiveTimesheetExists):
Timesheet.clock_in()
self.assertEqual(qs.count(), 1)
def test_clock_out(self):
Timesheet.objects.create(is_active=True)
qs = Timesheet.objects.filter(is_active=True)
self.assertEqual(qs.count(), 1)
Timesheet.clock_out()
self.assertEqual(qs.count(), 0)
def test_clock_out_raise_error_when_there_is_no_active_timesheet(self):
qs = Timesheet.objects.filter(is_active=True)
self.assertEqual(qs.count(), 0)
with self.assertRaises(exceptions.ActiveTimesheetDoesNotExist):
Timesheet.clock_out()
self.assertEqual(qs.count(), 0)
|
# -*- coding: utf-8 -*-
"""
"""
from collections import OrderedDict
import os
import subprocess
import librosa
import numpy as np
import pandas as pd
import sklearn as sk
import sklearn.model_selection
import skimage as skim
import skimage.measure
import skimage.morphology
import skimage.restoration
from tqdm import tqdm
import xml.etree.ElementTree
from echonet.datasets.dataset import Dataset
from echonet.utils.generics import generate_delta, load_audio, to_one_hot
from IPython.core.debugger import Tracer
class BirdCLEF2016(Dataset):
"""
"""
def __init__(self, data_dir, work_dir, downsample=True):
super().__init__(data_dir, work_dir)
self.DOWNSAMPLE = downsample
self.SEGMENT_LENGTH = 500
self.BANDS = 180
self.WITH_DELTA = False
self.FMAX = 16000
self.FFT = 2205
self.HOP = 441
self._resample_recordings()
self._parse_recordings()
self._generate_spectrograms()
if self.DOWNSAMPLE:
self.SEGMENT_LENGTH //= 2
self.BANDS //= 3
self.class_count = len(self.encoder.classes_)
self._split_dataset()
self.train_meta = self.meta[self.meta['fold'] == 'train']
self.validation_data.meta = self.meta[self.meta['fold'] == 'validation']
self.test_data.meta = self.meta[self.meta['fold'] == 'test']
self._train_size = len(self.recordings[self.recordings['fold'] == 'train'])
self._validation_size = len(self.recordings[self.recordings['fold'] == 'validation'])
self._test_size = len(self.recordings[self.recordings['fold'] == 'test'])
self._populate(self.validation_data)
self._populate(self.test_data)
def _resample_recordings(self):
src_dir = self.data_dir + 'TrainingSet/wav/'
for recording in tqdm(sorted(os.listdir(src_dir))):
if os.path.isfile(src_dir + recording):
wav_in = src_dir + recording
wav_out = self.work_dir + recording
if not os.path.isfile(wav_out):
subprocess.call(['sox', '-S', wav_in, '-r', '44100', '-b', '16', wav_out])
def _parse_recordings(self):
if os.path.isfile(self.work_dir + 'BirdCLEF2016.csv'):
self.recordings = pd.read_csv(self.work_dir + 'BirdCLEF2016.csv')
self.encoder = sk.preprocessing.LabelEncoder()
self.encoder.fit(self.recordings['birdclass'].values)
else:
self.recordings = []
src_dir = self.data_dir + 'TrainingSet/xml/'
for recording in tqdm(sorted(os.listdir(src_dir))):
root = xml.etree.ElementTree.parse(src_dir + recording).getroot()
data = {
'filename': recording[:-4] + '.wav',
'birdclass': root.find('ClassId').text,
'species': root.find('Species').text,
'genus': root.find('Genus').text,
'family': root.find('Family').text,
'background': root.find('BackgroundSpecies').text
}
if data['background'] is None:
data['background'] = ''
columns = ['filename', 'birdclass', 'species', 'genus', 'family', 'background']
row = pd.DataFrame(data, columns=columns, index=[0])
self.recordings.append(row)
self.recordings = pd.concat(self.recordings, ignore_index=True)
self.encoder = sk.preprocessing.LabelEncoder()
self.encoder.fit(self.recordings['birdclass'].values)
self.recordings['target'] = self.encoder.transform(self.recordings['birdclass'].values)
self.recordings.to_csv(self.work_dir + 'BirdCLEF2016.csv', index=False)
def _generate_spectrograms(self):
if os.path.isfile(self.work_dir + 'BirdCLEF2016-clips.csv'):
self.meta = pd.read_csv(self.work_dir + 'BirdCLEF2016-clips.csv')
else:
self.meta = []
for row in tqdm(self.recordings.itertuples(), total=len(self.recordings)):
self.meta.extend(self._split_recording(row))
self.meta = pd.concat(self.meta, ignore_index=True)
self.meta.to_csv(self.work_dir + 'BirdCLEF2016-clips.csv', index=False)
def _split_recording(self, row):
audio = load_audio(self.work_dir + row.filename, 44100)
spec = librosa.feature.melspectrogram(audio, sr=44100, n_fft=self.FFT, fmax=self.FMAX,
hop_length=self.HOP, n_mels=self.BANDS)
freqs = librosa.core.mel_frequencies(n_mels=self.BANDS, fmax=self.FMAX)
spec = librosa.core.perceptual_weighting(spec, freqs, ref_power=np.max)
spec = self._enhance_spectrogram(spec)
mask = skim.morphology.dilation(spec, selem=np.ones((3, 40))) > 0
mask[:10, :] = False
clip_list = []
counter = 0
current = []
window_size = 25
w = 0
while w * window_size < np.shape(spec)[1]:
window = slice(w * window_size, (w + 1) * window_size)
if np.any(mask[:, window]):
current.append(spec[:, window])
elif len(current):
clip_list.append(self._save(np.concatenate(current, axis=1), row, counter))
counter += 1
current = []
w += 1
if len(current):
clip_list.append(self._save(np.concatenate(current, axis=1), row, counter))
return clip_list
def _enhance_spectrogram(self, spec):
spec = (spec + 60.0) / 15.0 # quasi-normalization
np.clip(spec, 0, 5, out=spec)
spec = (spec ** 2 - 6.0) / 6.0
spec = skim.restoration.denoise_tv_chambolle(spec, weight=0.1)
spec = ((spec - np.min(spec)) / np.max(spec - np.min(spec)) - 0.5) * 2.0
spec += 0.5
spec[spec > 0] *= 2
spec = ((spec - np.min(spec)) / np.max(spec - np.min(spec)) - 0.5) * 2.0
return spec
def _save(self, clip, row, counter):
reduced_clip = skim.measure.block_reduce(clip, block_size=(3, 2), func=np.mean)
np.save(self.work_dir + row.filename + '.spec{}.npy'.format(counter),
clip.astype('float16'), allow_pickle=False)
np.save(self.work_dir + row.filename + '.spec{}.ds.npy'.format(counter),
reduced_clip.astype('float16'), allow_pickle=False)
data = OrderedDict([
('filename', row.filename + '.spec{}.npy'.format(counter)),
('target', row.target),
('recording', row.filename),
('birdclass', row.birdclass),
('species', row.species),
('genus', row.genus),
('family', row.family),
('background', '' if pd.isnull(row.background) else row.background)
])
return pd.DataFrame(data, columns=data.keys(), index=[0])
def _split_dataset(self):
"""Splits the dataset into training/validation/testing folds
Stratified split with shuffling:
- 75% of recordings go to training
- 12.5% validation
- 12.5% testing
"""
splitter = sklearn.model_selection.StratifiedShuffleSplit
quarter = splitter(n_splits=1, test_size=0.25, random_state=20161013)
half = splitter(n_splits=1, test_size=0.5, random_state=20161013)
train_split = quarter.split(self.recordings['filename'], self.recordings['target'])
train_idx, holdout_idx = list(train_split)[0]
holdout_split = half.split(self.recordings.loc[holdout_idx, 'filename'],
self.recordings.loc[holdout_idx, 'target'])
validation_idx, test_idx = list(holdout_split)[0]
self.recordings.loc[train_idx, 'fold'] = 'train'
self.recordings.loc[holdout_idx[validation_idx], 'fold'] = 'validation'
self.recordings.loc[holdout_idx[test_idx], 'fold'] = 'test'
right = self.recordings[['filename', 'fold']].rename(columns={'filename': 'recording'})
self.meta = pd.merge(self.meta, right, on='recording')
@property
def input_shape(self):
return 1 + self.WITH_DELTA, self.BANDS, self.SEGMENT_LENGTH
@property
def train_size(self):
return self._train_size
@property
def train_segments(self):
return len(self.train_meta)
@property
def validation_size(self):
return self._validation_size
@property
def validation_segments(self):
return len(self.validation_data.meta)
@property
def test_size(self):
return self._test_size
@property
def test_segments(self):
return len(self.test_data.meta)
def to_categories(self, targets):
return self.encoder.classes_[targets]
def to_targets(self, categories):
return self.encoder.transform(categories)
def test(self, model):
return self._score(model, self.test_data)
def validate(self, model):
return self._score(model, self.validation_data)
def _populate(self, data):
X, y, meta = [], [], []
for row in tqdm(data.meta.itertuples(), total=len(data.meta)):
values = dict(zip(row._fields[1:], row[1:]))
columns = row._fields[1:]
rows = []
for _ in range(2): # multiply segment variants for prediction
X.append(self._extract_segment(row.filename))
y.append(row.target)
rows.append(pd.DataFrame(values, columns=columns, index=[0]))
meta.extend(rows)
X = np.stack(X)
y = to_one_hot(np.array(y), self.class_count)
meta = pd.concat(meta, ignore_index=True)
if self.data_mean is None:
self.data_mean = np.mean(X)
self.data_std = np.std(X)
X -= self.data_mean
X /= self.data_std
data.X = X
data.y = y
data.meta = meta
def iterbatches(self, batch_size):
itrain = super()._iterrows(self.train_meta)
while True:
X, y = [], []
for i in range(batch_size):
row = next(itrain)
X.append(self._extract_segment(row.filename))
y.append(row.target)
X = np.stack(X)
y = to_one_hot(np.array(y), self.class_count)
X -= self.data_mean
X /= self.data_std
yield X, y
def _extract_segment(self, filename):
if self.DOWNSAMPLE:
spec = np.load(self.work_dir + filename[:-4] + '.ds.npy').astype('float32')
else:
spec = np.load(self.work_dir + filename).astype('float32')
spec = spec[:, :-1] # trim border artifacts
if np.shape(spec)[1] >= self.SEGMENT_LENGTH:
offset = self.RandomState.randint(0, np.shape(spec)[1] - self.SEGMENT_LENGTH + 1)
spec = spec[:, offset:offset + self.SEGMENT_LENGTH]
else:
offset = self.RandomState.randint(0, self.SEGMENT_LENGTH - np.shape(spec)[1] + 1)
overlay = np.zeros((self.BANDS, self.SEGMENT_LENGTH)) - 1.0
overlay[:, offset:offset + np.shape(spec)[1]] = spec
spec = overlay
if self.WITH_DELTA:
delta = generate_delta(spec)
return np.stack([spec, delta])
else:
return np.stack([spec])
def _score(self, model, data):
predictions = pd.DataFrame(model.predict(data.X))
results = pd.concat([data.meta[['recording', 'target']], predictions], axis=1)
results = results.groupby('recording').aggregate('mean').reset_index()
results['predicted'] = np.argmax(results.iloc[:, 2:].values, axis=1)
return np.sum(results['predicted'] == results['target']) / len(results)
|
# coding: utf-8
from typing import List
from ...shared.data.shared_data import BaseData
from ...shared.data.from_import import FromImport
from .items.data_item import DataItem
class Data(BaseData):
flags: bool
allow_db: bool
quote: List[str]
typings: List[str]
requires_typing: bool
from_imports: List[FromImport]
from_imports_typing: List[FromImport]
items: List[DataItem]
@property
def fullname(self) -> str:
return self.namespace + "." + self.name
|
import datetime
import typing as T
from pqcli import random
def format_float(num: float) -> str:
ret = f"{num:.01f}"
if ret.endswith("0"):
ret = ret[:-2]
return ret
def format_timespan(timespan: datetime.timedelta) -> str:
num = timespan.total_seconds()
if num < 60.0:
return f"~{int(num)}s"
num /= 60
if num < 60.0:
return f"~{int(num)}m"
num /= 60
if num < 24.0:
return f"~{format_float(num)}h"
num /= 24
return f"~{format_float(num)}d"
def generate_name() -> str:
parts = [
"br|cr|dr|fr|gr|j|kr|l|m|n|pr||||r|sh|tr|v|wh|x|y|z".split("|"),
"a|a|e|e|i|i|o|o|u|u|ae|ie|oo|ou".split("|"),
"b|ck|d|g|k|m|n|p|t|v|x|z".split("|"),
]
result = ""
for i in range(6):
result += random.choice(parts[i % 3])
return result.title()
def to_roman(num: int) -> str:
if not num:
return "N"
ret = ""
def _rome(dn: int, ds: str) -> bool:
nonlocal num, ret
if num >= dn:
num -= dn
ret += ds
return True
return False
if num < 0:
ret = "-"
num = -num
while _rome(1000, "M"):
pass
_rome(900, "CM")
_rome(500, "D")
_rome(400, "CD")
while _rome(100, "C"):
pass
_rome(90, "XC")
_rome(50, "L")
_rome(40, "XL")
while _rome(10, "X"):
pass
_rome(9, "IX")
_rome(5, "V")
_rome(4, "IV")
while _rome(1, "I"):
pass
return ret
def act_name(act: int) -> str:
if act == 0:
return "Prologue"
return f"Act {to_roman(act)}"
def plural(subject: str) -> str:
if subject.endswith("y"):
return subject[:-1] + "ies"
if subject.endswith("us"):
return subject[:-2] + "i"
if subject.endswith(("ch", "x", "s", "sh")):
return subject + "es"
if subject.endswith("f"):
return subject[:-1] + "ves"
if subject.endswith(("man", "Man")):
return subject[:-2] + "en"
return subject + "s"
def indefinite(subject: str, qty: int) -> str:
if qty == 1:
if subject.startswith(tuple("AEIOU?aeiou?")):
return "an " + subject
return "a " + subject
return str(qty) + " " + plural(subject)
def definite(subject: str, qty: int) -> str:
if qty > 1:
subject = plural(subject)
return "the " + subject
def prefix(a: T.List[str], m: int, subject: str, sep: str = " ") -> str:
m = abs(m)
if m < 1 or m > len(a):
return subject
return a[m - 1] + sep + subject
def sick(m: int, subject: str) -> str:
m = 6 - abs(m)
return prefix(
["dead", "comatose", "crippled", "sick", "undernourished"], m, subject
)
def young(m: int, subject: str) -> str:
m = 6 - abs(m)
return prefix(
["foetal", "baby", "preadolescent", "teenage", "underage"], m, subject
)
def big(m: int, subject: str) -> str:
return prefix(
["greater", "massive", "enormous", "giant", "titanic"], m, subject
)
def special(m: int, subject: str) -> str:
if " " in subject:
return prefix(
["veteran", "cursed", "warrior", "undead", "demon"], m, subject
)
return prefix(
["Battle-", "cursed ", "Were-", "undead ", "demon "], m, subject, ""
)
def terminate_message(player_name: str) -> str:
adjective = random.choice(["faithful", "noble", "loyal", "brave"])
return f"Terminate {adjective} {player_name}?"
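# A few illustrative results for the helpers above (not part of the original module):
#   to_roman(1987)         -> "MCMLXXXVII"
#   act_name(3)            -> "Act III"
#   plural("candy")        -> "candies"
#   indefinite("apple", 1) -> "an apple"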
|
'''
A positive integer m is a sum of squares if it can be written as k + l where k > 0, l > 0 and both k and l are perfect squares.
Write a Python function sumofsquares(m) that takes an integer m and returns True if m is a sum of squares and False otherwise. (If m is not positive, your function should return False.)
Here are some examples to show how your function should work.
>>> sumofsquares(41)
True
>>> sumofsquares(30)
False
>>> sumofsquares(17)
True
'''
import math
def sumofsquares(m):
    # m must be at least 2 to be a sum of two positive squares
    if m < 2:
        return False
    d = range(int(math.sqrt(m / 2)), int(math.sqrt(m - 1)) + 2)
    s = [1]
    for x in d:
        s.append(x * x)
    z = 0
    while z < len(s):
        for y in s[z:]:
            if y + s[z] == m:
                return True
        z += 1
    return False
print(sumofsquares(17))
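# Additional checks matching the examples in the problem statement above (illustrative):
print(sumofsquares(41))  # expected: True
print(sumofsquares(30))  # expected: False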
|
import bokeh.model
import bokeh.core.properties
import bokeh.util.callback_manager
class Collection(bokeh.model.Model):
objects = bokeh.core.properties.Dict(bokeh.core.properties.String, bokeh.core.properties.Instance(bokeh.model.Model))
def on_change(self, attr, *callbacks):
bokeh.util.callback_manager.PropertyCallbackManager.on_change(self, attr, *callbacks)
def __setattr__(self, name, value):
if name.startswith("_"):
bokeh.model.Model.__setattr__(self, name, value)
else:
old = self.objects.get(name, None)
self.objects[name] = value
self.trigger(name, old, value)
def __getattr__(self, name):
if name.startswith("_"):
return super(Collection, self).__getattribute__(name)
return super(Collection, self).__getattribute__("objects")[name]
|
"""
Aggregating results into DataPoints
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import copy
import difflib
import logging
import re
from abc import abstractmethod
from collections import Counter
import yaml
from yaml.representer import SafeRepresenter
from __init__ import TaurusInternalException, TaurusConfigError
from engine import Aggregator
from six import iteritems, PY3
from utils import dehumanize_time, JSONConvertible
from hdrpy import HdrHistogram
class RespTimesCounter(JSONConvertible):
def __init__(self, low, high, sign_figures):
super(RespTimesCounter, self).__init__()
self.low = low
self.high = high
self.sign_figures = sign_figures
self.histogram = HdrHistogram(low, high, sign_figures)
self._cached_perc = None
self._cached_stdev = None
def __deepcopy__(self, memo):
new = RespTimesCounter(self.low, self.high, self.sign_figures)
new._cached_perc = self._cached_perc
new._cached_stdev = self._cached_stdev
# TODO: maybe hdrpy can encapsulate this itself
new.histogram.counts = copy.deepcopy(self.histogram.counts, memo)
new.histogram.total_count = self.histogram.total_count
new.histogram.min_value = self.histogram.min_value
        new.histogram.max_value = self.histogram.max_value
return new
def __bool__(self):
return len(self) > 0
def __len__(self):
return self.histogram.total_count
def add(self, item, count=1):
item = round(item * 1000.0, 3)
self._cached_perc = None
self._cached_stdev = None
self.histogram.record_value(item, count)
def merge(self, other):
self._cached_perc = None
self._cached_stdev = None
self.histogram.add(other.histogram)
def get_percentiles_dict(self, percentiles):
if self._cached_perc is None or set(self._cached_perc.keys()) != set(percentiles):
self._cached_perc = self.histogram.get_percentile_to_value_dict(percentiles)
return self._cached_perc
def get_counts(self):
return self.histogram.get_value_counts()
def get_stdev(self, mean):
if self._cached_stdev is None:
self._cached_stdev = self.histogram.get_stddev(mean) / 1000.0 # is this correct to divide?
return self._cached_stdev
def __json__(self):
return {
rt / 1000.0: int(count) # because hdrpy returns int64, which is unrecognized by json serializer
for rt, count in iteritems(self.get_counts())
}
class KPISet(dict):
"""
    Main entity in results; contains all KPIs for a single label,
    capable of merging other KPISets into it to compose cumulative results
"""
ERRORS = "errors"
SAMPLE_COUNT = "throughput"
CONCURRENCY = "concurrency"
SUCCESSES = "succ"
FAILURES = "fail"
BYTE_COUNT = "bytes"
RESP_TIMES = "rt"
AVG_RESP_TIME = "avg_rt"
STDEV_RESP_TIME = "stdev_rt"
AVG_LATENCY = "avg_lt"
AVG_CONN_TIME = "avg_ct"
PERCENTILES = "perc"
RESP_CODES = "rc"
ERRTYPE_ERROR = 0
ERRTYPE_ASSERT = 1
ERRTYPE_SUBSAMPLE = 2
def __init__(self, perc_levels=(), rt_dist_maxlen=None):
super(KPISet, self).__init__()
self.sum_rt = 0
self.sum_lt = 0
self.sum_cn = 0
self.perc_levels = perc_levels
self.rtimes_len = rt_dist_maxlen
self._concurrencies = Counter()
# scalars
self[KPISet.SAMPLE_COUNT] = 0
self[KPISet.CONCURRENCY] = 0
self[KPISet.SUCCESSES] = 0
self[KPISet.FAILURES] = 0
self[KPISet.AVG_RESP_TIME] = 0
self[KPISet.STDEV_RESP_TIME] = 0
self[KPISet.AVG_LATENCY] = 0
self[KPISet.AVG_CONN_TIME] = 0
self[KPISet.BYTE_COUNT] = 0
# vectors
self[KPISet.ERRORS] = []
self[KPISet.RESP_TIMES] = RespTimesCounter(1, 60 * 30 * 1000, 3) # is maximum value of 30 minutes enough?
self[KPISet.RESP_CODES] = Counter()
self[KPISet.PERCENTILES] = {}
def __deepcopy__(self, memo):
mycopy = KPISet(self.perc_levels)
mycopy.sum_rt = self.sum_rt
mycopy.sum_lt = self.sum_lt
mycopy.sum_cn = self.sum_cn
mycopy.rtimes_len = self.rtimes_len
mycopy.perc_levels = self.perc_levels
mycopy._concurrencies = copy.deepcopy(self._concurrencies, memo)
for key in self:
mycopy[key] = copy.deepcopy(self.get(key, no_recalc=True), memo)
return mycopy
@staticmethod
def error_item_skel(error, ret_c, cnt, errtype, urls, tag):
"""
:type error: str
:type ret_c: str
:type tag: str
:type cnt: int
:type errtype: int
:type urls: collections.Counter
:rtype: dict
"""
assert isinstance(urls, collections.Counter)
return {
"cnt": cnt,
"msg": error,
"tag": tag, # just one more string qualifier
"rc": ret_c,
"type": errtype,
"urls": urls,
}
def add_sample(self, sample):
"""
Add sample, consisting of: cnc, rt, cn, lt, rc, error, trname, byte_count
:type sample: tuple
"""
# TODO: introduce a flag to not count failed in resp times? or offer it always?
cnc, r_time, con_time, latency, r_code, error, trname, byte_count = sample
self[self.SAMPLE_COUNT] += 1
if cnc:
self._concurrencies[trname] = cnc
if r_code is not None:
self[self.RESP_CODES][r_code] += 1
# count times only if we have RCs
if con_time:
self.sum_cn += con_time
self.sum_lt += latency
self.sum_rt += r_time
if error is not None:
self[self.FAILURES] += 1
item = self.error_item_skel(error, r_code, 1, KPISet.ERRTYPE_ERROR, Counter(), None)
self.inc_list(self[self.ERRORS], ("msg", error), item)
else:
self[self.SUCCESSES] += 1
self[self.RESP_TIMES].add(r_time, 1)
if byte_count is not None:
self[self.BYTE_COUNT] += byte_count
# TODO: max/min rt? there is percentiles...
# TODO: throughput if interval is not 1s
@staticmethod
def inc_list(values, selector, value):
"""
Increment list item, based on selector criteria
:param values: list to update
:param selector: tuple of 2 values, field name and value to match
:param value: dict to put into list
:type values: list[dict]
:type selector: tuple
:type value: dict
"""
found = False
for item in values:
if item[selector[0]] == selector[1]:
item['cnt'] += value['cnt']
item['urls'] += value['urls']
found = True
break
if not found:
values.append(copy.deepcopy(value))
def __getitem__(self, key):
rtimes = self.get(self.RESP_TIMES, no_recalc=True)
if key != self.RESP_TIMES and rtimes:
if key == self.STDEV_RESP_TIME:
self[self.STDEV_RESP_TIME] = rtimes.get_stdev(self.get(self.AVG_RESP_TIME, no_recalc=True))
elif key == self.PERCENTILES:
percs = {str(float(perc)): value / 1000.0 for perc, value in
iteritems(rtimes.get_percentiles_dict(self.perc_levels))}
self[self.PERCENTILES] = percs
return super(KPISet, self).__getitem__(key)
def get(self, k, no_recalc=False):
if no_recalc:
return super(KPISet, self).get(k)
else:
return self.__getitem__(k)
def items(self):
for item in super(KPISet, self).items():
yield (item[0], self.__getitem__(item[0]))
def iteritems(self):
if PY3:
raise TaurusInternalException("Invalid call")
for item in super(KPISet, self).iteritems():
yield (item[0], self.__getitem__(item[0]))
def viewitems(self):
if PY3:
raise TaurusInternalException("Invalid call")
for item in super(KPISet, self).viewitems():
yield (item[0], self.__getitem__(item[0]))
def viewvalues(self):
raise TaurusInternalException("Invalid call")
def values(self):
raise TaurusInternalException("Invalid call")
def recalculate(self): # FIXME: get rid of it at all?
"""
Recalculate averages, stdev and percentiles
:return:
"""
if self[self.SAMPLE_COUNT]:
self[self.AVG_CONN_TIME] = self.sum_cn / self[self.SAMPLE_COUNT]
self[self.AVG_LATENCY] = self.sum_lt / self[self.SAMPLE_COUNT]
self[self.AVG_RESP_TIME] = self.sum_rt / self[self.SAMPLE_COUNT]
if len(self._concurrencies):
self[self.CONCURRENCY] = sum(self._concurrencies.values())
return self
def merge_kpis(self, src, sid=None):
"""
Merge other instance into self
        :param sid: source ID to use when summing up concurrency
:type src: KPISet
:return:
"""
        src.recalculate()  # TODO: this might not be a resource-efficient strategy
self.sum_cn += src.sum_cn
self.sum_lt += src.sum_lt
self.sum_rt += src.sum_rt
self[self.SAMPLE_COUNT] += src[self.SAMPLE_COUNT]
self[self.SUCCESSES] += src[self.SUCCESSES]
self[self.FAILURES] += src[self.FAILURES]
self[self.BYTE_COUNT] += src[self.BYTE_COUNT]
# NOTE: should it be average? mind the timestamp gaps
if src[self.CONCURRENCY]:
self._concurrencies[sid] = src[self.CONCURRENCY]
if src[self.RESP_TIMES]:
self[self.RESP_TIMES].merge(src[self.RESP_TIMES])
elif not self[self.PERCENTILES]:
# using existing percentiles, in case we have no source data to recalculate them
# TODO: it's not valid to overwrite, better take average
self[self.PERCENTILES] = copy.deepcopy(src[self.PERCENTILES])
self[self.RESP_CODES].update(src[self.RESP_CODES])
for src_item in src[self.ERRORS]:
self.inc_list(self[self.ERRORS], ('msg', src_item['msg']), src_item)
@staticmethod
def from_dict(obj):
"""
:type obj: dict
:rtype: KPISet
"""
inst = KPISet()
assert inst.PERCENTILES in obj
inst.perc_levels = [float(x) for x in obj[inst.PERCENTILES].keys()]
for key, val in iteritems(obj):
if key == inst.RESP_TIMES:
if isinstance(val, dict):
for value, count in iteritems(val):
inst[inst.RESP_TIMES].add(value, count)
else:
inst[key] = val
inst.sum_cn = obj[inst.AVG_CONN_TIME] * obj[inst.SAMPLE_COUNT]
inst.sum_lt = obj[inst.AVG_LATENCY] * obj[inst.SAMPLE_COUNT]
inst.sum_rt = obj[inst.AVG_RESP_TIME] * obj[inst.SAMPLE_COUNT]
for error in inst[KPISet.ERRORS]:
error['urls'] = Counter(error['urls'])
return inst
class DataPoint(dict):
"""
Represents an aggregate data point
:param ts: timestamp of this point
"""
SOURCE_ID = 'id'
TIMESTAMP = "ts"
CURRENT = "current"
CUMULATIVE = "cumulative"
SUBRESULTS = "subresults"
def __init__(self, ts, perc_levels=()):
"""
:type ts: int
:type perc_levels: list[float]
"""
super(DataPoint, self).__init__()
self.perc_levels = perc_levels
self[self.SOURCE_ID] = None
self[self.TIMESTAMP] = ts
self[self.CUMULATIVE] = {}
self[self.CURRENT] = {}
self[self.SUBRESULTS] = []
def __deepcopy__(self, memo):
new = DataPoint(self[self.TIMESTAMP], self.perc_levels)
for key in self.keys():
new[key] = copy.deepcopy(self[key], memo)
return new
def __merge_kpis(self, src, dst, sid):
"""
:param src: KPISet
:param dst: KPISet
:param sid: int
:return:
"""
for label, val in iteritems(src):
dest = dst.setdefault(label, KPISet(self.perc_levels))
if not isinstance(val, KPISet):
val = KPISet.from_dict(val)
val.perc_levels = self.perc_levels
dest.merge_kpis(val, sid)
def recalculate(self):
"""
Recalculate all KPISet's
"""
for val in self[self.CURRENT].values():
val.recalculate()
for val in self[self.CUMULATIVE].values():
val.recalculate()
def merge_point(self, src, do_recalculate=True):
"""
:type src: DataPoint
"""
if self[self.TIMESTAMP] != src[self.TIMESTAMP]:
msg = "Cannot merge different timestamps (%s and %s)"
raise TaurusInternalException(msg % (self[self.TIMESTAMP], src[self.TIMESTAMP]))
self[DataPoint.SUBRESULTS].append(src)
self.__merge_kpis(src[self.CURRENT], self[self.CURRENT], src[DataPoint.SOURCE_ID])
self.__merge_kpis(src[self.CUMULATIVE], self[self.CUMULATIVE], src[DataPoint.SOURCE_ID])
if do_recalculate:
self.recalculate()
yaml.add_representer(KPISet, SafeRepresenter.represent_dict)
yaml.add_representer(DataPoint, SafeRepresenter.represent_dict)
class ResultsProvider(object):
"""
:type listeners: list[AggregatorListener]
"""
def __init__(self):
super(ResultsProvider, self).__init__()
self.cumulative = {}
self.track_percentiles = [0.0, 50.0, 90.0, 95.0, 99.0, 99.9, 100.0]
self.listeners = []
self.buffer_len = 2
self.min_buffer_len = 2
self.max_buffer_len = float('inf')
self.buffer_multiplier = 2
self.buffer_scale_idx = None
self.rtimes_len = None
self.known_errors = set()
self.max_error_count = 100
def _fold_error(self, error):
if not error or error in self.known_errors or self.max_error_count <= 0:
return error
size = len(self.known_errors)
threshold = (size / float(self.max_error_count)) ** 2
matches = difflib.get_close_matches(error, self.known_errors, 1, 1 - threshold)
if matches:
error = matches[0]
self.known_errors.add(error)
return error
def add_listener(self, listener):
"""
Add aggregate results listener
:type listener: AggregatorListener
"""
self.listeners.append(listener)
def __merge_to_cumulative(self, current):
"""
Merge current KPISet to cumulative
:param current: KPISet
"""
for label, data in iteritems(current):
cumul = self.cumulative.setdefault(label, KPISet(self.track_percentiles, self.rtimes_len))
cumul.merge_kpis(data)
cumul.recalculate()
def datapoints(self, final_pass=False):
"""
Generator object that returns datapoints from the reader
:type final_pass: bool
"""
for datapoint in self._calculate_datapoints(final_pass):
current = datapoint[DataPoint.CURRENT]
self.__merge_to_cumulative(current)
datapoint[DataPoint.CUMULATIVE] = copy.deepcopy(self.cumulative)
datapoint.recalculate()
for listener in self.listeners:
listener.aggregated_second(datapoint)
yield datapoint
@abstractmethod
def _calculate_datapoints(self, final_pass=False):
"""
:rtype : list[DataPoint]
"""
yield
class ResultsReader(ResultsProvider):
"""
Aggregator that reads samples one by one,
supposed to be attached to every executor
"""
label_generalize_regexps = [
(re.compile(r"\b[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}\b"), "U"),
(re.compile(r"\b[0-9a-fA-F]{2,}\b"), "U"),
# (re.compile(r"\b[0-9a-fA-F]{32}\b"), "U"), # implied by previous, maybe prev is too wide
(re.compile(r"\b\d{2,}\b"), "N")
]
def __init__(self, perc_levels=None):
super(ResultsReader, self).__init__()
self.generalize_labels = False
self.ignored_labels = []
self.log = logging.getLogger(self.__class__.__name__)
self.buffer = {}
self.min_timestamp = 0
if perc_levels is not None:
self.track_percentiles = perc_levels
def __process_readers(self, final_pass=False):
"""
:param final_pass: True if in post-process stage
:return:
"""
for result in self._read(final_pass):
if result is None:
self.log.debug("No data from reader")
break
elif isinstance(result, list) or isinstance(result, tuple):
t_stamp, label, conc, r_time, con_time, latency, r_code, error, trname, byte_count = result
if label in self.ignored_labels:
continue
if t_stamp < self.min_timestamp:
self.log.debug("Putting sample %s into %s", t_stamp, self.min_timestamp)
t_stamp = self.min_timestamp
if r_time < 0:
self.log.warning("Negative response time reported by tool, resetting it to zero")
r_time = 0
if t_stamp not in self.buffer:
self.buffer[t_stamp] = []
error = self._fold_error(error)
self.buffer[t_stamp].append((label, conc, r_time, con_time, latency, r_code, error, trname, byte_count))
else:
raise TaurusInternalException("Unsupported results from %s reader: %s" % (self, result))
def __aggregate_current(self, datapoint, samples):
"""
:param datapoint: DataPoint
:param samples: list of samples
:return:
"""
current = datapoint[DataPoint.CURRENT]
for sample in samples:
            # buffer entries are (label, concurrency, rt, connect time, latency, rc, error, trname, bytes)
            label, concur, r_time, con_time, latency, r_code, error, trname, byte_count = sample
            if label == '':
                label = '[empty]'
            if self.generalize_labels:
                label = self.__generalize_label(label)
            label = current.setdefault(label, KPISet(self.track_percentiles))
            # empty means overall
            label.add_sample((concur, r_time, con_time, latency, r_code, error, trname, byte_count))
overall = KPISet(self.track_percentiles)
for label in current.values():
overall.merge_kpis(label, datapoint[DataPoint.SOURCE_ID])
current[''] = overall
return current
def _calculate_datapoints(self, final_pass=False):
"""
A generator to read available datapoints
:type final_pass: bool
:rtype: DataPoint
"""
self.__process_readers(final_pass)
self.log.debug("Buffer len: %s; Known errors count: %s", len(self.buffer), len(self.known_errors))
if not self.buffer:
return
if self.cumulative and self.track_percentiles and self.buffer_scale_idx is not None:
old_len = self.buffer_len
chosen_timing = self.cumulative[''][KPISet.PERCENTILES][self.buffer_scale_idx]
self.buffer_len = round(chosen_timing * self.buffer_multiplier)
self.buffer_len = max(self.min_buffer_len, self.buffer_len)
self.buffer_len = min(self.max_buffer_len, self.buffer_len)
if self.buffer_len != old_len:
self.log.info("Changed data analysis delay to %ds", self.buffer_len)
timestamps = sorted(self.buffer.keys())
while final_pass or (timestamps[-1] >= (timestamps[0] + self.buffer_len)):
timestamp = timestamps.pop(0)
self.min_timestamp = timestamp + 1
self.log.debug("Aggregating: %s", timestamp)
samples = self.buffer.pop(timestamp)
datapoint = self.__get_new_datapoint(timestamp)
self.__aggregate_current(datapoint, samples)
yield datapoint
if not timestamps:
break
def __get_new_datapoint(self, timestamp):
"""
:rtype: DataPoint
"""
point = DataPoint(timestamp, self.track_percentiles)
point[DataPoint.SOURCE_ID] = id(self)
return point
@abstractmethod
def _read(self, final_pass=False):
"""
:param final_pass: True if called from post-process stage, when reader
            should report any remaining results
:rtype: list
:return: timestamp, label, concurrency, rt, latency, rc, error
"""
yield
def __generalize_label(self, label):
for regexp, replacement in self.label_generalize_regexps:
label = regexp.sub(replacement, label)
return label
class ConsolidatingAggregator(Aggregator, ResultsProvider):
"""
:type underlings: list[bzt.modules.aggregator.ResultsProvider]
"""
# TODO: switch to underling-count-based completeness criteria
def __init__(self):
Aggregator.__init__(self, is_functional=False)
ResultsProvider.__init__(self)
self.generalize_labels = False
self.ignored_labels = ["ignore"]
self.underlings = []
self.buffer = {}
self.rtimes_len = 1000
def prepare(self):
"""
Read aggregation options
"""
super(ConsolidatingAggregator, self).prepare()
# make unique & sort
self.track_percentiles = self.settings.get("percentiles", self.track_percentiles)
self.track_percentiles = list(set(self.track_percentiles))
self.track_percentiles.sort()
self.settings["percentiles"] = self.track_percentiles
self.ignored_labels = self.settings.get("ignore-labels", self.ignored_labels)
self.generalize_labels = self.settings.get("generalize-labels", self.generalize_labels)
self.min_buffer_len = dehumanize_time(self.settings.get("min-buffer-len", self.min_buffer_len))
max_buffer_len = self.settings.get("max-buffer-len", self.max_buffer_len)
try:
self.max_buffer_len = dehumanize_time(max_buffer_len)
except TaurusInternalException as exc:
self.log.debug("Exception in dehumanize_time(%s): %s", max_buffer_len, exc)
raise TaurusConfigError("Wrong 'max-buffer-len' value: %s" % max_buffer_len)
self.buffer_multiplier = self.settings.get("buffer-multiplier", self.buffer_multiplier)
count = len(self.track_percentiles)
if count == 1:
self.buffer_scale_idx = str(float(self.track_percentiles[0]))
if count > 1:
percentile = self.settings.get("buffer-scale-choice", 0.5)
percentiles = [i / (count - 1.0) for i in range(count)]
distances = [abs(percentile - percentiles[i]) for i in range(count)]
index_position = distances.index(min(distances))
self.buffer_scale_idx = str(float(self.track_percentiles[index_position]))
debug_str = 'Buffer scaling setup: percentile %s from %s selected'
self.log.debug(debug_str, self.buffer_scale_idx, self.track_percentiles)
self.rtimes_len = self.settings.get("rtimes-len", self.rtimes_len)
self.max_error_count = self.settings.get("max-error-variety", self.max_error_count)
def add_underling(self, underling):
"""
Add source for aggregating
:type underling: ResultsProvider
"""
underling.track_percentiles = self.track_percentiles
if isinstance(underling, ResultsReader):
underling.ignored_labels = self.ignored_labels
underling.generalize_labels = self.generalize_labels
underling.min_buffer_len = self.min_buffer_len
underling.max_buffer_len = self.max_buffer_len
underling.buffer_multiplier = self.buffer_multiplier
underling.buffer_scale_idx = self.buffer_scale_idx
underling.rtimes_len = self.rtimes_len
underling.max_error_count = self.max_error_count
underling.known_errors = self.known_errors # share error set between underlings
self.underlings.append(underling)
def check(self):
"""
Check if there is next aggregate data present
:rtype: bool
"""
for point in self.datapoints():
self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
return super(ConsolidatingAggregator, self).check()
def post_process(self):
"""
Process all remaining aggregate data
"""
super(ConsolidatingAggregator, self).post_process()
for point in self.datapoints(True):
self.log.debug("Processed datapoint: %s/%s", point[DataPoint.TIMESTAMP], point[DataPoint.SOURCE_ID])
def _process_underlings(self, final_pass):
for underling in self.underlings:
for data in underling.datapoints(final_pass):
tstamp = data[DataPoint.TIMESTAMP]
if self.buffer:
mints = min(self.buffer.keys())
if tstamp < mints:
self.log.debug("Putting datapoint %s into %s", tstamp, mints)
data[DataPoint.TIMESTAMP] = mints
tstamp = mints
self.buffer.setdefault(tstamp, []).append(data)
def _calculate_datapoints(self, final_pass=False):
"""
Override ResultsProvider._calculate_datapoints
"""
self._process_underlings(final_pass)
self.log.debug("Consolidator buffer[%s]: %s", len(self.buffer), self.buffer.keys())
if not self.buffer:
return
timestamps = sorted(self.buffer.keys())
while timestamps and (final_pass or (timestamps[-1] >= timestamps[0] + self.buffer_len)):
tstamp = timestamps.pop(0)
self.log.debug("Merging into %s", tstamp)
points_to_consolidate = self.buffer.pop(tstamp)
point = DataPoint(tstamp, self.track_percentiles)
for subresult in points_to_consolidate:
self.log.debug("Merging %s", subresult[DataPoint.TIMESTAMP])
point.merge_point(subresult)
point.recalculate()
yield point
class NoneAggregator(Aggregator, ResultsProvider):
"""
Dummy aggregator
"""
def __init__(self):
Aggregator.__init__(self, is_functional=False)
ResultsProvider.__init__(self)
def _calculate_datapoints(self, final_pass=False):
pass
class AggregatorListener(object):
"""
Mixin for listeners of aggregator data
"""
@abstractmethod
def aggregated_second(self, data):
"""
Notification about new data point
:param data: bzt.modules.reporting.DataPoint
"""
pass
def finalize(self):
"""
This method is called at the end of run
to close open file descriptors etc.
"""
pass
|
from abstract_rl.src.data_structures.temporal_difference_data.trajectory_collection import TrajectoryCollection
class TrajectoryOperator:
"""
A simple evaluation operator interface.
"""
def transform(self, trajectory):
"""
Transform a trajectory with the current instance of the evaluation operator.
:param trajectory: trajectory to transform.
"""
raise NotImplementedError
def transform_all(self, trajectories):
"""
        Transform all given trajectories with the current instance of the evaluation operator.
:param trajectories: trajectories to transform.
"""
if isinstance(trajectories, TrajectoryCollection):
trajectories = trajectories.trajectories()
for trajectory in trajectories:
self.transform(trajectory)
|
#!/usr/bin/env python3.5
import sys
import re
import os
import csv
def read_file(fname):
f = open(fname, 'r')
csv_reader = csv.reader(f, delimiter='~')
no_rows = 0
for row in csv_reader:
no_rows += 1
no_cols = len(row)
print("Row %d: columns = %d" % (no_rows, no_cols))
f.close()
print(".........")
print("Number of records in csv file: %d" % no_rows)
if __name__ == '__main__':
args = sys.argv[1:]
for fl in args:
print("File : %s" % fl)
print("..................................")
read_file(fl)
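# Example invocation (illustrative; the script and file names below are placeholders):
#   python3 count_csv_rows.py data1.csv data2.csv
# For each file it prints the column count of every '~'-delimited row and the total number of records.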
|
import talker.base
import talker.server
from talker.mesh import PeerObserver, LOG, PeerClient
class TopoMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_command("/peers", TopoMixin.command_peers)
self.register_command("/peer-listen", TopoMixin.command_peer_listen)
self.register_command("/peer-connect", TopoMixin.command_peer_connect)
self.register_command("/peer-kill", TopoMixin.command_peer_kill)
self.register_command("/broadcast", TopoMixin.command_broadcast)
self.register_command("/reachable", TopoMixin.command_reachable)
def command_reachable(self):
helper = self.server.observer(TopologyObserver)
reachable = helper.reachable()
self.output_line("There are {} reachable peers:".format(len(reachable)))
for node in reachable:
self.output_line(node)
def command_peers(self):
peers = self.server.list_peers()
self.output_line("There are {} peers directly connected".format(len(peers)))
for peer in peers:
self.output_line(str(peer))
def command_peer_listen(self, host, port):
port = int(port)
LOG.info("Adding PeerServer at %s %d", host, port)
s = self.server.make_server_socket(host, port)
peer_server = talker.base.ServerSocket(server=self.server, socket=s, client_factory=PeerClient)
self.server.add_socket(peer_server)
def command_peer_connect(self, host, port):
port = int(port)
LOG.info("Adding PeerClient at %s %d", host, port)
peer = PeerClient.connect(self.server, host, port)
self.server.add_socket(peer)
def command_peer_kill(self, host, port):
port = int(port)
LOG.info("Killing PeerClient at %s %d", host, port)
for peer in self.server.list_peers():
if peer.addr == (host, port):
self.output_line("Shutting down {}".format(peer))
peer.close()
def command_broadcast(self, *args):
message = ' '.join(args)
LOG.info("Broadcasting message: %s", message)
self.server.peer_broadcast(message)
# This is a more complicated observer of peer-to-peer messages.
# As servers are connected to and disconnected from each other, each node
# broadcasts across the network the latest version of its connectivity
# information. TopologyObservers on each server collate this information
# and use it to form an up-to-date map of who is connected to whom.
class TopologyObserver(PeerObserver):
I_AM = 'i-am'
I_SEE = 'i-see'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_method(TopologyObserver.I_AM, self.recv_i_am)
self.register_method(TopologyObserver.I_SEE, self.recv_i_see)
# We track all the peers we know about, keeping track of who they
# are directly connected to, and the most recent update we have received from them.
self.peer_ids = {}
self.topology = {self.server.peer_id: (0, set())}
self.calculate_reachable_peers()
def peer_added(self, peer):
LOG.debug('New peer detected by %s: %s', self, peer)
self.unicast(peer, TopologyObserver.I_AM)
def peer_removed(self, peer):
LOG.debug('Peer removed: %s', peer)
if peer in self.peer_ids:
del self.peer_ids[peer]
self.broadcast_new_neighbours()
def broadcast_new_neighbours(self):
self.broadcast(TopologyObserver.I_SEE, ';'.join(self.peer_ids.values()))
def recv_i_am(self, peer, source, id, args):
self.peer_ids[peer] = source
self.broadcast_new_neighbours()
def recv_i_see(self, peer, source, id, args):
if args == '':
neighbours = set()
else:
neighbours = set(args.split(';'))
if source not in self.topology:
self.topology[source] = (id, neighbours)
self.calculate_reachable_peers()
# We've just heard about a new server joining the network, so let them know about us.
self.broadcast_new_neighbours()
elif self.topology[source][0] < id:
old_neighbours = self.topology[source][1]
self.topology[source] = (id, neighbours)
if old_neighbours != neighbours:
self.calculate_reachable_peers()
def calculate_reachable_peers(self):
LOG.debug('Calculating reachability from topology, initial is %s', self.topology)
# Start with ourselves, work out who is reachable on the current network
reachable = set()
new = {self.server.peer_id}
while len(new) != 0:
reachable.update(new)
added = new
new = set()
for node in added:
if node in self.topology:
new.update(self.topology[node][1])
new.difference_update(reachable)
LOG.debug('Calculated reachable peers: %s', reachable)
for node in set(self.topology):
if node not in reachable:
LOG.debug(' deleting node %s', node)
del self.topology[node]
LOG.debug('Final topology is %s', self.topology)
def reachable(self):
return set(self.topology)
|
from math import hypot
cat_opo = int(input("Enter the length of the opposite leg: "))
cat_adj = int(input("Enter the length of the adjacent leg: "))
hipotenusa = hypot(cat_opo, cat_adj)
print(hipotenusa)
|
"""add_agent_external_url_to_streamsets
Revision ID: 9d3d42cad294
Revises: 507ccc9cb1a6
Create Date: 2020-11-13 13:05:40.922113
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9d3d42cad294'
down_revision = '507ccc9cb1a6'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('streamsets', sa.Column('agent_external_url', sa.String, nullable=False))
def downgrade():
op.drop_column('streamsets', 'agent_external_url')
|
from mendeleev.fetch import fetch_table
from mendeleev import element
ptable = fetch_table('elements')
def nombre_atomico(simbolo):
return element(simbolo).name
def estados_oxidacion(simbolo):
return element(simbolo).oxistates
def numero_atomico(simbolo):
    '''
    Returns the atomic number.
    Usage: numero_atomico(string)
    string -> element symbol, e.g. 'H', 'K', 'Be'
    '''
num_at = ptable[ptable.symbol==simbolo]['atomic_number'].to_list()[0]
return num_at
def simbolo_atomico(simbolo):
    '''
    Returns the atomic symbol.
    Usage: simbolo_atomico(string)
    string -> element symbol, e.g. 'H', 'K', 'Be'
    '''
sim_at = ptable[ptable.symbol==simbolo]['symbol'].to_list()[0]
return sim_at
def peso_atomico(simbolo):
    '''
    Returns the atomic weight.
    Usage: peso_atomico(string)
    string -> element symbol, e.g. 'H', 'K', 'Be'
    '''
peso_at = ptable[ptable.symbol==simbolo]['atomic_weight'].to_list()[0]
return peso_at
def conf_electronica(simbolo):
    '''
    Returns the electronic configuration of the element.
    Usage: conf_electronica(string)
    string -> element symbol, e.g. 'H', 'K', 'Be'
    '''
conf_el = ptable[ptable.symbol==simbolo]['electronic_configuration'].to_list()[0]
return conf_el
def info_elemento(simbolo):
    '''
    Prints information about a chemical element:
    - Atomic number
    - Symbol
    - Atomic weight
    - Electronic configuration
    Usage: info_elemento(string)
    string -> element symbol, e.g. 'H', 'K', 'Be'
    '''
    print('Atomic number = ' + str(numero_atomico(simbolo)))
    print('Symbol = ' + simbolo_atomico(simbolo))
    print('Atomic weight = ' + str(peso_atomico(simbolo)))
    print('Electronic configuration = ' + conf_electronica(simbolo))
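# Example (assuming the mendeleev package data is available; values shown for illustration):
#   >>> info_elemento('H')   # prints atomic number 1, symbol 'H', weight ~1.008, configuration '1s'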
|
"""
Non-negative 1-sparse recovery problem.
This algorithm assumes we have a non-negative dynamic stream.
Given a stream of tuples, where each tuple contains a number and a sign (+/-), it checks whether the
stream is 1-sparse, meaning that the elements in the stream cancel each other out in such
a way that there is only a single unique number left at the end.
Examples:
#1
Input: [(4,'+'), (2,'+'),(2,'-'),(4,'+'),(3,'+'),(3,'-')],
Output: 4
Comment: Since 2 and 3 gets removed.
#2
Input: [(2,'+'),(2,'+'),(2,'+'),(2,'+'),(2,'+'),(2,'+'),(2,'+')]
Output: 2
Comment: No other numbers present
#3
Input: [(2,'+'),(2,'+'),(2,'+'),(2,'+'),(2,'+'),(2,'+'),(1,'+')]
Output: None
Comment: Not 1-sparse
"""
def one_sparse(array):
"""1-sparse algorithm
Keyword arguments:
array -- stream of tuples
"""
sum_signs = 0
bitsum = [0]*32
sum_values = 0
for val,sign in array:
if sign == "+":
sum_signs += 1
sum_values += val
else:
sum_signs -= 1
sum_values -= val
_get_bit_sum(bitsum,val,sign)
if sum_signs > 0 and _check_every_number_in_bitsum(bitsum,sum_signs):
return int(sum_values/sum_signs)
else:
return None
#Helper function to check that every entry in the list is either 0 or the same as the
#sum of signs
def _check_every_number_in_bitsum(bitsum,sum_signs):
for val in bitsum:
if val != 0 and val != sum_signs :
return False
return True
# Adds bit representation value to bitsum array
def _get_bit_sum(bitsum,val,sign):
i = 0
if sign == "+":
while val:
bitsum[i] += val & 1
i +=1
val >>=1
else :
while val:
bitsum[i] -= val & 1
i +=1
val >>=1
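# Minimal usage sketch (illustrative, not part of the original module), reproducing
# the three examples from the module docstring above.
if __name__ == "__main__":
    print(one_sparse([(4, '+'), (2, '+'), (2, '-'), (4, '+'), (3, '+'), (3, '-')]))  # expected: 4
    print(one_sparse([(2, '+')] * 7))  # expected: 2
    print(one_sparse([(2, '+')] * 6 + [(1, '+')]))  # expected: None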
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created on Fri Mar 26 15:14:34 2021
# Copyright © Enrico Gandini <enricogandini93@gmail.com>
#
# Distributed under terms of the MIT License.
"""Utils necessary to store data generated by 'Molecular Similarity Survey'
web app into an SQL database.
"""
from sqlalchemy import Column, Integer, String, Date, Boolean, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import relationship
from sqlalchemy.orm import sessionmaker
HexUUID = String(32)
Base = declarative_base()
class MolecularPair(Base):
__tablename__ = "MolecularPairs"
id = Column(Integer, primary_key=True)
answers = relationship("Answer")
class User(Base):
__tablename__ = "Users"
id = Column(HexUUID, primary_key=True)
date = Column(Date, nullable=False)
status = Column(String, default="Captcha")
display_above = Column(String)
experience = Column(String)
answers = relationship("Answer", back_populates="user")
class Answer(Base):
__tablename__ = "Answers"
id = Column(Integer, primary_key=True)
id_user = Column(HexUUID, ForeignKey("Users.id"))
id_pair = Column(Integer, ForeignKey("MolecularPairs.id"), nullable=False)
similar = Column(String, nullable=False)
user = relationship("User", back_populates="answers")
def create_db_engine_and_session(db_objects: dict):
"""Add an `Engine` and a `Session`
to a dictionary that stores database properties.
Using this dictionary is important for the survey app:
the dictionary is globally accessible by all functions
in the async loop of the app."""
db_objects["engine"] = create_engine(db_objects["url"])
Session = sessionmaker()
Session.configure(bind=db_objects["engine"])
db_objects["session"] = Session()
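# Minimal usage sketch (illustrative, not part of the original app): the in-memory
# SQLite URL below is an assumption used purely for demonstration.
if __name__ == "__main__":
    db_objects = {"url": "sqlite:///:memory:"}
    create_db_engine_and_session(db_objects)
    Base.metadata.create_all(db_objects["engine"])  # create the three tables defined above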
|
s1 = ' \n'
print(s1.isspace()) # -- SP1
s2 = ' b '
print(s2.isspace()) # -- SP2
s3 = ' '
print(s3.isspace()) # -- SP3
s4 = ' \t'
print(s4.isspace()) # -- SP4
s5 = '10+3 = 13 '
print(s5.isspace()) # -- SP5
s6 = ' \f'
print(s6.isspace()) # -- SP6
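# Expected output for reference (str.isspace() is True only for non-empty, all-whitespace strings):
#   SP1 True, SP2 False, SP3 True, SP4 True, SP5 False, SP6 True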
|
import numpy as np
import xarray as xr
from itertools import combinations
import dask.array as dsa
from ..core import calc_cape
from ..core import calc_srh
from .fixtures import empty_dask_array, dataset_soundings
import pytest
@pytest.fixture(scope='module')
def p_t_td_1d(nlevs=20):
p = np.random.rand(nlevs)
t = np.random.rand(nlevs)
td = np.random.rand(nlevs)
return p, t, td
@pytest.fixture(scope='module')
def p_t_td_3d(nlevs=20, nx=10, ny=5):
p = np.random.rand(nlevs, ny, nx)
t = np.random.rand(nlevs, ny, nx)
td = np.random.rand(nlevs, ny, nx)
return p, t, td
@pytest.fixture(scope='module')
def p_t_td_surface(nx=10, ny=5):
ps = np.random.rand(ny, nx)
ts = np.random.rand(ny, nx)
tds = np.random.rand(ny, nx)
return ps, ts, tds
# surface mode returns cape, cin
# most-unstable mode returns cape, cin, mulev, zmulev
@pytest.mark.parametrize('sourcein,n_returns',
[('surface', 2), ('most-unstable', 4)])
def test_calc_cape_shape_3d(p_t_td_3d, p_t_td_surface, sourcein, n_returns):
p, t, td = p_t_td_3d
ps, ts, tds = p_t_td_surface
result = calc_cape(p, t, td, ps, ts, tds, source=sourcein, method='dummy')
assert len(result) == n_returns
for data in result:
assert data.shape == (1, p.shape[1], p.shape[2])
# tolerance for tests
decimal_cape = 0
decimal_cin = 0
decimal_mulv = 0
decimal_zmulv = 0
def test_calc_surface_cape_model_lev(dataset_soundings):
    """Test surface-based CAPE (model levels) against values previously calculated with George Bryan's code"""
ds = dataset_soundings
cape, cin = calc_cape(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
source='surface', ml_depth=500., adiabat='pseudo-liquid',
pinc=100.,
method='fortran', vertical_lev='sigma', pres_lev_pos=1)
np.testing.assert_almost_equal(cape[0], ds.SB_CAPE_pinc100.values, decimal_cape)
np.testing.assert_almost_equal(cin[0], ds.SB_CIN_pinc100.values, decimal_cin)
def test_calc_most_unstable_cape_model_lev(dataset_soundings):
    """Test most-unstable CAPE (model levels) against values previously calculated with George Bryan's code"""
ds = dataset_soundings
# in real data, the surface values will come in separate variables
cape, cin, mulv, zmulv = calc_cape(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
source='most-unstable', ml_depth=500., adiabat='pseudo-liquid',
pinc=100.,
method='fortran', vertical_lev='sigma', pres_lev_pos=1)
np.testing.assert_almost_equal(cape[0], ds.MU_CAPE_pinc100.values, decimal_cape)
np.testing.assert_almost_equal(cin[0], ds.MU_CIN_pinc100.values, decimal_cin)
np.testing.assert_almost_equal(mulv[0], ds.MU_lv_pinc100.values.astype('int32'), decimal_mulv)
np.testing.assert_almost_equal(zmulv[0], ds.MU_z_pinc100.values, decimal_zmulv)
def test_calc_mixed_layer_cape_model_lev(dataset_soundings):
    """Test mixed-layer CAPE (model levels) against values previously calculated with George Bryan's code"""
ds = dataset_soundings
cape, cin = calc_cape(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
source='mixed-layer', ml_depth=500., adiabat='pseudo-liquid',
pinc=1000.,
method='fortran', vertical_lev='sigma', pres_lev_pos=1)
np.testing.assert_almost_equal(cape[0], ds.ML_CAPE_pinc1000_mldepth500.values, decimal_cape)
np.testing.assert_almost_equal(cin[0], ds.ML_CIN_pinc1000_mldepth500.values, decimal_cin)
def test_calc_surface_cape_pressure_lev(dataset_soundings):
    """Test surface-based CAPE (pressure levels) against values previously calculated with George Bryan's code"""
ds = dataset_soundings
cape, cin = calc_cape(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
source='surface', ml_depth=500., adiabat='pseudo-liquid',
pinc=100.,
method='fortran', vertical_lev='pressure',
pres_lev_pos=ds.pressure.values[0]*0+1)
np.testing.assert_almost_equal(cape[0], ds.SB_CAPE_pinc100.values, decimal_cape)
np.testing.assert_almost_equal(cin[0], ds.SB_CIN_pinc100.values, decimal_cin)
def test_calc_most_unstable_cape_pressure_lev(dataset_soundings):
    """Test most-unstable CAPE (pressure levels) against values previously calculated with George Bryan's code"""
ds = dataset_soundings
# in real data, the surface values will come in separate variables
cape, cin, mulv, zmulv = calc_cape(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
source='most-unstable', ml_depth=500., adiabat='pseudo-liquid',
pinc=100.,
method='fortran', vertical_lev='pressure',
pres_lev_pos=ds.pressure.values[0]*0+1)
np.testing.assert_almost_equal(cape[0], ds.MU_CAPE_pinc100.values, decimal_cape)
np.testing.assert_almost_equal(cin[0], ds.MU_CIN_pinc100.values, decimal_cin)
np.testing.assert_almost_equal(mulv[0], ds.MU_lv_pinc100.values.astype('int32'), decimal_mulv)
np.testing.assert_almost_equal(zmulv[0], ds.MU_z_pinc100.values, decimal_zmulv)
def test_calc_mixed_layer_cape_pressure_lev(dataset_soundings):
    """Test mixed-layer CAPE (pressure levels) against values previously calculated with George Bryan's code"""
ds = dataset_soundings
cape, cin = calc_cape(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
source='mixed-layer', ml_depth=500., adiabat='pseudo-liquid',
pinc=1000.,
method='fortran', vertical_lev='pressure',
pres_lev_pos=ds.pressure.values[0]*0+1)
np.testing.assert_almost_equal(cape[0], ds.ML_CAPE_pinc1000_mldepth500.values, decimal_cape)
np.testing.assert_almost_equal(cin[0], ds.ML_CIN_pinc1000_mldepth500.values, decimal_cin)
def test_calc_srh_model_lev(dataset_soundings):
"""Test SRH code"""
ds = dataset_soundings
srh, rm, lm, mean_6km = calc_srh(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.u_wind_ms.values[1:],
ds.v_wind_ms.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
ds.u_wind_ms.values[0],
ds.v_wind_ms.values[0],
depth = 3000,
vertical_lev='sigma', pres_lev_pos=1,
output_var='all')
srh2 = calc_srh(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.u_wind_ms.values[1:],
ds.v_wind_ms.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
ds.u_wind_ms.values[0],
ds.v_wind_ms.values[0],
depth = 3000,
vertical_lev='sigma', pres_lev_pos=1,
output_var='srh')
np.testing.assert_almost_equal(srh[0], ds.SRH03_model_lev.values, 5)
np.testing.assert_almost_equal(srh2[0], ds.SRH03_model_lev.values, 5)
def test_calc_srh_pressure_lev(dataset_soundings):
"""Test SRH code"""
ds = dataset_soundings
srh, rm, lm, mean_6km = calc_srh(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.u_wind_ms.values[1:],
ds.v_wind_ms.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
ds.u_wind_ms.values[0],
ds.v_wind_ms.values[0],
depth = 3000,
vertical_lev='pressure',
pres_lev_pos=ds.pressure.values[0]*0+1,
output_var='all')
srh2 = calc_srh(ds.pressure.values[1:],
ds.temperature.values[1:],
ds.dewpoint.values[1:],
ds.u_wind_ms.values[1:],
ds.v_wind_ms.values[1:],
ds.pressure.values[0],
ds.temperature.values[0],
ds.dewpoint.values[0],
ds.u_wind_ms.values[0],
ds.v_wind_ms.values[0],
depth = 3000,
vertical_lev='pressure',
pres_lev_pos=ds.pressure.values[0]*0+1,
output_var='srh')
np.testing.assert_almost_equal(srh[0], ds.SRH03_pressure_lev.values, 5)
np.testing.assert_almost_equal(srh2[0], ds.SRH03_pressure_lev.values, 5)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-06 15:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fileupload', '0002_auto_20170726_2027'),
]
operations = [
migrations.AddField(
model_name='picture',
name='file_type',
field=models.CharField(default='pas', max_length=3),
preserve_default=False,
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pyfdm import fdmexec
JSBSIM_ROOT = os.path.abspath('./data/jsbsim_data') + os.sep
print('JSBSIM_PATH : ' + JSBSIM_ROOT)
#Load
fdm = fdmexec.FGFDMExec(root_dir=JSBSIM_ROOT)
fdm.load_model("f16")
fdm.print_property_catalog()
|
import numpy as np
import math
import matplotlib.pyplot as plt
#### 11 July 13: E-M Coin Toss Example as given in Stanford EM tutorial* ####
# * http://algorithmicalley.com/archive/2013/03/29/the-expectation-maximization-algorithm.aspx
def get_mn_log_likelihood(obs,probs):
""" Return the (log)likelihood of obs, given the probs"""
# Multinomial Distribution Log PDF
# ln (pdf) = multinomial coeff * product of probabilities
# ln[f(x|n, p)] = [ln(n!) - (ln(x1!)+ln(x2!)+...+ln(xk!))] + [x1*ln(p1)+x2*ln(p2)+...+xk*ln(pk)]
multinomial_coeff_denom= 0
prod_probs = 0
for x in range(0,len(obs)): # loop through state counts in each observation
multinomial_coeff_denom = multinomial_coeff_denom + math.log(math.factorial(obs[x]))
prod_probs = prod_probs + obs[x]*math.log(probs[x])
multinomial_coeff = math.log(math.factorial(sum(obs))) - multinomial_coeff_denom
likelihood = multinomial_coeff + prod_probs
return likelihood
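# Quick illustrative check of the helper above (rounded values, added here and not
# part of the original tutorial): for 5 heads / 5 tails under p(heads) = 0.6,
# get_mn_log_likelihood([5, 5], [0.6, 0.4])
#   = ln(10!) - 2*ln(5!) + 5*ln(0.6) + 5*ln(0.4) ≈ 5.53 - 2.55 - 4.58 ≈ -1.61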
# The real situation for the 5 experiments which individually comprise 10 tosses with the corresponding coins (A or B)
# 1st: Coin B, {HTTTHHTHTH}, 5H,5T
# 2nd: Coin A, {HHHHTHHHHH}, 9H,1T
# 3rd: Coin A, {HTHHHHHTHH}, 8H,2T
# 4th: Coin B, {HTHTTTHHTT}, 4H,6T
# 5th: Coin A, {THHHTHHHTH}, 7H,3T
# so, from MLE: pA(heads) = 0.80 and pB(heads)=0.45
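# Sanity check of the MLE figures quoted above (added illustration, not part of the
# original tutorial): coin A produced 9+8+7 heads over 3 runs of 10 tosses, coin B
# produced 5+4 heads over 2 runs of 10 tosses.
mle_pA = (9 + 8 + 7) / 30.0   # = 0.80
mle_pB = (5 + 4) / 20.0       # = 0.45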
# represent the experiments
head_counts = np.array([5,9,8,4,7])
tail_counts = 10-head_counts
experiments = list(zip(head_counts, tail_counts))  # list() so len() and indexing below also work on Python 3
# initialise the pA(heads) and pB(heads)
pA_heads = np.zeros(100); pA_heads[0] = 0.60
pB_heads = np.zeros(100); pB_heads[0] = 0.50
# E-M begins!
delta = 0.001
j = 0 # iteration counter
improvement = float('inf')
while (improvement>delta):
# expectations with coins A and B with the initial pA_heads and pB_heads parameters
expectation_A = np.zeros((5,2), dtype=float)
expectation_B = np.zeros((5,2), dtype=float)
for i in range(0,len(experiments)):
e = experiments[i] # i'th experiment
ll_A = get_mn_log_likelihood(e,np.array([pA_heads[j],1-pA_heads[j]])) # loglikelihood of e given coin A
ll_B = get_mn_log_likelihood(e,np.array([pB_heads[j],1-pB_heads[j]])) # loglikelihood of e given coin B
weightA = math.exp(ll_A) / ( math.exp(ll_A) + math.exp(ll_B) ) # corresponding weight of A proportional to likelihood of A
weightB = math.exp(ll_B) / ( math.exp(ll_A) + math.exp(ll_B) ) # corresponding weight of B proportional to likelihood of B
# Find Expectations of coins A and B for the i'th experiment
expectation_A[i] = np.dot(weightA, e) # weightA * e does not work.
expectation_B[i] = np.dot(weightB, e)
# Find the parameters that maximise the accumulated (sum) expectations of A and B
# (through counting, i.e. MLE of pA_heads and pB_heads for multinomial distributions)
pA_heads[j+1] = sum(expectation_A)[0] / sum(sum(expectation_A));
pB_heads[j+1] = sum(expectation_B)[0] / sum(sum(expectation_B));
improvement = max( abs(np.array([pA_heads[j+1],pB_heads[j+1]]) - np.array([pA_heads[j],pB_heads[j]]) ))
j = j+1
plt.figure();
plt.plot(range(0,j),pA_heads[0:j], 'r--')
plt.plot(range(0,j),pB_heads[0:j])
plt.show()
|
import pysam
import csv
import sys
import collections
class Variant(object):
def __init__(self, original, new, refdict):
self.original = original
self.new = new
self.chrom = self.original['CHROM']
self.position = int(self.original['POS'])
self.coordinates = (self.chrom, self.position)
self.original_ref = self.original['REF']
self.new_ref = self.new['REF']
self.original_alt = self.original['ALT']
self.new_alt = self.new['ALT']
# Needed to remove these since this is a run with no filtering, meaning we
# don't know what is the real base
# self.real_reference_base = refdict[self.coordinates].reference
# self.real_alternate_base = refdict[self.coordinates].alternate
self.cutoffs = cos # cos is a global variable defined in the combine_SNPs function
def __repr__(self):
return 'Variant(original={}, new={})'.format(self.original, self.new)
def get_coverage_cutoffs(cutoff_csv):
# Note: this appears unused (combine_SNPs defines its own copy of this helper below);
# the parameter is renamed so it no longer shadows the csv module.
with open(cutoff_csv, 'rb') as cf:
cutoff_reader = csv.reader(cf, delimiter=',')
next(cutoff_reader) # skip the header row
cutoffs = {int(row[0]): int(row[1]) for row in cutoff_reader}
return cutoffs
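# The cutoff table is expected to be a two-column CSV with a header row, mapping a
# coverage value to the minimum number of high-quality supporting reads required at
# that coverage (column names and values below are illustrative only):
#   coverage,cutoff
#   15,8
#   30,12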
def get_new_row(self, orig_bam, new_bam):
start_pos = self.position - 1
end_pos = self.position
if not (self.original_ref == self.new_alt and self.original_alt == self.new_ref):
return
orig_refList = []
orig_altList = []
# These will correspond to the *real* reference and alternate bases,
# not the reversed ones in the new bam
new_refList = []
new_altList = []
orig_sam = pysam.Samfile(orig_bam, 'rb')
new_sam = pysam.Samfile(new_bam, 'rb')
for pileupcolumn in orig_sam.pileup(reference=self.chrom, start=start_pos, end=end_pos):
if pileupcolumn.pos == start_pos:
count = 0
bases = set()
for read in pileupcolumn.pileups:
if read.alignment.overlap(start_pos, end_pos) == 1:
count += 1
bases.add(read.alignment.seq[read.qpos])
quality = ord(read.alignment.qqual[read.qpos]) - 33
if read.alignment.seq[read.qpos] == self.original_ref and quality >= 20:
orig_refList.append(read.alignment.qname)
elif read.alignment.seq[read.qpos] == self.original_alt and quality >= 20:
orig_altList.append(read.alignment.qname)
# Check for three state SNPs
if len(bases) > 2:
return
# Check coverage
cutoff = self.cutoffs.get(count, 24)
if count >= 15 and len(orig_refList) >= cutoff and len(orig_altList) >= cutoff:
pass
else:
return
for pileupcolumn in new_sam.pileup(reference=self.chrom, start=start_pos, end=end_pos):
if pileupcolumn.pos == start_pos:
count = 0
bases = set()
for read in pileupcolumn.pileups:
if read.alignment.overlap(start_pos, end_pos) == 1:
count += 1
bases.add(read.alignment.seq[read.qpos])
quality = ord(read.alignment.qqual[read.qpos]) - 33
if read.alignment.seq[read.qpos] == self.new_ref and quality >= 20:
new_altList.append(read.alignment.qname)
elif read.alignment.seq[read.qpos] == self.new_alt and quality >= 20:
new_refList.append(read.alignment.qname)
# Check for three state SNPs
if len(bases) > 2:
return
# Check coverage
cutoff = self.cutoffs.get(count, 24)
if count >= 15 and len(new_refList) >= cutoff and len(new_altList) >= cutoff:
pass
else:
return
orig_sam.close()
new_sam.close()
reference_depth = len(set(orig_refList + new_refList))
alternate_depth = len(set(orig_altList + new_altList))
newrow = {'CHROM': self.chrom,
'POS': self.position,
'REF': self.original_ref,
'ALT': self.original_alt,
'R_Depth': reference_depth,
'A_Depth': alternate_depth}
# print 'orig_refList:', orig_refList
# print 'orig_altList:', orig_altList
# print 'new_refList:', new_refList
# print 'new_altList:', new_altList
return newrow
def combine_SNPs(orig_f, new_f, orig_bam, new_bam, ref_vcf, output_f, cutoff_table):
def get_coverage_cutoffs(cutoff_table):
with open(cutoff_table, 'rb') as cf:
cutoff_reader = csv.reader(cf, delimiter=',')
next(cutoff_reader) # remove header
cutoffs = {int(row[0]): int(row[1]) for row in cutoff_reader}
return cutoffs
global cos
cos = get_coverage_cutoffs(cutoff_table)
with open(ref_vcf, 'rb') as ref_vcf:
ref_reader = csv.reader(ref_vcf, delimiter='\t')
def get_single_base_positions(reader):
for row in reader:
if len(row[3]) == 1 and len(row[4]) == 1:
yield [row[0], int(row[1]), row[3], row[4]] # chrom, pos, ref, alt
else:
# if len(row[3]) == len(row[4]):
position = int(row[1])
for refbase, altbase in zip(row[3], row[4]):
yield [row[0], position, refbase, altbase]
position += 1
Ref_tup = collections.namedtuple('Ref_tup', ['reference', 'alternate'])
# Dictionary containing the coordinates and the ref and alt bases from the reference vcf,
# the known SNPs file for filtering.
ref_dict = {(row[0], row[1]): Ref_tup(row[2], row[3]) for row in
get_single_base_positions(ref_reader)}
with open(orig_f, 'rb') as of, open(new_f, 'rb') as nf:
fields = ('CHROM', 'POS', 'REF', 'ALT', 'RD', 'AD',
'gene_id', 'exon_number', 'gene_name')
oreader = csv.DictReader(of, fields, delimiter='\t')
nreader = csv.DictReader(nf, fields, delimiter='\t')
orig_row_holder = {(row['CHROM'], row['POS']): row for row in oreader}
new_row_holder = {(row['CHROM'], row['POS']): row for row in nreader}
variants = []
for coord in orig_row_holder:
if coord in new_row_holder:
v = Variant(original=orig_row_holder[coord],
new=new_row_holder[coord],
refdict=ref_dict)
variants.append(v)
with open(output_f, 'wb') as fout:
fields = ('CHROM', 'POS', 'REF', 'ALT', 'R_Depth', 'A_Depth')
writer = csv.DictWriter(fout, fields, delimiter='\t', lineterminator='\n')
writer.writerow({field: field for field in fields})
for count, var in enumerate(variants, 1):
if count % 10000 == 0:
print 'rows examined:', count
newrow = var.get_new_row(orig_bam, new_bam)
if newrow:
writer.writerow(newrow)
# orig_f = '/scratch/Drew/testdir/original/16_A12_pUn_down/16_A12_pUn_down_INTER_py.csv'
# new_f = '/scratch/Drew/testdir/alternate/16_A12_pUn_down/16_A12_pUn_down_INTER_py.csv'
# output_f = 'snpstest.csv'
# orig_bam = '/scratch/Drew/testdir/original/16_A12_pUn_down/16_A12_pUn_down_thout/filter.bam'
# alt_bam = '/scratch/Drew/testdir/alternate/16_A12_pUn_down/16_A12_pUn_down_thout/filter.bam'
# combine_SNPs(orig_f, new_f, orig_bam, alt_bam, output_f)
def quick_mean_propR(input_f):
with open(input_f, 'rb') as f:
reader = csv.DictReader(f, delimiter='\t')
propRs = []
for i, row in enumerate(reader, 1):
ref_depth = float(row['R_Depth'])
alt_depth = float(row['A_Depth'])
if ref_depth != 0 and alt_depth != 0:
propR = ref_depth/(ref_depth + alt_depth)
propRs.append(propR)
mean_propR = sum(propRs)/len(propRs)
return (mean_propR, i)
# quick_mean_propR('snps.vcf')
# i = 0
# printcount = 0
# while True and printcount < 2:
# row = variants[i].get_new_row(orig_bam, new_bam)
# if row:
# print variants[i]
# print row
# print i
# printcount += 1
# i += 1
# sys.exit(0)
|
# coding=utf-8
from requests import Response
from monitorrent.plugins.status import Status
from monitorrent.plugins.trackers import TrackerSettings
from monitorrent.plugins.trackers.rutor import RutorOrgPlugin, RutorOrgTopic
from tests import use_vcr, DbTestCase
class RutorTrackerPluginTest(DbTestCase):
def setUp(self):
self.tracker_settings = TrackerSettings(10, None)
def test_can_parse_url(self):
tracker = RutorOrgPlugin()
tracker.tracker_settings = self.tracker_settings
self.assertTrue(tracker.can_parse_url('http://rutor.info/torrent/442959'))
self.assertTrue(tracker.can_parse_url('http://www.rutor.info/torrent/442959'))
self.assertTrue(tracker.can_parse_url('http://d.rutor.info/torrent/442959'))
@use_vcr
def test_parse_url(self):
plugin = RutorOrgPlugin()
plugin.init(self.tracker_settings)
original_name = u'Время приключений с Финном и Джейком / Adventure Time with Finn & Jake [S01-06] (2010-2015) WEB-DL 720p | Cartoon Network, Зебуро'
urls = ['http://rutor.info/torrent/466037',
'http://www.rutor.info/torrent/466037']
for url in urls:
result = plugin.parse_url(url)
self.assertIsNotNone(result, 'Can\'t parse url={}'.format(url))
self.assertTrue('original_name' in result, 'Can\'t find original_name for url={}'.format(url))
self.assertEqual(original_name, result['original_name'])
def test_parse_url_with_full_cover(self):
plugin = RutorOrgPlugin()
plugin.init(self.tracker_settings)
urls = ['http://www.notrutor.info/torrent/442959',
'http://www.rutor.info/not-match-url/442959',
'http://rutor.info/search/']
for url in urls:
self.assertIsNone(plugin.parse_url(url))
def test_prepare_request(self):
plugin = RutorOrgPlugin()
plugin.init(self.tracker_settings)
urls = ['http://rutor.info/torrent/442959',
'http://www.rutor.info/torrent/442959',
'http://rutor.info/torrent/442959/rjej-donovan_ray-donovan-03h01-04-iz-12-2015-hdtvrip-720r-newstudio',
'http://www.rutor.info/torrent/442959/rjej-donovan_ray-donovan-03h01-04-iz-12-2015-hdtvrip-720r-newstud']
for url in urls:
topic = RutorOrgTopic(url=url)
self.assertEqual('http://rutor.info/download/442959', plugin._prepare_request(topic))
def test_check_download(self):
plugin = RutorOrgPlugin()
plugin.init(self.tracker_settings)
response = Response()
response.status_code = 200
response.headers['Content-Type'] = 'application/bittorrent'
self.assertEqual(plugin.check_download(response), Status.Ok)
response = Response()
response.status_code = 200
response.url = 'http://rutor.info/d.php'
self.assertEqual(plugin.check_download(response), Status.NotFound)
response = Response()
response.status_code = 500
response.url = 'http://rutor.info/d.php'
self.assertEqual(plugin.check_download(response), Status.Error)
|
#! /usr/bin/env python
"""
This script allows for the search of Sentinel-1 data on scihub.
Based on some search parameters the script will create a query on
www.scihub.copernicus.eu and return the results either as shapefile,
sqlite, or PostGreSQL database.
"""
# import modules
import getpass
import os
import logging
try:
import ogr
except ModuleNotFoundError:
# fall back to the GDAL-namespaced package; if this import also fails the
# ModuleNotFoundError will propagate on its own
from osgeo import ogr
import psycopg2 as pg
from ost.helpers.vector import get_proj4, reproject_geometry
logger = logging.getLogger(__name__)
# see if the pg-file is there
def pgHandler(dbConnectFile = '{}/.phiSAR/pgdb'.format(os.getenv("HOME"))):
"""
This function connects to an existing PostGreSQL database,
with the access parameters stored in the dbConnectFile as follows:
"database name"
"database user"
"database password"
"database host"
"database port"
:param dbConnectFile: path to the connect file
:return: the psycopg2 database connection object
"""
try:
f = open(dbConnectFile)
except (FileNotFoundError, IOError):
logger.info('ERROR: No PostGreSQL connection established. Make sure to configure a connection to phiSAR.')
raise
# read out dbname, username
lines = f.read().splitlines()
dbname = lines[0]
uname = lines[1]
pwDb = lines[2]
host = lines[3]
port = lines[4]
logger.info('Connecting to PostGreSQL database: {}'.format(dbname))
dbConnect = pgConnect(uname, pwDb, dbname, host, port)
return dbConnect
class pgConnect:
def __init__(self, uname=None, pword=None, dbname='sat', host='localhost', port='5432'):
"""
Establish a connection to the Scihub-catalogue db
"""
# ask for the username and password in case they were not passed in as arguments
if uname is None:
uname = input(' Your PostGreSQL database username:')
if pword is None:
pword = getpass.getpass(' Your PostGreSQL database password:')
# try connecting
try:
self.connection = pg.connect(
dbname=dbname, user=uname, host=host, password=pword, port=port)
self.connection.autocommit = True
self.cursor = self.connection.cursor()
except pg.Error as error:
logger.info('Cannot connect to database: {}'.format(error))
def pgCreateS1(self, tablename):
f_list = ('id serial PRIMARY KEY, identifier varchar(100), \
polarisation varchar(100), orbitdirection varchar(12), \
acquisitiondate date, relativeorbit smallint, \
orbitnumber integer, producttype varchar(4), \
slicenumber smallint, size varchar(12), \
beginposition timestamp, endposition timestamp, \
lastrelativeorbitnumber smallint, lastorbitnumber int, \
uuid varchar(40), platformidentifier varchar(10), \
missiondatatakeid integer, swathidentifer varchar(21), \
ingestiondate timestamp, sensoroperationalmode varchar(3), \
geometry geometry')
sql_cmd = 'CREATE TABLE {} ({})'.format(tablename, f_list)
self.cursor.execute(sql_cmd)
def pgGetUUID(self, sceneID, tablename):
sql_cmd = 'SELECT uuid FROM {} WHERE identifier = \'{}\''.format(tablename, sceneID)
self.cursor.execute(sql_cmd)
uuid = self.cursor.fetchall()[0][0]
return uuid
def pgDrop(self, tablename):
sql_cmd = 'DROP TABLE {}'.format(tablename)
self.cursor.execute(sql_cmd)
def pgInsert(self, tablename, values):
"""
This function inserts a table into the connected database object.
"""
sql_cmd = 'INSERT INTO {} VALUES {}'.format(tablename, values)
self.cursor.execute(sql_cmd)
def pgSQL(self, sql):
"""
This is a wrapper for a sql input that does get all responses.
"""
self.cursor.execute(sql)
return self.cursor.fetchall()
def pgSQLnoResp(self, sql):
"""
This is a wrapper for a sql input that does not get any response.
"""
self.cursor.execute(sql)
def shpGeom2pg(self, aoi, tablename):
"""
This function is a wrapper to import a shapefile geometry to a PostGreSQL database
"""
sqlCmd = 'DROP TABLE IF EXISTS {}'.format(tablename)
self.cursor.execute(sqlCmd)
fList = 'id smallint, geometry geometry'
sqlCmd = 'CREATE TABLE {} ({})'.format(tablename, fList)
self.cursor.execute(sqlCmd)
prjFile = '{}.prj'.format(aoi[:-4])
inProj4 = get_proj4(prjFile)
sf = ogr.Open(aoi)
layer = sf.GetLayer(0)
for i in range(layer.GetFeatureCount()):
feature = layer.GetFeature(i)
wkt = feature.GetGeometryRef().ExportToWkt()
if inProj4 != '+proj=longlat +datum=WGS84 +no_defs':
wkt = reproject_geometry(wkt, inProj4, 4326)
wkt = 'St_GeomFromText(\'{}\', 4326)'.format(wkt)
values = '(\'{}\', {})'.format(i, wkt)
sql_cmd = 'INSERT INTO {} VALUES {}'.format(tablename, values)
self.cursor.execute(sql_cmd)
def pgDateline(self, tablename, uuid):
"""
This function splits the acquisition footprint
into a geometry collection if it crosses the dateline
"""
# edited after https://www.mundialis.de/update-for-our-maps-mundialis-application-solves-dateline-wrap/
sql_cmd = 'UPDATE {} SET (geometry) = \
(SELECT \
ST_SetSRID( \
ST_CollectionExtract( \
ST_AsText( \
ST_Split( \
ST_ShiftLongitude(geometry), \
ST_SetSRID( \
ST_MakeLine( \
ST_MakePoint(180,-90), \
ST_MakePoint(180,90) \
), \
4326 \
) \
) \
), \
3 \
), \
4326 \
) geometry \
FROM {} \
WHERE uuid = \'{}\' \
) \
WHERE uuid = \'{}\' \
AND ( \
ST_Intersects( \
geometry, \
ST_SetSRID( \
ST_MakeLine( \
ST_MakePoint(-90,-90), \
ST_MakePoint(-90,90) \
), \
4326 \
) \
) \
AND \
ST_Intersects( \
geometry, \
ST_SetSRID( \
ST_MakeLine( \
ST_MakePoint(90,-90), \
ST_MakePoint(90,90) \
), \
4326 \
) \
) \
) \
AND \
geometry IS NOT NULL'.format(tablename, tablename, uuid, uuid)
self.cursor.execute(sql_cmd)
|
from django.shortcuts import render
from cowsay_app.models import message
from cowsay_app.form import addmessageForm
import subprocess
# Create your views here.
def index_view(request):
if request.method == 'POST':
form = addmessageForm(request.POST)
if form.is_valid():
data = form.cleaned_data
message.objects.create(text=data['text'])
messages = subprocess.check_output(['cowsay', data['text']], universal_newlines=True)
return render(request, 'index.html', {'messages': messages, 'form': form})
form = addmessageForm()
return render(request, 'index.html', {'form': form})
def history(request):
latest_messages = message.objects.all().order_by('-id')[:10]
return render(request, 'history.html', {'latest_messages':latest_messages})
|
from glob import glob
from pbr import util
from setuptools import setup
cfg = util.cfg_to_args()
cfg.update({
'data_files': [
('docs', glob('docs/*.rst'))
],
# 'pbr': True,
})
setup(
**cfg
)
|
#!/usr/bin/env python3
import os
import subprocess
from benchmark_main import maybe_via_docker
PWD = os.getcwd()
TESTDATA_ROOT = os.path.join(PWD, "test_data")
LONG_SBP = os.path.join(TESTDATA_ROOT, "benchmark.sbp")
CMD = ['./rust/bin/sbp2json']
subprocess.run(
maybe_via_docker(PWD, "haskell-sbp2json", CMD),
stdin=open(LONG_SBP, 'rb'),
stdout=subprocess.DEVNULL,
check=True)
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
"""
LICENSE MIT
2020
Guillaume Rozier
Website : http://www.covidtracker.fr
Mail : guillaume.rozier@telecomnancy.net
README:
This file contains scripts that download data from data.gouv.fr and then process it to build many graphs.
I'm currently cleaning the code; please ask me if something is not clear enough.
The charts are exported to 'charts/images/france'.
Data is downloaded to/imported from 'data/france'.
Requirements: please see the imports below (use pip3 to install them).
"""
# In[1]:
import pandas as pd
import plotly.graph_objects as go
import france_data_management as data
from datetime import datetime
from datetime import timedelta
from plotly.subplots import make_subplots
import plotly
import math
import os
import json
PATH = "../../"
PATH_STATS = "../../data/france/stats/"
import locale
locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
# In[ ]:
def import_df_age():
df = pd.read_csv(PATH+"data/france/vaccin/vacsi-a-fra.csv", sep=";")
return df
# In[10]:
df_new = pd.read_csv(PATH+"data/france/donnes-hospitalieres-covid19-nouveaux.csv", sep=";")
df_clage = pd.read_csv(PATH+"data/france/donnes-hospitalieres-clage-covid19.csv", sep=";")
# In[9]:
df_new_france = df_new.groupby("jour").sum()
df_new_france.sum()
# In[32]:
df_clage_france = df_clage.groupby(["jour", "cl_age90"]).sum().reset_index()
df_clage_france[df_clage_france.jour=="2021-04-12"]
# In[66]:
df = import_df_age()
df["n_dose1"] = df["n_dose1"].replace({",": ""}, regex=True).astype("int")
df = df.groupby(["clage_vacsi"]).sum()/100
df = df[1:]
df["n_dose1_pourcent"] = round(df.n_dose1/df.n_dose1.sum()*100, 1)
clage_vacsi = [24, 29, 39, 49, 59, 64, 69, 74, 79, 80]
nb_pop = [5236809, 3593713, 8034961, 8316050, 8494520, 3979481, 3801413, 3404034, 2165960, 4081928]
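# Note (added for clarity): clage_vacsi holds the upper bound of each Santé publique
# France age band (80 standing for "+ 80 ans", as labelled in the chart below), and
# nb_pop is assumed to hold the population count of each band, used to derive the
# vaccinated share pop_vac.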
df_age = pd.DataFrame()
df_age["clage_vacsi"]=clage_vacsi
df_age["nb_pop"]=nb_pop
df = df.merge(df_age, left_on="clage_vacsi", right_on="clage_vacsi")
df["pop_vac"] = df["n_dose1"]/df["nb_pop"]*100
df
# In[73]:
fig = go.Figure()
fig.add_trace(go.Bar(
x=[str(age) + " ans" for age in df.clage_vacsi[:-1]]+["+ 80 ans"],
y=df.pop_vac,
text=[str(round(prct, 2)) + " %" for prct in df.pop_vac],
textposition='auto',))
fig.update_layout(
title={
'text': "% de population ayant reçu au moins 1 dose de vaccin",
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont = dict(
size=20),
annotations = [
dict(
x=0,
y=1.07,
xref='paper',
yref='paper',
font=dict(size=14),
text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime("2021-01-27", '%Y-%m-%d').strftime('%d %b')),
showarrow = False
),
]
)
fig.update_yaxes(range=[0, 100])
fig.show()
# In[63]:
fig = go.Figure()
fig.add_trace(go.Pie(
labels=[str(age) + " ans" for age in df.index[:-1]]+["+ 80 ans"],
values=df.n_dose1_pourcent,
text=[str(prct) + "" for prct in df.n_dose1],
textposition='auto',))
fig.update_layout(
title={
'text': "Nombre de vaccinés par tranche d'âge",
'y':0.95,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont = dict(
size=20),
annotations = [
dict(
x=0,
y=1.07,
xref='paper',
yref='paper',
font=dict(size=14),
text='{}. Données : Santé publique France. Auteur : <b>@GuillaumeRozier - covidtracker.fr.</b>'.format(datetime.strptime("2021-01-27", '%Y-%m-%d').strftime('%d %b')),
showarrow = False
),
]
)
fig.show()
# In[6]:
#locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')
import random
import numpy as np
n_sain = 20000
x_sain = np.random.rand(1, n_sain)[0]*100
values_sain = np.random.rand(1, n_sain)[0]*100
x_az = np.random.rand(1,30)[0]*100
values_az = np.random.rand(1,30)[0]*100
fig = go.Figure()
for idx in range(len(x_sain)):
fig.add_trace(go.Scatter(
x=[x_sain[idx]],
y=[values_sain[idx]],
mode="markers",
showlegend=False,
marker_color="rgba(14, 201, 4, 0.5)", #"rgba(0, 0, 0, 0.5)",
marker_size=2))
fig.add_trace(go.Scatter(
x=x_az,
y=values_az,
mode="markers",
showlegend=False,
marker_color="rgba(201, 4, 4,0.5)", #"rgba(0, 0, 0, 0.5)",
marker_size=2))
fig.update_yaxes(range=[0, 100], visible=False)
fig.update_xaxes(range=[0, 100], nticks=10)
fig.update_layout(
plot_bgcolor='rgb(255,255,255)',
title={
'text': "Admissions en réanimation pour Covid19",
'y':0.90,
'x':0.5,
'xanchor': 'center',
'yanchor': 'top'},
titlefont = dict(
size=20),
annotations = [
dict(
x=0.5,
y=1.2,
xref='paper',
yref='paper',
text='Auteur : covidtracker.fr.'.format(),
showarrow = False
)]
)
fig.write_image(PATH + "images/charts/france/points_astrazeneca.jpeg", scale=4, width=800, height=350)
# In[18]:
import numpy as np
np.random.rand(1,20000000)
|
from half_tones.check_range import *
def FloydSteinberg(img, error: int, x, y, z):
if checkRange(x + 1, y, img):
img[x+1][y][z] = img[x+1][y][z] + (7/16) * error
if checkRange(x, y + 1, img):
img[x][y+1][z] = img[x][y+1][z] + (5/16) * error
if checkRange(x + 1, y + 1, img):
img[x+1][y+1][z] = img[x+1][y+1][z] + (1/16) * error
if checkRange(x - 1, y + 1, img):
img[x-1][y+1][z] = img[x-1][y+1][z] + (3/16) * error
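# The four weights above form the classic Floyd-Steinberg error-diffusion kernel
# (7/16 right, 3/16 below-left, 5/16 below, 1/16 below-right; they sum to 1).
# Minimal usage sketch, assuming img is a float image array indexed as
# img[x][y][channel] and error is the quantisation error of pixel (x, y):
#   import numpy as np
#   img = np.full((4, 4, 1), 120.0)
#   FloydSteinberg(img, error=30, x=1, y=1, z=0)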
|
from jackutil.containerutil import containerChecksum,featuresFromContainer,projectContainer
from jackutil.configuration import configuration
from jackutil.microfunc import shortnames,rename_columns
from tqdm.auto import tqdm
from backtest import tradesim_store
from backtest.tradesim_util import build_simulator,account_profit_summary,summary_extractor,feature_extractor
import pandas as pd
import numpy as np
from pprint import pprint
def main():
# -----------------------------------------------------------------------------
import demo1_cfg as cfg
store = tradesim_store.TradesimStore("pickle_jar")
# -----------------------------------------------------------------------------
# --
# -- run a single configuration set (results are cached in the pickle_jar store)
# --
delta = cfg.test1
basespec = projectContainer(cfg.basespec,cfg.n100spec)
cfg_acc_pairs = runBacktestsWithCache(basespec=basespec,delta=delta,cache=store)
features = set( featuresFromContainer(delta) )
summary = summary_extractor(
cfg_acc_pairs=cfg_acc_pairs,
cfg_extractor=feature_extractor(features),
acc_extractor=account_profit_summary,
)
colnames = shortnames(*features)+['profit']
summary = rename_columns(summary,colnames)
pprint(summary)
def runBacktestsWithCache(*,basespec,delta,cache,loadCache=True):
all_rtcfg = configuration(basespec=basespec,variations=delta).all_configurations()
result = []
for rtcfg in tqdm(all_rtcfg,leave=None,desc='rtcfg'):
(account,d0,universe,simulator) = runBacktestWithCache(rtspec=rtcfg,cache=cache,loadCache=loadCache)
result.append( (rtcfg,account) )
return result
def runBacktestWithCache(*,rtspec,cache,loadCache=True):
has = np.array( cache.has(rtspec=rtspec) )
has = has[has !=None]
if(has.all()):
print('.', end="")
if(loadCache):
return cache.load(rtspec=rtspec)[0:3]+(None,)
else:
return (None,None,None,None)
# --
(account,d0,universe,simulator) = runBacktest(rtcfg=rtspec)
cache.store(rtspec,account=account,d0=d0,universe=universe)
return (account,d0,universe,simulator)
def runBacktest(rtcfg):
simulator = build_simulator(rtcfg)
(account,d0,universe) = simulator.runBacktest()
return (account,d0,universe,simulator)
# --
# -- ======================================
# --
print(__name__)
if(__name__=="__main__"):
main()
|
import csv
import datetime
from typing import List
from typing import Any
from typing import Dict
import json
import os
import urllib
import urllib.request
import matplotlib
matplotlib.use('TkAgg')  # select the backend before pyplot is imported so it takes effect
import matplotlib.pyplot as plt
from InstagramAPI import InstagramAPI
from .instagram.instagram_key import InstagramKey
from .api_interface import ApiInterface
# Precision to truncate on a datetime object, down to the minute
DATETIME_MINUTE_PRECISION = 16
class _InstagramUser:
""" Stores a user defined by the InstagramAPI user JSON """
def __init__(self, user: Dict[str, Any]) -> None:
self.uid = int(user['pk'])
self.username = str(user['username'])
self.full_name = str(user['full_name'])
self.profile_pic_url = str(user['profile_pic_url'])
self.is_private = bool(user['is_private'])
self.is_verified = bool(user['is_verified'])
self.is_anon = bool(user['has_anonymous_profile_picture'])
class Instagram(ApiInterface):
""" Wrapper for accessing the instagram API """
def __init__(self) -> None:
# Store keys and api info
self.keys = InstagramKey()
self.api = InstagramAPI(self.keys.username, self.keys.password)
self.api.login()
# Store the authenticated user's Instagram UID
self.uid = self.api.username_id
# Memoize follower and following information for the authenticated user
self.followers: List[_InstagramUser] = self._user_follower_info()
self.followings: List[_InstagramUser] = self._user_following_info()
# Specify the output graphfile for follower/time graphing
self.graphfile = os.path.join('postr', 'instagram', 'instagram_graphing.csv')
if not os.path.isfile(self.graphfile):
self.setup_csv()
def post_text(self, text: str) -> bool:
""" Not an operation that this platform has. """
return False
def post_video(self, url: str, text: str) -> bool:
""" Not an operations that the Instagram API allows. """
return False
def post_photo(self, url: str, text: str) -> bool:
self.api.uploadPhoto(photo=url, caption=text)
return False
def get_user_likes(self) -> int:
""" Not supported by the API """
return -1
def get_user_followers(self, text: str) -> List[str]:
""" Gets the names of all users followers """
# Get all follower information
followers: List[_InstagramUser] = self._user_follower_info()
# Convert each follower to just their name
names: List[str] = list([x.username for x in followers])
return names
def remove_post(self, post_id: str) -> bool:
""" Removes a post, prints an exception if the post doesn't exist """
try:
self.api.deleteMedia(mediaId=post_id)
return True
except BaseException as e:
print('Error on data %s' % str(e))
return False
def refresh(self) -> None:
""" Updates the stored contents for a user's followers and followings """
self.followers = self._user_follower_info()
self.followings = self._user_following_info()
@staticmethod
def direct_share(media_id: str, recipients: List[int], message: str = '') -> None:
"""
Shares media to a list of recipients via a direct message
mediaID: The id of the media to share
recipients: A list of the user ids to share media with
message: The message to go along with the media share
"""
InstagramAPI.direct_share(media_id, recipients, message)
def spam_follower_ratio(self, uid: int = 0) -> float:
""" Determines the ratio of spam followers on a given user.
Assumption: A spam account is an account with a default profile
picture, as well as a 10x or greater following/follower ratio """
# If no uid was specified, use the authenticated user's uid
if uid == 0:
uid = self.uid
# Get the followers for the given uid
followers: List[_InstagramUser] = self._user_follower_info(uid)
# Keep only the followers that have a default (anonymous) profile picture,
# matching the spam definition in the docstring above
default_profile_followers = list([x for x in followers if x.is_anon])
# Filter the followers again based on if the remaining are likely to be spam accounts
spam_default_profiles = list([x for x in default_profile_followers if self._has_following_ratio_of(x, 10)])
return len(spam_default_profiles) / len(followers)
def username_to_id(self, username: str) -> int:
"""
Converts a username to its associated id
Unfortunately this isn't built in from the InstagramAPI (they wanted to decrease bot usage)
so I had to build this myself.
This function has a small chance of error, as documented in the _username_to_profile() function
"""
profile_json = self._username_to_profile(username)
user = Instagram._profile_to_InstagramUser(profile_json)
return user.uid
def follow_by_id(self, uid: int) -> None:
""" Follows a user based off of their uid """
self.api.follow(uid)
def unsafe_follow_by_username(self, username: str) -> None:
"""
Follows a user based off their username
See the _username_to_profile() function for correctness concerns
"""
uid = self.username_to_id(username)
self.api.follow(uid)
def block_by_id(self, uid: int) -> None:
""" Blocks a user based off their uid """
self.api.block(uid)
def unsafe_block_by_username(self, username: str) -> None:
"""
Blocks a user based off their username
See the _username_to_profile() function for correctness concerns
"""
uid = self.username_to_id(username)  # use this class's lookup; the underlying API has no such helper
self.api.block(uid)
def setup_csv(self) -> None:
""" Initializes a csv file for the time series graphing """
csvData = ['Followers', 'Time']
# Create our CSV file header
with open(self.graphfile, 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(csvData)
csvFile.close()
def log_followers(self) -> None:
""" Logs follower information to the graph file """
with open(self.graphfile, 'a') as gf:
writer = csv.writer(gf)
follower_count = len(self.get_user_followers(''))
date = datetime.datetime.now()
# Append the current date and follower count to the file
writer.writerow([date, follower_count])
gf.close()
@staticmethod
def _read_csv_col(colNum: int, filename: str) -> List[str]:
""" Reads a specific column by index in the graph csv"""
col = []
with open(filename, 'r') as rf:
reader = csv.reader(rf, delimiter=',')
for row in reader:
col.append(str(row[colNum]))
return col[1::] # Ignore the csv header
def graph_followers(self) -> None:
""" Graphs a blob file for twitter sentiment """
dates = Instagram._read_csv_col(0, self.graphfile)
# Truncate the datetime object to the minute precision
dates = [d[:DATETIME_MINUTE_PRECISION] for d in dates]
followers = [int(f) for f in Instagram._read_csv_col(1, self.graphfile)]
# Get the global maximum follower value and its index
max_val = max(followers)
max_index = followers.index(max_val)
# Plot followers vs. time
plt.plot(
dates,
followers,
)
plt.ylabel('Follower count')
plt.xlabel('Time')
# Annotate the plot with the global max
plt.annotate(
'Absolute max', xy=(max_index, max_val),
xytext=(max_index, max_val + 1), arrowprops=dict(facecolor='black', shrink=0.05),
)
# beautify the x-labels
plt.gcf().autofmt_xdate()
# Set our y-range to be the max value plus a few more, to show the annotation
plt.ylim(-1, max_val + 3)
plt.show()
@staticmethod
def _profile_to_InstagramUser(profile: Dict[str, Any]) -> _InstagramUser:
""" Given a user profile JSON, builds an InstagramUser """
# Navigate to the user JSON that is coincidentally used by the provided API methods
user = profile['users'][0]['user']
# Simply build our InstagramUser, as the user JSON is the same
return _InstagramUser(user)
def _username_to_profile(self, username: str) -> Dict[str, Any]:
"""
Creates a json out of a user's profile info given their username
If the username contains any special characters, or just by random chance, Instagram
will not return the correct user. Instead, it seems to return any user whose name is
relatively similar to the given username. Is this a fuzzy matching error?
I'm not the first to discover this flaw.
https://stackoverflow.com/a/13586797
Hopefully Instagram fixes this flaw.
"""
base_url = self.keys.pre_profile + username + self.keys.rank_token + self.keys.post_profile
# Build the page source url for the given user's account
con = urllib.request.urlopen(base_url)
user_profile = con.read().decode('utf-8')
# Convert the webpage to a profile JSON
profile: dict = json.loads(str(user_profile))
return profile
def _has_following_ratio_of(self, user: _InstagramUser, ratio: float) -> bool:
""" Determines if a user has a following/follower ratio greater than a threshold """
follower_count = len(self._user_follower_info(uid=user.uid))
following_count = len(self._user_following_info(uid=user.uid))
if follower_count == 0:
return True
return (following_count / follower_count) > ratio
def _user_follower_info(self, uid: int = 0) -> List[_InstagramUser]:
"""
Gets info about followers
rtype: List of JSON representing users
"""
# If no uid was specified, use the authenticated user's uid
if uid == 0:
uid = self.uid
followers: List[Dict[str, Any]] = self.api.getTotalFollowers(uid)
user_followers = list([_InstagramUser(x) for x in followers])
return user_followers
def _user_following_info(self, uid: int = 0) -> List[_InstagramUser]:
"""
Gets info about followings
rtype: List of JSON representing users
"""
# If no uid was specified, use the authenticated user's uid
if uid == 0:
uid = self.uid
followings: List[Dict[str, Any]] = self.api.getTotalFollowings(uid)
user_followings = list([_InstagramUser(x) for x in followings])
return user_followings
def example_graphing(self) -> None:
""" Example method demonstrating graphing """
# Log the current amount of followers to our history of followers
self.log_followers()
# Graphs all followers / time
self.graph_followers()
|
#! /usr/bin/env python3
a = 1
if a == 1:
pass
else:
print("Hello")
|
"""
Module: 'inisetup' on esp8266 v1.9.3
"""
# MCU: (sysname='esp8266', nodename='esp8266', release='2.0.0(5a875ba)', version='v1.9.3-8-g63826ac5c on 2017-11-01', machine='ESP module with ESP8266')
# Stubber: 1.1.2 - updated
from typing import Any
bdev = None
def check_bootsec():
pass
def fs_corrupted():
pass
network = None
def setup():
pass
uos = None
def wifi():
pass
|
#!/usr/bin/env python
import os
import sys
for i in xrange(1,len(sys.argv)):
tmp1 = sys.argv[i].split('s')
tmp2 = tmp1[1].split('.')
out = tmp2[0] + '.dat'
print sys.argv[i],out
command = 'mv '+sys.argv[i]+' '+out
os.system(command)
|
from titan.react_view_pkg import accountmenu
from . import accountmenu, formsmodule, formview, router, router_and_module, view
modules = [
accountmenu,
formsmodule,
formview,
router,
router_and_module,
view,
]
|
import unittest
from unittest.mock import patch, MagicMock
from botocore.exceptions import UnknownServiceError
from mypy_boto3_builder.parsers.shape_parser import ShapeParser
# pylint: disable=protected-access
class ShapeParserTestCase(unittest.TestCase):
def test_init(self) -> None:
session_mock = MagicMock()
service_name_mock = MagicMock()
shape_parser = ShapeParser(session_mock, service_name_mock)
self.assertEqual(shape_parser.service_name, service_name_mock)
session_mock._loader.load_service_model.side_effect = UnknownServiceError(
service_name="service_name", known_service_names="known_service_names",
)
ShapeParser(session_mock, service_name_mock)
def test_get_paginator_names(self) -> None:
session_mock = MagicMock()
service_name_mock = MagicMock()
session_mock._loader.load_service_model.return_value = {
"pagination": ["c", "a", "b"]
}
shape_parser = ShapeParser(session_mock, service_name_mock)
self.assertEqual(shape_parser.get_paginator_names(), ["a", "b", "c"])
session_mock._loader.load_service_model.return_value = {
"paginations": ["c", "a", "b"]
}
shape_parser = ShapeParser(session_mock, service_name_mock)
self.assertEqual(shape_parser.get_paginator_names(), [])
@patch("mypy_boto3_builder.parsers.shape_parser.ServiceModel")
def test_get_client_method_map(self, ServiceModelMock: MagicMock) -> None:
session_mock = MagicMock()
service_name_mock = MagicMock()
ServiceModelMock().operation_names = ["my_operation"]
session_mock._loader.load_service_model.return_value = {
"resources": ["c", "a", "b"]
}
shape_parser = ShapeParser(session_mock, service_name_mock)
result = shape_parser.get_client_method_map()
self.assertIn("can_paginate", result)
self.assertIn("generate_presigned_url", result)
@patch("mypy_boto3_builder.parsers.shape_parser.ServiceModel")
def test_get_paginate_method(self, ServiceModelMock: MagicMock) -> None:
session_mock = MagicMock()
service_name_mock = MagicMock()
operation_model_mock = MagicMock()
required_arg_shape_mock = MagicMock()
optional_arg_shape_mock = MagicMock()
operation_model_mock.input_shape.members.items.return_value = [
("required_arg", required_arg_shape_mock,),
("optional_arg", optional_arg_shape_mock,),
("InputToken", optional_arg_shape_mock,),
("skip_arg", optional_arg_shape_mock,),
]
ServiceModelMock().operation_names = ["my_paginator"]
ServiceModelMock().operation_model.return_value = operation_model_mock
session_mock._loader.load_service_model.return_value = {
"pagination": {
"my_paginator": {"input_token": "InputToken", "limit_key": "skip_arg"}
},
"resources": [],
}
shape_parser = ShapeParser(session_mock, service_name_mock)
result = shape_parser.get_paginate_method("my_paginator")
self.assertEqual(result.name, "paginate")
self.assertEqual(len(result.arguments), 4)
self.assertEqual(result.arguments[0].name, "self")
self.assertEqual(result.arguments[1].name, "required_arg")
self.assertEqual(result.arguments[2].name, "optional_arg")
self.assertEqual(result.arguments[3].name, "PaginationConfig")
@patch("mypy_boto3_builder.parsers.shape_parser.ServiceModel")
def test_get_collection_filter_method(self, ServiceModelMock: MagicMock) -> None:
session_mock = MagicMock()
service_name_mock = MagicMock()
operation_model_mock = MagicMock()
required_arg_shape_mock = MagicMock()
optional_arg_shape_mock = MagicMock()
operation_model_mock.input_shape.required_members = ["required_arg"]
operation_model_mock.input_shape.members.items.return_value = [
("required_arg", required_arg_shape_mock,),
("optional_arg", optional_arg_shape_mock,),
("InputToken", optional_arg_shape_mock,),
]
ServiceModelMock().operation_names = ["my_operation"]
ServiceModelMock().operation_model.return_value = operation_model_mock
collection_mock = MagicMock()
collection_mock.request.operation = "my_operation"
shape_parser = ShapeParser(session_mock, service_name_mock)
result = shape_parser.get_collection_filter_method(
"MyCollection", collection_mock
)
self.assertEqual(result.name, "filter")
self.assertEqual(len(result.decorators), 1)
self.assertEqual(len(result.arguments), 3)
self.assertEqual(result.arguments[0].name, "cls")
self.assertEqual(result.arguments[1].name, "optional_arg")
self.assertEqual(result.arguments[2].name, "InputToken")
|
# coding: utf-8
from __future__ import division, print_function
import os, sys
import tensorflow as tf
import time
import cv2
import numpy as np
from utils import plot_one_box
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
color_table = [[0, 255, 0], [255, 0, 0], [0, 0, 255]]
classes = ['face', 'mask', 'glasses']
def letterbox_resize(img, new_width, new_height, interp=0):
'''
Letterbox resize. keep the original aspect ratio in the resized image.
'''
ori_height, ori_width = img.shape[:2]
resize_ratio = min(new_width / ori_width, new_height / ori_height)
resize_w = int(resize_ratio * ori_width)
resize_h = int(resize_ratio * ori_height)
img = cv2.resize(img, (resize_w, resize_h), interpolation=interp)
image_padded = np.full((new_height, new_width, 3), 128, np.uint8)
dw = int((new_width - resize_w) / 2)
dh = int((new_height - resize_h) / 2)
image_padded[dh: resize_h + dh, dw: resize_w + dw, :] = img
return image_padded, resize_ratio, dw, dh
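# Worked example (illustrative): resizing a 1280x720 frame into a 416x416 canvas
# gives resize_ratio = min(416/1280, 416/720) = 0.325, so the frame becomes 416x234
# and is centred with dw = 0 and dh = 91 rows of grey (value 128) padding above and
# below.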
def get_img_list(img_path, exts=['jpg', 'png', 'jpeg', 'JPG']):
img_list = os.listdir(img_path)
new_list = []
for img_name in img_list:
for ext in exts:
if img_name.endswith(ext):
new_list.append(img_name)
break
return new_list
def inference(img_dir, out_dir):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
with tf.gfile.FastGFile('./sur/sur0228/yolov3_tiny_sur.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')#
sess.run(tf.global_variables_initializer())
input_data = sess.graph.get_tensor_by_name('input_data:0')
labels = sess.graph.get_tensor_by_name('labels:0')
scores = sess.graph.get_tensor_by_name('scores:0')
boxes = sess.graph.get_tensor_by_name('boxes:0')
img_names = os.listdir(img_dir)
count = 0
times = []
area_thresh = 0.6
for img_name in img_names:
img_ori = cv2.imread(os.path.join(img_dir, img_name))
#img_ori = cv2.imread(img_name)
start = time.time()
img, resize_ratio, dw, dh = letterbox_resize(img_ori, 416, 416)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
"img = img[np.newaxis, :]"
boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})
print(20 * "--", boxes_, labels_ )
boxes_[:, [0, 2]] = (boxes_[:, [0, 2]] - dw) / resize_ratio
boxes_[:, [1, 3]] = (boxes_[:, [1, 3]] - dh) / resize_ratio
"----------------------------------------------------------"
labels_ = np.reshape(labels_, [-1, 1])
scores_ = np.reshape(scores_, [-1, 1])
result = np.concatenate([boxes_, scores_, labels_], axis=1)
#result = tf.concat([boxes_, scores_, labels_], axis=-1)
print(20 * "--", result)
"split result to two matrix, glasses and face "
mask_glasses = np.equal(result[:, 5], 2)
mask_face = np.not_equal(result[:, 5], 2)
result_glasses = result[mask_glasses]
result_face = result[mask_face]
boxes_glasses, scores_glasses, label_glasses = result_glasses[:, 0:4], result_glasses[:, 4:5], result_glasses[:, 5:6]
boxes_face, scores_face, label_face = result_face[:, 0:4], result_face[:, 4:5], result_face[:, 5:6]
# print(20 * "--", label_face.get_shape().as_list())
new_label = np.zeros((len(label_face), 1))
label_face = np.c_[label_face, new_label]
print(20 * "-*-", label_face)
if label_glasses.shape[0] != 0:
x1, y1, x2, y2 = boxes_glasses[:, 0], boxes_glasses[:, 1], boxes_glasses[:, 2], boxes_glasses[:, 3]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
"for each glasses, caculate iou with face or face_mask, and label on face"
for i in range(scores_glasses.shape[0]):
xx1 = np.maximum(x1[i], boxes_face[:, 0])
yy1 = np.maximum(y1[i], boxes_face[:, 1])
xx2 = np.minimum(x2[i], boxes_face[:, 2])
yy2 = np.minimum(y2[i], boxes_face[:, 3])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / areas[i]
inds = np.where(ovr >= area_thresh)
print(20 * "--*", inds)
label_face[inds, 1] = 1
#label_face[inds] = 1
print(20 * "--*", boxes_face, scores_face, label_face)
end = time.time()
print(20 * "--", img_name)
for i in range(len(boxes_face)):
x0, y0, x1, y1 = boxes_face[i]
if label_face[i][1] == 0:
print("no glasses")
else:
print("glasses")
# plot_one_box(img_ori, [x0, y0, x1, y1], label=classes[label_face[i][0]]+',{:.2f}'.format(scores_face[i]), color=color_table[label_face[i][0]])
img_name = os.path.basename(img_name)
cv2.imwrite(os.path.join(out_dir, img_name), img_ori)
count += 1
print('No.{}, img:{}, time:{:.4f}'.format(count, img_name, end-start))
if count > 1:
times.append(end-start)
print('Total:{}, avg time:{:.4f}'.format(count, np.mean(times)))
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
img_dir = './data/test'
out_dir = './data/test_out'
inference(img_dir, out_dir)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayPcreditHuabeiSceneprodBenefitSendResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditHuabeiSceneprodBenefitSendResponse, self).__init__()
self._retry = None
self._send_id = None
self._status = None
@property
def retry(self):
return self._retry
@retry.setter
def retry(self, value):
self._retry = value
@property
def send_id(self):
return self._send_id
@send_id.setter
def send_id(self, value):
self._send_id = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def parse_response_content(self, response_content):
response = super(AlipayPcreditHuabeiSceneprodBenefitSendResponse, self).parse_response_content(response_content)
if 'retry' in response:
self.retry = response['retry']
if 'send_id' in response:
self.send_id = response['send_id']
if 'status' in response:
self.status = response['status']
|
"""Test cases for the aioswitcher.schedules module."""
# fmt: off
from asyncio import AbstractEventLoop, wait
from binascii import unhexlify
from pytest import fail, mark, raises
from aioswitcher.consts import (HANDLED_EXCEPTIONS,
SCHEDULE_DUE_ANOTHER_DAY_FORMAT,
SCHEDULE_DUE_TODAY_FORMAT,
SCHEDULE_DUE_TOMMOROW_FORMAT)
from aioswitcher.schedules import (SwitcherV2Schedule,
calc_next_run_for_schedule)
from .asserters import assert_lists_equal
from .consts import (DUMMY_FULL_RECCURING_DAYS_LIST,
DUMMY_FULL_RECCURING_DURATION,
DUMMY_FULL_RECCURING_END_TIME,
DUMMY_FULL_RECCURING_SCHEDULE_DATA,
DUMMY_FULL_RECCURING_SCHEDULE_DATA_BYTES,
DUMMY_FULL_RECCURING_SCHEDULE_ID,
DUMMY_FULL_RECCURING_START_TIME,
DUMMY_NON_RECCURING_DAYS_LIST,
DUMMY_NON_RECCURING_DURATION,
DUMMY_NON_RECCURING_END_TIME,
DUMMY_NON_RECCURING_SCHEDULE_DATA,
DUMMY_NON_RECCURING_SCHEDULE_DATA_BYTES,
DUMMY_NON_RECCURING_SCHEDULE_ID,
DUMMY_NON_RECCURING_START_TIME,
DUMMY_SELECTIVE_RECCURING_DAYS_LIST,
DUMMY_SELECTIVE_RECCURING_DURATION,
DUMMY_SELECTIVE_RECCURING_END_TIME,
DUMMY_SELECTIVE_RECCURING_SCHEDULE_DATA,
DUMMY_SELECTIVE_RECCURING_SCHEDULE_DATA_BYTES,
DUMMY_SELECTIVE_RECCURING_SCHEDULE_ID,
DUMMY_SELECTIVE_RECCURING_START_TIME)
# fmt: on
@mark.asyncio
async def test_recurring_schedule_next_runtime_tommorow(
event_loop: AbstractEventLoop,
recurring_tommorow_schedule: SwitcherV2Schedule,
) -> None:
"""Test the calc_next_run_for_schedule tool."""
try:
result = await calc_next_run_for_schedule(
event_loop, recurring_tommorow_schedule
)
assert (
SCHEDULE_DUE_TOMMOROW_FORMAT.format(
recurring_tommorow_schedule.start_time
)
== result
)
except HANDLED_EXCEPTIONS as exc:
fail(exc)
@mark.asyncio
async def test_recurring_schedule_next_runtime_another_day(
event_loop: AbstractEventLoop,
recurring_another_day_schedule: SwitcherV2Schedule,
) -> None:
"""Test the calc_next_run_for_schedule tool."""
try:
result = await calc_next_run_for_schedule(
event_loop, recurring_another_day_schedule
)
assert (
SCHEDULE_DUE_ANOTHER_DAY_FORMAT.format(
recurring_another_day_schedule.days[0],
recurring_another_day_schedule.start_time,
)
== result
)
except HANDLED_EXCEPTIONS as exc:
fail(exc)
@mark.asyncio
async def test_non_recurring_schedule_next_runtime_calc(
event_loop: AbstractEventLoop,
non_recurring_in_30_minutes_schedule: SwitcherV2Schedule,
) -> None:
"""Test the calc_next_run_for_schedule tool."""
try:
result = await calc_next_run_for_schedule(
event_loop, non_recurring_in_30_minutes_schedule
)
assert (
SCHEDULE_DUE_TODAY_FORMAT.format(
non_recurring_in_30_minutes_schedule.start_time
)
== result
)
except HANDLED_EXCEPTIONS as exc:
fail(exc)
@mark.asyncio
async def test_setters_and_getters_schedule(
event_loop: AbstractEventLoop
) -> None:
"""Test setters of SwitcherV2Schedule object."""
schedule = SwitcherV2Schedule(
event_loop, 0, [unhexlify(DUMMY_SELECTIVE_RECCURING_SCHEDULE_DATA)]
)
await wait([schedule.init_future])
assert schedule.enabled
assert (
schedule.schedule_data == DUMMY_SELECTIVE_RECCURING_SCHEDULE_DATA_BYTES
)
with raises(TypeError) as exc_info_enable:
schedule.enabled = "not_bool" # type: ignore
assert exc_info_enable.type is TypeError
with raises(TypeError) as exc_info_data:
schedule.schedule_data = 0 # type: ignore
assert exc_info_data.type is TypeError
schedule.enabled = False
assert not schedule.as_dict().get("_enabled")
schedule.schedule_data = b"4855f34ca8c58d6f1453"
assert schedule.as_dict().get("_schedule_data") == b"4855f34ca8c58d6f1453"
@mark.asyncio
async def test_selective_recurring_schedule(
event_loop: AbstractEventLoop
) -> None:
"""Test selective recurring SwitcherV2Schedule object."""
schedule = SwitcherV2Schedule(
event_loop, 0, [unhexlify(DUMMY_SELECTIVE_RECCURING_SCHEDULE_DATA)]
)
await wait([schedule.init_future])
assert schedule.schedule_id == DUMMY_SELECTIVE_RECCURING_SCHEDULE_ID
assert schedule.enabled
assert schedule.recurring
await assert_lists_equal(
schedule.days, DUMMY_SELECTIVE_RECCURING_DAYS_LIST
)
assert schedule.start_time == DUMMY_SELECTIVE_RECCURING_START_TIME
assert schedule.end_time == DUMMY_SELECTIVE_RECCURING_END_TIME
assert schedule.duration == DUMMY_SELECTIVE_RECCURING_DURATION
assert (
schedule.schedule_data == DUMMY_SELECTIVE_RECCURING_SCHEDULE_DATA_BYTES
)
@mark.asyncio
async def test_full_recurring_schedule(event_loop: AbstractEventLoop) -> None:
"""Test full recurring SwitcherV2Schedule object."""
schedule = SwitcherV2Schedule(
event_loop, 0, [unhexlify(DUMMY_FULL_RECCURING_SCHEDULE_DATA)]
)
await wait([schedule.init_future])
assert schedule.schedule_id == DUMMY_FULL_RECCURING_SCHEDULE_ID
assert schedule.enabled
assert schedule.recurring
await assert_lists_equal(schedule.days, DUMMY_FULL_RECCURING_DAYS_LIST)
assert schedule.start_time == DUMMY_FULL_RECCURING_START_TIME
assert schedule.end_time == DUMMY_FULL_RECCURING_END_TIME
assert schedule.duration == DUMMY_FULL_RECCURING_DURATION
assert schedule.schedule_data == DUMMY_FULL_RECCURING_SCHEDULE_DATA_BYTES
@mark.asyncio
async def test_non_recurring_schedule(event_loop: AbstractEventLoop) -> None:
"""Test non-recurring SwitcherV2Schedule object."""
schedule = SwitcherV2Schedule(
event_loop, 0, [unhexlify(DUMMY_NON_RECCURING_SCHEDULE_DATA)]
)
await wait([schedule.init_future])
assert schedule.schedule_id == DUMMY_NON_RECCURING_SCHEDULE_ID
assert schedule.enabled # TODO this should return False, fix dummy packet!
assert schedule.recurring
await assert_lists_equal(schedule.days, DUMMY_NON_RECCURING_DAYS_LIST)
assert schedule.start_time == DUMMY_NON_RECCURING_START_TIME
assert schedule.end_time == DUMMY_NON_RECCURING_END_TIME
assert schedule.duration == DUMMY_NON_RECCURING_DURATION
assert schedule.schedule_data == DUMMY_NON_RECCURING_SCHEDULE_DATA_BYTES
|
#!/usr/bin/env python3
import sys
sys.path.insert( 0, '..' )
# this will later be a session multiplexer object in a module abstraction library
from Engines.POF_com import Session as POFSession
def Main():
config = POFSession.Config("config.ini")
testSession = POFSession(config)
testSession.login()
users = testSession.searchUsers(config, 5, online_only=True)
print("Total Users Found: {0}".format( len(users) ) )
testSession.broadcastMessage(users, "hey whats up")
if __name__ == '__main__':
Main()
|
import os
import sys
import shutil
import multiprocessing
from robosat_pink.tools import cover
from robosat_pink.geoc import config as CONFIG, params, utils
multiprocessing.set_start_method('spawn', True)
def main(dsPath,geojson,out):
params_cover = params.Cover(
dir=dsPath,
bbox=None,
geojson=geojson,
cover=None,
raster=None,
sql=None,
pg=None,
no_xyz=None,
zoom=18,
extent=None,
splits=None,
out=out)
cover.main(params_cover)
return True
# 2 mins
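# Illustrative call (paths below are hypothetical, not part of the original project):
#   main("/data/dataset", "/data/aoi.geojson", "/data/cover.csv")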
|
from setuptools import setup, find_packages
setup(
name="workflowy.automation",
packages=find_packages(),
author="Luke Merrett",
description="Scripts for automating Workflowy tasks using Selenium",
license="MIT",
url="https://github.com/lukemerrett/Workflowy-Automation",
install_requires=['selenium']
)
|
#!/usr/bin/python
"""
Overview: http://fabric.readthedocs.org/en/1.0.0/tutorial.html
Execution: http://fabric.readthedocs.org/en/1.0.0/usage/execution.html
Author(s): Prateek Gupta (prateek@bloomreach.com)
"""
import os
import os.path
import tempfile
import yaml
import sys
import time
import traceback
from fabric.api import *
from fabric.tasks import *
from fabric.decorators import *
from fabric.colors import *
from fabric.contrib.files import exists
import fabric.exceptions
sys.path.append("..")
from utils import deployments
"""
env variables
"""
# Global dictionary containing map of roles to hosts.
# A role provided as an argument to a task must be part of this list;
# otherwise fabric will throw an error saying that role(s) do not exist.
env.roledefs = {
}
#environment variable definitions
env.user='ubuntu'
env.use_ssh_config = True
env.s3_bucket = 's3://brp-admin/config/'
env.root = "/mnt"
#env.project = "cassandra"
#env.project_root = os.path.join(env.root, env.project)
env.tmp_dir = "/mnt/tmp"
env.cassandra_dir = os.path.join(env.root, "cassandra")
env.cassandra_installed_dir = os.path.join(env.root, "cassandra_latest")
env.cassandra_bin = os.path.join(env.cassandra_installed_dir, "bin")
env.restart = os.path.join(env.cassandra_bin, "restart.sh")
env.nodetool = os.path.join(env.cassandra_bin, "nodetool")
env.logs_dir = os.path.join(env.cassandra_dir, "logs")
env.conf_dir = os.path.join(env.cassandra_installed_dir, "conf")
env.cassandra_tar = "s3://br-resources/cassandra/apache-cassandra-2.0.4-SNAPSHOT-20140620-bin.tar.gz"
env.cassandra_local_tar = "apache-cassandra-2.0.4-SNAPSHOT-20140117-bin.tar.gz"
env.cassandra_ver = "apache-cassandra-2.0.4-SNAPSHOT"
BR_TOP = "$dist/../../../.."
env.bstore_tools = "$BR_TOP/work/src/bstore/tools"
env.bstore_scripts = os.path.join(env.bstore_tools, "scripts")
env.src_topology = "conf/prod-cassandra-topology.properties"
azs = { 'us-east-1' : ['us-east-1c', 'us-east-1d', 'us-east-1e'], 'us-west-1' : ['us-west-1a', 'us-west-1b', 'us-west-1c'] }
datacenter_ganglia_ports = { 'bstore_staging' : '8662',
'bstore_stagingfrontend' : '8663',
'pagedb-backend' : '8664',
'pagedb-frontend' : '8665',
'userdb-stagingfrontend' : '8666',
'userdb-stagingbackend' : '8666',
'userdb-frontend' : '8667',
'userdb-backend' : '8667'
}
@task
def realm(deploy_realm):
"""
Set the realm for deployment
"""
assert deploy_realm
env.deploy_realm = deploy_realm
@task
def role(deploy_role, regions=None, zones=None, instances=None):
"""
    Set the role for deployment. Must be non-empty.
"""
assert deploy_role
env.deploy_role = deploy_role
# This allows fabric to generate hostlist from role
env.roledefs[deploy_role] = deployments._get_hosts(deploy_role)
env.roles = [deploy_role]
env.deploy_regions = regions
env.deploy_zones = zones
env.deploy_instances = instances
@task
def project(deploy_project):
"""
Set the project used for deployment
"""
assert deploy_project
env.deploy_project = deploy_project
@task
def region(deploy_region):
"""
Set the region used for deployment
"""
assert deploy_region
env.deploy_region = deploy_region
@task
def push_s3cfg():
"""
Push local s3cfg file to remote host
"""
env.user = "ubuntu"
deployments._push_s3cfg()
@task
def deploy_s3cmd():
"""
Install s3cmd and push s3cfg config to local host
"""
deployments._bootstrap_s3cmd()
@task
def deploy_dist():
"""
Push backend dist
"""
deployments._deploy_dist()
@task
def deploy_scripts():
"""
Push production scripts
"""
deployments._deploy_scripts()
@task
def deploy_monitor():
"""
Deploy monitoring
"""
_deploy_monitor()
@task
def enable_root_login():
"""
Copy ssh authorized_keys to enable root login
"""
deployments._enable_root_login()
def _deploy_monitor():
"""
Deploy monitoring
"""
deployments._deploy_munin()
deployments._rsync("../../scripts/ops-tools/monitoring/nrpe-ub.cfg", "/tmp")
deployments._rsync("../../scripts/ops-tools/monitoring/server-configs/nagios-plugins/lib/", "/tmp/nagios_plugins")
deployments._rsync("../../scripts/ops-tools/monitoring/server-configs/nagios-plugins/config/", "/tmp/nagios_config")
deployments._install_nagios()
@task
def push_topology_file(src_topology=env.src_topology):
"""
Push topology file to nodes.
$ fab -H ip1,ip2,ip3,ip4,ip5 push_topology_file
It can be combined with print_hosts command, for example
$ fab -P print_hosts:datacenter=bstore_staging push_topology_file
"""
sudo("chown -R ubuntu:ubuntu %(root)s" % env)
run("mkdir -p %(tmp_dir)s" % env)
run("cp %(conf_dir)s/cassandra-topology.properties %(tmp_dir)s" % env)
deployments._rsync(src_topology, "%(conf_dir)s/cassandra-topology.properties" % env)
@task
def push_tools():
""" Push customed tools to cassandra_bin. """
sudo("chown -R ubuntu:ubuntu %(root)s" % env)
run("mkdir -p %(tmp_dir)s" % env)
assert exists(env.cassandra_bin), "Cannot find cassandra bin folder!"
deployments._rsync(env.bstore_scripts, env.tmp_dir)
sudo("chmod 755 %(tmp_dir)s/scripts/*" % env)
run("mv %(tmp_dir)s/scripts/* %(cassandra_bin)s" % env)
@task
def create_placement_group(name, region="us-east-1"):
"""
Create placement group, for example
$ fab create_placement_group:name='prod.bstore.backend.us-east'
"""
deployments._create_placement_group(name=name, region=region)
print yellow("Placement group '%s' created in region %s." % (name, region))
@task
def launch_cassandra_node(deploy_name, region = "us-east-1", az = "us-east-1c", project = "bloomstore", roleval = "backend", placement_group = None, security_group="BloomStore"):
"""
Launch new instance and deploy queryserver
"""
assert deploy_name
if region == None:
if hasattr(env, 'deploy_region') and env.deploy_region != None:
region = env.deploy_region
else:
region = "us-east-1"
roleval = ""
if hasattr(env, 'deploy_role') and env.deploy_role != None:
roleval == env.deploy_role
tags = {
"Name" : deploy_name,
"Project" : project,
"Role" : roleval
}
try:
instance = deployments._launch_ec2_server(region=region,
az=az,
instance_type="i2.xlarge",
tags=tags,
security_groups=[security_group],
ami="ami-dc0625b4",
placement_group=placement_group)
except:
time.sleep(10)
instance = deployments._launch_ec2_server(region=region,
az=az,
instance_type="i2.xlarge",
tags=tags,
security_groups=[security_group],
ami="ami-dc0625b4",
placement_group=placement_group)
return instance
@task
def launch_and_add_cassandra_node(datacenter,
deploy_name,
seed=None,
region="us-east-1",
az="us-east-1c",
project="bloomstore",
roleval="backend",
placement_group=None,
maintenance_time=(0,0)):
"""
Launch new instance and add node to cassandra cluster
"""
instance = launch_cassandra_node(deploy_name = deploy_name, region = region, az = az, project = project, roleval = roleval, placement_group = placement_group)
print yellow("Waiting to connect to instance "), instance.public_dns_name
if seed == None:
seed = instance.private_ip_address
print yellow("Launching cassandra node with seed "), seed
wait_for_node(instance.public_dns_name)
try:
with settings(host_string=instance.public_dns_name, user="ubuntu"):
add_cassandra_node(datacenter, seed, project, roleval, maintenance_time)
except SystemExit:
# Retry in case of error
print red("There was an error in deploying cassandra node. Retrying...")
time.sleep(120)
with settings(host_string=instance.public_dns_name, user="ubuntu"):
add_cassandra_node(datacenter, seed, project, roleval, maintenance_time)
pass
@task
def launch_cassandra_cluster(datacenter, num_hosts,
region="us-east-1", num_azs = 1,
initial_seeds="", realm="test",
project = "bloomstore", roleval = "backend", security_group="BloomStore"):
"""
Launch a new cassandra cluster with given nodes
"""
# Adjust topology file according to different realm.
if realm != 'prod':
env.src_topology = 'conf/%s-cassandra-topology.properties' % (realm,)
instances = []
seeds = initial_seeds.strip().split()
j = int(num_hosts)/int(num_azs)
print j
print ",".join(seeds)
print len(seeds)
with open(env.src_topology, "a") as f:
for i in range(0, int(num_hosts)):
az_id = i / j
index = i % j
az = azs[region][int(az_id)]
placement_group = '{0}.{1}.{2}.{3}.{4}'.format(realm, az, project, roleval, datacenter)
create_placement_group(placement_group, region)
name = '{0}.{1}.{2}.cassandra.{3}.bloomreach.com'.format(index,az,realm, datacenter)
print yellow("Launching instance %s in zone %s" %(name, az))
instance = launch_cassandra_node(name, region, az, project, roleval, placement_group, security_group)
instances.append(instance)
if len(seeds) == 0 and i == 0:
seeds.append(instance.private_ip_address)
print green("Successfully launched instance %s" %name )
line = '{0}={1}:{2}\n'.format(instance.private_ip_address,datacenter,az)
f.write(line)
env.hosts = [instance.public_dns_name for instance in instances]
execute("initialize_cassandra_cluster", datacenter, ' '.join(seeds), project, roleval)
def _get_evenly_distributed_maintence_hours(hosts, this_host):
"""
Get evenly distributed maintenance hours.
For example, if we have 3 nodes, then their maintenance hour should be
(sun, 0:00), (tue, 8:00), and (thu, 16:00)
:rtype tuple
:return (week, hour)
"""
DAY_HOURS = 24
WEEK_HOURS = 7 * DAY_HOURS
index = hosts.index(this_host)
cron_job_time = (index * WEEK_HOURS / len(hosts))
week = cron_job_time / DAY_HOURS
hour = cron_job_time % DAY_HOURS
return week, hour
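# Illustrative check (hosts are the plain host strings Fabric puts in env.hosts):
# with three hosts the slots come out as (0, 0), (2, 8) and (4, 16), i.e.
# Sunday 00:00, Tuesday 08:00 and Thursday 16:00, matching the docstring above.
#
#   hosts = ['a', 'b', 'c']
#   [_get_evenly_distributed_maintence_hours(hosts, h) for h in hosts]
#   # -> [(0, 0), (2, 8), (4, 16)]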
@task
@parallel
def initialize_cassandra_cluster(datacenter, seeds, project, roleval):
wait_for_node(env.host)
seed=seeds.split(" ")
seed_list=",".join(seed)
print yellow("seed list:" + seed_list)
# equally distributing the maintenance hours
maintenance_time = _get_evenly_distributed_maintence_hours(env.hosts, env.host_string)
print yellow("maintenance time: %d, %d" % maintenance_time)
try:
with settings(user="ubuntu"):
add_cassandra_node(datacenter, seed_list, project, roleval, maintenance_time)
except Exception, e:
# Retry in case of error
print red("There was an error in deploying cassandra node. Retrying..." + repr(e))
traceback.print_exc()
time.sleep(120)
with settings(user="ubuntu"):
add_cassandra_node(datacenter, seed_list, project, roleval, maintenance_time)
@task
def push_cassandra_config(pagedb_hosts, seed, project, roleval):
"""
Generates yaml configuration and pushes to each node. Assumes cassandra is already installed on each node
"""
hosts = pagedb_hosts.split(";")
i = 0
num = len(hosts)
print hosts, num
for host in hosts:
# token range for Murmur3Partitioner is [-2**63, 2**63-1]
# token = (i * (2**64) / num) - (2**63)
# i = i + 1
with settings(host_string=host, user="ubuntu"):
with settings(warn_only=True):
r = deployments.pkill('CassandraDaemon',30)
if r != 0:
raise Exception("could not kill existing CassandraDaemon")
_push_cassandra_config(seed, project, roleval)
_start_cassandra()
print green("Waiting 2 min after starting cassandra node %s" % str(host))
time.sleep(120)
pass
@task
def add_cassandra_node(datacenter, seed, project, roleval='backend', maintenance_time=(0,0)):
"""
Add new node to existing cassandra cluster
"""
_bootstrap_basic()
_setup_filesystem("/dev/xvdb")
push_s3cfg()
_install_java7()
_install_cassandra()
_modify_ulimit()
_set_swap()
_push_restart_script()
push_tools()
_setup_cassandra(datacenter, seed, project, roleval)
_start_cassandra()
_setup_maintenace_cronjobs(*maintenance_time)
with settings(warn_only=True):
_deploy_monitor()
def _bootstrap_basic():
"""
Perform basic operations on the AMI (e.g. import security keys, configure timezone)
"""
# https://forums.aws.amazon.com/thread.jspa?messageID=341020
sudo("gpg --keyserver keyserver.ubuntu.com --recv-key 40976EAF437D05B5")
sudo("gpg -a --export 40976EAF437D05B5 | apt-key add -")
sudo("apt-get update")
# fix timezone
sudo("echo UTC | tee /etc/timezone")
sudo("dpkg-reconfigure --frontend noninteractive tzdata")
sudo("apt-get install -y --force-yes ntp cronolog dstat htop unzip nmap apache2-utils siege logtail s3cmd")
sudo("apt-get install -y --force-yes python-pip libxml2-dev libxslt-dev python-dev python-protobuf")
sudo("pip install simplejson pycassa lxml cssselect beautifulsoup4 fabric boto pytz")
def _install_java7():
"""
    Install Oracle JDK 7 and set it as the default
http://www.webupd8.org/2012/01/install-oracle-java-jdk-7-in-ubuntu-via.html
"""
print yellow("Installing java 7 ...")
sudo("add-apt-repository ppa:webupd8team/java -y")
sudo("apt-get update")
sudo("echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections")
sudo("apt-get install oracle-java7-installer -y --force-yes")
sudo("apt-get install oracle-java7-set-default -y --force-yes")
def _setup_filesystem(*drives):
fsDrive = "/dev/md0"
if len(drives) > 1:
"""
Multiple drives available, we should setup a raid0 config for this case
"""
sudo("apt-get install mdadm --no-install-recommends")
for drive in drives:
with settings(warn_only=True):
sudo("umount %s " % drive)
sudo("""echo -e "n\np\n1\n \n \nt\nfd\nw" | fdisk %s""" % drive)
# remove the drive from /etc/fstab
# we have to escape / in drive,
driveEscaped = drive.replace("/","\/");
sudo("sed -i \'/%s/d\' /etc/fstab" % driveEscaped)
pass
sudo("/usr/bin/yes | mdadm --create --verbose --auto=yes %s --level=0 --raid-devices=%d %s" % (fsDrive, len(drives), " ".join(drives) ))
sudo("echo DEVICE %s | tee /etc/mdadm/mdadm.conf" % " ".join(drives))
sudo("mdadm --detail --scan | tee -a /etc/mdadm/mdadm.conf")
else:
fsDrive=drives[0]
sudo("mke2fs -t ext4 %s" %fsDrive)
sudo("""echo "%s /mnt ext4 defaults,nobootwait,noatime 0 2" | tee -a /etc/fstab""" %fsDrive)
sudo("update-initramfs -u")
sudo("mount %s /mnt" %fsDrive)
sudo("chown -R ubuntu:ubuntu /mnt")
def _install_cassandra():
with cd("%(root)s" % env):
run("s3cmd get --force %(cassandra_tar)s" % env)
run("tar xvzf %(cassandra_local_tar)s; rm %(cassandra_local_tar)s" % env)
run("ln -s %(cassandra_ver)s cassandra_latest" % env)
run("mkdir -p %(cassandra_dir)s" % env)
def _setup_maintenace_cronjobs(week=0, hour=0):
assert hour < 24
assert week < 7 # 0: sunday, 1: monday, ..., 6: saturday
bindings = dict(env)
bindings['hour'] = hour
bindings['week'] = week
run('crontab -l | { cat; echo "0 %(hour)d * * %(week)d %(nodetool)s repair"; } | crontab -' % bindings)
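    # Illustrative result (using the default env paths defined above): week=2,
    # hour=8 appends the crontab entry
    #   0 8 * * 2 /mnt/cassandra_latest/bin/nodetool repair
    # i.e. a weekly `nodetool repair` every Tuesday at 08:00.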
def _set_swap():
sudo("dd if=/dev/zero of=/mnt/swap bs=1M count=12288")
sudo("chmod 600 /mnt/swap")
sudo("mkswap /mnt/swap")
sudo("swapon /mnt/swap")
def _modify_ulimit():
sudo("""echo "* soft nofile 200000" | sudo tee -a /etc/security/limits.conf""")
sudo("""echo "* hard nofile 200000" | sudo tee -a /etc/security/limits.conf""")
sudo("""echo "root soft nofile 200000" | sudo tee -a /etc/security/limits.conf""")
sudo("""echo "root hard nofile 200000" | sudo tee -a /etc/security/limits.conf""")
sudo("""echo "* soft memlock 4194304" |sudo tee -a /etc/security/limits.conf""")
sudo("""echo "* hard memlock 4194304" |sudo tee -a /etc/security/limits.conf""")
sudo("""echo "root soft memlock 4194304" |sudo tee -a /etc/security/limits.conf""")
sudo("""echo "root hard memlock 4194304" |sudo tee -a /etc/security/limits.conf""")
sudo("""echo "* soft as unlimited" |sudo tee -a /etc/security/limits.conf""")
sudo("""echo "* hard as unlimited " |sudo tee -a /etc/security/limits.conf""")
sudo("""echo "root soft as unlimited" |sudo tee -a /etc/security/limits.conf""")
sudo("""echo "root hard as unlimited " |sudo tee -a /etc/security/limits.conf""")
sudo("sysctl -w vm.max_map_count=131072")
def _generate_cassandra_yaml(seed, project, roleval):
local_ip_addr = run("curl 169.254.169.254/latest/meta-data/local-ipv4")
local_ip = str(local_ip_addr)
public_ip_addr = run("curl 169.254.169.254/latest/meta-data/public-ipv4")
public_ip = str(public_ip_addr)
template = yaml.safe_load(open("conf/"+project+"_"+roleval+".yaml", "r"))
template["listen_address"] = local_ip
template["broadcast_address"] = local_ip
if seed != None:
template["seed_provider"][0]["parameters"][0]["seeds"] = seed
template["num_tokens"] = 32
template["endpoint_snitch"] = "PropertyFileSnitch"
_, f = tempfile.mkstemp(prefix="cassandra-"+ local_ip +"-", suffix=".yaml")
with open(f, "w") as fd:
yaml.safe_dump(template, fd, default_flow_style=False)
print "yaml file:", f
return f
def _push_cassandra_config(seed, project, roleval, installed_dir=env.cassandra_installed_dir):
""" Initialize and push cassandra config for node """
deployments._rsync("conf/cassandra-env.sh", "%s/conf/cassandra-env.sh" % installed_dir)
f = _generate_cassandra_yaml(seed, project, roleval)
print "yaml file:", f
put(f, "%s/conf/cassandra.yaml" % installed_dir)
deployments._rsync("conf/log4j-server.properties", "%s/conf/log4j-server.properties" % installed_dir)
deployments._rsync(env.src_topology, "%s/conf/cassandra-topology.properties" % installed_dir)
def _push_restart_script():
""" Push the restart script to the cassandra node at /mnt location """
deployments._rsync("./restart.sh", "/mnt/cassandra_latest/bin/")
deployments._rsync("./stop.sh", "/mnt/cassandra_latest/bin/")
@task
def start_cassandra(host, user='ubuntu'):
with settings(host_string=host, user=user):
_start_cassandra()
def _start_cassandra():
with settings(warn_only=True):
r = deployments.pkill('CassandraDaemon', 30)
if r != 0:
raise Exception("could not kill existing CassandraDaemon")
run("nohup /mnt/cassandra_latest/bin/cassandra > /dev/null &", pty=False)
@task
def print_hosts(datacenter, realm='*', region="us-east-1", instance_type='*', private_ip=False, verbose=False):
""" print all ips given realm and datacenter. """
hosts = deployments._get_relevant_hosts(datacenter, realm, region, instance_type, private_ip, verbose)
print green(",".join(hosts))
env.hosts = hosts
@task
@runs_once
def cssh():
""" Should be concatenated with print_hosts
$ fab print_hosts:datacenter=pagedb-frontend,instance_type='i2.xlarge' cssh
"""
hosts = ' '.join(env.hosts)
with settings(warn_only=True):
if local('cssh --username ubuntu %s' % hosts).failed:
local('csshx --login ubuntu %s' % hosts)
def _setup_cassandra(datacenter, seed, project, roleval, installed_dir=env.cassandra_installed_dir):
# push cassandra related configs
_push_cassandra_config(seed, project, roleval, installed_dir)
# setup ganglia
setup_ganglia(datacenter, installed_dir)
def _deploy_cassandra_build(bin, cassandra_local_tar, cassandra_version, datacenter, realm='*', region="us-east-1", seeds='', verbose=False, project = "bloomstore", roleval = "backend"):
""" Deploy the customed built cassandra binary to a temp folder. """
sudo("chown -R ubuntu:ubuntu %(root)s" % env)
tmp_folder = env.tmp_dir
run("mkdir -p %(tmp_folder)s" % locals())
print green("Downloading the build %(bin)s..." % locals())
run("s3cmd get --force %(bin)s %(tmp_folder)s/%(cassandra_local_tar)s" % locals(), quiet=True)
run("tar xvzf %(tmp_folder)s/%(cassandra_local_tar)s -C %(tmp_folder)s; rm %(tmp_folder)s/%(cassandra_local_tar)s" % locals(), quiet=True)
deployments._rsync("conf/cassandra-env.sh", "%(tmp_folder)s/%(cassandra_version)s/conf/cassandra-env.sh" % locals())
# if the client does not specify the seeds, it will try to figure it out from datacenter and realm
if not seeds:
seeds = ','.join(deployments._get_relevant_hosts(datacenter=datacenter, realm=realm, region=region, private_ip=True))
print green("Seeds: " + seeds)
installed_dir = "%(tmp_folder)s/%(cassandra_version)s" % locals()
_setup_cassandra(datacenter, seeds, project, roleval, installed_dir)
return installed_dir
def _start_cassandra_process():
""" Restarting the node """
print green("Restarting CassandraDaemon")
run(env.restart, pty=False)
def _switch_cassandra_running_build(casssandra_target_running_build):
""" 'Hot switch' the cassandra running build with a replaced build.
Steps:
1. Terminate CassandraDaemon
2. Relink cassandra_latest to deployed build
3. Restart CassandraDaemon
"""
print yellow("Switching to build: " + casssandra_target_running_build)
print green("Terminating CassandraDaemon")
r = deployments.pkill('CassandraDaemon', 30, wait_secs=30)
if r != 0:
raise Exception("could not kill existing CassandraDaemon")
with cd("%(root)s" % env):
run("unlink cassandra_latest")
run("ln -s %(casssandra_target_running_build)s cassandra_latest" % locals())
run("mkdir -p %(cassandra_dir)s" % env)
_start_cassandra_process()
def _wait_until_cassandra_is_up(host, timeout=180):
""" Use telnet localhost 9160 to see if CassandraDaemon is up and running. """
print green("Waiting for CassandraDaemon at %(host)s to be up" % locals())
timeout = int(timeout)
while run("exec 6<>/dev/tcp/localhost/9160", warn_only=True, quiet=True).failed:
# sleep for a while and try again.
run("sleep 1", quiet=True)
timeout -= 1
sys.stdout.write('.')
sys.stdout.flush()
if timeout <= 0:
print red("\nWARNING: CassandraDaemon at %(host)s is not restarted, please fix it ASAP" % locals())
raise fabric.exceptions.CommandTimeout("CassandraDaemon at %(host)s is not restarted, please fix it ASAP" % locals())
print green("\nCassandraDaemon at %(host)s is UP." % locals())
def _move_deployed_build_to_root(deployed_build_folder):
""" Move deployed build folder to root aka /mnt """
basename = os.path.basename(deployed_build_folder)
root = env.root
today = time.strftime('%Y%m%dZ%H%M')
casssandra_build_destination = "%(root)s/%(basename)s-d%(today)s" % locals()
run("mv %(deployed_build_folder)s %(casssandra_build_destination)s" % locals())
return casssandra_build_destination
@task
def stop_cassandra_process():
"""
Stop the protocol listeners for gossip, thrift and binary
Stop any compaction and index building
Stop backups
    Now drain the node
    It should then be safe to kill the process
    Kill the CassandraDaemon process, assuming one is running
"""
run("%(nodetool)s disablebinary" % env)
run("%(nodetool)s disablethrift" % env)
run("%(nodetool)s disablegossip" % env)
run("%(nodetool)s disablebackup" % env)
run("%(nodetool)s stop compaction" % env)
run("%(nodetool)s stop index_build" % env)
run("%(nodetool)s drain" % env)
r = deployments.pkill('CassandraDaemon', 30, wait_secs=30)
if r != 0:
raise Exception("could not kill existing CassandraDaemon")
@task
def start_cassandra_process():
""" Start cassandra process and wait until the server is up and running assuming there is no cassandra process running """
_start_cassandra_process()
_wait_until_cassandra_is_up(host=env.host)
@task
def restart_cassandra_process():
""" Restart cassandra process, esp useful when combined with print_hosts.
Example:
# Do a rolling restart for frontend cluster
$ fab print_hosts:datacenter=pagedb-frontend restart_cassandra_process
"""
stop_cassandra_process()
start_cassandra_process()
@task
def wait_until_cassandra_is_up(timeout=180):
""" Wait for CassandraDaemon to be up and running. """
_wait_until_cassandra_is_up(host=env.host, timeout=timeout)
@task
def switch_cassandra_running_build(casssandra_target_running_build='/mnt/apache-cassandra-2.0.1'):
""" This command will terminate CassandraDaemon, relink cassandra_latest, and restart cassandra """
_switch_cassandra_running_build(casssandra_target_running_build)
_wait_until_cassandra_is_up(host=env.host)
@task
def recover_bad_node(force=False):
""" Recover the bad node assuming it is in a bad state. """
if not force and not run("exec 6<>/dev/tcp/localhost/9160", warn_only=True, quiet=True).failed:
print green("Your cassandra process is fine, we don't need to recover it.")
return
# If it is in a bad state, restart it.
stop_cassandra_process()
start_cassandra_process()
@task
def deploy_cassandra_build(bin, datacenter, realm='*', region="us-east-1", seeds='', timeout=300, verbose=False, project = "bloomstore", roleval = "backend"):
""" Deploy the cassandra customed build binary to one node. """
cassandra_local_tar = os.path.basename(bin)
cassandra_version = '-'.join(cassandra_local_tar.split('-')[:4])
if realm != 'prod':
env.src_topology = 'conf/%s-cassandra-topology.properties' % (realm,)
cassandra_tmp_folder = _deploy_cassandra_build(bin, cassandra_local_tar, cassandra_version,
datacenter, realm=realm, region=region, seeds=seeds, verbose=verbose, project = project, roleval = roleval)
serving_folder = _move_deployed_build_to_root(cassandra_tmp_folder)
_switch_cassandra_running_build(serving_folder)
_wait_until_cassandra_is_up(env.host, timeout=timeout)
@task
def wait_for_node(host):
while True:
try:
with settings(host_string=host, warn_only=True):
print yellow("checking "+host+" ...")
echo_command_output = run("echo check")
if echo_command_output.find("check") >= 0:
print green(host + " UP")
return True
except:
print yellow(host + "...offline")
time.sleep(1)
@task
def setup_ganglia(datacenter, installed_dir=env.cassandra_installed_dir):
"""
Install ganglia monitoring for Cassandra
staging backend: port 8662
staging frontend: port 8663
prod backend: port 8664
prod frontend: port 8665
"""
try:
ganglia_port = datacenter_ganglia_ports[datacenter]
except KeyError as err:
print red("Cannot find matching ganglia port: {}".format(err))
return
print yellow("Installing ganglia monitoring using port " + ganglia_port + "...")
lib_dir = os.path.join(installed_dir, "lib")
conf_dir = os.path.join(installed_dir, "conf")
sudo("apt-get update")
with settings(warn_only=True):
sudo("apt-get install -y --force-yes ganglia-monitor")
deployments._rsync("$BR_TOP/tools/3rd_party_libs/cassandra/jmxetric-1.0.4.jar", "%s/" % lib_dir)
deployments._rsync("$BR_TOP/tools/3rd_party_libs/cassandra/gmetric4j-1.0.3.jar", "%s/" % lib_dir)
deployments._rsync("$BR_TOP/tools/3rd_party_libs/cassandra/oncrpc-1.0.7.jar", "%s/" % lib_dir)
deployments._rsync("conf/cassandra-env.sh", "%s/cassandra-env.sh" % env.tmp_dir)
deployments._rsync("conf/jmxetric.xml", "%s/jmxetric.xml" % env.tmp_dir)
deployments._rsync("conf/ganglia/gmond.conf", "%s/gmond.conf" % env.tmp_dir)
deployments._rsync("conf/ganglia/conf.d/modpython.conf", "%s/modpython.conf" % env.tmp_dir)
deployments._rsync("conf/ganglia/conf.d/simple_diskstats.conf", "%s/simple_diskstats.conf" % env.tmp_dir)
deployments._rsync("conf/ganglia/python_modules/simple_diskstats.py", "%s/simple_diskstats.py" % env.tmp_dir)
sudo("mv %s/cassandra-env.sh %s/cassandra-env.sh" % (env.tmp_dir, conf_dir))
sudo("mv %s/jmxetric.xml %s/jmxetric.xml" % (env.tmp_dir, conf_dir))
sudo("mv %s/gmond.conf /etc/ganglia/gmond.conf" % env.tmp_dir)
sudo("mkdir -p /etc/ganglia/conf.d 1>/dev/null")
sudo("mv %s/modpython.conf /etc/ganglia/conf.d/modpython.conf" % env.tmp_dir)
sudo("mv %s/simple_diskstats.conf /etc/ganglia/conf.d/simple_diskstats.conf" % env.tmp_dir)
sudo("mkdir -p /usr/lib/ganglia/python_modules 1>/dev/null")
sudo("mv %s/simple_diskstats.py /usr/lib/ganglia/python_modules/simple_diskstats.py" % env.tmp_dir)
sudo("sed -i -e s/'<SEND_PORT>'/" + ganglia_port + "/g %s/cassandra-env.sh" % conf_dir)
sudo("sed -i -e s/'<SEND_PORT>'/" + ganglia_port + "/g %s/jmxetric.xml" % conf_dir)
sudo("sed -i -e s/'<SEND_PORT>'/" + ganglia_port + "/g /etc/ganglia/gmond.conf")
sudo("sed -i -e s/'<HOST_LOCATION>'/" + env.host + "/g /etc/ganglia/gmond.conf")
sudo("sudo /etc/init.d/ganglia-monitor restart")
@task
def launch_ratelimiter_node(deploy_name, region = "us-east-1", az = "us-east-1c", project = "bloomstore", roleval = "backend"):
"""
Launch new rate limiter node.
"""
assert deploy_name
tags = {
"Name" : deploy_name,
"Project" : project,
"Role" : roleval
}
try:
instance = deployments._launch_ec2_server(region=region,
az=az,
instance_type="c3.large",
tags=tags,
security_groups=["BloomStore"],
key_name = "gsg-keypair",
ami="ami-dc0625b4")
except:
time.sleep(10)
instance = deployments._launch_ec2_server(region=region,
az=az,
instance_type="c3.large",
tags=tags,
security_groups=["BloomStore"],
key_name = "gsg-keypair",
ami="ami-dc0625b4")
env.hosts = [instance.public_dns_name]
return instance
@task
def setup_ratelimiter():
''' Setup the machine of rate limiter. '''
sudo('apt-get update')
run('s3cmd get -f s3://br-software/redis-2.6.17.tar.gz')
run('tar xvfz redis-2.6.17.tar.gz')
sudo('apt-get -y --force-yes install python-software-properties')
sudo('add-apt-repository ppa:chris-lea/node.js -y')
sudo('apt-get update')
sudo('apt-get -y --force-yes install make')
sudo('apt-get -y --force-yes install nginx')
sudo('apt-get -y --force-yes install nodejs')
sudo('cd redis-2.6.17 && make')
sudo('npm install redis --global')
sudo('npm install forever --global')
sudo('npm install socket.io --global')
sudo("""echo "* soft nofile 200000" | sudo tee -a /etc/security/limits.conf""")
sudo("""echo "* hard nofile 200000" | sudo tee -a /etc/security/limits.conf""")
# setup PATH
run("echo PATH=$PATH:/home/ubuntu/redis-2.6.17/src >> ~/.bashrc")
# setup folders
sudo('chown ubuntu:ubuntu /mnt')
#run('mkdir -p /mnt/node')
run('mkdir -p /mnt/logs')
run('mkdir -p /mnt/logs/redis')
run('mkdir -p /mnt/redis')
# setup redis
put('ratelimiter/*', '/mnt/')
run('ln -s /mnt/bps-rate-limiter /mnt/node')
run('mkdir -p /mnt/node/logs')
run('mv /mnt/redis.conf /home/ubuntu/redis-2.6.17/redis.conf')
run('/home/ubuntu/redis-2.6.17/src/redis-server /home/ubuntu/redis-2.6.17/redis.conf')
with cd("/mnt/node"):
sudo('npm install connect')
sudo('npm install connect-route')
sudo('npm install ejs')
sudo('npm install express')
sudo('chmod +x /mnt/node/*')
run('/mnt/node/init_redis')
run('/mnt/node/restore_redis')
run('/mnt/node/reset_node')
# setup app.js
run("""echo '''#!/bin/sh -e
set -e
DAEMON=/mnt/node/app.js
FOREVER_LOG=/mnt/node/logs/forever.log
STDOUT_LOG=/mnt/node/logs/stdout.log
STDERR_LOG=/mnt/node/logs/stderr.log
DEFAULT_PORT=8080
case "$1" in
start) forever -l $FOREVER_LOG -o $STDOUT_LOG -e $STDERR_LOG -a start $DAEMON $DEFAULT_PORT;;
stop) forever stop $DAEMON ;;
force-reload|restart)
forever restart $DAEMON ;;
*) echo "Usage: /etc/init.d/node {start|stop|restart|force-reload}"
exit 1
;;
esac
exit 0''' > ~/node""")
sudo('chown root:root ~/node')
sudo('mv ~/node /etc/init.d/node')
sudo('chmod 755 /etc/init.d/node')
sudo('/etc/init.d/node start')
# setup nginx
run("""echo '''upstream nodes {
server localhost:8080;
server localhost:8081;
}
server {
listen 80;
server_name ratelimiter.bloomreach.com;
root /mnt/node/public;
location / {
proxy_pass http://nodes;
proxy_redirect off;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}''' > ~/node.conf""")
sudo('chown root:root ~/node.conf')
sudo('mv ~/node.conf /etc/nginx/sites-available/')
sudo('rm /etc/nginx/sites-enabled/default')
sudo('ln -s /etc/nginx/sites-available/node.conf /etc/nginx/sites-enabled/default')
sudo('service nginx restart')
|
# -*- coding: utf-8 -*-
""" 数据库相关函数 """
import pickle
import sqlite3
from gensim.models import KeyedVectors
from .log import get_logger
class Word2VecDb:
def __init__(self, db_path):
print("Use: `{}`".format(db_path))
self.db = sqlite3.connect(db_path)
self.cur = self.db.cursor()
def get_vec(self, key):
"""
        Get the vector for a key.
        Args:
            key -- a word, e.g. "我"
        Returns:
            vector -- numpy.array(dim) for the key if it exists, otherwise None
"""
self.cur.execute("SELECT * FROM `model` WHERE `word`=?", (key, ))
result = self.cur.fetchone()
if result:
return pickle.loads(result[1])
else:
return None
def get_vec_batch(self, keys):
"""
        Get the vectors for a batch of keys.
        Args:
            keys -- a list of words, e.g. ["我", "来自", "广州"]
        Returns:
            vector list -- [numpy.array(dim), ...] for the keys that exist, otherwise None
"""
try:
if keys:
self.cur.execute("SELECT * FROM `model` WHERE `word` IN ({})".\
format("'" + "','".join([k.replace("'", "''") for k in keys]) + "'"))
res = [pickle.loads(d[1]) for d in self.cur.fetchall()]
res = res if res else None
else:
res = None
except Exception as er:
print("Error: {}".format(er))
res = None
return res
def insert_vec(self, key, val):
try:
self.cur.execute("INSERT INTO `model` VALUES (?, ?)", (key, pickle.dumps(val)))
self.db.commit()
except Exception as er:
print("Key: `{}`, Value: `{}`\nError: {}!".format(key, val, er))
def insert_vec_batch(self, table_name, iter_obj, batch):
"""
Args:
            table_name -- name of the table
            iter_obj -- data rows, formatted as [(?, ?, ..., ?), (), ..., ()]
            batch -- number of rows per batch
"""
each_len = len(iter_obj[0])
place_holder = ", ".join(["?"] * each_len)
sql_text = "INSERT INTO %s VALUES (%s)" % (table_name, place_holder)
for i in range(0, len(iter_obj), batch):
try:
print("==>>[{},{})".format(i, i + batch))
self.cur.executemany(sql_text, iter_obj[i : i + batch])
self.db.commit()
except Exception as er:
print("[{},{})\nError: {}!".format(i, i + batch, er))
def create(self):
sql = """
CREATE TABLE IF NOT EXISTS `model` (
`word` VARCHAR(128) NOT NULL,
`value` BLOB NOT NULL,
PRIMARY KEY (`word`)
)
"""
self.cur.execute(sql)
self.db.commit()
def drop(self):
sql = "DROP TABLE IF EXISTS `model`;"
self.cur.execute(sql)
self.db.commit()
def get_size(self):
self.cur.execute("SELECT COUNT(*) FROM `model`;")
return self.cur.fetchone()
def destroy(self):
self.cur.close()
self.db.close()
def vec_to_db(vec_path,
db_path,
binary=True,
table_name="model",
batch=10000):
logger = get_logger(name="vec2db_log", level="debug")
logger.info("====[init sqlite]====")
    db = Word2VecDb(db_path=db_path)
db.drop()
db.create()
logger.info("====[load vector]====")
model = KeyedVectors.load_word2vec_format(vec_path, binary=binary)
logger.info("====[insert to the db]====")
iter_obj = [(w, pickle.dumps(model[w])) for w in model.vocab]
del model
db.insert_vec_batch(table_name, iter_obj, batch=batch)
db.destroy()
logger.info("====[update `%s`]====" % db_path)
|
def initialise_rankings(input_text):
"""
Initialises the rankings with each player and a score of 0
:param input_text: A multi-line string containing player names
:return: A multi-line string containing all rankings
"""
player_list = input_text.split('\n')
# delete last empty line
del player_list[-1]
output_text = ''
for player in player_list:
output_text += f'{player}\t0\n'
return output_text
def update_score(score_text, who_won):
"""
Update the score based on who won
:param score_text: the text of the current score
:param who_won: a String of who won this round
:return: output_text for the score
"""
red, black, score_list = get_players_and_score(score_text)
# Add scores based on Crokinole rules
if who_won == 'RED':
score_list[0] = score_list[0] + 2
elif who_won == 'DRAW':
score_list[0] = score_list[0] + 1
score_list[1] = score_list[1] + 1
elif who_won == 'BLACK':
score_list[1] = score_list[1] + 2
# Format the text to original layout
output_text = f'Red - Black\n{red} - {black}\n{score_list[0]} - {score_list[1]}'
return output_text
def update_rankings(rankings_text, score_text):
"""
Update the rankings based on the score
:param rankings_text: multiline string to update
:param score_text: multiline string to get score from
:return: new updated rankings_text
"""
rankings_list = rankings_text.split('\n')
red, black, score_list = get_players_and_score(score_text)
if score_list[0] > score_list[1]:
_update_rankings_list(rankings_list, red)
elif score_list[0] < score_list[1]:
_update_rankings_list(rankings_list, black)
output_text = ''
for ranking in rankings_list:
output_text += f'{ranking}\n'
return output_text
def get_players_and_score(score_text):
"""
Get the players and the score from the score_text
:param score_text: multiline string
:return: red player name, black player name, score list
"""
# Red player
red = score_text.split('\n')[1].split(' - ')[0]
# Black player
black = score_text.split('\n')[1].split(' - ')[1]
# Get scores red - black
score_list = score_text.split('\n')[2].split(' - ')
# Convert to int
score_list = list(map(int, score_list))
return red, black, score_list
def _update_rankings_list(rankings_list, winner):
"""
Update the rankings list with new score
:param rankings_list: list of rankings
:param winner: player to update score to
"""
counter = 0
for ranking in rankings_list:
if winner in ranking:
player = ranking.split('\t')[0]
score = int(ranking.split('\t')[1])
ranking = f'{player}\t{score + 1}'
rankings_list[counter] = ranking
counter += 1
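# Minimal usage sketch (hypothetical player names, following the tab- and
# newline-separated formats documented in the functions above):
if __name__ == '__main__':
    rankings = initialise_rankings('Alice\nBob\n')
    score = 'Red - Black\nAlice - Bob\n0 - 0'
    score = update_score(score, 'RED')            # Alice scores 2 points
    rankings = update_rankings(rankings, score)   # Alice gains a ranking point
    print(score)
    print(rankings)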
|
# https://app.codesignal.com/arcade/intro/level-6/6cmcmszJQr6GQzRwW
def evenDigitsOnly(n):
    # Return True if all digits of a number are even. To do this, convert the
    # number into a string so its individual digits can be iterated over.
return all([int(digit) % 2 == 0 for digit in str(n)])
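# Quick illustrative checks (hand-picked values, consistent with the rule above):
if __name__ == "__main__":
    assert evenDigitsOnly(248622) is True    # every digit is even
    assert evenDigitsOnly(642386) is False   # contains the odd digit 3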
|
from codecs import decode
from os import link
from pathlib import Path
from subprocess import Popen, PIPE, STDOUT, call
def __base_cond(path: Path):
"""
Base condition for all link operations.
:param path: the base file/dir path
:return: True if the base file/dir name passed the base condition check
"""
return path.name.lower() not in (
'.git', '.gitignore', '.gitkeep', '.directory', '.gitmodules',
'.github', '.travis.yml'
)
def shell_command(cmd: str, print_output=True):
"""
Run a shell command and prints its output to stdout
:param cmd: the shell command
:param print_output: if True this will print the output, if false this will
yield the output
"""
process = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
lines = []
for line in process.stdout:
res = decode(line)
if print_output:
print(res)
else:
lines.append(res)
return lines
def create_link(src: Path, dest: Path):
"""
Create a link from the src file to the dest directory
:param src: the src file path
:param dest: the dest dir path.
"""
if not dest.parent.is_dir():
dest.parent.mkdir(parents=True, exist_ok=True)
assert src.is_file()
assert dest.parent.is_dir()
assert not dest.is_dir()
if dest.exists():
dest.unlink()
link(src.absolute(), dest.absolute())
dest.resolve()
def __link_all(src: Path, dest: Path):
"""
    Recursively link all files under the root path to the dest path
:param src: the source path
:param dest: the dest path
"""
if __base_cond(src):
if src.is_file():
print('Linking {} to {}'.format(src, dest))
create_link(src, dest)
elif src.is_dir():
for sub in src.iterdir():
__link_all(src.joinpath(sub), dest.joinpath(sub.name))
def link_all(src: Path, dest: Path):
"""
Link all your dot files from source to dest
:param src: the source path
:param dest: the dest path
"""
__link_all(src, dest)
print('Done!')
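# Minimal usage sketch (hypothetical paths; adjust to your own dotfiles layout):
if __name__ == '__main__':
    link_all(Path('dotfiles'), Path.home())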
|
#!/usr/bin/env python3
"""
Advent of Code 2015: Day 4
"""
import os
import hashlib
SECRET_KEY = 'iwrupvqb'
def make_byte_string(n):
return "{}{}".format(SECRET_KEY, n).encode()
def first_half():
"""
first half solver:
find MD5 hashes which, in hexadecimal, start with at least five zeroes.
iwrupvqb followed by a number in decimal.
To mine AdventCoins, you must find Santa the lowest positive number that produces such a hash.
"""
n = 1
while n:
new_bytes = make_byte_string(n)
hash_to_check = hashlib.md5(new_bytes).hexdigest()
if hash_to_check[:5] == '00000':
return n
n += 1
def second_half():
"""
    second half solver:
    find MD5 hashes which, in hexadecimal, start with at least six zeroes,
    resuming the search from the first-half answer.
"""
n = 346386
while n:
new_bytes = make_byte_string(n)
hash_to_check = hashlib.md5(new_bytes).hexdigest()
if hash_to_check[:6] == '000000':
return n
n += 1
def app():
"""
runs day application
"""
half_one = first_half()
half_two = second_half()
print(half_one, half_two)
if __name__ == "__main__":
"""
MAIN APP
"""
app()
|
# Challenge 31, approach 1: write a program that asks for the distance of a trip in km.
# Charge R$0.50 per km for trips of up to 200 km and R$0.45 per km for longer trips.
entrada = float(input('Qual é a distancia da sua viagem? '))
if entrada >= 1 and entrada <= 200 :
preco = entrada * 0.50
print('Você está prestes a começar uma viagem de {:.1f}Km.'.format(entrada))
print('e o preço de sua passagem será de R${:.2f}'.format(preco))
elif entrada > 200:
preco = entrada * 0.45
print('Você está prestes a começar uma viagem de {:.1f}Km'.format(entrada))
print('E o preço de sua passagem será de R${:.2f}'.format(preco))
|
#!/usr/bin/env python3
import argparse
import sys
import contextlib
import importlib
def run(code, files=None, filter_=None, begin=None, end=None, imports=None):
if files is None:
files = [sys.stdin]
if imports is not None:
for imp in imports:
locals()[imp] = importlib.import_module(imp)
if begin is not None:
exec(begin)
if filter_ is not None:
filter_ = compile(filter_, "<string>", "eval")
code = f"({code},)[-1]"
code = compile(code, "<string>", "eval")
for file in files:
for i, line in enumerate(file):
line = line.rstrip("\n")
x = l = line
if filter_ is not None and not eval(filter_):
continue
res = eval(code)
if res is not None:
print(res)
if end is not None:
exec(end)
def main():
parser = argparse.ArgumentParser(prog="pyl")
parser.add_argument("-b", "-H", "--begin", help="code to run at the beginning")
parser.add_argument("-e", "--end", help="code to run at the end")
parser.add_argument("-f", "--filter", help="code to filter lines to run")
parser.add_argument(
"-i", "--import", dest="imports", help="modules to import before running code"
)
parser.add_argument("code", nargs="?", help="code to run per line")
parser.add_argument("files", nargs="*", default=["-"], help="list of files")
args = parser.parse_args()
if args.imports is None:
imports = []
else:
imports = [imp.strip() for imp in args.imports.split(",")]
if args.code is None:
if args.filter is not None:
args.code = "l"
else:
parser.error("At least one of code or filter must be provided")
with contextlib.ExitStack() as stack:
files = [
stack.enter_context(open(file)) if file != "-" else sys.stdin for file in args.files
]
run(args.code, files, args.filter, args.begin, args.end, imports)
if __name__ == "__main__":
main()
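# Example invocations (illustrative; inside the expressions, `l` is the current
# line and `i` its zero-based index, as bound in run() above):
#   $ seq 5 | pyl "int(l) * 2"                   # double every line
#   $ pyl -f "'ERROR' in l" app.log              # print only matching lines
#   $ pyl -i json "json.loads(l)['id']" events.jsonl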
|
#!/usr/bin/python
"""
recall.py: version 0.1.0
History:
2017/06/19: Initial version converted to a class
"""
# import some useful function
import numpy as np
import random
# Define a class that will handle remembering features and
# steering angles to be learned by the model.
class Recall:
def __init__(self, maxmem=1000, width=320, height=160):
# initialize the recall class with empty storage
self.maxmem = maxmem
self.X = []
self.y = []
self.width = width
self.height = height
self.input_size = width*height*3
# store additional information for later retrieval
def remember(self, X, y):
self.X.append(X)
self.y.append(y)
if len(self.X) > self.maxmem:
self.X = self.X[1:]
self.y = self.y[1:]
# forget half (first half - FIFO) of what we collected
def forget(self):
self.X = self.X[len(self.X)//2:]
self.y = self.y[len(self.y)//2:]
# the batch generator used by the fit generator
def batchgen(self, batch_size=1):
while 1:
i = int(random.random()*len(self.X))
image = self.X[i][None, :, :, :]
y = np.array([self.y[i]])
yield image, y
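# Minimal usage sketch (synthetic all-zero frames; assumes images are stored as
# (height, width, 3) arrays, i.e. (160, 320, 3) with the defaults above):
if __name__ == "__main__":
    recall = Recall(maxmem=10)
    for _ in range(5):
        recall.remember(np.zeros((160, 320, 3)), 0.0)
    image, y = next(recall.batchgen(batch_size=1))
    print(image.shape, y.shape)  # (1, 160, 320, 3) (1,)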
|
import re
import os
import glob
import json
import hashlib
import operator
import itertools
import subprocess
from collections import defaultdict
import tabulate
import pydot
import pysolr
import markdown
from bs4 import BeautifulSoup
from flask import Flask, send_file, render_template, abort, url_for, request, send_from_directory
app = Flask(__name__)
base = '/IFC/RELEASE/IFC4x3/HTML'
def make_url(fragment): return base + '/' + fragment
entity_to_package = json.load(open("entity_to_package.json", encoding="utf-8"))
entity_supertype = json.load(open("entity_supertype.json", encoding="utf-8"))
concepts = json.load(open("concepts.json", encoding="utf-8"))
navigation_entries = [
("Cover", "Contents", "Foreword", "Introduction"),
("Scope", "Normative references", "Terms, definitions, and abbreviated terms", "Fundamental concepts and assumptions"),
("Core data schemas", "Shared element data schemas", "Domain specific data schemas", "Resource definition data schemas"),
("Computer interpretable listings", "Alphabetical listings", "Inheritance listings", "Diagrams"),
("Examples", "Change logs", "Bibliography", "Index")
]
content_names = ['scope','normative_references','terms_and_definitions','concepts']
content_names_2 = ['cover','foreword','introduction','bibliography']
def to_dict(x):
if isinstance(x, (list, tuple)):
return type(x)(map(to_dict, x))
else:
return {"title": x}
def make_entries(x):
md_root = "../docs/schemas"
categories = [d for d in os.listdir(md_root) if os.path.isdir(os.path.join(md_root, d))]
if isinstance(x, (list, tuple)):
return type(x)(map(make_entries, x))
elif x['title'] == 'Alphabetical listings':
url = make_url('listing')
elif x['title'] == 'Contents':
url = make_url('toc.html')
elif type(x['number']) == int:
if x['number'] >= 5:
url = make_url('chapter-%d/' % x['number'])
else:
url = make_url('content/' + content_names[x['number'] - 1] + '.htm')
elif x['number'] in {'A', 'C', 'D', 'E'}:
url = make_url('annex-%s.html' % x['number'].lower())
elif x['title'].lower() in content_names_2:
url = make_url('content/' + x['title'].lower() + '.htm')
else:
url = '#'
return dict(**x, url=url)
def make_counter(start=0):
n = start
def counter():
nonlocal n
n += 1
if n > 14:
return None
if n > 8:
return chr(ord('A') + n - 9)
elif n >= 1:
return n
return counter
section_counter = make_counter(-4)
def number_entries(x):
if isinstance(x, (list, tuple)) and set(map(type, x)) == {dict}:
return type(x)(dict(**di, number=section_counter()) for i, di in enumerate(x))
else:
return type(x)(map(number_entries, x))
navigation_entries = make_entries(number_entries(to_dict(navigation_entries)))
def chapter_lookup(number=None, cat=None):
def do_chapter_lookup(x):
if isinstance(x, (list, tuple)):
return next((v for v in map(do_chapter_lookup, x) if v is not None), None)
if number is not None and x['number'] == number:
return x
if cat is not None and x['title'].split(" ")[0].lower() == cat:
return x
return do_chapter_lookup(navigation_entries)
hierarchy = json.load(open("hierarchy.json"))
entity_names = sorted(sum([schema.get('Entities', []) for _, cat in hierarchy for __, schema in cat], []))
type_names = sorted(sum([schema.get('Types', []) for _, cat in hierarchy for __, schema in cat], []))
name_to_number = {}
for i, (cat, schemas) in enumerate(hierarchy, start=5):
for j, (schema_name, members) in enumerate(schemas, start=1):
for k, ke in enumerate(["Types", "Entities"], start=2):
for l, name in enumerate(members.get(ke, ()), start=1):
name_to_number[name] = ".".join(map(str, (i,j,k,l)))
def generate_inheritance_graph(current_entity):
i = current_entity
g = pydot.Graph('dot_inheritance', graph_type='graph')
di = {
'rankdir': 'BT',
'ranksep': 0.2
}
for kv in di.items():
g.set(*kv)
previous = None
while i:
n = pydot.Node(i)
di = {
'color':'black',
'fillcolor':'grey43',
'fontcolor':'white',
'fontsize': '10',
'height':'0.2',
'shape':'rectangle',
'style':'filled',
'width':'3',
}
for kv in di.items():
n.set(*kv)
g.add_node(n)
if previous:
g.add_edge(pydot.Edge(previous, n))
previous = n
i = entity_supertype.get(i)
return g.to_string()
def get_node_colour(n):
try:
i = S.declaration_by_name(n)
except:
return 'gray'
def is_relationship(n):
while n:
if n.name() == 'IfcRelationship':
return True
n = n.supertype()
return 'yellow' if is_relationship(i) else 'dodgerblue'
def transform_graph(current_entity, graph_data, only_urls=False):
graphs = pydot.graph_from_dot_data(graph_data)
graph = graphs[0]
all_nodes = []
if len(graph.get_subgraphs()):
for subgraph in graph.get_subgraphs():
for node in subgraph.get_nodes():
all_nodes.append(node)
elif len(graph.get_nodes()):
for node in graph.get_nodes():
all_nodes.append(node)
for n in all_nodes:
if not only_urls:
n.set('fillcolor', get_node_colour(n.get_name()))
if n.get_name() == current_entity:
n.set('color', 'red')
n.set('shape', 'box')
n.set('style', 'filled')
n.set('URL', url_for('resource', resource=n.get_name(), _external=True))
return graph.to_string()
def process_graphviz(current_entity, md):
def is_figure(s):
if 'dot_figure' in s:
return 1
elif 'dot_inheritance' in s:
return 2
else:
return 0
graphviz_code = filter(is_figure, re.findall('```(.*?)```', md, re.S))
for c in graphviz_code:
hash = hashlib.sha256(c.encode('utf-8')).hexdigest()
fn = os.path.join('svgs', current_entity + "_" + hash+'.dot')
c2 = transform_graph(current_entity, c, only_urls=is_figure(c) == 2)
with open(fn, "w") as f:
f.write(c2)
        # Replace the fenced graphviz block with a reference to the rendered SVG;
        # resource() below parses the "<entity>_<hash>" stem back out of the img src.
        md = md.replace("```%s```" % c, '![](/svgs/%s_%s.svg)' % (current_entity, hash))
subprocess.call(["dot", "-O", "-Tsvg", fn])
return md
"""
@app.route('/svgs/<entity>/<hash>.svg')
def get_svg(entity, hash):
return send_from_directory('svgs', entity + "_" + hash + '.dot.svg');
"""
@app.route(make_url('figures/<fig>'))
def get_figure(fig):
return send_from_directory('../docs/figures', fig)
@app.route(make_url('lexical/<resource>.htm'))
def resource(resource):
try:
idx = name_to_number[resource]
except:
abort(404)
"""
package = entity_to_package.get(resource)
if not package:
abort(404)
"""
md = None
md_root = "../docs/schemas"
# for category in os.listdir(md_root):
# for module in os.listdir(os.path.join(md_root, category)):
# if module == package:
md = os.path.join("../docs/schemas", "*", "*", "*", resource + ".md")
html = ''
if glob.glob(md):
md = glob.glob(md)[0]
with open(md, 'r', encoding='utf-8') as f:
mdc = f.read()
if "Entities" in md:
try:
# @todo we still need to properly implement inheritance based on XMI
mdc += '\n\n' + idx + '.2 Entity inheritance\n===========\n\n```' + generate_inheritance_graph(resource) + '```'
except:
pass
html = markdown.markdown(
process_graphviz(resource, mdc),
extensions=['tables', 'fenced_code'])
soup = BeautifulSoup(html)
# First h1 is handled by the template
try:
soup.find('h1').decompose()
except:
# only entities have H1?
pass
hs = []
# Renumber the headings
for i in list(range(7))[::-1]:
for h in soup.findAll('h%d' % i):
h.name = 'h%d' % (i + 2)
hs.append(h)
# Change svg img references to embedded svg
# because otherwise URLS are not interactive
for img in soup.findAll("img"):
if img['src'].endswith('.svg'):
print(img['src'].split('/')[-1].split('.')[0])
entity, hash = img['src'].split('/')[-1].split('.')[0].split('_')
svg = BeautifulSoup(open(os.path.join('svgs', entity + "_" + hash + '.dot.svg')))
img.replaceWith(svg.find('svg'))
else:
img['src'] = img['src'][9:]
html = str(soup)
if "Entities" in md:
ty = resource
dicts = []
while ty is not None:
dicts.append(concepts.get(ty, {}))
ty = entity_supertype.get(ty)
usage = {}
# in reverse so that the most-specialized are retained
for d in reversed(dicts):
usage.update(d)
if usage:
html += "<h3>" + idx + ".3 Definitions applying to General Usage</h3>"
for n, (concept, data) in enumerate(sorted(usage.items()), start=1):
html += "<h4>" + idx + ".3.%d " % n + concept + "</h4>"
html += data['definition'].replace("../../", "")
keys = set()
for d in dicts:
keys |= d.get(concept, {}).get('parameters', {}).keys()
params = defaultdict(list)
for d in dicts:
for k in keys:
params[k] += d.get(concept, {}).get('parameters', {}).get(k, [])
print(params)
# transpose
vals = list(map(list, itertools.zip_longest(*params.values())))
html += tabulate.tabulate(vals, headers=params.keys(), tablefmt='html')
# html += "<pre>" + data['rules'] + "</pre>"
return render_template('entity.html', navigation=navigation_entries, content=html, number=idx, entity=resource, path=md[3:])
@app.route(make_url('listing'))
def listing():
items = [{'number': name_to_number[n], 'url': url_for('resource', resource=n), 'title': n} for n in sorted(entity_names + type_names)]
return render_template('list.html', navigation=navigation_entries, items=items)
@app.route(make_url('chapter-<n>/'))
def chapter(n):
try: n = int(n)
except: pass
md_root = "../docs/schemas"
chp = chapter_lookup(number=n)
t = chp.get('title')
cat = t.split(" ")[0].lower()
fn = os.path.join(md_root, cat, "README.md")
if os.path.exists(fn):
html = markdown.markdown(open(fn).read())
soup = BeautifulSoup(html)
# First h1 is handled by the template
soup.find('h1').decompose()
html = str(soup)
else:
html = ''
subs = [itms for t, itms in hierarchy if t == chp.get('title')][0]
subs = list(map(operator.itemgetter(0), subs))
return render_template('chapter.html', navigation=navigation_entries, content=html, path=fn[3:], title=t, number=n, subs=subs)
@app.route('/')
@app.route(make_url('content/<s>.htm'))
def content(s='cover'):
fn = "../content"
fn = os.path.join(fn, s + ".md")
if not os.path.exists(fn):
abort(404)
try:
i = content_names.index(s)
number = i + 1
title = navigation_entries[1][i]['title']
except:
try:
i = content_names_2.index(s)
number = ""
title = s[0].upper() + s[1:]
except:
abort(404)
html = markdown.markdown(open(fn).read())
return render_template('chapter.html', navigation=navigation_entries, content=html, path=fn[3:], title=title, number=number, subs=[])
@app.route(make_url('annex-a.html'))
def annex_a():
url = "https://github.com/buildingSMART/IFC4.3.x-output/blob/master/IFC.exp"
html = "<h2>Computer interpretable listings</h2>" + \
"<p>This annex contains a listing of the complete schema combining all definitions of clauses 5, 6, 7, and 8 without comments " + \
"or other explanatory text. These listings are available in computer-interpretable form that may be parsed by computer.</p>" + \
"<p>Official schema publications for this release are at the following URLs:</p>" + \
(tabulate.tabulate([["IFC EXPRESS long form schema", '%s']], headers=["Format", "URL"], tablefmt='html') % \
("<a href='%(url)s'>%(url)s</a>" % locals()))
return render_template('chapter.html', navigation=navigation_entries, content=html, path=None, title="Annex A", number="", subs=[])
@app.route(make_url('toc.html'))
def toc():
subs = [(x['title'], []) for x in navigation_entries[1]] + hierarchy
return render_template('chapter.html', navigation=navigation_entries, content='', path=None, title="Contents", number="", subs=subs, toc=True)
@app.route(make_url('annex-c.html'))
def annex_c():
html = "<h2>Inheritance listings</h2>" + \
"<p>This annex contains listings of entity definitions organized by inheritance.</p>"
def transform(s):
s = s.strip('\n')
padding = s.count(' ')
entity = "".join([c for c in s if c != ' '])
return '<tr><td>' + ' ' * padding * 4 + "<a href='" + url_for('resource', resource=entity) + "'>" + entity + "</a> </td><td>" + name_to_number[entity] + "</td>"
html += "<table style='width:fit-content'>" + "".join(map(transform, open("inheritance_listing.txt"))) + "</table>"
return render_template('chapter.html', navigation=navigation_entries, content=html, path=None, title="Annex C", number="", subs=[])
@app.route(make_url('annex-d.html'))
def annex_d():
subs = map(os.path.basename, glob.glob("../output/IFC.xml/*.png"))
subs = sorted(s[:-4] + ":" + url_for('annex_d_diagram_page', s=s[:-4]) for s in subs)
return render_template('chapter.html', navigation=navigation_entries, content='<h2>Diagrams</h2>', path=None, title="Annex D", number="", subs=subs)
@app.route(make_url('annex_d/<s>.html'))
def annex_d_diagram_page(s):
img = "<h2>" + s + " diagram</h2><img src='"+s+".png'/>"
return render_template('chapter.html', navigation=navigation_entries, content=img, path=None, title="Annex D", number="", subs=[])
@app.route(make_url('annex_d/<s>.png'))
def annex_d_diagram(s):
return send_from_directory("../output/IFC.xml", s + ".png")
@app.route(make_url('annex-e.html'))
def annex_e():
subs = map(os.path.basename, filter(os.path.isdir, glob.glob("../../examples/IFC 4.3/*")))
subs = sorted(s + ":" + url_for('annex_e_example_page', s=s) for s in subs)
return render_template('chapter.html', navigation=navigation_entries, content='<h2>Examples</h2>', path=None, title="Annex E", number="", subs=subs)
@app.route(make_url('annex_e/<s>.html'))
def annex_e_example_page(s):
subs = map(os.path.basename, filter(os.path.isdir, glob.glob("../../examples/IFC 4.3/*")))
if s not in subs:
abort(404)
fn = glob.glob(os.path.join("../../examples/IFC 4.3", s, "*.md"))[0]
html = '<p></p>' + markdown.markdown(open(fn).read(), extensions=['tables', 'fenced_code'])
code = open(glob.glob(os.path.join("../../examples/IFC 4.3", s, "*.ifc"))[0]).read()
html += "<h2>Source</h2>"
html += "<pre>" + code + "</pre>"
path_repo = 'buildingSMART/Sample-Test-Files'
path = fn[15:]
return render_template('chapter.html', navigation=navigation_entries, content=html, path=path, title="Annex E", number="", subs=[], repo=path_repo)
@app.route(make_url('<name>/content.html'))
def schema(name):
md_root = "../docs/schemas"
cat_full, schemas = [(t, itms) for t, itms in hierarchy if name in [i[0].lower() for i in itms]][0]
cat = cat_full.split(" ")[0].lower()
t, subs = [x for x in schemas if x[0].lower() == name][0]
chp = chapter_lookup(cat=cat)
n1 = chp.get('number')
n2 = [s[0] for s in schemas].index(t) + 1
n = "%d.%d" % (n1, n2)
fn = os.path.join(md_root, cat, t, "README.md")
if os.path.exists(fn):
html = markdown.markdown(open(fn).read(), extensions=['sane_lists'])
soup = BeautifulSoup(html)
# First h1 is handled by the template
soup.find('h1').decompose()
html = "<h2>" + n + ".1 Schema Definition</h2>" + str(soup)
else:
html = ''
order = ["Types", "Entities"]
subs = sorted(subs.items(), key=lambda tup: order.index(tup[0]))
return render_template('chapter.html', navigation=navigation_entries, content=html, path=fn[5:], title=t, number=n, subs=subs)
@app.route('/search', methods=['GET', 'POST'])
def search():
matches = []
query = ''
if request.method == 'POST' and request.form['query']:
solr = pysolr.Solr('http://localhost:8983/solr/ifc')
query = request.form['query']
results = solr.search('body:(%s)' % query, **{'hl':'on', 'hl.fl':'body'})
h = results.highlighting
def format(s):
return re.sub(r'[^\w\s<>/]', '', s)
matches = [{
'url': url_for('resource', resource=r['title'][0]),
'match': format(h[r['id']]['body'][0]),
'title': r['title'][0]
} for r in list(results)[0:10]]
return render_template('search.html', navigation=navigation_entries, matches=matches, query=query)
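# To preview the documentation locally (illustrative; the module filename is not
# shown here, so substitute your own):
#   $ FLASK_APP=<this file> flask run
# The /search route additionally assumes a Solr core named `ifc` reachable at
# http://localhost:8983/solr/ifc, as configured above.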
|
import sys
import os
import subprocess
import matplotlib
import unittest
matplotlib.use('Agg')
script_dir = os.path.sep.join(
os.path.abspath(__file__).split(os.path.sep)[:-2] + ['examples']
)
def test_scripts(script_dir=script_dir):
passing = []
for dirname, _, filenames in os.walk(script_dir):
for filename in filenames:
if filename.endswith(".py"):
testme = dirname + os.path.sep + filename
print("\n------ Testing {} --------- \n".format(filename))
try:
exc = subprocess.check_call(['python', testme])
print(" ... {} Passed \n".format(filename))
passing += [True]
except subprocess.CalledProcessError as exc:
passing += [False]
msg = "\n ... {} FAILED \n".format(filename)
traceback = """
----------------- >> begin Traceback << ----------------- \n
{}\n{}\n
\n----------------- >> end Traceback << -----------------\n
""".format(
exc.returncode, exc.output
)
print(u"{}".format(msg + traceback))
assert all(passing)
# tests = TestScripts(directory=script_dir)
# print(Test._script_path)
# TestScripts = Test.get_tests()
# tests.run_tests()
# unittest.main()
|
import numpy as np
from ..local_interpolation import ThirdOrderHermitePolynomialInterpolation
from .runge_kutta import AbstractESDIRK, ButcherTableau
# This γ notation is from the original paper. All the coefficients are described in
# terms of it.
γ = 0.43586652150
a21 = γ
a31 = (-4 * γ**2 + 6 * γ - 1) / (4 * γ)
a32 = (-2 * γ + 1) / (4 * γ)
a41 = (6 * γ - 1) / (12 * γ)
a42 = -1 / ((24 * γ - 12) * γ)
a43 = (-6 * γ**2 + 6 * γ - 1) / (6 * γ - 3)
# See /devdocs/predictor_dirk.md
θ = 1 / (2 * γ)
α21 = 1.0
α31 = 1.0 - θ
α32 = θ
α41 = a31
α42 = a32
α43 = γ
_kvaerno3_tableau = ButcherTableau(
a_lower=(
np.array([a21]),
np.array([a31, a32]),
np.array([a41, a42, a43]),
),
a_predictor=(np.array([α21]), np.array([α31, α32]), np.array([α41, α42, α43])),
a_diagonal=np.array([0, γ, γ, γ]),
b_sol=np.array([a41, a42, a43, γ]),
b_error=np.array([a41 - a31, a42 - a32, a43 - γ, γ]),
c=np.array([2 * γ, 1.0, 1.0]),
)
class Kvaerno3(AbstractESDIRK):
r"""Kvaerno's 3/2 method.
A-L stable stiffly accurate 3rd order ESDIRK method. Has an embedded 2nd order
method for adaptive step sizing. Uses 4 stages.
??? cite "Reference"
```bibtex
@article{kvaerno2004singly,
title={Singly diagonally implicit Runge--Kutta methods with an explicit first
stage},
author={Kv{\ae}rn{\o}, Anne},
journal={BIT Numerical Mathematics},
volume={44},
number={3},
pages={489--502},
year={2004},
publisher={Springer}
}
```
"""
tableau = _kvaerno3_tableau
interpolation_cls = ThirdOrderHermitePolynomialInterpolation.from_k
def order(self, terms):
return 3
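# --- Hedged usage sketch (not part of this module) ---------------------------
# Assuming the surrounding package exposes `diffeqsolve`, `ODETerm`,
# `PIDController` and this `Kvaerno3` class at the top level (an assumption,
# not something this file guarantees), a stiff ODE dy/dt = -y could be solved
# roughly like this:
#
#     import diffrax
#     import jax.numpy as jnp
#
#     term = diffrax.ODETerm(lambda t, y, args: -y)
#     sol = diffrax.diffeqsolve(
#         term, diffrax.Kvaerno3(), t0=0.0, t1=1.0, dt0=0.1, y0=jnp.array(1.0),
#         stepsize_controller=diffrax.PIDController(rtol=1e-5, atol=1e-5),
#     )
#
# The adaptive controller relies on the embedded 2nd order error estimate
# encoded in `b_error` above.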
|
import numpy as np
def si_sdr(reference, estimation):
"""
Scale-Invariant Signal-to-Distortion Ratio (SI-SDR)
Args:
reference: numpy.ndarray, [..., T]
estimation: numpy.ndarray, [..., T]
Returns:
SI-SDR
    [1] SDR – Half-Baked or Well Done?
http://www.merl.com/publications/docs/TR2019-013.pdf
>>> np.random.seed(0)
>>> reference = np.random.randn(100)
>>> si_sdr(reference, reference)
inf
>>> si_sdr(reference, reference * 2)
inf
>>> si_sdr(reference, np.flip(reference))
-25.127672346460717
>>> si_sdr(reference, reference + np.flip(reference))
0.481070445785553
>>> si_sdr(reference, reference + 0.5)
6.3704606032577304
>>> si_sdr(reference, reference * 2 + 1)
6.3704606032577304
>>> si_sdr([1., 0], [0., 0]) # never predict only zeros
nan
>>> si_sdr([reference, reference], [reference * 2 + 1, reference * 1 + 0.5])
array([6.3704606, 6.3704606])
"""
estimation, reference = np.broadcast_arrays(estimation, reference)
assert reference.dtype == np.float64, reference.dtype
assert estimation.dtype == np.float64, estimation.dtype
reference_energy = np.sum(reference ** 2, axis=-1, keepdims=True)
# This is $\alpha$ after Equation (3) in [1].
optimal_scaling = np.sum(reference * estimation, axis=-1, keepdims=True) \
/ reference_energy
# This is $e_{\text{target}}$ in Equation (4) in [1].
projection = optimal_scaling * reference
# This is $e_{\text{res}}$ in Equation (4) in [1].
noise = estimation - projection
ratio = np.sum(projection ** 2, axis=-1) / np.sum(noise ** 2, axis=-1)
return 10 * np.log10(ratio)
|
import argparse
import logging
import sys
from block_server_subscriber.subscriber import Subscriber
from block_server_subscriber.databaseImp import DatabaseImp
from block_server_subscriber.event_handling import EventHandler
LOGGER = logging.getLogger(__name__)
def parse_args(args):
    parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-v', '--verbose',
action='count',
default=0,
help='Increase output sent to stderr')
parser.add_argument(
'-C', '--connect',
help='The url of the validator to subscribe to',
default='tcp://localhost:4004')
parser.add_argument(
'--url',
type=str,
help='specify URL of REST API',
default='http://127.0.0.1:8008')
parser.add_argument(
'--uri',
type=str,
help='database URI',
default='mongodb://127.0.0.1:27017/')
return parser.parse_args(args)
def init_logger(level):
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler())
if level == 1:
logger.setLevel(logging.INFO)
elif level > 1:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARN)
def do_subscribe(opts):
LOGGER.info('Starting block server...')
subscriber = Subscriber(opts.connect)
eventHandler = EventHandler(opts.url)
subscriber.add_handler(eventHandler.get_events_handler())
subscriber.listen_to_event()
def main():
opts = parse_args(sys.argv[1:])
init_logger(opts.verbose)
try:
LOGGER.warning("## initialize db ##")
DatabaseImp.initialize(opts.uri)
do_subscribe(opts)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
    main()
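# Example invocation (a sketch; the module path is an assumption, and the
# option values below simply mirror the defaults defined in parse_args()):
#
#     python -m block_server_subscriber.main -vv \
#         --connect tcp://localhost:4004 \
#         --url http://127.0.0.1:8008 \
#         --uri mongodb://127.0.0.1:27017/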
|
from bbdata.base import *
from bbdata.uid import *
CONNECTIONSTATE_TYPE = [ CONTEXT_UID_SENSORDATAFACTORY, 64, 1, 0 ]
class ConnectionState(BBCompound):
def __init__(self, name='connectionstate'):
        super(ConnectionState, self).__init__(name)
self.name = ShortString('name')
self.state = Int('state')
self.message = LongString('message')
self.error = ErrorInfo('error')
self.retry = Time('retry')
    @classmethod
    def type(cls):
        return CONNECTIONSTATE_TYPE
ConnectionState.add_to_factory()
|
import mimetypes
import os
import time
import warnings
from typing import (
overload,
TYPE_CHECKING,
Optional,
Union,
Iterator,
Generator,
Iterable,
Dict,
)
from urllib.parse import urlparse
if TYPE_CHECKING:
import numpy as np
from docarray import DocumentArray, Document
class Client:
def __init__(self, server: str):
"""Create a Clip client object that connects to the Clip server.
Server scheme is in the format of `scheme://netloc:port`, where
- scheme: one of grpc, websocket, http, grpcs, websockets, https
- netloc: the server ip address or hostname
- port: the public port of the server
:param server: the server URI
"""
        try:
            r = urlparse(server)
            _port = r.port
            _scheme = r.scheme
            if not _scheme:
                raise ValueError
        except ValueError:
            raise ValueError(f'{server} is not a valid scheme')
_tls = False
if _scheme in ('grpcs', 'https', 'wss'):
_scheme = _scheme[:-1]
_tls = True
if _scheme == 'ws':
_scheme = 'websocket' # temp fix for the core
if _scheme in ('grpc', 'http', 'websocket'):
_kwargs = dict(host=r.hostname, port=_port, protocol=_scheme, tls=_tls)
from jina import Client
self._client = Client(**_kwargs)
self._async_client = Client(**_kwargs, asyncio=True)
else:
raise ValueError(f'{server} is not a valid scheme')
@overload
def encode(
self,
content: Iterable[str],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'np.ndarray':
"""Encode images and texts into embeddings where the input is an iterable of raw strings.
Each image and text must be represented as a string. The following strings are acceptable:
- local image filepath, will be considered as an image
- remote image http/https, will be considered as an image
- a dataURI, will be considered as an image
- plain text, will be considered as a sentence
:param content: an iterator of image URIs or sentences, each element is an image or a text sentence as a string.
:param batch_size: the number of elements in each request when sending ``content``
:param show_progress: if set, show a progress bar
        :return: the embedding in a numpy ndarray with shape ``[N, D]``. ``N`` equals the length of ``content``
"""
...
@overload
def encode(
self,
content: Union['DocumentArray', Iterable['Document']],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'DocumentArray':
"""Encode images and texts into embeddings where the input is an iterable of :class:`docarray.Document`.
:param content: an iterable of :class:`docarray.Document`, each Document must be filled with `.uri`, `.text` or `.blob`.
:param batch_size: the number of elements in each request when sending ``content``
:param show_progress: if set, show a progress bar
        :return: the embedding in a numpy ndarray with shape ``[N, D]``. ``N`` equals the length of ``content``
"""
...
def encode(self, content, **kwargs):
if isinstance(content, str):
raise TypeError(
f'content must be an Iterable of [str, Document], try `.encode(["{content}"])` instead'
)
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(content) if hasattr(content, '__len__') else None,
)
with self._pbar:
self._client.post(
**self._get_post_payload(content, kwargs), on_done=self._gather_result
)
return self._unboxed_result
def _gather_result(self, r):
from rich import filesize
if not self._results:
self._pbar.start_task(self._r_task)
r = r.data.docs
self._results.extend(r)
self._pbar.update(
self._r_task,
advance=len(r),
total_size=str(
filesize.decimal(int(os.environ.get('JINA_GRPC_RECV_BYTES', '0')))
),
)
@property
def _unboxed_result(self):
if self._results.embeddings is None:
raise ValueError(
                'empty embedding returned from the server. '
                'This is often due to a misconfiguration of the server; '
                'restarting the server or changing the serving port number often solves the problem'
)
return self._results.embeddings if self._return_plain else self._results
def _iter_doc(self, content) -> Generator['Document', None, None]:
from rich import filesize
from docarray import Document
self._return_plain = True
if hasattr(self, '_pbar'):
self._pbar.start_task(self._s_task)
for c in content:
if isinstance(c, str):
self._return_plain = True
_mime = mimetypes.guess_type(c)[0]
if _mime and _mime.startswith('image'):
yield Document(uri=c).load_uri_to_blob()
else:
yield Document(text=c)
elif isinstance(c, Document):
if c.content_type in ('text', 'blob'):
self._return_plain = False
yield c
elif not c.blob and c.uri:
c.load_uri_to_blob()
self._return_plain = False
yield c
elif c.tensor is not None:
yield c
else:
raise TypeError(f'unsupported input type {c!r} {c.content_type}')
else:
raise TypeError(f'unsupported input type {c!r}')
if hasattr(self, '_pbar'):
self._pbar.update(
self._s_task,
advance=1,
total_size=str(
filesize.decimal(
int(os.environ.get('JINA_GRPC_SEND_BYTES', '0'))
)
),
)
def _get_post_payload(self, content, kwargs):
return dict(
on='/',
inputs=self._iter_doc(content),
request_size=kwargs.get('batch_size', 8),
total_docs=len(content) if hasattr(content, '__len__') else None,
)
def profile(self, content: Optional[str] = '') -> Dict[str, float]:
"""Profiling a single query's roundtrip including network and computation latency. Results is summarized in a table.
:param content: the content to be sent for profiling. By default it sends an empty Document
that helps you understand the network latency.
:return: the latency report in a dict.
"""
st = time.perf_counter()
r = self._client.post('/', self._iter_doc([content]), return_responses=True)
ed = (time.perf_counter() - st) * 1000
route = r[0].routes
gateway_time = (
route[0].end_time.ToMilliseconds() - route[0].start_time.ToMilliseconds()
)
clip_time = (
route[1].end_time.ToMilliseconds() - route[1].start_time.ToMilliseconds()
)
network_time = ed - gateway_time
server_network = gateway_time - clip_time
from rich.table import Table
def make_table(_title, _time, _percent):
table = Table(show_header=False, box=None)
table.add_row(
_title, f'[b]{_time:.0f}[/b]ms', f'[dim]{_percent * 100:.0f}%[/dim]'
)
return table
from rich.tree import Tree
t = Tree(make_table('Roundtrip', ed, 1))
t.add(make_table('Client-server network', network_time, network_time / ed))
t2 = t.add(make_table('Server', gateway_time, gateway_time / ed))
t2.add(
make_table(
'Gateway-CLIP network', server_network, server_network / gateway_time
)
)
t2.add(make_table('CLIP model', clip_time, clip_time / gateway_time))
from rich import print
print(t)
return {
'Roundtrip': ed,
'Client-server network': network_time,
'Server': gateway_time,
'Gateway-CLIP network': server_network,
'CLIP model': clip_time,
}
@overload
async def aencode(
self,
content: Iterator[str],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'np.ndarray':
...
@overload
async def aencode(
self,
content: Union['DocumentArray', Iterable['Document']],
*,
batch_size: Optional[int] = None,
show_progress: bool = False,
) -> 'DocumentArray':
...
async def aencode(self, content, **kwargs):
from rich import filesize
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(content) if hasattr(content, '__len__') else None,
)
async for da in self._async_client.post(
**self._get_post_payload(content, kwargs)
):
if not self._results:
self._pbar.start_task(self._r_task)
self._results.extend(da)
self._pbar.update(
self._r_task,
advance=len(da),
total_size=str(
filesize.decimal(int(os.environ.get('JINA_GRPC_RECV_BYTES', '0')))
),
)
return self._unboxed_result
def _prepare_streaming(self, disable, total):
if total is None:
total = 500
warnings.warn(
                'the length of the input is unknown, the progress bar may not be accurate.'
)
from docarray.array.mixins.io.pbar import get_pbar
self._pbar = get_pbar(disable)
os.environ['JINA_GRPC_SEND_BYTES'] = '0'
os.environ['JINA_GRPC_RECV_BYTES'] = '0'
self._s_task = self._pbar.add_task(
':arrow_up: Send', total=total, total_size=0, start=False
)
self._r_task = self._pbar.add_task(
':arrow_down: Recv', total=total, total_size=0, start=False
)
from docarray import DocumentArray
self._results = DocumentArray()
@staticmethod
def _prepare_single_doc(d: 'Document'):
if d.content_type in ('text', 'blob'):
return d
elif not d.blob and d.uri:
d.load_uri_to_blob()
return d
elif d.tensor is not None:
return d
else:
raise TypeError(f'unsupported input type {d!r} {d.content_type}')
@staticmethod
def _prepare_rank_doc(d: 'Document', _source: str = 'matches'):
_get = lambda d: getattr(d, _source)
if not _get(d):
raise ValueError(f'`.rank()` requires every doc to have `.{_source}`')
d = Client._prepare_single_doc(d)
setattr(d, _source, [Client._prepare_single_doc(c) for c in _get(d)])
return d
def _iter_rank_docs(
self, content, _source='matches'
) -> Generator['Document', None, None]:
from rich import filesize
from docarray import Document
self._return_plain = True
if hasattr(self, '_pbar'):
self._pbar.start_task(self._s_task)
for c in content:
if isinstance(c, Document):
yield self._prepare_rank_doc(c, _source)
else:
raise TypeError(f'unsupported input type {c!r}')
if hasattr(self, '_pbar'):
self._pbar.update(
self._s_task,
advance=1,
total_size=str(
filesize.decimal(
int(os.environ.get('JINA_GRPC_SEND_BYTES', '0'))
)
),
)
def _get_rank_payload(self, content, kwargs):
return dict(
on='/rank',
inputs=self._iter_rank_docs(
content, _source=kwargs.get('source', 'matches')
),
request_size=kwargs.get('batch_size', 8),
total_docs=len(content) if hasattr(content, '__len__') else None,
)
def rank(self, docs: Iterable['Document'], **kwargs) -> 'DocumentArray':
"""Rank image-text matches according to the server CLIP model.
        Given a Document with nested matches, where the root is image/text and the matches are in another modality, i.e.
        text/image, this method ranks the matches according to the CLIP model.
        Each match then carries a new score inside ``clip_score`` and matches are sorted in descending order by this score.
More details can be found in: https://github.com/openai/CLIP#usage
:param docs: the input Documents
:return: the ranked Documents in a DocumentArray.
"""
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(docs),
)
with self._pbar:
self._client.post(
**self._get_rank_payload(docs, kwargs), on_done=self._gather_result
)
return self._results
async def arank(self, docs: Iterable['Document'], **kwargs) -> 'DocumentArray':
from rich import filesize
self._prepare_streaming(
not kwargs.get('show_progress'),
total=len(docs),
)
async for da in self._async_client.post(**self._get_rank_payload(docs, kwargs)):
if not self._results:
self._pbar.start_task(self._r_task)
self._results.extend(da)
self._pbar.update(
self._r_task,
advance=len(da),
total_size=str(
filesize.decimal(int(os.environ.get('JINA_GRPC_RECV_BYTES', '0')))
),
)
return self._results
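# --- Hedged usage sketch (illustrative only, not part of the module) ---------
# Assuming a CLIP server is already listening at the placeholder address below:
#
#     client = Client('grpc://0.0.0.0:51000')
#     embeddings = client.encode(['first sentence', 'second sentence', 'photo.png'])
#     print(embeddings.shape)  # -> (3, D)
#
# Strings that look like image files/URIs/dataURIs are loaded as images and
# everything else is treated as text, mirroring the logic of `_iter_doc` above.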
|
"""Test the popular tags API."""
from typing import List
from rest_framework.test import APITestCase
from blog.factories import PostFactory
from tests.decorators import authenticated
_TAG_FIELDS = {"id", "name", "post_count"}
@authenticated
class PopularTagTest(APITestCase):
"""Test the popular tags endpoint."""
def setUp(self):
posts = [
PostFactory.create(tags=["python", "docker"]),
PostFactory.create(tags=["python"]),
PostFactory.create(tags=["aws"]),
PostFactory.create(),
]
for post in posts:
post.publish()
def perform(self, **params) -> List[dict]:
response = self.client.get("/api/popular-tags/", params)
self.assertEqual(response.status_code, 200)
return response.data
def test_list(self):
tags = self.perform()
self.assertEqual(len(tags), 3)
def test_returns_expected_fields(self):
tags = self.perform()
expected = _TAG_FIELDS
self.assertSetEqual(expected, set(tags[0]))
def test_is_ordered_by_decreasing_post_count(self):
tags = self.perform()
sorted_by_post_count_desc = sorted(
tags, key=lambda tag: tag["post_count"], reverse=True
)
actual = [tag["name"] for tag in tags]
expected = [tag["name"] for tag in sorted_by_post_count_desc]
self.assertListEqual(actual, expected)
def test_limit_query_parameter_limits_amount_of_returned_values(self):
tags = self.perform(limit=1)
self.assertEqual(len(tags), 1)
with_most_posts = "python"
self.assertEqual(tags[0]["name"], with_most_posts)
def test_does_not_include_drafts(self):
PostFactory.create(tags=["angular"]) # not published
tags = self.perform()
self.assertNotIn("angular", map(lambda tag: tag["name"], tags))
def test_equal_count_tags_sorted_in_alphabetical_order(self):
tags = self.perform()
self.assertEqual("aws", tags[1]["name"])
self.assertEqual("docker", tags[2]["name"])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import copy
import os
import time
from contextlib import contextmanager
import numpy as np
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.compiler.tensorrt.trt_convert import \
DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model.signature_constants import \
DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _print_dict(input_dict, prefix=' ', postfix=''):
for k, v in sorted(input_dict.items()):
print('{prefix}{arg_name}: {value}{postfix}'.format(
prefix=prefix,
arg_name=k,
value='%.1f' % v if isinstance(v, float) else v,
postfix=postfix
))
@contextmanager
def timed_section(msg):
print('\n[START] {}'.format(msg))
start_time = time.time()
yield
print("[END] Duration: {:.1f}s".format(time.time() - start_time))
print("=" * 80, "\n")
class BaseCommandLineAPI(object):
ALLOWED_TFTRT_PRECISION_MODES = ['FP32', 'FP16', 'INT8']
SAMPLES_IN_VALIDATION_SET = None
def __init__(self):
self._parser = argparse.ArgumentParser(description='tftrt_benchmark')
# ======================= SavedModel Directories ===================== #
self._parser.add_argument('--input_saved_model_dir', type=str,
default=None,
help='Directory containing the input saved '
'model.')
self._parser.add_argument('--output_saved_model_dir', type=str,
default=None,
help='Directory in which the converted model '
'will be saved')
# ======================== Dataset Directories ======================= #
self._parser.add_argument('--calib_data_dir', type=str,
help='Directory containing the dataset used '
'for INT8 calibration.')
self._parser.add_argument('--data_dir', type=str, default=None,
help='Directory containing the dataset used '
'for model validation.')
# ======================= Generic Runtime Flags ====================== #
self._parser.add_argument('--batch_size', type=int, default=8,
help='Number of images per batch.')
        self._parser.add_argument('--display_every', type=int, default=100,
                                  help='Number of iterations executed between '
                                       'two consecutive displays of metrics')
self._parser.add_argument('--gpu_mem_cap', type=int, default=0,
help='Upper bound for GPU memory in MB. '
'Default is 0 which means allow_growth '
'will be used.')
default_sign_key = DEFAULT_SERVING_SIGNATURE_DEF_KEY
self._parser.add_argument('--input_signature_key', type=str,
default=default_sign_key,
help='SavedModel signature to use for '
'inference, defaults to: %s' % (
default_sign_key
))
self._parser.add_argument('--num_iterations', type=int, default=None,
help='How many iterations(batches) to '
'evaluate. If not supplied, the whole '
'set will be evaluated.')
self._parser.add_argument('--num_warmup_iterations', type=int,
default=50,
help='Number of initial iterations skipped '
'from timing')
self._add_bool_argument(
name="skip_accuracy_testing",
default=False,
required=False,
help='If set to True, accuracy calculation will be skipped.'
)
self._add_bool_argument(
name="use_synthetic_data",
default=False,
required=False,
            help='If set to True, one unique batch of random data is '
                 'generated and used at every iteration.'
)
# =========================== TF-TRT Flags ========================== #
self._add_bool_argument(
name="use_tftrt",
default=False,
required=False,
help='If set to True, the inference graph will be converted using '
'TF-TRT graph converter.'
)
self._add_bool_argument(
name="allow_build_at_runtime",
default=False,
required=False,
help="Whether to build TensorRT engines during runtime."
)
self._parser.add_argument('--max_workspace_size', type=int,
default=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
help='The maximum GPU temporary memory which '
'the TRT engine can use at execution '
'time.')
self._parser.add_argument('--minimum_segment_size', type=int, default=5,
help='Minimum number of TensorFlow ops in a '
'TRT engine.')
self._parser.add_argument('--num_calib_inputs', type=int, default=500,
help='Number of inputs (e.g. images) used '
'for calibration (last batch is skipped '
'in case it is not full)')
self._add_bool_argument(
name="optimize_offline",
default=False,
required=False,
help='If set to True, TensorRT engines are built before runtime.'
)
self._parser.add_argument('--precision', type=str,
choices=self.ALLOWED_TFTRT_PRECISION_MODES,
default='FP32',
help='Precision mode to use. FP16 and INT8 '
'modes only works if --use_tftrt is '
'used.')
self._add_bool_argument(
name="use_dynamic_shape",
default=False,
required=False,
help='Whether to use implicit batch mode or dynamic shape mode.'
)
def _add_bool_argument(self, name=None, default=False, required=False, help=None):
if not isinstance(default, bool):
raise ValueError()
feature_parser = self._parser.add_mutually_exclusive_group(\
required=required
)
feature_parser.add_argument('--' + name, dest=name,
action='store_true',
help=help,
default=default)
feature_parser.add_argument('--no' + name, dest=name,
action='store_false')
        feature_parser.set_defaults(**{name: default})
def _validate_args(self, args):
if args.data_dir is None:
raise ValueError("--data_dir is required")
elif not os.path.isdir(args.data_dir):
raise RuntimeError("The path --data_dir=`{}` doesn't exist or is "
"not a directory".format(args.data_dir))
if (
args.num_iterations is not None and
args.num_iterations <= args.num_warmup_iterations
):
raise ValueError(
'--num_iterations must be larger than --num_warmup_iterations '
'({} <= {})'.format(args.num_iterations,
args.num_warmup_iterations))
if not args.use_tftrt:
if args.use_dynamic_shape:
raise ValueError('TensorRT must be enabled for Dynamic Shape '
'support to be enabled (--use_tftrt).')
if args.precision != 'FP32':
                raise ValueError('TensorRT must be enabled for FP16 '
                                 'or INT8 modes (--use_tftrt).')
else:
if args.precision not in self.ALLOWED_TFTRT_PRECISION_MODES:
raise ValueError("The received --precision={} is not supported."
" Allowed: {}".format(
args.precision,
self.ALLOWED_TFTRT_PRECISION_MODES
))
if args.precision == 'INT8':
if not args.calib_data_dir:
raise ValueError('--calib_data_dir is required for INT8 '
'precision mode')
elif not os.path.isdir(args.calib_data_dir):
raise RuntimeError("The path --calib_data_dir=`{}` doesn't "
"exist or is not a directory".format(
args.calib_data_dir))
if args.use_dynamic_shape:
raise ValueError('TF-TRT does not support dynamic shape '
'mode with INT8 calibration.')
if args.num_calib_inputs <= args.batch_size:
raise ValueError(
                    '--num_calib_inputs must be larger than '
'--batch_size ({} <= {})'.format(
args.num_calib_inputs, args.batch_size))
def _post_process_args(self, args):
if args.num_iterations is None:
args.num_iterations = (
self.SAMPLES_IN_VALIDATION_SET // args.batch_size
)
return args
def parse_args(self):
args = self._parser.parse_args()
args = self._post_process_args(args)
self._validate_args(args)
print('\nBenchmark arguments:')
_print_dict(vars(args))
return args
def config_gpu_memory(gpu_mem_cap):
gpus = tf.config.experimental.list_physical_devices('GPU')
if not gpus:
raise RuntimeError("No GPUs has been found.")
print('Found the following GPUs:')
for gpu in gpus:
print(' ', gpu)
for gpu in gpus:
try:
if not gpu_mem_cap:
tf.config.experimental.set_memory_growth(gpu, True)
else:
tf.config.experimental.set_virtual_device_configuration(
gpu,
[tf.config.experimental.VirtualDeviceConfiguration(
memory_limit=gpu_mem_cap)])
except RuntimeError as e:
print('Can not set GPU memory config', e)
def get_graph_func(
input_saved_model_dir,
output_saved_model_dir,
allow_build_at_runtime=False,
calibration_input_fn=None,
input_signature_key=DEFAULT_SERVING_SIGNATURE_DEF_KEY,
max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
minimum_segment_size=5,
num_calib_inputs=None,
optimize_offline=False,
optimize_offline_input_fn=None,
precision_mode=None,
use_dynamic_shape=False,
use_tftrt=False):
"""Retreives a frozen SavedModel and applies TF-TRT
use_tftrt: bool, if true use TensorRT
precision: str, floating point precision (FP32, FP16, or INT8)
returns: TF function that is ready to run for inference
"""
if not use_tftrt:
with timed_section('Loading TensorFlow native model...'):
saved_model_loaded = tf.saved_model.load(
input_saved_model_dir, tags=[tag_constants.SERVING]
)
graph_func = saved_model_loaded.signatures[input_signature_key]
graph_func = convert_to_constants.convert_variables_to_constants_v2(
graph_func
)
else:
def get_trt_conversion_params(
allow_build_at_runtime,
max_workspace_size_bytes,
precision_mode,
minimum_segment_size):
params = copy.deepcopy(trt.DEFAULT_TRT_CONVERSION_PARAMS)
def get_trt_precision():
if precision_mode == "FP32":
return trt.TrtPrecisionMode.FP32
elif precision_mode == "FP16":
return trt.TrtPrecisionMode.FP16
elif precision_mode == "INT8":
return trt.TrtPrecisionMode.INT8
else:
raise RuntimeError("Unknown precision received: `{}`. Expected: "
"FP32, FP16 or INT8".format(precision))
params = params._replace(
allow_build_at_runtime=allow_build_at_runtime,
max_workspace_size_bytes=max_workspace_size_bytes,
minimum_segment_size=minimum_segment_size,
precision_mode=get_trt_precision(),
use_calibration=precision_mode == "INT8"
)
print('\nTensorRT Conversion Params:')
_print_dict(dict(params._asdict()))
return params
conversion_params = get_trt_conversion_params(
allow_build_at_runtime=allow_build_at_runtime,
max_workspace_size_bytes=max_workspace_size_bytes,
precision_mode=precision_mode,
minimum_segment_size=minimum_segment_size
)
converter = trt.TrtGraphConverterV2(
input_saved_model_dir=input_saved_model_dir,
conversion_params=conversion_params,
input_saved_model_signature_key=input_signature_key,
use_dynamic_shape=use_dynamic_shape
)
def _check_input_fn(func, name):
if func is None:
raise ValueError("The function `{}` is None.".format(name))
if not callable(func):
raise ValueError("The argument `{}` is not a function.".format(
name))
if conversion_params.precision_mode == 'INT8':
_check_input_fn(calibration_input_fn, "calibration_input_fn")
with timed_section('TF-TRT graph conversion and INT8 '
'calibration ...'):
graph_func = converter.convert(
calibration_input_fn=tf.autograph.experimental.do_not_convert(
calibration_input_fn
)
)
else:
with timed_section('TF-TRT graph conversion ...'):
graph_func = converter.convert()
if optimize_offline or use_dynamic_shape:
_check_input_fn(
optimize_offline_input_fn,
"optimize_offline_input_fn"
)
with timed_section('Building TensorRT engines...'):
converter.build(input_fn=tf.autograph.experimental.do_not_convert(
optimize_offline_input_fn
))
if output_saved_model_dir is not None:
with timed_section('Saving converted graph with TF-TRT ...'):
converter.save(output_saved_model_dir)
print("Converted graph saved to `{}`".format(
output_saved_model_dir))
return graph_func
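# --- Sketch: how a concrete benchmark script might wire these pieces together.
# The subclass name and the dataset size are assumptions for illustration only.
#
#     class CommandLineAPI(BaseCommandLineAPI):
#         SAMPLES_IN_VALIDATION_SET = 50000
#
#     if __name__ == '__main__':
#         cmdline_api = CommandLineAPI()
#         args = cmdline_api.parse_args()
#         config_gpu_memory(args.gpu_mem_cap)
#         graph_func = get_graph_func(
#             input_saved_model_dir=args.input_saved_model_dir,
#             output_saved_model_dir=args.output_saved_model_dir,
#             allow_build_at_runtime=args.allow_build_at_runtime,
#             max_workspace_size_bytes=args.max_workspace_size,
#             minimum_segment_size=args.minimum_segment_size,
#             precision_mode=args.precision,
#             use_dynamic_shape=args.use_dynamic_shape,
#             use_tftrt=args.use_tftrt,
#         )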
|
import os
from os import path, system
import convert
def main():
folder = input(
'Enter the directory path containing SUMMARY.md or other *.md files:\n')
try:
c = convert.MdToPdf(folder)
p = path.abspath(c.output)
print(60 * '-')
print('Successfully converted!')
print(p)
except Exception as e:
print(60 * '-')
print(e)
system("PAUSE")
if __name__ == '__main__':
main()
|
"""import libraries"""
import os
from ibm_watson import LanguageTranslatorV3 #from IBM
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator#authenticator
from dotenv import load_dotenv
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator)
"""Watson language
translator URL"""
language_translator.set_service_url(
'https://api.us-south.language-translator.watson.cloud.ibm.com/instances/e279067d-1540-4b78-aa82-94651eb0d368')
def english_to_french(english_text):
"""function to translate eng to french"""
translation = language_translator.translate(text=english_text, model_id='en-fr').get_result()
french_text = translation['translations'][0]['translation']
return french_text
def french_to_english(french_text):
"""function to translate french to eng"""
translation = language_translator.translate(text=french_text, model_id='fr-en').get_result()
english_text = translation['translations'][0]['translation']
return english_text
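if __name__ == '__main__':
    # Minimal smoke test (a sketch): it only succeeds with valid `apikey` and
    # `url` values in the environment and performs real API calls.
    print(english_to_french('Hello, how are you?'))
    print(french_to_english('Bonjour, comment allez-vous ?'))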
|
# -*- coding: utf-8; -*-
#
# @file customglyph.py
# @brief Compatibility with others glyphs enums.
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2017-10-16
# @copyright Copyright (c) 2015 INRA
# @license MIT (see LICENSE file)
# @details
class CustomGlyph(object):
def __init__(self, prefix, name, opts=None):
self._prefix = prefix
self._name = name
self._opts = opts
@property
def value(self):
        if type(self._name) is tuple or type(self._name) is list:
            return " ".join("%s-%s" % (self._prefix, n) for n in self._name)
else:
return "%s-%s" % (self._prefix, self._name)
@property
def opts(self):
if self._opts is None:
return
        if type(self._opts) is tuple or type(self._opts) is list:
            return " ".join("%s-%s" % (self._prefix, o) for o in self._opts)
        else:
            return "%s-%s" % (self._prefix, self._opts)
def __str__(self):
return self.value
def __repr__(self):
return self.value
class FaGlyph(CustomGlyph):
"""
Font awesome glyphicons.
Helper to add glyphicons from font awesome into the menu.
    @todo Stacked icons don't look good in the menu because of an additional useless left offset.
    @todo Stacked icon options need more complex compositions. How to do that?
"""
FA_FW = "fa-fw"
FA_LG = "fa-lg"
FA_2X = "fa-2x"
FA_3X = "fa-3x"
FA_4X = "fa-4x"
FA_5X = "fa-5x"
def __init__(self, name, stack=False, rotate=False, pulse=False, opts=None):
super().__init__('fa', name, opts=opts)
self._stack = stack
self._rotate = rotate
self._pulse = pulse
@property
def html(self):
if self._stack:
if self._opts:
classes = 'fa-stack ' + self.opts
else:
classes = 'fa-stack'
result = '<span class="%s">' % classes
if type(self._name) is tuple or type(self._name) is list:
for i in self._name:
result += '<i class="fa fa-%s fa-stack-1x"></i>' % i
else:
result += '<i class="fa fa-%s fa-stack-1x"></i>' % self._name
result += '</span>'
return result
else:
lopts = ""
if self._rotate:
lopts = 'fa-spin'
elif self._pulse:
lopts = 'fa-pulse'
            if self._opts:
                lopts = (lopts + ' ' + self.opts) if lopts else self.opts
if lopts:
return '<i class="fa %s %s"></i>' % (self.value, lopts)
else:
return '<i class="fa %s"></i>' % self.value
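if __name__ == '__main__':
    # Small usage example for the glyph helpers; the icon names are placeholders.
    # Renders as <i class="fa fa-home"></i>
    print(FaGlyph('home').html)
    # Spinning glyph with an extra option; note that opts are 'fa-' prefixed
    # automatically, so pass 'lg' rather than 'fa-lg'.
    print(FaGlyph('cog', rotate=True, opts='lg').html)
    # Stacked glyph built from two icon names.
    print(FaGlyph(('square', 'flag'), stack=True).html)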
|
from bluesky.magics import BlueskyMagics
from .startup import sd
from .detectors import *
from .endstation import *
from .accelerator import *
from .optics import *
from .tardis import *
#
# Setup of sup. data for plans
#
sd.monitors = []
sd.flyers = []
sd.baseline = [theta, delta, gamma, muR,
sx, say, saz,
cryoangle, sy, sz,
epu1, epu2,
slt1, slt2, slt3,
m1a, m3a,
#nanop, tardis,
tardis,
stemp, pgm,
inout, es_diag1_y, diag6_pid]
#bec.disable_baseline() #no print to CLI, just save to datastore
sclr.names.read_attrs=['name1','name2','name3','name4','name5','name6'] # TODO WHAT IS THIS??? - Dan Allan
sclr.channels.read_attrs=['chan1','chan2','chan3','chan4','chan5','chan6']
# Old-style hints config is replaced by the new 'kind' feature
# sclr.hints = {'fields': ['sclr_ch2', 'sclr_ch3', 'sclr_ch6']}
for i in [2, 3, 4, 5]:
getattr(sclr.channels, f'chan{i}').kind = 'hinted'
# getattr(sclr.channels, f'chan{i}').kind = 'normal' will remove the
# hinted fields from LivePlot and LiveTable.
def relabel_fig(fig, new_label):
fig.set_label(new_label)
fig.canvas.manager.set_window_title(fig.get_label())
# fccd.hints = {'fields': ['fccd_stats1_total']}
for i in [1, 2, 3, 4, 5]:
getattr(fccd, f'stats{i}').total.kind = 'hinted'
# dif_beam.hints = {'fields' : ['dif_beam_stats3_total','dif_beam_stats1_total']}
for i in [1, 3]:
getattr(dif_beam, f'stats{i}').total.kind = 'hinted'
## 20180726 needed to comment due to IOC1 problems
#cube_beam.hints = {'fields': ['cube_beam_stats2_total', 'cube_beam_stats1_total']}
#for i in [1, 2]:
# getattr(cube_beam, f'stats{i}').total.kind = 'hinted'
# This was imported in 00-startup.py # used to generate the list: [thing.name for thing in get_all_positioners()]
"""
BlueskyMagics.positioners = [
cryoangle,
delta,
diag2_y,
diag3_y,
diag5_y,
diag6_pid,
diag6_y,
epu1.gap,
epu1.phase,
epu2.gap,
epu2.phase,
es_diag1_y,
eta,
gamma,
m1a.z,
m1a.y,
m1a.x,
m1a.pit,
m1a.yaw,
m1a.rol,
m3a.x,
m3a.pit,
m3a.bdr,
# muR, # TODO turn this back on when safe
# muT, # TODO turn this back on when safe
#nanop.tx,
#nanop.ty,
#nanop.tz,
#nanop.bx,
#nanop.by,
#nanop.bz,
say,
saz,
slt1.xg,
slt1.xc,
slt1.yg,
slt1.yc,
slt2.xg,
slt2.xc,
slt2.yg,
slt2.yc,
slt3.x,
slt3.y,
sx,
sy,
sz,
tardis.h,
tardis.k,
tardis.l,
tardis.theta,
tardis.mu,
tardis.chi,
tardis.phi,
tardis.delta,
tardis.gamma,
theta,
]
"""
|
'''
A Python script to compute the band gap in zinc oxide.
Requires one command-line argument: the DOSCAR file.
Example: python compute_bandgap doscar
Created by: Shiva Bhusal, Aneer Lamichhane.
'''
import sys
'''
Function to convert numbers in E+ and E- exponential format to
normal floating point numbers.
'''
def stringToFloat(myStr):
if 'E+' in myStr:
myStr=myStr.split('E+')
return float(myStr[0])*pow(10,float(myStr[1]))
elif 'E-' in myStr:
myStr=myStr.split('E-')
return float(myStr[0])* pow(10,float(myStr[1])*-1)
else:
return float(myStr)
doscarFile=open(sys.argv[1])
seriesList=[] # List to keep the series of values in each Gaps.
is_zero=False
'''
Reads each line from the DOSCAR file, filtering the lines whose first column is in the range -3 to 3.
For each of these lines, finds the first occurrence of 0 in the 2nd column
and appends the result until it finds the first occurrence of a non-zero value.
That first non-zero occurrence is appended as well.
The loop then stops.
'''
tempSeries=[]
for lines in doscarFile:
lines=lines.strip().split(' ') # Two spaces.
if stringToFloat(lines[1])==0:
tempSeries.append([stringToFloat(lines[0]),stringToFloat(lines[1])])
is_zero=True
if is_zero==True:
if stringToFloat(lines[1])!=0:
tempSeries.append([stringToFloat(lines[0]),stringToFloat(lines[1])])
seriesList.append(tempSeries)
tempSeries=[]
is_zero=False
doscarFile.close()
print("Total Gaps:" +str(len(seriesList)))
gapList=[]
for series in seriesList:
start=series[0][0]
end=series[len(series)-1][0]
gap=end-start
gapList.append(gap) #In case, list is needed in the future.
print('Start:' +str(start)+' End:'+str(end)+' Gap:'+str(gap))
|
from datetime import date
from django.db import models
from groups.models import Student
from subjects.models import Lesson, Task
class Attendance(models.Model):
visit = models.BooleanField(default=False)
lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE)
student = models.ForeignKey(Student, on_delete=models.CASCADE)
class Result(models.Model):
rating = models.FloatField(default=0)
task = models.ForeignKey(Task, on_delete=models.CASCADE)
student = models.ForeignKey(Student, on_delete=models.CASCADE)
date = models.DateField(default=date.today)
|
from . import UserTestCase, HTTPTestMixin
class LoginTest(UserTestCase, HTTPTestMixin):
    def test_get_login(self):
        response = self.anon_user.get('/login')
        self.assert200(response)
def test_login_valid(self):
response = self.anon_user.post('/login', data={
'password': 'password',
'email': 'bob@example.com'
})
self.assert302(response)
def test_login_invalid(self):
response = self.anon_user.post('/login', data={
'password': 'notthepassword',
'email': 'bob@example.com',
})
self.assert401(response)
def test_login_unknown_user(self):
response = self.anon_user.post('/login', data={
'password': 'notthepassword',
'email': 'anna@example.com',
})
self.assert401(response)
def test_logout(self):
response = self.admin_user.get('/logout')
self.assert302(response)
|
from TradingGym.OrderBook import OrderBook
class Strategy:
"""
Implements base strategy which holds constant orders
"""
def __init__(self):
self.sleep = 100 # ms
def action(self, position, history, old_book, market_book):
"""Override this method in subclasses"""
new_book = OrderBook()
new_book.book = (old_book.book[0].copy(), old_book.book[1].copy())
return new_book, self.sleep # order-book held by our strategy and time until next rebalancing
class SpreadStrategy(Strategy):
"""
Implements strategy which places orders on best bid-ask prices
"""
def __init__(self, value = 10, offset = 10):
super().__init__()
self.value = value
self.offset = offset
def action(self, position, history, old_book, market_book):
new_book = OrderBook()
new_book.book[0][max(market_book.book[0].keys()) - self.offset] = self.value
new_book.book[1][min(market_book.book[1].keys()) + self.offset] = self.value
return new_book, self.sleep
|
import sys
import numpy as np
#import matplotlib.pyplot as plt
#NOTES FROM ROSS - 9/9/15
#NOTE: W should be 42x7
#NOTE: Bias should be 1x7
#for linear regression -
#loss = 0.5*sum((scores-targets)^2)
#scores = Wx + b
#dL/dW = dL/dscores * dscores/dW
#dL/dscores = (score - y)
#dscores/dW = X
#so
#dL/dW = (score - y) * X
#you can then update the weights with this gradient
#read the data file
dataFilename = "../mocap_test/labelled_data.txt"
if(len(sys.argv) > 1):
dataFilename = sys.argv[1]
txtfile = open(dataFilename)
lines = txtfile.read().split("\n")
#remove the field names
fieldnames = lines.pop(0).strip().split("\t")
#remove fieldnames of frame, time and valid
fieldnames.pop(0)
fieldnames.pop(0)
fieldnames.pop()
data = []
labels = []
for line in lines:
fields = line.strip().split("\t")
if(len(fields) != len(fieldnames) + 3):
continue
#take out frame num
fields.pop(0)
#take out time
fields.pop(0)
#now take out the last column - it is the valid indicator
fields.pop()
data.append(fields[:len(fields) - 7])
labels.append(fields[len(fields) - 7:])
N = len(data) #number of training examples - around 4K
D = len(data[0]) #dimensionality - around 42
K = 1 #there are no classes as this is regression
LABEL_DIM = 7
X = np.zeros((N*K,D))
y = np.zeros((N*K,LABEL_DIM))
#copy data into X
for row in range(N):
for column in range(D):
X[row, column] = float(data[row][column]) #Slow way
#copy labels into Y
for row in range(N):
for column in range(LABEL_DIM):
y[row, column] = float(labels[row][column]) #Slow way
print "done importing data"
#NOTE: I manually checked parsing... it is correct.
#Train a Linear Classifier
# initialize parameters randomly
W = 0.01 * np.random.randn(D,LABEL_DIM) #This is 42 weights... looks right(?)
b = np.zeros((1,LABEL_DIM)) #Just one bias
# some hyperparameters
step_size = 1e-5
reg = 1e-3 # regularization strength
# gradient descent loop
num_examples = X.shape[0]
for i in range(50000):
# evaluate class scores, [N x K]
scores = np.dot(X, W) + b
#compute the L2 loss
score_diff = np.subtract(scores, y)
L2_loss = 0.5 * np.sum(np.multiply(score_diff,score_diff))
data_loss = L2_loss
reg_loss = 0.5*reg*np.sum(W*W) #this stays the same
loss = data_loss + reg_loss #this stays the same
if i % 100 == 0:
print "iteration %d: loss %f" % (i, loss)
# compute the gradient on scores
dscores = score_diff
# backpropate the gradient to the parameters (W,b)
dW = np.dot(X.T, dscores)
db = np.sum(dscores, axis=0, keepdims=True)
dW += reg*W # regularization gradient
    # perform a parameter update
    W += -step_size * dW
    b += -step_size * db
# evaluate training set accuracy
scores = np.dot(X, W) + b
score_diff = np.subtract(scores,y)
distances = np.sqrt(np.multiply(score_diff, score_diff))
avg_distance = np.sum(distances)/num_examples
#predicted_class = np.argmax(scores, axis=1)
print('training accuracy (distance): %.2f meters' % avg_distance)
# data units are in meters.. although we mixed in a quaternion
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Apache Beam SDK version information and utilities."""
import re
__version__ = '0.6.0'
# The following utilities are legacy code from the Maven integration;
# see BEAM-378 for further details.
# Reads the actual version from the pom.xml file.
def get_version_from_pom():
with open('pom.xml', 'r') as f:
pom = f.read()
regex = (r'.*<parent>\s*'
r'<groupId>[a-z\.]+</groupId>\s*'
r'<artifactId>[a-z\-]+</artifactId>\s*'
r'<version>([0-9a-zA-Z\.\-]+)</version>.*')
pattern = re.compile(str(regex))
search = pattern.search(pom)
version = search.group(1)
version = version.replace("-SNAPSHOT", ".dev")
return version
# Synchronizes apache_beam.__version__ field for later usage
def sync_version(version):
init_path = 'apache_beam/__init__.py'
regex = r'^__version__\s*=\s*".*"'
with open(init_path, "r") as f:
lines = f.readlines()
with open(init_path, "w") as f:
for line in lines:
if re.search(regex, line):
f.write(re.sub(regex, '__version__ = "%s"' % version, line))
else:
f.write(line)
|
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import TimeoutError
import datetime
import shutil
import time
import pytest
import requests
from .test_utils import *
from glide import *
def test_placeholder_node(rootdir):
nodes = PlaceholderNode("extract") | CSVLoad("load")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider["extract"] = CSVExtract("extract")
with open(outfile, "w") as f:
glider.consume([infile], extract=dict(chunksize=10, nrows=20), load=dict(f=f))
def test_profiler_node(rootdir):
nodes = Profile("profile") | CSVExtract("extract") | CSVLoad("load")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume([infile], extract=dict(chunksize=10, nrows=20), load=dict(f=f))
def test_filter_node(rootdir):
nodes = (
CSVExtract("extract")
| Filter("filter", func=lambda n, d: len(d) == 5)
| Reduce("reduce", flatten=True)
| LenPrint("len")
| CSVLoad("load")
| AssertFunc("length_check", func=lambda n, d: len(d) == 5)
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume([infile], extract=dict(chunksize=10, nrows=15), load=dict(f=f))
def test_assert_node(rootdir):
nodes = (
CSVExtract("extract", chunksize=10, nrows=20)
| AssertFunc("length_check", func=lambda node, data: len(data) == 10)
| CSVLoad("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume([infile], load=dict(f=f))
def parity_zip_router(row):
if int(row["Zip_Code"]) % 2 == 0:
return "even"
return "odd"
def threshold_zip_router(row):
zipcode = int(row["Zip_Code"])
prepend = "odd"
if zipcode % 2 == 0:
prepend = "even"
if zipcode >= 1020:
return "%s_large" % prepend
return "%s_small" % prepend
def test_router_function(rootdir):
nodes = (
CSVExtract("extract", nrows=20)
| IterPush("iter")
| [parity_zip_router, Print("even"), Print("odd")]
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_window_push(rootdir):
nodes = (
CSVExtract("extract", nrows=5) | WindowPush("window", size=3) | Print("print")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_window_reduce(rootdir):
nodes = (
CSVExtract("extract", nrows=5)
| IterPush("iter")
| WindowReduce("window", size=3)
| Print("print")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
class ZipGroupByDMA(GroupByNode):
def key(self, row):
key = row["DMA_Description"]
return key
def run(self, batch):
self.push({batch[0]["DMA_Description"]: [z["Zip_Code"] for z in batch]})
def test_group_by_node(rootdir):
nodes = (
CSVExtract("extract", nrows=20)
| IterPush("iter")
| ZipGroupByDMA("group")
| Print("print")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_complex_pipeline(rootdir):
nodes = (
CSVExtract("extract", nrows=40)
| IterPush("iter")
| [
parity_zip_router,
(
Print("even")
| [threshold_zip_router, Print("even_large"), Print("even_small")]
),
(
Print("odd")
| [threshold_zip_router, Print("odd_large"), Print("odd_small")]
),
]
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
large = Print("large")
small = Print("small")
reducer = Reduce("reduce")
combined = LenPrint("combined")
large.add_downstream(reducer)
small.add_downstream(reducer)
reducer.add_downstream(combined)
glider["even_large"].add_downstream(large)
glider["odd_large"].add_downstream(large)
glider["even_small"].add_downstream(small)
glider["odd_small"].add_downstream(small)
glider.consume([infile])
if shutil.which("dot"):
filename = "%s/pipeline_plot.png" % test_config["OutputDirectory"]
print("Found dot package, printing pipeline graph to %s" % filename)
glider.plot(filename)
def test_map(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| Map("transform", func=row_lower, as_list=True)
| CSVLoad("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume([infile], load=dict(f=f))
def test_func(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| Func("transform", func=lower_rows)
| Print("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_dict_key_transform(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| DictKeyTransform(
"transform", **{"zip code": lambda x: x["Zip_Code"]}, drop=["Zip_Code"]
)
| PrettyPrint("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_hash_key(rootdir):
nodes = CSVExtract("extract", nrows=10) | HashKey("transform") | PrettyPrint("load")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def get_json_helper(url, **kwargs):
resp = requests.get(url, **kwargs)
return resp.json()
def test_poll_func(rootdir):
glider = Glider(
PollFunc(
"poll",
func=get_json_helper,
result_param="id",
result_value=1,
data_param="title",
)
| Print("print")
)
glider.consume(["https://jsonplaceholder.typicode.com/todos/1"])
def test_process_pool_submit(rootdir):
nodes = (
CSVExtract("extract", nrows=100)
| ProcessPoolSubmit("transform", push_type=PushTypes.Result)
| CSVLoad("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with open(outfile, "w") as f:
glider.consume([infile], transform=dict(func=lower_rows), load=dict(f=f))
def test_process_pool_reducer(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| ProcessPoolSubmit("transform")
| FuturesReduce("reducer", flatten=True)
| Print("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile], transform=dict(func=lower_rows))
def test_thread_pool_submit(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| ThreadPoolSubmit(
"transform", push_type=PushTypes.Result, executor_kwargs=dict(max_workers=4)
)
| Print("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile], transform=dict(func=lower_rows))
def test_pool_submit_executor_param(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| ThreadPoolSubmit(
"transform", push_type=PushTypes.Result, executor_kwargs=dict(max_workers=4)
)
| Print("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with ThreadPoolExecutor(max_workers=4) as executor, open(outfile, "w") as f:
glider.consume([infile], transform=dict(func=lower_rows, executor=executor))
def sleep2(x):
time.sleep(1)
def test_pool_timeout(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| ProcessPoolSubmit(
"transform",
push_type=PushTypes.Result,
executor_kwargs=dict(max_workers=4),
timeout=0.5,
)
| Print("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
with pytest.raises(TimeoutError), open(outfile, "w") as f:
glider.consume([infile], transform=dict(func=sleep2))
def test_flatten(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| ProcessPoolSubmit("transform")
| FuturesReduce("reducer", flatten=False)
| Flatten("flatten")
| Print("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile], transform=dict(func=lower_rows))
def test_update_downstream_context(rootdir):
nodes = CSVExtract("extract", nrows=10) | [
FormatPrint("print1"),
FormatPrint("print2"),
]
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider["extract"].update_downstream_context(dict(indent=2))
glider.consume([infile])
class UpdateRequiredContextTest(Node):
def run(self, data, outfile=None):
with open(outfile, "w") as f:
self.update_downstream_context(dict(f=f))
self.push(data)
def test_update_downstream_context_required_arg(rootdir):
nodes = (
CSVExtract("extract", nrows=10) | PlaceholderNode("context") | CSVLoad("load")
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider["context"] = UpdateRequiredContextTest("context", outfile=outfile)
glider.consume([infile])
def test_context_push_node(rootdir):
nodes = (
CSVExtract("extract", nrows=10)
| ContextPush("context", func=lambda node, data: dict(indent=4))
| [FormatPrint("print1"), FormatPrint("print2")]
)
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_config_context_json(rootdir):
nodes = CSVExtract(
"extract", nrows=ConfigContext("config_context.json", key="nrows")
) | LenPrint("print")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_config_context_yaml(rootdir):
nodes = CSVExtract(
"extract", nrows=ConfigContext("config_context.yaml", key="nrows")
) | LenPrint("print")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_config_context_ini(rootdir):
nodes = CSVExtract(
"extract",
nrows=ConfigContext(
"config_context.ini", key=lambda x: int(x["TEST"]["nrows"])
),
) | LenPrint("print")
glider, infile, outfile = file_glider(rootdir, "csv", nodes)
glider.consume([infile])
def test_datetime_window_push():
nodes = DateTimeWindowPush("windows") | PrettyPrint("print")
glider = Glider(nodes)
today = datetime.date.today()
glider.consume(
None,
windows=dict(
start_date=today - datetime.timedelta(days=3), end_date=today, num_windows=2
),
)
def test_date_window_push():
nodes = DateWindowPush("windows") | PrettyPrint("print")
glider = Glider(nodes)
today = datetime.date.today()
now = datetime.datetime.now()
glider.consume(
None,
windows=dict(
start_date=datetime.datetime(2019, 10, 25, 3, 2, 1),
end_date=datetime.datetime(2019, 10, 28, 3, 2, 1),
),
)
|
from boa3.builtin import public
@public
def main(value: str, some_bytes: bytes) -> bool:
return value not in some_bytes
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List, Optional
from botorch.models.gp_regression import FixedNoiseGP
from botorch.models.kernels.contextual_lcea import LCEAKernel
from botorch.models.kernels.contextual_sac import SACKernel
from torch import Tensor
class SACGP(FixedNoiseGP):
"""The GP uses Structural Additive Contextual(SAC) kernel.
Args:
train_X: (n x d) X training data.
train_Y: (n x 1) Y training data.
train_Yvar: (n x 1) Noise variances of each training Y.
        decomposition: Keys are context names. Values are the indexes of the
            parameters belonging to the context. The parameter indexes are in
            the same order across contexts.
"""
def __init__(
self,
train_X: Tensor,
train_Y: Tensor,
train_Yvar: Tensor,
decomposition: Dict[str, List[int]],
) -> None:
super().__init__(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
self.covar_module = SACKernel(
decomposition=decomposition,
batch_shape=self._aug_batch_shape,
device=train_X.device,
)
self.decomposition = decomposition
self.to(train_X)
class LCEAGP(FixedNoiseGP):
r"""The GP with Latent Context Embedding Additive (LCE-A) Kernel.
Note that the model does not support batch training. Input training
data sets should have dim = 2.
Args:
train_X: (n x d) X training data.
train_Y: (n x 1) Y training data.
train_Yvar: (n x 1) Noise variance of Y.
        decomposition: Keys are context names. Values are the indexes of the
            parameters belonging to the context. The parameter indexes are in the
            same order across contexts.
        cat_feature_dict: Keys are context names and values are lists of categorical
            features, i.e. {"context_name": [cat_0, ..., cat_k]}. k equals the number
            of categorical variables. If None, the context names in the
            decomposition are used as the only categorical feature, i.e. k = 1.
        embs_feature_dict: Pre-trained continuous embedding features of each context.
        embs_dim_list: Embedding dimension for each categorical variable. The length
            equals the number of categorical features k. If None, the embedding
            dimension is set to 1 for each categorical variable.
        context_weight_dict: Known population weights of each context.
"""
def __init__(
self,
train_X: Tensor,
train_Y: Tensor,
train_Yvar: Tensor,
decomposition: Dict[str, List[int]],
train_embedding: bool = True,
cat_feature_dict: Optional[Dict] = None,
embs_feature_dict: Optional[Dict] = None,
embs_dim_list: Optional[List[int]] = None,
context_weight_dict: Optional[Dict] = None,
) -> None:
super().__init__(train_X=train_X, train_Y=train_Y, train_Yvar=train_Yvar)
self.covar_module = LCEAKernel(
decomposition=decomposition,
batch_shape=self._aug_batch_shape,
train_embedding=train_embedding,
cat_feature_dict=cat_feature_dict,
embs_feature_dict=embs_feature_dict,
embs_dim_list=embs_dim_list,
context_weight_dict=context_weight_dict,
device=train_X.device,
)
self.decomposition = decomposition
self.to(train_X)
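# --- Hedged construction sketch (illustrative only; shapes and decomposition
# below are made-up placeholders, not values prescribed by this module):
#
#     import torch
#
#     train_X = torch.rand(10, 4)                  # 2 contexts x 2 parameters
#     train_Y = torch.rand(10, 1)
#     train_Yvar = torch.full_like(train_Y, 0.01)
#     decomposition = {"context_a": [0, 1], "context_b": [2, 3]}
#
#     sac_gp = SACGP(train_X, train_Y, train_Yvar, decomposition)
#     lcea_gp = LCEAGP(train_X, train_Y, train_Yvar, decomposition)
#
# Every context must own the same number of parameter indexes, in the same
# order, as required by the docstrings above.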
|
import unittest
from poker.card import Card
from poker.hand import Hand
from poker.validators import PairValidator
class HandTest(unittest.TestCase):
def test_starts_out_with_no_cards(self):
hand = Hand()
self.assertEqual(hand.cards, [])
def test_shows_all_its_cards_in_technical_representation(self):
cards = [
Card(rank = "Ace", suit = "Diamonds"),
Card(rank = "7", suit = "Clubs")
]
hand = Hand()
hand.add_cards(cards)
self.assertEqual(
repr(hand),
"7 of Clubs, Ace of Diamonds"
)
def test_receives_and_stores_cards(self):
ace_of_spades = Card(rank = "Ace", suit = "Spades")
six_of_clubs = Card(rank = "6", suit = "Clubs")
cards = [
ace_of_spades,
six_of_clubs
]
hand = Hand()
hand.add_cards(cards)
self.assertEqual(
hand.cards,
[
six_of_clubs,
ace_of_spades
]
)
def test_interacts_with_validator_to_get_winning_hand(self):
class HandWithOneValidator(Hand):
VALIDATORS = (PairValidator,)
ace_of_hearts = Card(rank = "Ace", suit = "Hearts")
ace_of_spades = Card(rank = "Ace", suit = "Spades")
cards = [ace_of_hearts, ace_of_spades]
hand = HandWithOneValidator()
hand.add_cards(cards = cards)
self.assertEqual(
hand.best_rank(),
(0, "Pair", [ace_of_hearts, ace_of_spades])
)
|
from django.contrib import admin
from . import models
from apps.common.utils import register
class UserGroupAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'type', 'app')
admin.site.register(models.UserGroup, UserGroupAdmin)
register('user_group')
|
from datetime import timedelta
from beanie import DeleteRules
from fastapi import APIRouter, Depends, Response
from fastapi.logger import logger
from awesome_sso.exceptions import BadRequest, HTTPException, InternalServerError
from awesome_sso.service.depends import get_sso_user_id, sso_registration, sso_user
from awesome_sso.service.settings import Settings
from awesome_sso.service.user.schema import AccessToken, AwesomeUserType, RegisterModel
from awesome_sso.util.jwt import SYMMETRIC_ALGORITHM, create_token
router = APIRouter(tags=["sso"])
@router.get("/health_check")
def health_check():
return ["OK"]
@router.post("/register", summary="register user")
async def register(register_model: RegisterModel = Depends(sso_registration)):
try:
user = await Settings[AwesomeUserType]().user_model.find_one( # type: ignore
Settings[AwesomeUserType]().user_model.email == register_model.email # type: ignore
)
if user is None:
user = await Settings[AwesomeUserType]().user_model.register(register_model) # type: ignore
else:
raise BadRequest(message="user email %s taken" % register_model.email)
except HTTPException as e:
logger.warning(str(e))
raise e
except Exception as e:
logger.warning(str(e))
raise InternalServerError(message=str(e))
return user
@router.post("/login", summary="get login access token", response_model=AccessToken)
async def login(user: AwesomeUserType = Depends(sso_user)):
jwt_payload = {"sso_user_id": str(user.sso_user_id)}
token = create_token(
jwt_payload,
Settings.symmetric_key,
SYMMETRIC_ALGORITHM,
expires_delta=timedelta(days=7),
)
return AccessToken(access_token=token)
@router.post("/unregister")
async def unregister(sso_user_id: str = Depends(get_sso_user_id)):
user = await Settings[AwesomeUserType]().user_model.find_one( # type: ignore
Settings[AwesomeUserType]().user_model.sso_user_id == sso_user_id # type: ignore
)
if user is None:
return Response(status_code=200, content="requested user not exist")
else:
await user.delete_data()
await user.delete(link_rule=DeleteRules.DELETE_LINKS)
return Response(status_code=200, content="user unregistered")
|
import asyncio
import logging
import sys
import pickle
from typing import Dict, Tuple
SEPARATOR = b'salih'
class MessageObject:
def __init__(self, function_name: str, message_id: int, *args, result=None, **kwargs):
self._function_name = function_name
self._message_id = message_id
self._args = args
self._kwargs = kwargs
self._result = result
self._error = False
@property
def function_name(self):
return self._function_name
@property
def message_id(self) -> int:
return self._message_id
@property
def result(self):
return self._result
@result.setter
def result(self, result):
self._result = result
@property
def args(self) -> Tuple:
return self._args
@property
def kwargs(self) -> Dict:
return self._kwargs
@property
def error(self) -> bool:
return self._error
@error.setter
def error(self, error: bool):
self._error = error
class AsyncIPyCLink:
"""Represents an abstracted async socket connection that handles
communication between a :class:`AsyncIPyCHost` and a :class:`AsyncIPyCClient`
This class is internally managed and typically should not be instantiated on
its own.
Parameters
-----------
reader: :class:`asyncio.StreamReader`
The managed inbound data reader.
writer: :class:`asyncio.StreamWriter`
The managed outbound data writer
client: Union[:class:`AsyncIPyCHost`, :class:`AsyncIPyCClient`]
The communication object that is responsible for managing this connection.
"""
def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter, client):
self._reader = reader
self._writer = writer
self._logger = logging.getLogger(self.__class__.__name__)
self._logger.debug(f"Established link")
self._active = True
self._client = client
self._tasks: Dict[int, asyncio.Future] = {}
async def close(self):
"""|coro|
Closes all communication channels with a peer and attempts to send them EOF.
Informs the parent :class:`AsyncIPyCHost` or :class:`AsyncIPyCClient` of the
closed connection.
"""
self._logger.debug(f"Beginning to close link")
self._reader = None
if self._writer.can_write_eof():
self._writer.write_eof()
try:
await self._writer.drain()
except ConnectionAbortedError:
pass
self._writer.close()
if sys.version_info >= (3, 7):
await self._writer.wait_closed()
self._writer = None
self._active = False
self._client.connections.remove(self)
self._logger.debug(f"Closed link")
def is_active(self):
""":class:`bool`: Indicates if the communication channels are closed, at EOF, or no longer viable."""
# Quickly check if the state of the reader changed from the remote
if not self._reader or self._reader.at_eof() or not self._writer:
self._active = False
return self._active
async def send(self, message_object: MessageObject, drain_immediately=True):
"""|coro|
Send a :class:`MessageObject` to the receiving end. The object is serialized
with :mod:`pickle`, so both ends must be able to import the classes referenced
in the payload.
.. warning::
The serialized payload must not contain the separator byte sequence
``SEPARATOR`` (``b'salih'``) anywhere, since the receiving end uses it to frame
messages with ``readuntil()``. If your payload could contain these bytes, you
must substitute them prior to this function call.
Parameters
------------
message_object: :class:`MessageObject`
The message to be sent to the receiving end.
drain_immediately: Optional[:class:`bool`]
Whether to flush the output buffer right now or not.
Defaults to ``True``.
"""
if not self.is_active():
self._logger.debug(f"Attempted to send data when the writer or link is closed! Ignoring.")
return
self._writer.write(serialize(message_object))
if drain_immediately:
self._logger.debug(f"Draining the writer")
await self._writer.drain()
task = asyncio.Future()
self._tasks[message_object.message_id] = task
return await task
async def start_listening(self):
while self.is_active():
await self.receive()
async def receive(self):
"""|coro|
Receive a :class:`MessageObject` from the other end. The payload is read up to
the separator and deserialized with :mod:`pickle`.
Returns
--------
Optional[:class:`object`]
The object that was sent from the sending end. If the deserialization was not successful
and ``return_on_error`` was set to ``True``, or EOF was encountered resulting in a closed
connection, ``None`` is returned.
"""
if not self.is_active():
self._logger.debug(f"Attempted to read data when the writer or link is closed! Returning nothing.")
return
self._logger.debug(f"Waiting for communication from the other side")
try:
data = await self._reader.readuntil(separator=SEPARATOR)
except ConnectionAbortedError:
self._logger.debug(f"The downstream connection was aborted")
await self.close()
return
except asyncio.exceptions.IncompleteReadError:
self._logger.debug(f'Read canceled for incomplete read error')
await self.close()
return
message_object = deserialize(data)
if not isinstance(message_object, MessageObject):
raise Exception("None Message Object Received")
if self._reader.at_eof():
self._logger.debug(f"The downstream writer closed the connection")
await self.close()
return None
if self._client.__class__.__name__ == "AsyncIPyCHost":
result = None
try:
func = getattr(self._client.klass, message_object.function_name)
result = await func(*message_object.args, **message_object.kwargs) if asyncio.iscoroutinefunction(func)\
else func(*message_object.args, **message_object.kwargs)
except Exception as e:
result = e
message_object.error = True
finally:
message_object.result = result
self._writer.write(serialize(message_object))
else:
task = self._tasks.pop(message_object.message_id)
if message_object.error:
task.set_exception(message_object.result)
else:
task.set_result(message_object.result)
def serialize(message_object: MessageObject) -> bytes:
return pickle.dumps(message_object) + SEPARATOR
def deserialize(data: bytes) -> MessageObject:
return pickle.loads(data[:-len(SEPARATOR)])
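# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Demonstrates the wire format used by AsyncIPyCLink: serialize() appends SEPARATOR to
# the pickled MessageObject, and deserialize() strips it again. The function name and
# arguments below are made up.
if __name__ == "__main__":
    request = MessageObject("remote_add", 1, 2, 3)  # message_id=1, args=(2, 3)
    wire_bytes = serialize(request)                 # what send() writes to the stream
    restored = deserialize(wire_bytes)              # what receive() reconstructs
    assert restored.function_name == "remote_add"
    assert restored.args == (2, 3)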
|
"""Overview of lists."""
from functions import demo, placeholders, getinput, Status, Demo
index = placeholders('index')
item = placeholders('item')
lst = placeholders('filled_list')
slice2 = placeholders('slice2')
slice3 = placeholders('slice3')
# A list is, well, a list of things. They are similar to
# arrays in other programming languages.
# They can contain any data type and hold any number of
# items (to an extent).
# Syntax for lists:
empty_list = []
filled_list = [1, 'Hello', 3.75]
# Lists use square brackets [] with comma-separated values inside
# List methods:
# add an item to the end of a list
lst.append(item)
# remove the first occurrence of an item
lst.remove(item)
# remove an item by index
lst.pop(index)
# add an item at the index given
lst.insert(index, item)
# join 2 lists together
lst.extend(lst)
# count the number of times an item appears in the list
lst.count(item)
# get the number of items in the list
len(lst)
# get an item by its index in the list - the first item
# has an index of 0. The last item in the list can be
# retrieved using -1 as the index. The index of the last
# item is 1 less than the list's length
lst[index]
lst[1] # 2nd item in list
lst[-2] # 2nd from last item
# get a portion of items out of a list using a slice:
# the 1st is the starting index and the 2nd is the ending
# index which is not included. So a slice of 1:4 will return
# items at index 1, 2, 3 (but not 4).
lst[slice2]
lst[0:3]
lst[:3] # omitting 1st value means it starts at the start
lst[2:] # omitting 2nd value means it finishes at the end
# A third parameter can be given which denotes the 'step' rate.
# A step of 2 means it will return the items but skip every other item
lst[slice3]
lst[0:4:2] # the indexes 0, 2 are returned
lst[1::3] # the indexes 1, 4, 7, etc... are returned until the end
# of the list as the end parameter is omitted
demo('List of Numbers')
numbers = Demo('numbers')
results = Demo('result')
numbers.demo = [1, 2, 3, 4, 5, 6, 7, 8]
numbers('create a list of numbers from 1 to 8')
numbers.demo.append(9)
numbers('add 9 to the end of the list')
numbers.demo.insert(0, 0)
# index^ ^item
numbers('add number 0 to the start')
results.demo = numbers.demo[3]
results('retrieve index 3 from the list')
results.demo = numbers.demo[4:7]
results('get items 4-7 (excluding 7) from the list')
results.demo = numbers.demo[:8:3]
results('get every 3rd item up to, but not including, 8')
|
# Networked Technology
# * ______________ networking ______________
# * | application | <==> | application |
# * -------------- --------------
# the <========> is called 'socket' (TCP connections)
# internet can be a socket
# A 'port' is an application-specific software communications endpoint (TCP connections)
# it's kind of like the doors in Monsters, Inc.
# Python, as always, helps us out: it already has built-in support for TCP sockets
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
# host: data.pr4e.org
# port: 80
# up to this point we have only established the connection between the two applications
# to guarantee that the communication is standardized and efficient, there are protocols
# ! HTTP (Hypertext Transfer Protocol)
# A set of protocols/rules that all the communications uses
# let's move on to a more realistic program
# * ______________ socket ,_________________
# * | my computer |================(Port 80) www.py4e.com |
# * -------------- '-----------------
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
cmd = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode()
mysock.send(cmd)
while True:
data = mysock.recv(512)
if (len(data) < 1): # EOF
break
print(data.decode())
mysock.close()
# we send a request for some data, then print it as we receive it
# PLUS: HOW INTERNET WORKS < http://www.net-intro.com/ >
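# A small follow-up sketch (illustrative, not from the original notes): the raw
# response mixes HTTP headers and the document body, separated by a blank line
# (b'\r\n\r\n'), so we can accumulate the bytes and split once at the end.
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
mysock.send('GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode())
raw = b''
while True:
    chunk = mysock.recv(512)
    if len(chunk) < 1:
        break
    raw += chunk
mysock.close()
headers, _, body = raw.partition(b'\r\n\r\n')  # split at the first blank line
print(body.decode())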
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import sys
def get_xi_2d(quadrilateral,p):
[p0, p1, p2, p3] = quadrilateral
xp1 = p[0]
xp2 = p[1]
xp3 = p[2]
x11 = p0[0]
x12 = p0[1]
x13 = p0[2]
x21 = p1[0]
x22 = p1[1]
x23 = p1[2]
x31 = p2[0]
x32 = p2[1]
x33 = p2[2]
x41 = p3[0]
x42 = p3[1]
x43 = p3[2]
xi1 = (2*x11*x32 - x11*x42 - 2*x12*x31 + x12*x41 - x21*x32 + x22*x31 + xp1*(x12 - x22 - x32 + x42) + xp2*(-x11 + x21 + x31 - x41) + np.sqrt(x11**2*x42**2 - 2*x11**2*x42*xp2 + x11**2*xp2**2 - 2*x11*x12*x41*x42 + 2*x11*x12*x41*xp2 + 2*x11*x12*x42*xp1 - 2*x11*x12*xp1*xp2 - 2*x11*x21*x32*x42 + 2*x11*x21*x32*xp2 + 2*x11*x21*x42*xp2 - 2*x11*x21*xp2**2 - 2*x11*x22*x31*x42 + 2*x11*x22*x31*xp2 + 4*x11*x22*x32*x41 - 4*x11*x22*x32*xp1 - 4*x11*x22*x41*xp2 + 2*x11*x22*x42*xp1 + 2*x11*x22*xp1*xp2 + 2*x11*x31*x42*xp2 - 2*x11*x31*xp2**2 - 4*x11*x32*x41*xp2 + 2*x11*x32*x42*xp1 + 2*x11*x32*xp1*xp2 + 2*x11*x41*x42*xp2 + 2*x11*x41*xp2**2 - 2*x11*x42**2*xp1 - 2*x11*x42*xp1*xp2 + x12**2*x41**2 - 2*x12**2*x41*xp1 + x12**2*xp1**2 + 4*x12*x21*x31*x42 - 4*x12*x21*x31*xp2 - 2*x12*x21*x32*x41 + 2*x12*x21*x32*xp1 + 2*x12*x21*x41*xp2 - 4*x12*x21*x42*xp1 + 2*x12*x21*xp1*xp2 - 2*x12*x22*x31*x41 + 2*x12*x22*x31*xp1 + 2*x12*x22*x41*xp1 - 2*x12*x22*xp1**2 + 2*x12*x31*x41*xp2 - 4*x12*x31*x42*xp1 + 2*x12*x31*xp1*xp2 + 2*x12*x32*x41*xp1 - 2*x12*x32*xp1**2 - 2*x12*x41**2*xp2 + 2*x12*x41*x42*xp1 - 2*x12*x41*xp1*xp2 + 2*x12*x42*xp1**2 + x21**2*x32**2 - 2*x21**2*x32*xp2 + x21**2*xp2**2 - 2*x21*x22*x31*x32 + 2*x21*x22*x31*xp2 + 2*x21*x22*x32*xp1 - 2*x21*x22*xp1*xp2 + 2*x21*x31*x32*xp2 - 4*x21*x31*x42*xp2 + 2*x21*x31*xp2**2 - 2*x21*x32**2*xp1 + 2*x21*x32*x41*xp2 + 2*x21*x32*x42*xp1 - 2*x21*x32*xp1*xp2 - 2*x21*x41*xp2**2 + 2*x21*x42*xp1*xp2 + x22**2*x31**2 - 2*x22**2*x31*xp1 + x22**2*xp1**2 - 2*x22*x31**2*xp2 + 2*x22*x31*x32*xp1 + 2*x22*x31*x41*xp2 + 2*x22*x31*x42*xp1 - 2*x22*x31*xp1*xp2 - 4*x22*x32*x41*xp1 + 2*x22*x32*xp1**2 + 2*x22*x41*xp1*xp2 - 2*x22*x42*xp1**2 + x31**2*xp2**2 - 2*x31*x32*xp1*xp2 - 2*x31*x41*xp2**2 + 2*x31*x42*xp1*xp2 + x32**2*xp1**2 + 2*x32*x41*xp1*xp2 - 2*x32*x42*xp1**2 + x41**2*xp2**2 - 2*x41*x42*xp1*xp2 + x42**2*xp1**2))/(2*(x11*x32 - x11*x42 - x12*x31 + x12*x41 - x21*x32 + x21*x42 + x22*x31 - x22*x41))
xi2a = (2*x11*x22 - x11*x42 - 2*x12*x21 + x12*x41 + x21*x32 - x22*x31 + xp1*(x12 - x22 - x32 + x42) + xp2*(-x11 + x21 + x31 - x41) - np.sqrt(x11**2*x42**2 - 2*x11**2*x42*xp2 + x11**2*xp2**2 - 2*x11*x12*x41*x42 + 2*x11*x12*x41*xp2 + 2*x11*x12*x42*xp1 - 2*x11*x12*xp1*xp2 - 2*x11*x21*x32*x42 + 2*x11*x21*x32*xp2 + 2*x11*x21*x42*xp2 - 2*x11*x21*xp2**2 - 2*x11*x22*x31*x42 + 2*x11*x22*x31*xp2 + 4*x11*x22*x32*x41 - 4*x11*x22*x32*xp1 - 4*x11*x22*x41*xp2 + 2*x11*x22*x42*xp1 + 2*x11*x22*xp1*xp2 + 2*x11*x31*x42*xp2 - 2*x11*x31*xp2**2 - 4*x11*x32*x41*xp2 + 2*x11*x32*x42*xp1 + 2*x11*x32*xp1*xp2 + 2*x11*x41*x42*xp2 + 2*x11*x41*xp2**2 - 2*x11*x42**2*xp1 - 2*x11*x42*xp1*xp2 + x12**2*x41**2 - 2*x12**2*x41*xp1 + x12**2*xp1**2 + 4*x12*x21*x31*x42 - 4*x12*x21*x31*xp2 - 2*x12*x21*x32*x41 + 2*x12*x21*x32*xp1 + 2*x12*x21*x41*xp2 - 4*x12*x21*x42*xp1 + 2*x12*x21*xp1*xp2 - 2*x12*x22*x31*x41 + 2*x12*x22*x31*xp1 + 2*x12*x22*x41*xp1 - 2*x12*x22*xp1**2 + 2*x12*x31*x41*xp2 - 4*x12*x31*x42*xp1 + 2*x12*x31*xp1*xp2 + 2*x12*x32*x41*xp1 - 2*x12*x32*xp1**2 - 2*x12*x41**2*xp2 + 2*x12*x41*x42*xp1 - 2*x12*x41*xp1*xp2 + 2*x12*x42*xp1**2 + x21**2*x32**2 - 2*x21**2*x32*xp2 + x21**2*xp2**2 - 2*x21*x22*x31*x32 + 2*x21*x22*x31*xp2 + 2*x21*x22*x32*xp1 - 2*x21*x22*xp1*xp2 + 2*x21*x31*x32*xp2 - 4*x21*x31*x42*xp2 + 2*x21*x31*xp2**2 - 2*x21*x32**2*xp1 + 2*x21*x32*x41*xp2 + 2*x21*x32*x42*xp1 - 2*x21*x32*xp1*xp2 - 2*x21*x41*xp2**2 + 2*x21*x42*xp1*xp2 + x22**2*x31**2 - 2*x22**2*x31*xp1 + x22**2*xp1**2 - 2*x22*x31**2*xp2 + 2*x22*x31*x32*xp1 + 2*x22*x31*x41*xp2 + 2*x22*x31*x42*xp1 - 2*x22*x31*xp1*xp2 - 4*x22*x32*x41*xp1 + 2*x22*x32*xp1**2 + 2*x22*x41*xp1*xp2 - 2*x22*x42*xp1**2 + x31**2*xp2**2 - 2*x31*x32*xp1*xp2 - 2*x31*x41*xp2**2 + 2*x31*x42*xp1*xp2 + x32**2*xp1**2 + 2*x32*x41*xp1*xp2 - 2*x32*x42*xp1**2 + x41**2*xp2**2 - 2*x41*x42*xp1*xp2 + x42**2*xp1**2))/(2*(x11*x22 - x11*x42 - x12*x21 + x12*x41 + x21*x32 - x22*x31 + x31*x42 - x32*x41))
xi2b = (x11*xi1 - x11 - x21*xi1 + xp1)/(x11*xi1 - x11 - x21*xi1 - x31*xi1 + x31 + x41*xi1)
return (xi1,xi2a,xi2b)
def get_xi_3d(hexahedron,p):
[p0, p1, p2, p3, p4, p5, p6, p7] = hexahedron
xp1 = p[0]
xp2 = p[1]
xp3 = p[2]
x11 = p0[0]
x12 = p0[1]
x13 = p0[2]
x21 = p1[0]
x22 = p1[1]
x23 = p1[2]
x31 = p2[0]
x32 = p2[1]
x33 = p2[2]
x41 = p3[0]
x42 = p3[1]
x43 = p3[2]
x51 = p4[0]
x52 = p4[1]
x53 = p4[2]
x61 = p5[0]
x62 = p5[1]
x63 = p5[2]
x71 = p6[0]
x72 = p6[1]
x73 = p6[2]
x81 = p7[0]
x82 = p7[1]
x83 = p7[2]
xi3 = (2*x11*x32 - x11*x72 - x11*xp2 - 2*x12*x31 + x12*x71 + x12*xp1 + x31*x52 + x31*xp2 - x32*x51 - x32*xp1 + x51*xp2 - x52*xp1 - x71*xp2 + x72*xp1 - np.sqrt(x11**2*x72**2 - 2*x11**2*x72*xp2 + x11**2*xp2**2 - 2*x11*x12*x71*x72 + 2*x11*x12*x71*xp2 + 2*x11*x12*x72*xp1 - 2*x11*x12*xp1*xp2 - 2*x11*x31*x52*x72 + 2*x11*x31*x52*xp2 + 2*x11*x31*x72*xp2 - 2*x11*x31*xp2**2 - 2*x11*x32*x51*x72 + 2*x11*x32*x51*xp2 + 4*x11*x32*x52*x71 - 4*x11*x32*x52*xp1 - 4*x11*x32*x71*xp2 + 2*x11*x32*x72*xp1 + 2*x11*x32*xp1*xp2 + 2*x11*x51*x72*xp2 - 2*x11*x51*xp2**2 - 4*x11*x52*x71*xp2 + 2*x11*x52*x72*xp1 + 2*x11*x52*xp1*xp2 + 2*x11*x71*x72*xp2 + 2*x11*x71*xp2**2 - 2*x11*x72**2*xp1 - 2*x11*x72*xp1*xp2 + x12**2*x71**2 - 2*x12**2*x71*xp1 + x12**2*xp1**2 + 4*x12*x31*x51*x72 - 4*x12*x31*x51*xp2 - 2*x12*x31*x52*x71 + 2*x12*x31*x52*xp1 + 2*x12*x31*x71*xp2 - 4*x12*x31*x72*xp1 + 2*x12*x31*xp1*xp2 - 2*x12*x32*x51*x71 + 2*x12*x32*x51*xp1 + 2*x12*x32*x71*xp1 - 2*x12*x32*xp1**2 + 2*x12*x51*x71*xp2 - 4*x12*x51*x72*xp1 + 2*x12*x51*xp1*xp2 + 2*x12*x52*x71*xp1 - 2*x12*x52*xp1**2 - 2*x12*x71**2*xp2 + 2*x12*x71*x72*xp1 - 2*x12*x71*xp1*xp2 + 2*x12*x72*xp1**2 + x31**2*x52**2 - 2*x31**2*x52*xp2 + x31**2*xp2**2 - 2*x31*x32*x51*x52 + 2*x31*x32*x51*xp2 + 2*x31*x32*x52*xp1 - 2*x31*x32*xp1*xp2 + 2*x31*x51*x52*xp2 - 4*x31*x51*x72*xp2 + 2*x31*x51*xp2**2 - 2*x31*x52**2*xp1 + 2*x31*x52*x71*xp2 + 2*x31*x52*x72*xp1 - 2*x31*x52*xp1*xp2 - 2*x31*x71*xp2**2 + 2*x31*x72*xp1*xp2 + x32**2*x51**2 - 2*x32**2*x51*xp1 + x32**2*xp1**2 - 2*x32*x51**2*xp2 + 2*x32*x51*x52*xp1 + 2*x32*x51*x71*xp2 + 2*x32*x51*x72*xp1 - 2*x32*x51*xp1*xp2 - 4*x32*x52*x71*xp1 + 2*x32*x52*xp1**2 + 2*x32*x71*xp1*xp2 - 2*x32*x72*xp1**2 + x51**2*xp2**2 - 2*x51*x52*xp1*xp2 - 2*x51*x71*xp2**2 + 2*x51*x72*xp1*xp2 + x52**2*xp1**2 + 2*x52*x71*xp1*xp2 - 2*x52*x72*xp1**2 + x71**2*xp2**2 - 2*x71*x72*xp1*xp2 + x72**2*xp1**2))/(2*x11*x32 - 2*x11*x72 - 2*x12*x31 + 2*x12*x71 + 2*x31*x52 - 2*x32*x51 + 2*x51*x72 - 2*x52*x71)
xi3b = (2*x11*x32 - x11*x72 - x11*xp2 - 2*x12*x31 + x12*x71 + x12*xp1 + x31*x52 + x31*xp2 - x32*x51 - x32*xp1 + x51*xp2 - x52*xp1 - x71*xp2 + x72*xp1 + np.sqrt(x11**2*x72**2 - 2*x11**2*x72*xp2 + x11**2*xp2**2 - 2*x11*x12*x71*x72 + 2*x11*x12*x71*xp2 + 2*x11*x12*x72*xp1 - 2*x11*x12*xp1*xp2 - 2*x11*x31*x52*x72 + 2*x11*x31*x52*xp2 + 2*x11*x31*x72*xp2 - 2*x11*x31*xp2**2 - 2*x11*x32*x51*x72 + 2*x11*x32*x51*xp2 + 4*x11*x32*x52*x71 - 4*x11*x32*x52*xp1 - 4*x11*x32*x71*xp2 + 2*x11*x32*x72*xp1 + 2*x11*x32*xp1*xp2 + 2*x11*x51*x72*xp2 - 2*x11*x51*xp2**2 - 4*x11*x52*x71*xp2 + 2*x11*x52*x72*xp1 + 2*x11*x52*xp1*xp2 + 2*x11*x71*x72*xp2 + 2*x11*x71*xp2**2 - 2*x11*x72**2*xp1 - 2*x11*x72*xp1*xp2 + x12**2*x71**2 - 2*x12**2*x71*xp1 + x12**2*xp1**2 + 4*x12*x31*x51*x72 - 4*x12*x31*x51*xp2 - 2*x12*x31*x52*x71 + 2*x12*x31*x52*xp1 + 2*x12*x31*x71*xp2 - 4*x12*x31*x72*xp1 + 2*x12*x31*xp1*xp2 - 2*x12*x32*x51*x71 + 2*x12*x32*x51*xp1 + 2*x12*x32*x71*xp1 - 2*x12*x32*xp1**2 + 2*x12*x51*x71*xp2 - 4*x12*x51*x72*xp1 + 2*x12*x51*xp1*xp2 + 2*x12*x52*x71*xp1 - 2*x12*x52*xp1**2 - 2*x12*x71**2*xp2 + 2*x12*x71*x72*xp1 - 2*x12*x71*xp1*xp2 + 2*x12*x72*xp1**2 + x31**2*x52**2 - 2*x31**2*x52*xp2 + x31**2*xp2**2 - 2*x31*x32*x51*x52 + 2*x31*x32*x51*xp2 + 2*x31*x32*x52*xp1 - 2*x31*x32*xp1*xp2 + 2*x31*x51*x52*xp2 - 4*x31*x51*x72*xp2 + 2*x31*x51*xp2**2 - 2*x31*x52**2*xp1 + 2*x31*x52*x71*xp2 + 2*x31*x52*x72*xp1 - 2*x31*x52*xp1*xp2 - 2*x31*x71*xp2**2 + 2*x31*x72*xp1*xp2 + x32**2*x51**2 - 2*x32**2*x51*xp1 + x32**2*xp1**2 - 2*x32*x51**2*xp2 + 2*x32*x51*x52*xp1 + 2*x32*x51*x71*xp2 + 2*x32*x51*x72*xp1 - 2*x32*x51*xp1*xp2 - 4*x32*x52*x71*xp1 + 2*x32*x52*xp1**2 + 2*x32*x71*xp1*xp2 - 2*x32*x72*xp1**2 + x51**2*xp2**2 - 2*x51*x52*xp1*xp2 - 2*x51*x71*xp2**2 + 2*x51*x72*xp1*xp2 + x52**2*xp1**2 + 2*x52*x71*xp1*xp2 - 2*x52*x72*xp1**2 + x71**2*xp2**2 - 2*x71*x72*xp1*xp2 + x72**2*xp1**2))/(2*x11*x32 - 2*x11*x72 - 2*x12*x31 + 2*x12*x71 + 2*x31*x52 - 2*x32*x51 + 2*x51*x72 - 2*x52*x71)
xi1 = (xp1*(x13*(-xi3 + 1) + x53*xi3) - xp1*(x23*(-xi3 + 1) + x63*xi3) - xp1*(x33*(-xi3 + 1) + x73*xi3) + xp1*(x43*(-xi3 + 1) + x83*xi3) - xp2*(x11*(-xi3 + 1) + x51*xi3) + xp2*(x21*(-xi3 + 1) + x61*xi3) + xp2*(x31*(-xi3 + 1) + x71*xi3) - xp2*(x41*(-xi3 + 1) + x81*xi3) + 2*(x11*(-xi3 + 1) + x51*xi3)*(x33*(-xi3 + 1) + x73*xi3) - (x11*(-xi3 + 1) + x51*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 2*(x13*(-xi3 + 1) + x53*xi3)*(x31*(-xi3 + 1) + x71*xi3) + (x13*(-xi3 + 1) + x53*xi3)*(x41*(-xi3 + 1) + x81*xi3) - (x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3) + (x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3) + np.sqrt(xp1**2*(x13*(-xi3 + 1) + x53*xi3)**2 - 2*xp1**2*(x13*(-xi3 + 1) + x53*xi3)*(x23*(-xi3 + 1) + x63*xi3) - 2*xp1**2*(x13*(-xi3 + 1) + x53*xi3)*(x33*(-xi3 + 1) + x73*xi3) + 2*xp1**2*(x13*(-xi3 + 1) + x53*xi3)*(x43*(-xi3 + 1) + x83*xi3) + xp1**2*(x23*(-xi3 + 1) + x63*xi3)**2 + 2*xp1**2*(x23*(-xi3 + 1) + x63*xi3)*(x33*(-xi3 + 1) + x73*xi3) - 2*xp1**2*(x23*(-xi3 + 1) + x63*xi3)*(x43*(-xi3 + 1) + x83*xi3) + xp1**2*(x33*(-xi3 + 1) + x73*xi3)**2 - 2*xp1**2*(x33*(-xi3 + 1) + x73*xi3)*(x43*(-xi3 + 1) + x83*xi3) + xp1**2*(x43*(-xi3 + 1) + x83*xi3)**2 - 2*xp1*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x13*(-xi3 + 1) + x53*xi3) + 2*xp1*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x23*(-xi3 + 1) + x63*xi3) + 2*xp1*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x33*(-xi3 + 1) + x73*xi3) - 2*xp1*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp1*xp2*(x13*(-xi3 + 1) + x53*xi3)*(x21*(-xi3 + 1) + x61*xi3) + 2*xp1*xp2*(x13*(-xi3 + 1) + x53*xi3)*(x31*(-xi3 + 1) + x71*xi3) - 2*xp1*xp2*(x13*(-xi3 + 1) + x53*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 2*xp1*xp2*(x21*(-xi3 + 1) + x61*xi3)*(x23*(-xi3 + 1) + x63*xi3) - 2*xp1*xp2*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3) + 2*xp1*xp2*(x21*(-xi3 + 1) + x61*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 2*xp1*xp2*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3) + 2*xp1*xp2*(x23*(-xi3 + 1) + x63*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 2*xp1*xp2*(x31*(-xi3 + 1) + x71*xi3)*(x33*(-xi3 + 1) + x73*xi3) + 2*xp1*xp2*(x31*(-xi3 + 1) + x71*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp1*xp2*(x33*(-xi3 + 1) + x73*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 2*xp1*xp2*(x41*(-xi3 + 1) + x81*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp1*(x11*(-xi3 + 1) + x51*xi3)*(x13*(-xi3 + 1) + x53*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 4*xp1*(x11*(-xi3 + 1) + x51*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x33*(-xi3 + 1) + x73*xi3) + 2*xp1*(x11*(-xi3 + 1) + x51*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp1*(x11*(-xi3 + 1) + x51*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 2*xp1*(x11*(-xi3 + 1) + x51*xi3)*(x43*(-xi3 + 1) + x83*xi3)**2 - 2*xp1*(x13*(-xi3 + 1) + x53*xi3)**2*(x41*(-xi3 + 1) + x81*xi3) + 2*xp1*(x13*(-xi3 + 1) + x53*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3) - 4*xp1*(x13*(-xi3 + 1) + x53*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp1*(x13*(-xi3 + 1) + x53*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3) + 2*xp1*(x13*(-xi3 + 1) + x53*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 4*xp1*(x13*(-xi3 + 1) + x53*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp1*(x13*(-xi3 + 1) + x53*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x41*(-xi3 + 1) + x81*xi3) + 2*xp1*(x13*(-xi3 + 1) + x53*xi3)*(x41*(-xi3 + 1) + x81*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp1*(x21*(-xi3 + 1) + x61*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x33*(-xi3 + 1) + x73*xi3) - 2*xp1*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3)**2 + 2*xp1*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x43*(-xi3 
+ 1) + x83*xi3) - 2*xp1*(x23*(-xi3 + 1) + x63*xi3)**2*(x31*(-xi3 + 1) + x71*xi3) + 2*xp1*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x33*(-xi3 + 1) + x73*xi3) + 2*xp1*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 4*xp1*(x23*(-xi3 + 1) + x63*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x41*(-xi3 + 1) + x81*xi3) + xp2**2*(x11*(-xi3 + 1) + x51*xi3)**2 - 2*xp2**2*(x11*(-xi3 + 1) + x51*xi3)*(x21*(-xi3 + 1) + x61*xi3) - 2*xp2**2*(x11*(-xi3 + 1) + x51*xi3)*(x31*(-xi3 + 1) + x71*xi3) + 2*xp2**2*(x11*(-xi3 + 1) + x51*xi3)*(x41*(-xi3 + 1) + x81*xi3) + xp2**2*(x21*(-xi3 + 1) + x61*xi3)**2 + 2*xp2**2*(x21*(-xi3 + 1) + x61*xi3)*(x31*(-xi3 + 1) + x71*xi3) - 2*xp2**2*(x21*(-xi3 + 1) + x61*xi3)*(x41*(-xi3 + 1) + x81*xi3) + xp2**2*(x31*(-xi3 + 1) + x71*xi3)**2 - 2*xp2**2*(x31*(-xi3 + 1) + x71*xi3)*(x41*(-xi3 + 1) + x81*xi3) + xp2**2*(x41*(-xi3 + 1) + x81*xi3)**2 - 2*xp2*(x11*(-xi3 + 1) + x51*xi3)**2*(x43*(-xi3 + 1) + x83*xi3) + 2*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x13*(-xi3 + 1) + x53*xi3)*(x41*(-xi3 + 1) + x81*xi3) + 2*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3) + 2*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3) - 4*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x41*(-xi3 + 1) + x81*xi3) + 2*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 4*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x41*(-xi3 + 1) + x81*xi3) + 2*xp2*(x11*(-xi3 + 1) + x51*xi3)*(x41*(-xi3 + 1) + x81*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 4*xp2*(x13*(-xi3 + 1) + x53*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x31*(-xi3 + 1) + x71*xi3) + 2*xp2*(x13*(-xi3 + 1) + x53*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x41*(-xi3 + 1) + x81*xi3) + 2*xp2*(x13*(-xi3 + 1) + x53*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 2*xp2*(x13*(-xi3 + 1) + x53*xi3)*(x41*(-xi3 + 1) + x81*xi3)**2 - 2*xp2*(x21*(-xi3 + 1) + x61*xi3)**2*(x33*(-xi3 + 1) + x73*xi3) + 2*xp2*(x21*(-xi3 + 1) + x61*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3) + 2*xp2*(x21*(-xi3 + 1) + x61*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x33*(-xi3 + 1) + x73*xi3) - 4*xp2*(x21*(-xi3 + 1) + x61*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*xp2*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 2*xp2*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3)**2 + 2*xp2*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x41*(-xi3 + 1) + x81*xi3) + (x11*(-xi3 + 1) + x51*xi3)**2*(x43*(-xi3 + 1) + x83*xi3)**2 - 2*(x11*(-xi3 + 1) + x51*xi3)*(x13*(-xi3 + 1) + x53*xi3)*(x41*(-xi3 + 1) + x81*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 2*(x11*(-xi3 + 1) + x51*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 2*(x11*(-xi3 + 1) + x51*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 4*(x11*(-xi3 + 1) + x51*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x41*(-xi3 + 1) + x81*xi3) + (x13*(-xi3 + 1) + x53*xi3)**2*(x41*(-xi3 + 1) + x81*xi3)**2 + 4*(x13*(-xi3 + 1) + x53*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 2*(x13*(-xi3 + 1) + x53*xi3)*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 2*(x13*(-xi3 + 1) + x53*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x41*(-xi3 + 1) + x81*xi3) + (x21*(-xi3 + 1) + x61*xi3)**2*(x33*(-xi3 + 1) + x73*xi3)**2 - 
2*(x21*(-xi3 + 1) + x61*xi3)*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3)*(x33*(-xi3 + 1) + x73*xi3) + (x23*(-xi3 + 1) + x63*xi3)**2*(x31*(-xi3 + 1) + x71*xi3)**2))/(2*(x11*(-xi3 + 1) + x51*xi3)*(x33*(-xi3 + 1) + x73*xi3) - 2*(x11*(-xi3 + 1) + x51*xi3)*(x43*(-xi3 + 1) + x83*xi3) - 2*(x13*(-xi3 + 1) + x53*xi3)*(x31*(-xi3 + 1) + x71*xi3) + 2*(x13*(-xi3 + 1) + x53*xi3)*(x41*(-xi3 + 1) + x81*xi3) - 2*(x21*(-xi3 + 1) + x61*xi3)*(x33*(-xi3 + 1) + x73*xi3) + 2*(x21*(-xi3 + 1) + x61*xi3)*(x43*(-xi3 + 1) + x83*xi3) + 2*(x23*(-xi3 + 1) + x63*xi3)*(x31*(-xi3 + 1) + x71*xi3) - 2*(x23*(-xi3 + 1) + x63*xi3)*(x41*(-xi3 + 1) + x81*xi3))
xi2 = (x11*xi1*xi3 - x11*xi1 - x11*xi3 + x11 - x21*xi1*xi3 + x21*xi1 - x51*xi1*xi3 + x51*xi3 + x61*xi1*xi3 - xp1)/(x11*xi1*xi3 - x11*xi1 - x11*xi3 + x11 - x21*xi1*xi3 + x21*xi1 - x31*xi1*xi3 + x31*xi1 + x31*xi3 - x31 + x41*xi1*xi3 - x41*xi1 - x51*xi1*xi3 + x51*xi3 + x61*xi1*xi3 + x71*xi1*xi3 - x71*xi3 - x81*xi1*xi3)
return (xi1,xi2,xi3,xi3b)
np.random.seed(1)
max_factor = 0
def point_is_in_tetrahedron(tetrahedron,correct_orientation,p):
[p3, p0, p1, p2] = tetrahedron
global max_factor
debug = False
xp1 = p[0]
xp2 = p[1]
xp3 = p[2]
x11 = p0[0]
x12 = p0[1]
x13 = p0[2]
x21 = p1[0]
x22 = p1[1]
x23 = p1[2]
x31 = p2[0]
x32 = p2[1]
x33 = p2[2]
x41 = p3[0]
x42 = p3[1]
x43 = p3[2]
if debug:
print ""
print "----"
print "tetrahedron:", tetrahedron, ", p:",p
det = (x11 - x41)*(x22 - x42)*(x33 - x43) - (x11 - x41)*(x23 - x43)*(x32 - x42) - (x12 - x42)*(x21 - x41)*(x33 - x43) + (x12 - x42)*(x23 - x43)*(x31 - x41) + (x13 - x43)*(x21 - x41)*(x32 - x42) - (x13 - x43)*(x22 - x42)*(x31 - x41)
xi1 = 1/det * ((-x41 + xp1)*((x22 - x42)*(x33 - x43) - (x23 - x43)*(x32 - x42)) + (-x42 + xp2)*(-(x21 - x41)*(x33 - x43) + (x23 - x43)*(x31 - x41)) + (-x43 + xp3)*((x21 - x41)*(x32 - x42) - (x22 - x42)*(x31 - x41)))
xi2 = 1/det * ((-x41 + xp1)*(-(x12 - x42)*(x33 - x43) + (x13 - x43)*(x32 - x42)) + (-x42 + xp2)*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) + (-x43 + xp3)*(-(x11 - x41)*(x32 - x42) + (x12 - x42)*(x31 - x41)))
xi3 = 1/det * ((-x41 + xp1)*((x12 - x42)*(x23 - x43) - (x13 - x43)*(x22 - x42)) + (-x42 + xp2)*(-(x11 - x41)*(x23 - x43) + (x13 - x43)*(x21 - x41)) + (-x43 + xp3)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41)))
if debug:
print "det: ", det, ",xi1: ",xi1,", xi2:",xi2, ",xi3:",xi3
factor = 3-xi1**2-xi2**2-xi3**2
#factor = (np.sqrt(3)-np.linalg.norm(np.array([xi1,xi2,xi3])))
point_is_inside = (xi1 >= 0 and xi2 >= 0 and xi3 >= 0)
max_factor = max(factor,max_factor)
if not correct_orientation[0]:
xi1 = 1. - xi1
if not correct_orientation[1]:
xi2 = 1. - xi2
if not correct_orientation[2]:
xi3 = 1. - xi3
if debug:
print "t matrix: "
tmat = np.array([[x11-x41,x21-x41,x31-x41],[x12-x42,x22-x42,x32-x42],[x13-x43,x23-x43,x33-x43]])
print tmat
print "adj: "
adj = np.array([[(x22 - x42)*(x33 - x43) - (x23 - x43)*(x32 - x42), -(x21 - x41)*(x33 - x43) + (x23 - x43)*(x31 - x41), (x21 - x41)*(x32 - x42) - (x22 - x42)*(x31 - x41)], [-(x12 - x42)*(x33 - x43) + (x13 - x43)*(x32 - x42), (x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41), -(x11 - x41)*(x32 - x42) + (x12 - x42)*(x31 - x41)], [(x12 - x42)*(x23 - x43) - (x13 - x43)*(x22 - x42), -(x11 - x41)*(x23 - x43) + (x13 - x43)*(x21 - x41), (x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41)]])
print adj
print "det: ", det, ",xi1: ",xi1,", xi2:",xi2, ",xi3:",xi3,", factor:",factor
print ""
print "check:"
xi = np.array([[xi1],[xi2],[xi3]])
xi = np.array([[1.0],[0.0],[0.0]])
print tmat.dot(xi)
print ""
print tmat.dot(xi),"=",p-p3
print "p-p3:",(p - p3)
tinv = np.array([[(-(-(-x12 + x42)*((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41)) + (-x13 + x43)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41)))*(-(x21 - x41)*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)) + (x31 - x41)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))) + (((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))*((x11 - x41)*(x22 - x42) - (-x12 + x42)*(x21 - x41) - (x12 - x42)*(x21 - x41)))/((x11 - x41)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))), (-(x11 - x41)*(x21 - x41)*(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41))) + (x11 - x41)*((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*(-(x21 - x41)*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)) + (x31 - x41)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))))/((x11 - x41)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))), -(-(x21 - x41)*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)) + (x31 - x41)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41)))/(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))], [((-x12 + x42)*(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41))) - ((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41))*(-(-x12 + x42)*((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41)) + (-x13 + x43)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))))/(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))), ((x11 - x41)*((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)) + (x11 - x41)*(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41))))/(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))), -(x11 - x41)*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41))/(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))], [(-(-x12 + x42)*((x11 - x41)*(x23 - x43) - (x13 - 
x43)*(x21 - x41)) + (-x13 + x43)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41)))/(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41))), -(x11 - x41)*((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))/(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41))), (x11 - x41)*((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))/(((x11 - x41)*(x22 - x42) - (x12 - x42)*(x21 - x41))*((x11 - x41)*(x33 - x43) - (x13 - x43)*(x31 - x41)) - ((x11 - x41)*(x23 - x43) - (x13 - x43)*(x21 - x41))*((x11 - x41)*(x32 - x42) - (x12 - x42)*(x31 - x41)))]])
print "t^-1:"
print tinv
det = (x11 - x41)*(x22 - x42)*(x33 - x43) - (x11 - x41)*(x23 - x43)*(x32 - x42) - (x12 - x42)*(x21 - x41)*(x33 - x43) + (x12 - x42)*(x23 - x43)*(x31 - x41) + (x13 - x43)*(x21 - x41)*(x32 - x42) - (x13 - x43)*(x22 - x42)*(x31 - x41)
print "det:",det
print "adj/det:"
print adj/det
xir = tinv.dot(np.reshape(p-p3,(3,1)))
print "t^-1 (p-p3)="
print xir
print "="
print (adj/det).dot(np.reshape(p-p3,(3,1)))
return (point_is_inside, (xi1, xi2, xi3), factor)
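# --- Hedged cross-check (added for illustration; not part of the original script) ---
# Before the orientation flips above, (xi1, xi2, xi3) solve T * xi = p - p_ref,
# where p_ref is the first vertex of the tetrahedron (the x4* coordinates) and the
# columns of T are the edge vectors to the other three vertices. The helper below
# is made up for this note and performs the same solve numerically with numpy.
def xi_via_solve(tetrahedron, p):
    # same unpacking order as point_is_in_tetrahedron: reference vertex first
    pref, pa, pb, pc = (np.asarray(v, dtype=float) for v in tetrahedron)
    T = np.column_stack((pa - pref, pb - pref, pc - pref))
    return np.linalg.solve(T, np.asarray(p, dtype=float) - pref)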
def get_xi_3d2(hexahedron,p):
[p0, p1, p2, p3, p4, p5, p6, p7] = hexahedron
debug = False
xi_sum = np.zeros(3)
denom = 0
no_factor = True
only_some = False
# p0
r = point_is_in_tetrahedron([p0, p1, p2, p4], [True,True,True], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p0 out"
if debug:
print "0 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p0))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
# p1
if not only_some:
r = point_is_in_tetrahedron([p1, p0, p5, p3], [False,True,True], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p1 out"
xi[1],xi[2] = xi[2],xi[1]
if debug:
print "1 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p1))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
# p2
r = point_is_in_tetrahedron([p2, p3, p6, p0], [True,True,False], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p2 out"
xi[1],xi[2] = xi[2],xi[1]
if debug:
print "2 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p2))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
# p3
r = point_is_in_tetrahedron([p3, p2, p1, p7], [False,False,True], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p3 out"
if debug:
print "3 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p3))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
# p4
if not only_some:
r = point_is_in_tetrahedron([p4, p5, p0, p6], [True,False,True], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p4 out"
xi[1],xi[2] = xi[2],xi[1]
if debug:
print "4 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p4))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
# p5
r = point_is_in_tetrahedron([p5, p4, p7, p1], [False,True,False], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p5 out"
if debug:
print "5 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p5))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
# p6
r = point_is_in_tetrahedron([p6, p7, p4, p2], [True,False,False], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p6 out"
if debug:
print "6 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p6))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
# p7
if not only_some:
r = point_is_in_tetrahedron([p7, p6, p3, p5], [False,False,False], p)
xi = np.array(r[1])
if not r[0]:
if debug:
print "p7 out"
xi[1],xi[2] = xi[2],xi[1]
if debug:
print "7 xi: ",xi
factor = 1./(0.1+np.linalg.norm(p-p7))
if no_factor:
factor = 1.0
xi_sum += xi*factor
denom += factor
xi_sum /= denom
if debug:
print "final xi: ", xi_sum
eps = 1e-12
if (0.0-eps <= xi_sum[0] <= 1.0+eps) and (0.0-eps <= xi_sum[1] <= 1.0+eps) and (0.0-eps <= xi_sum[2] <= 1.0+eps):
if debug:
print "inside"
return xi_sum
else:
if debug:
print "outside"
return None
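# Note (added for clarity, not part of the original script): get_xi_3d2 estimates the
# local coordinates of p in the hexahedron by forming one tetrahedron at each of the
# eight corners, computing the tetrahedron-local coordinates there (with components
# swapped/flipped so they agree with the hexahedron's xi1, xi2, xi3), and averaging
# the eight estimates. Inverse-distance weights 1/(0.1 + |p - p_i|) are available but
# disabled here (no_factor = True), so a plain average is used. The point is reported
# as inside only if every averaged component lies in [0, 1] within eps.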
def point_is_in_element(hexahedron,p):
[p0, p1, p2, p3, p4, p5, p6, p7] = hexahedron
debug = False  # only enabled below when the two inside-tests disagree
# bottom [p0,p1,p3,p2]
a30 = (-p3+p0)
a01 = (-p0+p1)
a12 = (-p1+p2)
a32 = (-p3+p2)
a20 = (-p2+p0)
v0 = np.cross(a30, a01).dot(-p0+p) >= 0
v1 = np.cross(a01, a12).dot(-p1+p) >= 0
v2 = np.cross(a12, a32).dot(-p3+p) >= 0
v3 = np.cross(a32, a20).dot(-p2+p) >= 0
# top [p4,p6,p7,p5]
a74 = (-p7+p4)
a46 = (-p4+p6)
a65 = (-p6+p5)
a75 = (-p7+p5)
a54 = (-p5+p4)
v4 = np.cross(a74, a46).dot(-p4+p) >= 0
v5 = np.cross(a46, a65).dot(-p6+p) >= 0
v6 = np.cross(a65, a75).dot(-p7+p) >= 0
v7 = np.cross(a75, a54).dot(-p5+p) >= 0
# right [p1,p5,p7,p3]
a71 = (-p7+p1)
a15 = (-p1+p5)
a53 = (-p5+p3)
a73 = (-p7+p3)
a31 = (-p3+p1)
v8 = np.cross(a71, a15).dot(-p1+p) >= 0
v9 = np.cross(a15, a53).dot(-p5+p) >= 0
v10 = np.cross(a53, a73).dot(-p7+p) >= 0
v11 = np.cross(a73, a31).dot(-p3+p) >= 0
# left [p0,p2,p6,p4]
a60 = (-p6+p0)
a02 = (-p0+p2)
a24 = (-p2+p4)
a64 = (-p6+p4)
a40 = (-p4+p0)
v12 = np.cross(a60, a02).dot(-p0+p) >= 0
v13 = np.cross(a02, a24).dot(-p2+p) >= 0
v14 = np.cross(a24, a64).dot(-p6+p) >= 0
v15 = np.cross(a64, a40).dot(-p4+p) >= 0
# front [p0,p4,p5,p1]
a50 = (-p5+p0)
a04 = (-p0+p4)
a41 = (-p4+p1)
a51 = (-p5+p1)
a10 = (-p1+p0)
v16 = np.cross(a50, a04).dot(-p0+p) >= 0
v17 = np.cross(a04, a41).dot(-p4+p) >= 0
v18 = np.cross(a41, a51).dot(-p5+p) >= 0
v19 = np.cross(a51, a10).dot(-p1+p) >= 0
# back [p2,p3,p7,p6]
a72 = (-p7+p2)
a23 = (-p2+p3)
a36 = (-p3+p6)
a76 = (-p7+p6)
a62 = (-p6+p2)
v20 = np.cross(a72, a23).dot(-p2+p) >= 0
v21 = np.cross(a23, a36).dot(-p3+p) >= 0
v22 = np.cross(a36, a76).dot(-p7+p) >= 0
v23 = np.cross(a76, a62).dot(-p6+p) >= 0
b1 = point_is_in_front_of_quadrilateral([p0,p1,p3,p2],p) # bottom
b2 = point_is_in_front_of_quadrilateral([p4,p6,p7,p5],p) # top
b3 = point_is_in_front_of_quadrilateral([p1,p5,p7,p3],p) # right
b4 = point_is_in_front_of_quadrilateral([p0,p2,p6,p4],p) # left
b5 = point_is_in_front_of_quadrilateral([p0,p4,p5,p1],p) # front
b6 = point_is_in_front_of_quadrilateral([p2,p3,p7,p6],p) # back
is_inside0 = v0 and v1 and v2 and v3 and v4 and v5 and v6 and v7 and v8 and v9 and v10 and v11 and v12 and v13 and v14 and v15 and v16 and v17 and v18 and v19 and v20 and v21 and v22 and v23
is_inside = b1 and b2 and b3 and b4 and b5 and b6
if is_inside0 != is_inside:
print "b: ",b1,b2,b3,b4,b5,b6
print "v: ",v0, v1, v2, v3, ",", v4, v5, v6, v7, ",", v8, v9, v10, v11, ",", v12, v13, v14, v15, ",", v16, v17, v18, v19, ",", v20, v21, v22, v23
print "error!"
debug = True
if debug:
print "v: ",v0, v1, v2, v3, ",", v4, v5, v6, v7, ",", v8, v9, v10, v11, ",", v12, v13, v14, v15, ",", v16, v17, v18, v19 , ",", v20, v21, v22, v23
print ""
print "point ",p
if b1 and b2 and b3 and b4 and b5 and b6:
print "inside"
else:
print "outside"
pp = p
p = [p0, p1, p2, p3, p4, p5, p6, p7]
import stl
from stl import mesh
out_3d_mesh_triangles = [
[p[1],p[0],p[2]],[p[1],p[2],p[3]], # bottom
[p[0],p[3],p[1]],[p[0],p[2],p[3]], # bottom
[p[4],p[5],p[7]],[p[4],p[7],p[6]], # top
[p[0],p[1],p[5]],[p[0],p[5],p[4]], # front
[p[2],p[7],p[3]],[p[2],p[6],p[7]], # back
[p[2],p[0],p[4]],[p[2],p[4],p[6]], # left
[p[1],p[3],p[7]],[p[1],p[7],p[5]], # right
[pp, pp+np.array([0.1,0.0,0.0]), pp-np.array([0.1,0.0,0.0])],
[pp, pp+np.array([0.0,0.1,0.0]), pp-np.array([0.0,0.1,0.0])],
[pp, pp+np.array([0.0,0.0,0.1]), pp-np.array([0.0,0.0,0.1])],
]
# write debugging output stl meshes
def write_stl(triangles, outfile, description):
# create output mesh
n_triangles = len(triangles)
# Create the mesh
out_mesh = mesh.Mesh(np.zeros(n_triangles, dtype=mesh.Mesh.dtype))
for i, f in enumerate(triangles):
out_mesh.vectors[i] = f
#for j in range(3):
#print "set (",i,",",j,")=",f[j]," (=",stl_mesh.vectors[i][j],")"
#out_mesh.update_normals()
out_mesh.save(outfile, mode=stl.Mode.ASCII)
print "saved {} triangles to \"{}\" ({})".format(n_triangles,outfile,description)
write_stl(out_3d_mesh_triangles, "auat.stl", "aut")
return is_inside
def point_is_in_front_of_quadrilateral(quadrilateral,p):
[p0, p1, p2, p3] = quadrilateral
v0 = np.cross((-p2+p0), (-p0+p1)).dot(-p0+p)
v1 = np.cross((-p0+p1), (-p1+p3)).dot(-p1+p)
v2 = np.cross((-p1+p2), (-p2+p3)).dot(-p2+p)
v3 = np.cross((-p2+p3), (-p3+p0)).dot(-p3+p)
c2 = np.cross((-p1+p2), (-p2+p3))
c2 = c2/np.linalg.norm(c2)
c22 = -p2+p
c22 = c22/np.linalg.norm(c22)
v2 = c2.dot(c22)
print "angle=",np.arccos(v2)*180./np.pi
debug = True
if debug:
print "quad ",quadrilateral
print "c2=",c2,",p22=",c22
print "v2 = ",(-p1+p2),"x",(-p2+p3)," (=",np.cross((-p1+p2), (-p2+p3)),"), dot",-p2+p
print ", v0:",v0,", v1:",v1,", v2:",v2,", v3:",v3
return v0 >= 0 and v1 >= 0 and v2 >= 0 and v3 >= 0
if True:
#point (58.9434,146.219,37), element 1
# p0 ((61.6746,146.275,37),
# p1 (60.3037,146.374,37),
# p2 (62.1413,145.011,37),
# p3 (60.6606,145.111,37),
# p4 (71.9646,148.898,50.8421),
# p5 (69.9016,149.867,50.8421),
# p6 (71.7574,146.739,50.8421),
# p7 (69.8146,147.667,50.8421))
#DEBUG: 0 xi: (2.06187,0.204832,4.08175e-16)
#DEBUG: 1 xi: (2.04524,4.08645e-16,0.203918)
#DEBUG: 2 xi: (1.90644,4.12565e-16,0.196281)
#DEBUG: 3 xi: (1.96677,0.199601,4.10861e-16)
#DEBUG: 4 xi: (1.26432,0,0.592938)
#DEBUG: 5 xi: (1.64441,0.353881,3.33067e-16)
#DEBUG: 6 xi: (1.63102,1.14093,5.55112e-16)
#DEBUG: 7 xi: (1.88966,5.55112e-16,0.871531)
#VERB3: pointIsInElement, point (63.0191,146.036,37), element 0((63.0191,146.036,37),(61.6746,146.275,37),(64.2323,145.398,37),(62.1413,145.011,37),(73.7444,147.455,50.8421),(71.9646,148.898,50.8421),(75.2802,145.762,50.8421),(71.7574,146.739,50.8421))
#VERB3: xi: (0,0,0)
#VERB3: xi: (1.11022e-16,-7.1741e-32,-3.73501e-18)
#VERB3: xi: (0,0,0)
#VERB3: xi: (0.43771,0.361389,3.27812e-16)
#VERB3: xi: (0,0,0)
#VERB3: xi: (0.287173,-0.365592,7.77156e-16)
#VERB3: p5 out
# test
#DEBUG: pointIsInElement, point
# p (59.368,144.955,37), element 7
# p0 (59.368,144.955,37),
# p1 (57.0694,144.573,37),
# p2 (59.3694,144.08,37),
# p3 (56.4743,143.333,37),
# p4 (67.8127,148.044,50.8421),
# p5 (64.6207,148.895,50.8421),
# p6 (67.3157,146.434,50.8421),
# p7 (63.9783,146.701,50.8421)
#DEBUG: xi: (0,0,0)
#DEBUG: xi: (3.06205,0,-11.8288)
#DEBUG: p1 out
# [p1, p0, p5, p3]
#VERB3: isInside: 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1
# ((),(),(),(),(),(),(),()
p0 = np.array((72.253,151.97,120.053))
p1 = np.array((57.9479,156.418,120.053))
p2 = np.array((71.5834,139.519,120.053))
p3 = np.array((64.2421,147.55,120.053))
p4 = np.array((72.9941,155.63,133.895))
p5 = np.array((57.1602,159.617,133.895))
p6 = np.array((71.1095,141.566,133.895))
p7 = np.array((63.6357,150.208,133.895))
pp = np.array([70,150,122])
p = [p0, p1, p2, p3, p4, p5, p6, p7]
import stl
from stl import mesh
out_3d_mesh_triangles = [
[p[0],p[3],p[1]],[p[0],p[2],p[3]], # bottom
[p[4],p[5],p[7]],[p[4],p[7],p[6]], # top
[p[0],p[1],p[5]],[p[0],p[5],p[4]], # front
[p[2],p[7],p[3]],[p[2],p[6],p[7]], # back
[p[2],p[0],p[4]],[p[2],p[4],p[6]], # left
[p[1],p[3],p[7]],[p[1],p[7],p[5]], # right
[pp, pp+np.array([0.1,0.0,0.0]), pp+np.array([0.2,0.0,0.0])],
]
# write debugging output stl meshes
def write_stl(triangles, outfile, description):
# create output mesh
n_triangles = len(triangles)
# Create the mesh
out_mesh = mesh.Mesh(np.zeros(n_triangles, dtype=mesh.Mesh.dtype))
for i, f in enumerate(triangles):
out_mesh.vectors[i] = f
#for j in range(3):
#print "set (",i,",",j,")=",f[j]," (=",stl_mesh.vectors[i][j],")"
#out_mesh.update_normals()
out_mesh.save(outfile, mode=stl.Mode.ASCII)
print "saved {} triangles to \"{}\" ({})".format(n_triangles,outfile,description)
write_stl(out_3d_mesh_triangles, "aut.stl", "aut")
p = pp
point_is_in_element([p0, p1, p2, p3, p4, p5, p6, p7], p)
sys.exit(0)
# test
factor = 0.050
#factor = 0
p0 = np.array([0.0, 0.0, 0.0]) + np.random.rand(3)*factor
p1 = np.array([1.0, 0.0, 0.0]) + np.random.rand(3)*factor
p2 = np.array([0.0, 1.0, 0.0]) + np.random.rand(3)*factor
p3 = np.array([1.0, 1.0, 0.0]) + np.random.rand(3)*factor
p4 = np.array([0.0, 0.0, 1.0]) + np.random.rand(3)*factor
p5 = np.array([1.0, 0.0, 1.0]) + np.random.rand(3)*factor
p6 = np.array([0.0, 1.0, 1.0]) + np.random.rand(3)*factor
p7 = np.array([1.0, 1.0, 1.0]) + np.random.rand(3)*factor
p = np.array([0.3, 0.4, 0.2])
print "3d"
xis = np.random.rand(100000,3)
#xis =
error_sum = 0
n = 0
for xi in xis:
(xi1,xi2,xi3) = xi
p = (1-xi1)*(1-xi2)*(1-xi3)*p0 + xi1*(1-xi2)*(1-xi3)*p1 + (1-xi1)*xi2*(1-xi3)*p2 + xi1*xi2*(1-xi3)*p3 + (1-xi1)*(1-xi2)*xi3*p4 + xi1*(1-xi2)*xi3*p5 + (1-xi1)*xi2*xi3*p6 + xi1*xi2*xi3*p7
xi_comp = get_xi_3d2([p0, p1, p2, p3, p4, p5, p6, p7], p)
in_el0 = xi_comp is not None
in_el1 = point_is_in_element([p0, p1, p2, p3, p4, p5, p6, p7], p)
if in_el0 != in_el1:
print "break"
#break
if xi_comp is None:
#print "None"
continue
error = np.linalg.norm(xi_comp-xi)
error_sum += error
n += 1
#if error < 1e-12:
# print xi,"ok"
#else:
# print xi,"failed, error: ", error,", computed: ",xi_comp,", point: ",p
print "avg error: ", error_sum/n
print "max_factor:",max_factor
#print "2d"
#print get_xi_2d([p0, p1, p2, p3], p)
|
class OpCode(object):
def __init__(self, arg):
self.arg = int(arg)
def execute(self):
pass
@staticmethod
def parseline(line, lineno):
code, arg = line.split()
try:
return OpCode.make(code, arg)
except ValueError:
raise(ValueError(f"line {lineno} has an unexpected opcode: {code}"))
@staticmethod
def make(code, arg):
if code == "acc":
return Acc(arg)
elif code == "jmp":
return Jmp(arg)
elif code == "nop":
return Nop(arg)
else:
raise(ValueError(f"unexpected opcode: {code}"))
def swap(self):
if isinstance(self, Jmp):
return Nop(self.arg)
elif isinstance(self, Nop):
return Jmp(self.arg)
else:
return self
class Acc(OpCode):
def execute(self):
return 1, self.arg
class Jmp(OpCode):
def execute(self):
return self.arg, 0
class Nop(OpCode):
def execute(self):
return 1, 0
class Machine(object):
def __init__(self, acc=0):
self.acc = acc
self.tape = []
def load_tape(self, fname):
numlines = 0
with open(fname, 'r') as handle:
for lineno, line in enumerate(handle.readlines()):
self.tape.append(OpCode.parseline(line, lineno))
numlines += 1
return numlines
def __len__(self):
return len(self.tape)
def reset(self):
self.acc = 0
def execute(self):
self.reset()
ptr, N = 0, len(self)
seen = [False] * N
while ptr < N and not seen[ptr]:
seen[ptr] = True
shift, acc = self.tape[ptr].execute()
ptr += shift
self.acc += acc
exited_early = (ptr != N)
return self.acc, exited_early
def swap(self, ptr):
self.tape[ptr] = self.tape[ptr].swap()
def debug(self):
for ptr in range(len(self)):
self.swap(ptr)
accumulator, exited_early = self.execute()
if not exited_early:
return accumulator
self.swap(ptr)
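# Illustrative trace (added for clarity, not in the original): for the tape
#   nop +0 / acc +1 / jmp -1
# execute() visits ptr 0 -> 1 -> 2 -> 1; ptr 1 has already been seen, so the loop
# stops with acc == 1 and exited_early == True (ptr never reached len(tape)).
# debug() then retries the program with each jmp/nop swapped in turn until one
# run terminates normally and returns that run's accumulator.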
def main():
machine = Machine()
machine.load_tape("input.txt")
part1, _ = machine.execute()
part2 = machine.debug()
print(f"Part I: {part1}")
print(f"Part II: {part2}")
if __name__ == '__main__':
main()
|
from . import api, commons
def check_params(line_id: str, station_id: str, direction_sens: str):
response = api.get_missions_next(line_id, station_id, direction_sens)
# invalid station
if response.ambiguityMessage is not None:
raise commons.RatpException(
"invalid line code and/or station code and/or direction"
)
def lines_by_name(name_query: str = ""):
results = []
# get all lines
lines = api.get_lines_realtime_realm()
# filter by name
for line in sorted(lines, key=lambda k: k["code"]):
# ignore lines that do not contain query string
if name_query not in line.name.lower():
continue
results.append(simplified_line_data(line))
return results
def lines_by_code(code_query):
results = []
lines = api.get_lines_by_code(code_query)
for line in sorted(lines, key=lambda k: k["code"]):
# stop if no results
if line["id"] is None:
break
results.append(simplified_line_data(line))
return results
def simplified_line_data(line):
if line.image is None:
# image = 'data:image/gif;base64,R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7'
image = ""
else:
image = f"http://opendata-tr.ratp.fr/wsiv/static/line/{line.image}"
return {
"id": line.id,
"reseau": line.reseau.name,
"code": line.code,
"name": line.name,
"image": image,
}
def simplified_station_data(station: dict) -> dict:
return {
"name": station.name,
"line_station_id": station.id,
"line": simplified_line_data(station.line),
}
def directions(line_id: str) -> dict:
response = api.get_directions(line_id)
results = {}
for direction in response.directions:
results[direction.sens] = direction.name
return results
def stations_by_line(line_id: str) -> list:
stations = api.get_stations_by_line(line_id)
results = []
for station in stations:
results.append(simplified_station_data(station))
return results
def stations_by_name(name_query: str) -> list:
response = api.get_stations_by_name(name_query)
results = []
for station in response.stations:
results.append(simplified_station_data(station))
return results
def next_departures(line_id: str, station_id: str, direction_sens: str) -> dict:
response = api.get_missions_next(line_id, station_id, direction_sens)
# invalid station
if response.ambiguityMessage is not None:
raise commons.RatpException("invalid station code and/or direction")
missions = []
for mission in response.missions:
# handle case where service has ended
if len(mission.stations) > 1:
destination_name = mission.stations[1].name
else:
destination_name = "---"
if len(mission.stationsDates) > 0:
stations_date = mission.stationsDates[0]
else:
stations_date = "------------"
missions.append(
{
"code": mission.code,
"destinationName": destination_name,
"datetime": stations_date,
# 'platform': mission.stationsPlatforms[0], # only for RER
"message": mission.stationsMessages[0],
}
)
if len(response.perturbations) > 0:
perturbations = response.perturbations[0].message.text
else:
perturbations = ""
return {"missions": missions, "perturbations": perturbations}
|
# Generated by make-pins.py, do NOT edit!
PINS_AF = (
('LED1', (1, 'LPI2C1_SCL'), (3, 'GPT3_COMPARE1'), (5, 'GPIO3_PIN7'), (10, 'GPIO9_PIN7'), (11, 'FLEXPWM1_PWMX2'), ),
('LED2', (1, 'LPI2C1_SDA'), (3, 'GPT3_COMPARE2'), (5, 'GPIO3_PIN8'), (10, 'GPIO9_PIN8'), (11, 'FLEXPWM1_PWMX3'), ),
('LED3', (3, 'GPT3_COMPARE3'), (5, 'GPIO3_PIN9'), (10, 'GPIO9_PIN9'), (11, 'FLEXPWM2_PWMX0'), ),
('LED4', (2, 'GPT1_CAPTURE2'), (5, 'GPIO3_PIN12'), (10, 'GPIO9_PIN12'), (11, 'FLEXPWM2_PWMX3'), ),
('KEY', (5, 'GPIO13_PIN0'), ),
)
|