text stringlengths 8 6.05M |
|---|
#Implementing a Stack using an array
def stack_using_array_push(array, a):
    """Push *a* onto the stack backed by *array*.

    Returns the index of the new top element (always len(array) - 1 after
    the append), matching the original contract.  The original computed
    a pre-append top and an unused `rear`; both were dead code.
    """
    array.append(a)
    # Top of the stack is always the last index of the backing list.
    return len(array) - 1
def stack_using_array_pop(array):
    """Pop the top element off the stack backed by *array*.

    Returns the index of the new top element (-1 once the stack is empty).
    Raises IndexError on an already-empty stack — the same exception the
    original `del array[-1]` raised, but with a clear message.
    """
    if not array:
        raise IndexError("pop from empty stack")
    array.pop()
    return len(array) - 1
# Demo: exercise the stack helpers on a sample list.
# NOTE: uses Python 2 print-statement syntax; run under Python 2.
array=[1,2,3,4,5,6]
#top_elem=stack_using_array_push(array,7)
#print 'New Top after Push= ',top_elem
#print 'List after Push: ',array
top_elem=stack_using_array_pop(array)
print 'New Top after Pop= ',top_elem
print 'List after Pop: ',array
|
import pandas as pd
import numpy as np

# Load the raw US pollution data; the python engine copes with irregular CSV.
data = pd.read_csv('data/pollution_us_2000_2016.csv', encoding='utf8', engine='python')
# Drop the unnamed index column written by the original export.
data = data.drop(data.columns[0], axis=1)
# Fill missing measurements with the mean of the same day/county group.
data = data.fillna(data.groupby(['Date Local', 'County Code']).transform('mean'))
# NOTE: output filename keeps the original (misspelled) 'prepocessed' name so
# anything reading that path downstream is unaffected.
data.to_csv('data/prepocessed.csv', sep=',', encoding='utf8')
# BUG FIX: head is a method — without () this printed the bound-method repr
# instead of the first rows.
print(data.head())
|
# Read two moments in time, each given as day, hour, minute — one integer
# per input line, first moment then second moment.
d1, h1, m1, d2, h2, m2 = (int(input()) for _ in range(6))
# Print the elapsed time between the two moments, in seconds.
print((d2 - d1) * 24 * 3600 + (h2 - h1) * 3600 + (m2 - m1) * 60)
|
import numpy as np
import random
import math
class parameters():
    """Run-time constants for the trajectory simulation."""
    NSteps = 3000        # number of propagation steps
    NTraj = 5000         # number of trajectories
    dtN = 0.01           # nuclear time step
    beta = 1.0           # inverse temperature
    M = 1 # mass of the particle
    nstate =2            # number of electronic states
    #dirName = "result"
    nb = 3               # presumably number of basis/bead indices — TODO confirm
    # NOTE(review): true division makes these floats (-1.0, 1.0) under
    # Python 3 — confirm integer bounds are not expected.
    lb_n = -(nb-1)/2     # lower index bound
    ub_n = (nb-1)/2      # upper index bound
    ndof = 1             # nuclear degrees of freedom
    #fs_to_au = 41.341 # a.u./fs
    #nskip = 1
    # MODEL SPECIFIC
    #ε = 5.0
    #ξ = 4.0
    #β = 1.0
    #ωc = 2.0
    #Δ = 1.0 # Non-varied parameter
def Hel0(R):
    """State-independent potential: harmonic + cubic + quartic terms in R."""
    # Fortran original:
    # 0.5*sum(x2(:)**2)+(1.0/10.0)*sum(x2(:)**3)+(1.0/100.0)*sum(x2(:)**4))
    harmonic = 0.5 * np.sum(R**2.0)
    cubic = (1.0/10.0) * np.sum(R**3.0)
    quartic = (1.0/100.0) * np.sum(R**4.0)
    return harmonic + cubic + quartic
|
#!/usr/bin/python
"""
txt2xls.py [text-file-folder]

All text files must be UTF-8 encoded (watch out for the COG and summary
files), otherwise this fails with:
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x91 in position 803: invalid start byte
"""
import sys
mypath = sys.argv[1]
from os import listdir
from os.path import isfile, join
# Collect every *.txt file that sits directly inside the given folder.
textfiles = [ join(mypath,f) for f in listdir(mypath) if isfile(join(mypath,f)) and '.txt' in f]
def is_number(s):
    """Return True when *s* parses as a float ("1.11", "1", "3e-5")."""
    try:
        float(s)
    except ValueError:
        return False
    return True
import xlwt
import xlrd
font = xlwt.Font()
font.name = 'Times New Roman'
#font.bold = True
# Integer cells
styleInt = xlwt.XFStyle()
styleInt.num_format_str = '#,###'
styleInt.font = font
# Plain text cells
styleALL = xlwt.XFStyle()
styleALL.font = font
# Decimal cells
styleFloat= xlwt.XFStyle()
styleFloat.num_format_str = '#,###0.00'
styleFloat.font = font
# Scientific-notation cells
styleEE= xlwt.XFStyle()
styleEE.num_format_str = '0.00E+0'
styleEE.font = font
# Header row: green, bold, Times New Roman
styleHeader =xlwt.easyxf('font: color-index green, name Times New Roman, bold on');
for textfile in textfiles:
    # Read the tab-separated text file and transpose rows -> columns so each
    # input column becomes a worksheet column.
    row_list = []
    with open(textfile, 'r') as f:
        for row in f.readlines():
            row_list.append(row.split('\t'))
    column_list = zip(*row_list)
    workbook = xlwt.Workbook()
    worksheet = workbook.add_sheet('Sheet1')
    i = 0
    for column in column_list:
        for item in range(len(column)):
            value = column[item].strip()
            if item == 0:
                # First row holds the column headers.
                worksheet.write(item, i, value, style=styleHeader)
            elif is_number(value):
                # BUG FIX: scientific notation is detected by an 'e'/'E'
                # exponent marker, not by '-'.  The old `"-" in value` test
                # mislabelled negative integers as scientific and crashed on
                # exponent values without '-' such as '1e5'
                # (int('1e5') raises ValueError).
                if 'e' in value.lower():      # scientific notation
                    worksheet.write(item, i, float(value), style=styleEE)
                elif '.' in value:            # decimal
                    worksheet.write(item, i, float(value), style=styleFloat)
                elif value == '0':            # plain zero, shown as "0"
                    worksheet.write(item, i, int(value), style=styleALL)
                else:                         # integer
                    worksheet.write(item, i, int(value), style=styleInt)
            else:
                # Non-numeric text.
                worksheet.write(item, i, value, style=styleALL)
        i += 1
    workbook.save(textfile.replace('.txt', '.xls'))
|
import datetime
from enum import Enum
from sqlalchemy import Column, DateTime, ForeignKey, Integer, LargeBinary, String, Text
from sqlalchemy.ext.declarative import DeclarativeMeta, declarative_base
from sqlalchemy.orm import relationship
# Declarative base shared by all ORM models below.
Base: DeclarativeMeta = declarative_base()


class UserRole(str, Enum):
    """Allowed values for User.role; the str mixin stores cleanly in the DB."""
    CLIENT = 'client'
    ADMIN = 'admin'


class User(Base):
    """Registered account (client or admin)."""
    __tablename__ = 'User'
    id = Column(Integer, primary_key=True)
    username = Column(String, unique=True, nullable=False)
    full_name = Column(String, nullable=True)
    hashed_password = Column(String, nullable=False)  # never store plaintext
    role = Column(String, nullable=False, default=UserRole.CLIENT)


class Post(Base):
    """Post authored by a User, optionally with an attached photo."""
    __tablename__ = 'Post'
    id = Column(Integer, primary_key=True)
    author_id = Column(Integer, ForeignKey('User.id'))
    header = Column(String, nullable=False)
    photo = Column(LargeBinary, nullable=True)  # raw image bytes
    text = Column(Text, nullable=True)
    posted_at = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
    author = relationship('User')


class Comment(Base):
    """Comment left by a User on a Post."""
    __tablename__ = 'Comment'
    id = Column(Integer, primary_key=True)
    author_id = Column(Integer, ForeignKey('User.id'))
    post_id = Column(Integer, ForeignKey('Post.id'))
    text = Column(Text, nullable=True)
    posted_at = Column(DateTime, nullable=False, default=datetime.datetime.utcnow)
    author = relationship('User')
    post = relationship('Post')
|
from __future__ import print_function
import numpy as np
import pickle
import socket
import sys
import os
import time
# Unix-domain socket path lives under the user's runtime dir.
path = os.environ['XDG_RUNTIME_DIR']
server_address = path + '/uds_socket'

# Create a UDS socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
print('connecting to', server_address)
try:
    sock.connect(server_address)
except socket.error as msg:  # BUG FIX: "except E, v" is Python-2-only syntax
    print(msg)
    sys.exit(1)

print('Sending array')
a = np.random.rand(700, 700)
# Protocol 2 keeps the pickle readable by Python 2 peers.
data = pickle.dumps(a, protocol=2)
sock.sendall(data)
# BUG FIX: bytes literal — a str here fails on Python 3 sockets.
sock.send(b'End')
print('Sent!')
time.sleep(1)
x = sock.recv(4096)
sock.close()
print('recv : ', x)
print('Data sent')
from random import random,sample,uniform,randint,gauss
import numpy as np
import pandas as pd
import copy
import time
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import FeaturesSelection as FeatureSelection
from Network import Network
from operator import itemgetter
import time
def func(x):
    """Collapse the multi-class target: 0 stays 0, anything else becomes 1."""
    return 0 if x == 0 else 1
def read_data():
    """Load the Cleveland heart-disease dataset; return scaled train/test splits.

    Returns (X_train, y_train, X_test, y_test): the X arrays are
    MinMax-scaled over the selected features, the y's are reindexed
    pandas Series holding a binary target.
    """
    #Features Descriptions
    dt = pd.read_csv('processed.cleveland.data', sep=',', encoding='ISO-8859-1', engine='python',header=None)
    dt.columns=['Age','Sex','CP','Trestbps','Chol','Fbs','restecg','Thalach','exang','oldpeack','slope','ca','thal','num']
    # '?' marks missing values in this dataset; drop incomplete rows.
    dt = dt.replace('?', np.nan)
    dataset=dt.dropna(how='any')
    dataset['ca'] = dataset['ca'].astype(float)
    dataset['thal'] = dataset['thal'].astype(float)
    # Collapse the 0-4 severity score into a binary disease label.
    dataset['num'] = dataset['num'].apply(func)
    target=dataset['num']
    del dataset['num']
    '''-------------------------Feature Selection-----------------------------'''
    # Correlation Matriz
    #FeatureSelection.CorrelationMatrizWithHeatMap(dataset, target)
    n_features = 9
    # NOTE(review): dataset_select_Uni is computed but never used below.
    dataset_select_Uni = FeatureSelection.Univariate_Selection(dataset, target, n_features)
    dataset_select = FeatureSelection.Feature_Importance(dataset, target, n_features)
    '''-------------------------Divide Dataset into Train and Test-----------------------------'''
    data_train, data_test, target_train, y_test = train_test_split(dataset_select, target, test_size=0.2,
                                                                   random_state=0)
    data_train = data_train.reset_index(drop=True)
    y_train = target_train.reset_index(drop=True)
    data_test = data_test.reset_index(drop=True)
    y_test = y_test.reset_index(drop=True)
    '''-------------------------Scaling-----------------------------'''
    colunms = list(data_train)
    # data_train_scaled, data_test_scaled = Scaling.Scaling_StandardScaler(data_train, data_test, colunms)
    # Fit the scaler on train only, then apply to both splits (no leakage).
    scaler = MinMaxScaler()
    scaler.fit(data_train)
    X_train = scaler.transform(data_train)
    X_test = scaler.transform(data_test)
    #data_train_scaled, data_test_scaled = Scaling.Scaling_RobustScaler(data_train, data_test, colunms)
    return X_train,y_train,X_test,y_test
def get_all_scores(X, y, nets):
    """Evaluate every network in *nets* on (X, y); returns their mean errors."""
    scores = []
    for net in nets:
        scores.append(net.mean_error(X, y))
    return scores
def tour_sel(t_size):
    """Return a tournament-selection operator with fixed tournament size."""
    def tournament(pop):
        # One tournament per slot: the mating pool keeps the population size.
        return [one_tour(pop, t_size) for _ in range(len(pop))]
    return tournament
def one_tour(population, size):
    """Deterministic tournament for a MINIMIZATION problem.

    Samples *size* individuals and returns the one with the lowest
    fitness (individuals are (chromosome, fitness) pairs).  The original
    docstring said "Maximization", but pool[0] after an ascending sort
    on fitness is the minimum — which matches the mean-error fitness
    used throughout this module.
    """
    pool = sample(population, size)
    pool.sort(key=itemgetter(1))
    return pool[0]
def get_random_point(type, nets):
    '''
    @type = either 'weight' or 'bias'
    @returns tuple (layer_index, point_index)
    note: if type is set to 'weight', point_index will return (row_index, col_index)
    '''
    reference = nets[0]
    layer_index = randint(0, reference.num_layers - 2)
    point_index = 0
    if type == 'weight':
        # Pick one (row, col) cell inside the chosen weight matrix.
        shape = reference.weights[layer_index].shape
        point_index = (randint(0, shape[0] - 1), randint(0, shape[1] - 1))
    elif type == 'bias':
        # Pick one entry of the chosen bias vector.
        point_index = randint(0, reference.biases[layer_index].size - 1)
    return (layer_index, point_index)
def mutation(child, mut_rate, nets):
    """Gaussian per-gene mutation.

    Deep-copies *child* and, independently for every weight and bias entry,
    adds gauss(0, 1) noise with probability *mut_rate*.  nets[0] is used
    only as a shape template — assumes all networks share its
    architecture; TODO confirm.
    """
    nn = copy.deepcopy(child)
    """
    # mutate bias
    for _ in range(nets[0].bias_nitem):
        # get some random points
        layer, point = get_random_point('bias',nets)
        # add some random value between -0.5 and 0.5
        if uniform(0,1) < mut_rate:
            nn.biases[layer][point] += uniform(-0.5, 0.5)
    # mutate weight
    for _ in range(nets[0].weight_nitem):
        # get some random points
        layer, point = get_random_point('weight',nets)
        # add some random value between -0.5 and 0.5
        #print(layer,point[0],point[1])
        #print(nn.weights[layer])
        #print(nn.weights[layer][point[0]][point[1]])
        if uniform(0,1) < mut_rate:
            nn.weights[layer][point[0]][point[1]] += uniform(-0.5, 0.5)"""
    # Mutate every weight entry independently.
    for i in range(len(nets[0].weights)):
        for j in range(len(nets[0].weights[i])):
            for k in range(len(nets[0].weights[i][j])):
                value = random()
                if(value < mut_rate):
                    muta_value = gauss(0,1)
                    new_gene = nn.weights[i][j][k] + muta_value
                    """if new_gene < 0: # These "2" are arbitrary; try other numbers
                        new_gene = 0
                    elif new_gene > 1:
                        new_gene = 1"""
                    nn.weights[i][j][k] = new_gene
    # Mutate every bias entry independently.
    for i in range(len(nets[0].biases)):
        for j in range(len(nets[0].biases[i])):
            value = random()
            if(value < mut_rate):
                muta_value = gauss(0,1)
                new_gene = nn.biases[i][j] + muta_value
                """if new_gene < 0: # These "2" are arbitrary; try other numbers
                    new_gene = 1
                elif new_gene > 1:
                    new_gene = 1"""
                nn.biases[i][j] = new_gene
    return nn
def crossover(father, mother, cross_rate, nets):
    """
    @father = (network, fitness) pair representing father
    @mother = (network, fitness) pair representing mother
    @returns = with probability cross_rate, two new (child, 0) pairs built
    by uniform crossover (each gene swapped with probability 0.5);
    otherwise the untouched parents.
    """
    value = random()
    if(value < cross_rate):
        child1 = copy.deepcopy(father[0])
        child2 = copy.deepcopy(mother[0])
        cromo1 = father[0]
        cromo2 = mother[0]
        # Swap each weight entry between the children with probability 0.5.
        for i in range(len(nets[0].weights)):
            for j in range(len(nets[0].weights[i])):
                for k in range(len(nets[0].weights[i][j])):
                    value = random()
                    if(value < 0.5):
                        child1.weights[i][j][k] = cromo2.weights[i][j][k]
                        child2.weights[i][j][k] = cromo1.weights[i][j][k]
        # Same per-entry swap for the biases.
        for i in range(len(nets[0].biases)):
            for j in range(len(nets[0].biases[i])):
                value = random()
                if(value < 0.5):
                    child1.biases[i][j] = cromo2.biases[i][j]
                    child2.biases[i][j] = cromo1.biases[i][j]
        # Children start with fitness 0; the caller re-scores them.
        return ((child1,0),(child2,0))
    else:
        return (father,mother)
def one_point_cross(indiv_1, indiv_2, prob_cross):
    """One-point crossover on (chromosome, fitness) pairs.

    With probability *prob_cross*, cut both chromosomes at a random
    position and swap the tails; the offspring get fitness 0.  Otherwise
    the parents are returned untouched.
    """
    if random() >= prob_cross:
        return (indiv_1, indiv_2)
    cromo_1, cromo_2 = indiv_1[0], indiv_2[0]
    pos = randint(0, len(cromo_1))
    child_a = cromo_1[0:pos] + cromo_2[pos:]
    child_b = cromo_2[0:pos] + cromo_1[pos:]
    return ((child_a, 0), (child_b, 0))
def sel_survivors_elite(elite):
    """Return an elitist replacement operator keeping an *elite* fraction of parents."""
    def elitism(parents, offspring):
        size = len(parents)
        n_elite = int(size * elite)
        # In-place sorts by fitness (ascending), exactly like the original:
        # the callers' lists are sorted as a side effect.
        offspring.sort(key=itemgetter(1))
        parents.sort(key=itemgetter(1))
        survivors = parents[:n_elite] + offspring[:size - n_elite]
        # Strip the fitness component: the new population is chromosomes only.
        return [indiv[0] for indiv in survivors]
    return elitism
def GeneticAlgo(best_ele,fitness,run,X_train,y_train,X_test,y_test,gen_size,pop_size,net_sizes,mut_rate,cross_rate,sel_parents,recombination,sel_survivors):
    """Run one GA optimization of network weights; returns (best_fitness, best_network).

    Fitness is mean error on the training set (lower is better).  Writes
    the per-generation train accuracy of the incumbent best to "<run>.txt".
    NOTE(review): the net_sizes parameter is never used — the population
    is built from the module-level net_size; confirm which is intended.
    """
    file_name = str(run) + ".txt"
    newfile = open(file_name,"w")
    start_time = time.time()
    # Init pop_size diferent Networks
    pop = [Network(net_size) for i in range(pop_size)]
    #evaluate the population
    score_list = list(zip(pop, get_all_scores(X_train,y_train,pop)))
    for j in range(gen_size):
        mate_pool = sel_parents(score_list)
        progenitores = []
        # Pair up neighbours in the mating pool for recombination.
        for i in range(0,pop_size-1,2):
            indiv_1= mate_pool[i]
            indiv_2 = mate_pool[i+1]
            filhos = recombination(indiv_1,indiv_2, cross_rate,pop)
            progenitores.extend(filhos)
        # Mutate every offspring.
        descendentes = []
        for cromo,fit in progenitores:
            novo_indiv = mutation(cromo,mut_rate,pop)
            descendentes.append(novo_indiv)
        score_descendentes = list(zip(descendentes, get_all_scores(X_train,y_train,descendentes)))
        # Elitist replacement, then re-score and sort ascending (best first).
        pop = sel_survivors(score_list,score_descendentes)
        score_list = list(zip(pop, get_all_scores(X_train,y_train,pop)))
        score_list.sort(key=itemgetter(1))
        """print("Current iteration : {}".format(j+1))
        print("Time taken by far : %.1f seconds" % (time.time() - start_time))
        print("Current top member's network accuracy: %.2f%%\n" % score_list[0][0].accuracy(X_train,y_train))
        print("Mean Error:", score_list[0][0].mean_error(X_train,y_train))"""
        # Track the best (lowest-error) individual seen so far.
        if(fitness > score_list[0][1]):
            fitness = score_list[0][1]
            best_ele = score_list[0][0]
            print(fitness)
        newfile.write(str(score_list[0][0].accuracy(X_train,y_train)) + "\n")
        """print("\n")
        print(score_list[0][0])
        print("\n")"""
    newfile.close()
    return fitness,best_ele
if __name__ == "__main__":
X_train, y_train, X_test, y_test = read_data()
pop_size = 50
gen_size = 100
mut_rate = 0.05
crossover_rate = 0.9
elit_rate = 0.3
net_size = [9,100,50,20,2]
fitness = [99999999999,None]
#self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
for i in range(30):
fitness = GeneticAlgo(fitness[1],fitness[0],i,X_train,y_train,X_test,y_test,gen_size,pop_size,net_size,mut_rate,crossover_rate,tour_sel(3),crossover,sel_survivors_elite(elit_rate))
print("Best Network form all the runs: ", fitness[1].accuracy(X_test,y_test)) |
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """Merge sorted nums2 (n values) into sorted nums1 (m values) in place.

        nums1 has at least m + n slots; filling from the back avoids
        overwriting unmerged elements, so no extra storage is needed.
        """
        write = m + n - 1
        a, b = m - 1, n - 1
        # While nums2 still has elements, place the larger tail value.
        while b >= 0:
            if a >= 0 and nums1[a] >= nums2[b]:
                nums1[write] = nums1[a]
                a -= 1
            else:
                nums1[write] = nums2[b]
                b -= 1
            write -= 1
        # Any leftover nums1 prefix is already in position.
# Demo 1: merge two six-element sorted runs in place.
nums1 = [1, 2, 3, 8, 9, 11, 0, 0, 0, 0, 0, 0]
nums2 = [4, 5, 6, 7, 10, 14]
Solution().merge(nums1, 6, nums2, 6)
print(nums1)
# Demo 2: edge case where nums1 contributes no elements.
nums1 = [0]
nums2 = [1]
Solution().merge(nums1, 0, nums2, 1)
print(nums1)
|
#test generic_language.py
from dotenv import load_dotenv, find_dotenv
from pathlib import Path
import json
import os
import pymysql
import traceback
import time
import sys
import re
import subprocess
# Make the package root (parent directory) importable before project imports.
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(path + "/..")
from genericlanguage import GenericLanguage
from language import Language


class TLanguage(Language):
    """Minimal Language subclass used to exercise base-class behaviour."""
    def __init__(self):
        super().__init__()
    def getRecognitionHints(self):
        # No language-specific hints.
        return []
    def checkSyntax(self, fileName, fileContent, start, end):
        # Accept everything: no syntax diagnostics.
        return []
    def getAutocompletions(self, con, tokens, content, line, position, chatId, branchId):
        # No completions offered.
        return []


def test_generic_language_common_methods():
    """GenericLanguage's hint/syntax/completion hooks all return empty lists."""
    lang = GenericLanguage()
    assert len(lang.getRecognitionHints()) == 0
    assert len(lang.checkSyntax("1.txt", "", 0, 1)) == 0
    assert len(lang.getAutocompletions(None, [], "", 1, 1, 0, "main")) == 0


def test_generic_language_recognize_statement():
    """Spoken transcripts get normalized (digits, symbols, custom vocabulary)."""
    lang = GenericLanguage()
    result = lang.recognizeStatement("zero one two three four six seven eight nine fall tower underscore tab temp tabs space bass anthem", [], "")
    assert result.replace("\t", " ") == "012346789 fall tower _ anthem"
    # Vocabulary pairs map recognized words to project-specific replacements.
    result = lang.recognizeStatement("front-end back-end front end back end black ant bronte and bronx france front and front ends fall tower Master Slave", [["fall", "falley"], ["tower", "bashnya"]], "")
    assert result == "frontend backend frontend backend backend frontend frontend frontend frontend falley bashnya master slave"


def test_language():
    """The TLanguage stub passes plain words through unchanged."""
    lang = TLanguage()
    result = lang.recognizeStatement("zero", [], "")
    assert result.replace("\t", " ") == "zero"
import re
import logging
import urlparse
from flask import Blueprint, render_template, redirect, request, \
g, url_for, session, abort
from utils import *
from models import *
from db import *
# Blueprint serving HTTP 451 ("unavailable for legal reasons") pages.
err451_pages = Blueprint('err451', __name__,
                         template_folder='templates/451')


def get_referrer_domain():
    """Return 'http://<host>' for the current request's Referer header.

    NOTE(review): raises KeyError when no Referer header is present, and
    always forces the http scheme even for https referrers — confirm both
    are intended.
    """
    parts = urlparse.urlparse(request.headers['Referer'])
    return 'http://' + parts.netloc


@err451_pages.route('/')
@err451_pages.route('/<path:site>')
@err451_pages.route('/<isp>/<path:site>')
def err451(site=None, isp=None):
    """Render the 451 explanation page for a blocked *site*.

    *isp* optionally narrows the page to one network's court order.
    Responds 404 when the site is unknown or the ISP has no matching order.
    """
    if site is None:
        # Bare '/' route: nothing to show.
        return abort(404)
    site = fix_path(site)
    try:
        cjurl = CourtJudgmentURL.select_one(g.conn, url=site)
    except ObjectNotFound:
        abort(404)
    judgment = cjurl.get_court_judgment()
    if isp:
        # Restrict to the single order that applies to this network.
        orders = judgment.get_court_orders_by_network()
        if isp not in orders:
            abort(404)
        orders = [orders[isp]]
    else:
        orders = list(judgment.get_court_orders())
    networks = [x['network_name'] for x in orders]
    return render_template('451.html',
                           site=site,
                           cjurl=cjurl,
                           judgment=judgment,
                           power=judgment.get_power(),
                           networks=networks,
                           orders=orders)
|
#! /usr/bin/env python
"""Main script for executing calculations using aospy.
Before using this script
------------------------
It is best to copy this template into a separate directory before populating it
with the objects from your own object library. You can also always get a fresh
copy from https://github.com/spencerahill/aospy/examples/aospy_main.py
How to use this script
----------------------
On the example library and data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This script comes pre-populated with objects taken from the example aospy
object library that is included in this directory in the `example_obj_lib.py`
module. So you can try it out on the sample data without modifying anything at
all.
This simple example library includes only one Proj, one Model, and one Run, but
it also includes multiple Var and Region objects over which you can automate
computations. The date range, sub-annual averaging period, and output data
types can all also be modified.
On your own library and data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As a user, there are only two places you need to modify the code:
1. (Only done once) Replace `example_obj_lib` with the name of your object
library. Consult the documentation for instructions on how to make your
object library findable by Python if this statement generates errors.
2. (Done as often as desired) Replace the dummy project, model, run, var, and
region objects with your objects that you want to perform
calculations with.
Also alter the other parameters -- date_range, etc. -- to your liking.
Running the script
------------------
Once the parameters are all set as desired, execute the script from the command
line ::
./aospy_main.py # after `cd` to the directory where you've made your copy
"""
# Cf. instructions above: replace `example_obj_lib` with your own object
# library if you wish to use this on your own data.
from aospy import submit_mult_calcs
import example_obj_lib as lib

# This dictionary contains all of the specifications of calculations that you
# wish to permute over.
calc_suite_specs = dict(
    # Consult the `Calc` API reference for further explanation of each option
    # and its accepted values.

    # The desired library of aospy objects.
    library=lib,
    # List of the Proj objects, or 'default', or 'all'.
    projects=[lib.example_proj],
    # List of the Model objects, or 'default', or 'all'.
    models=[lib.example_model],
    # List of the Run objects, or 'default', or 'all'.
    runs=[lib.example_run],
    # List of the Var objects, or 'default', or 'all'.
    variables=[lib.precip_largescale, lib.precip_convective, lib.precip_total,
               lib.precip_conv_frac],
    # List of the Region objects, or 'default', or 'all'.
    regions='all',
    # Start and end dates (inclusive).  Either 'default' or a list comprising
    # tuples of the form (start_date, end_date), where start_date and end_date
    # are datetime.datetime objects.  Be sure to add `import datetime` above
    # if using `datetime.datetime` objects.
    date_ranges='default',
    # Sub-annual time-interval to average over.  List of 'ann', seasonal
    # string (e.g. 'djf'), or month integer (1 for Jan, 2 for Feb, etc).
    output_time_intervals=['ann'],
    # List of strings indicating the desired spatiotemporal reductions.
    output_time_regional_reductions=['av', 'std', 'reg.av', 'reg.std',
                                     'reg.ts'],
    # List of desired vertical reductions to perform.
    output_vertical_reductions=[None],
    # List of time spacing of input data.
    input_time_intervals=['monthly'],
    # List of time type of input data.
    input_time_datatypes=['ts'],
    # List of time-offset dictionaries (if desired) to apply to the input
    # data (e.g. [{'days': -15}]).
    input_time_offsets=[None],
    # List of vertical data type of input data.
    input_vertical_datatypes=[False],
)

# This dictionary contains options regarding how the calculations are
# displayed to you, submitted for execution, and saved upon execution.
calc_exec_options = dict(
    # List calculations to be performed and prompt for your verification
    # before submitting them for execution.
    prompt_verify=True,
    # Submit all calculations in parallel.  If parallelize is True and client
    # is None, a LocalCluster will be started; the client argument can be used
    # to specify an external dask.distributed Client for use in parallelizing
    # computations.
    parallelize=False,
    client=None,
    # Save results of calculations to .tar files, one for each Run object.
    # These tar files are placed using the same directory structure as the
    # standard output relative to their root directory, which is specified via
    # the `tar_direc_out` argument of each Proj object's instantiation.
    write_to_tar=True,
)

# Don't modify this statement.
if __name__ == '__main__':
    calcs = submit_mult_calcs(calc_suite_specs, calc_exec_options)
|
import json
from lxml import etree
import re
"""
Contains all the functions to extract the reports' info
"""
# HTML landmark for each extractor: the marker string where its section
# begins in the report.  generate_func_cutpoints uses these to split huge
# reports into per-extractor chunks.
cut_dict = {
    'extract_peimphash': '<section id="static_analysis">',
    'extract_signatures': '<section id="signatures">',
    'extract_hosts': '<div class="tab-pane fade" id="network_hosts_tab">',
    'extract_domains': '<div class="tab-pane fade in active" id="network_domains_tab">',
    'extract_files': '<div class="tab-pane fade in active" id="summary_files">',
    'extract_keys': '<div class="tab-pane fade" id="summary_keys">',
    'extract_mutexes': '<div class="tab-pane fade" id="summary_mutexes">',
    'extract_peversioninfo': '<section id="static_analysis">',
    'extract_pesections': '<section id="static_analysis">',
    'extract_peresources': '<section id="static_analysis">',
    'extract_peimports': '<section id="static_analysis">',
    'extract_strings': '<section id="static_strings">',
    'extract_antivirus': '<section id="static_antivirus">',
    'extract_dynamic': '<script type="text/javascript">',
    'extract_http': '<div class="tab-pane fade" id="network_http_tab">',
    'extract_irc': '<div class="tab-pane fade" id="network_irc_tab">',
    'extract_smtp': '<div class="tab-pane fade" id="network_smtp_tab">',
    'extract_dropped': '<div class="tab-pane fade" id="dropped">',
}
def extract_peimphash(doc):
    """Return the PE imphash string, or None when the report has none."""
    values = [text.strip() for text in doc.xpath('//section[@id="static_analysis"]/div/div[@class="well"]/text()')]
    if values:
        return values[0]
    return None
def extract_signatures(doc):
    """Return matched signature names (anchor hrefs minus the '#'), or None."""
    sigs = [href[1:] for href in doc.xpath('//section[@id="signatures"]/a/@href')]
    return sigs or None
def extract_hosts(doc):
    """Return contacted host strings (whitespace-stripped), or None."""
    hosts = [cell.strip() for cell in doc.xpath('//div[@id="network_hosts_tab"]/section[@id="hosts"]/table/tr/td/text()')]
    return hosts or None
def extract_domains(doc):
    """
    Returns the domains + host dict or empty
    """
    # Cannot use zip since some domains have no matching host
    domains = dict()
    for row in doc.xpath('//div[@id="network_domains_tab"]/section[@id="domains"]/table/tr[position()>1]'):
        host = row.xpath('td[2]/text()')
        domain = row.xpath('td[1]/text()')
        # Rows without a domain cell carry no information.
        if not domain:
            continue
        # A missing host maps to the empty string.
        if not host:
            host = ''
        else:
            host = host[0]
        domains[domain[0]] = host
    return domains or None
def extract_files(doc):
    """Return touched file paths (stripped, empty entries dropped), or None."""
    stripped = [entry.strip() for entry in doc.xpath('//div[@id="summary_files"]/div/text()')]
    cleaned = [entry for entry in stripped if entry]
    return cleaned or None
def extract_keys(doc):
    """Return touched registry keys (stripped, empty entries dropped), or None."""
    stripped = [entry.strip() for entry in doc.xpath('//div[@id="summary_keys"]/div/text()')]
    cleaned = [entry for entry in stripped if entry]
    return cleaned or None
def extract_mutexes(doc):
    """Return created mutex names (stripped, empty entries dropped), or None."""
    stripped = [entry.strip() for entry in doc.xpath('//div[@id="summary_mutexes"]/div/text()')]
    cleaned = [entry for entry in stripped if entry]
    return cleaned or None
def extract_peversioninfo(doc):
    """
    Returns PE version info as a dict or empty
    """
    info = dict()
    for tr in doc.xpath('//section[@id="static_analysis"]//div[@id="pe_versioninfo"]/table/tr'):
        key = tr.xpath('th/text()')
        val = tr.xpath('td/span/text()')
        # Skip rows without value or header
        if not val or not key:
            continue
        info[key[0]] = val[0]
    return info or None
def extract_pesections(doc):
    """Return PE section rows as dicts keyed by the lowercased table header, or None."""
    header_query = '//section[@id="static_analysis"]//div[@id="pe_sections"]/table/tr[1]/th/text()'
    rows_query = '//section[@id="static_analysis"]//div[@id="pe_sections"]/table/tr[position()>1]'
    keys = [header.lower() for header in doc.xpath(header_query)]
    rows = [dict(zip(keys, tr.xpath('td/text()'))) for tr in doc.xpath(rows_query)]
    return rows or None
def extract_peresources(doc):
    """Return PE resource rows as dicts keyed by the lowercased table header, or None."""
    header_query = '//section[@id="static_analysis"]//div[@id="pe_resources"]/table/tr[1]/th/text()'
    rows_query = '//section[@id="static_analysis"]//div[@id="pe_resources"]/table/tr[position()>1]'
    keys = [header.lower() for header in doc.xpath(header_query)]
    rows = [dict(zip(keys, tr.xpath('td/text()'))) for tr in doc.xpath(rows_query)]
    return rows or None
def extract_peimports(doc):
    """
    Returns PE imports as a dict or empty
    """
    imports = dict()
    # Imports from each dll are inside a div
    for well in doc.xpath('//section[@id="static_analysis"]//div[@id="pe_imports"]/div[@class="well"]'):
        # First div holds "Library <name>"; strip the prefix to get the key.
        dll = well.xpath('div[1]/strong/text()')[0].lower().replace('library ', '')
        # Remaining divs list the imported function names.
        functions = well.xpath('div[position()>1]/span/a/text()')
        imports[dll] = functions
    return imports or None
def extract_strings(doc):
    """Return the statically extracted strings, or None when absent."""
    found = doc.xpath('//section[@id="static_strings"]/div[@class="well"]/div/text()')
    return found if found else None
def extract_antivirus(doc):
    """Return {antivirus_name: classification}, or None when the table is empty."""
    names = doc.xpath('//section[@id="static_antivirus"]/table/tr[position()>1]/td[1]/text()')
    labels = doc.xpath('//section[@id="static_antivirus"]/table/tr[position()>1]/td[2]/span/text()')
    pairs = dict(zip(names, labels))
    return pairs if pairs else None
def extract_dynamic(doc):
    """
    Returns the dynamic calls as a dict or empty
    Must use the source HTML due to maximum number of chars
    text() returns (10000000). Some dynamic traces are bigger
    """
    # Locate the inline script that defines graph_raw_data.
    data = doc.xpath('//script[@type="text/javascript" and contains(., "graph_raw_data")]/text()')
    # If there's no match
    if not data:
        return None
    # Get the json data only, removing beginning var def and ; ending
    data = data[0].strip().replace('var graph_raw_data = ', '').strip()[:-1]
    return json.loads(data)
def extract_http(doc):
    """
    Returns the HTTP requests as dict or empty
    """
    http = []
    for row in doc.xpath('//div[@id="network_http_tab"]/table/tr[position()>1]'):
        # One {URI: raw request} dict per table row.
        http.append({row.xpath('td[1]/text()')[0]: row.xpath('td[2]/pre/text()')[0]})
    return http or None
def extract_irc(doc):
    """Return IRC traffic lines (whitespace-stripped), or None."""
    lines = [line.strip() for line in doc.xpath('//div[@id="network_irc_tab"]/pre/text()')]
    return lines or None
def extract_smtp(doc):
    """
    Returns pair with number of SMTP requests and example or empty
    """
    example = doc.xpath('//div[@id="network_smtp_tab"]/pre/text()')
    if example:
        example = example[0].strip()
        # The request count lives in the paragraph text after ': '.
        number = doc.xpath('//div[@id="network_smtp_tab"]/p/text()')[0].split(': ')[1]
        return (number, example)
    return None
def extract_dropped(doc):
    """
    Returns list of dropped files or empty
    """
    files = []
    for drop in doc.xpath('//div[@id="dropped"]//table'):
        # Header cells become dict keys; value cells (plain or bold) the values.
        keys = map(lambda x: x.lower(), drop.xpath('tr/th/text()'))
        values = map(lambda x: x.strip(), drop.xpath('tr/td/text()|tr/td/b/text()'))
        files.append(dict(zip(keys, values)))
    return files or None
# Compiled once at import time instead of on every call (the original
# recompiled both patterns inside the function).
_RE_BETWEEN_TAGS = re.compile(r'>\s+<', re.MULTILINE)
_RE_LONG_WS = re.compile(r'\s{3,}', re.MULTILINE)


def remove_whitespaces(content):
    """Remove inter-tag whitespace and runs of 3+ whitespace chars from *content*."""
    content = _RE_BETWEEN_TAGS.sub('><', content)
    content = _RE_LONG_WS.sub('', content)
    return content
def generate_func_cutpoints(content):
    """
    Generates list of pairs of function and where it should
    be on the content. Each position is based on the previous one
    """
    # Calculate the position
    key_pos = []
    for key, val in cut_dict.items():
        pos = content.find(val)
        if pos == -1:
            # Marker absent from this report: skip the extractor.
            continue
        key_pos.append((key, pos))
    # Order the extractors by where their section starts in the content.
    func_pos = sorted(key_pos, key=lambda x: x[1])
    size = len(func_pos)
    final_pos = []
    # Walk the sorted list backwards converting absolute offsets into deltas
    # from the previous section; the first entry keeps its absolute offset.
    for idx, val in enumerate(func_pos[:0:-1]):
        final_pos.append((val[0], val[1] - func_pos[size - idx - 2][1]))
    # NOTE(review): raises IndexError when no marker matched — confirm
    # callers guarantee at least one hit.
    final_pos.append((func_pos[0]))
    final_pos = final_pos[::-1]
    return final_pos
def extract_dynamic_huge(content):
    """Parse the dynamic-analysis JSON from reports too large for xpath text()."""
    start_marker = 'var graph_raw_data = '
    end_marker = '</script>'
    # Cut everything before the JS variable definition.
    begin = content.find(start_marker) + len(start_marker)
    payload = content[begin:].strip()
    # Keep only up to the closing script tag, then drop the trailing ';'.
    payload = payload[:payload.find(end_marker)].strip()[:-1]
    return json.loads(payload)
# Keep RE compiled: patterns for the >10MB fallback parsers.
re_strings = re.compile(r'<div>([^<]+)<\/div>')
re_files = re.compile(r'([^<]+)<br \/>')
re_keys = re.compile(r'([^<]+)<br \/>')
re_mutexes = re.compile(r'([^<]+)<br \/>')


def _cut_between(content, start_str, stop_str):
    """Slice *content* between the first occurrences of the two markers,
    reproducing the original find-based arithmetic exactly."""
    start_idx = content.find(start_str)
    stop_idx = content.find(stop_str)
    return content[len(start_str) + start_idx:stop_idx]


def extract_strings_huge(content):
    """Parse strings > 10MB with regexes."""
    return re_strings.findall(content)


def extract_files_huge(content):
    """Parse files > 10MB with regexes."""
    section = _cut_between(content, '<div class="well mono">',
                           '<div class="tab-pane fade" id="summary_keys">')
    return re_files.findall(section)


def extract_keys_huge(content):
    """Parse keys > 10MB with regexes."""
    section = _cut_between(content, '<div class="well mono">',
                           '<div class="tab-pane fade" id="summary_mutexes">')
    return re_keys.findall(section)


def extract_mutexes_huge(content):
    """Parse mutexes > 10MB with regexes."""
    section = _cut_between(content, '<div class="well mono">', '</section>')
    return re_mutexes.findall(section)
|
"""Core dashboard views."""
import feedparser
import requests
from dateutil import parser
from requests.exceptions import RequestException
from django.contrib.auth import mixins as auth_mixins
from django.views import generic
from django.conf import settings
from .. import signals
MODOBOA_WEBSITE_URL = "https://modoboa.org/"
class DashboardView(auth_mixins.AccessMixin, generic.TemplateView):
    """Dashboard view, restricted to authenticated administrators."""

    template_name = "core/dashboard.html"

    def dispatch(self, request, *args, **kwargs):
        """Check if user can access dashboard."""
        if not request.user.is_authenticated or not request.user.is_admin:
            return self.handle_no_permission()
        return super(DashboardView, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Add news/features widgets plus any extra-signal widgets."""
        context = super(DashboardView, self).get_context_data(**kwargs)
        context.update({
            "selection": "dashboard", "widgets": {"left": [], "right": []}
        })
        # Fetch latest news from the feed matching the user's language.
        if self.request.user.language == "fr":
            lang = "fr"
        else:
            lang = "en"
        feed_url = "{}{}/weblog/feeds/".format(MODOBOA_WEBSITE_URL, lang)
        # Non-superadmins may be given a custom feed instead.
        if self.request.user.role != "SuperAdmins":
            custom_feed_url = (
                self.request.localconfig.parameters.get_value("rss_feed_url"))
            if custom_feed_url:
                feed_url = custom_feed_url
        entries = []
        if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:
            posts = feedparser.parse(feed_url)
            for entry in posts["entries"][:5]:
                entry["published"] = parser.parse(entry["published"])
                entries.append(entry)
        context["widgets"]["left"].append("core/_latest_news_widget.html")
        context.update({"news": entries})
        hide_features_widget = self.request.localconfig.parameters.get_value(
            "hide_features_widget")
        if self.request.user.is_superuser or not hide_features_widget:
            url = "{}{}/api/projects/?featured=true".format(
                MODOBOA_WEBSITE_URL, lang)
            features = []
            if not settings.DISABLE_DASHBOARD_EXTERNAL_QUERIES:
                try:
                    # Timeout keeps the dashboard responsive when the remote
                    # site is unreachable (previously this could hang).
                    response = requests.get(url, timeout=10)
                except RequestException:
                    pass
                else:
                    if response.status_code == 200:
                        features = response.json()
            context["widgets"]["right"].append("core/_current_features.html")
            context.update({"features": features})
        # Extra widgets
        result = signals.extra_admin_dashboard_widgets.send(
            sender=self.__class__, user=self.request.user)
        for _receiver, widgets in result:
            for widget in widgets:
                context["widgets"][widget["column"]].append(
                    widget["template"])
                # FIXME: can raise conflicts...
                context.update(widget["context"])
        return context
|
from threading import Thread, Condition, Lock, Event
import httplib as http
from octopus.core.communication.requestmanager import RequestManager
class HTTPRequester(Thread):
    """Worker thread: pops request tuples from a shared task list, performs
    the HTTP call and stores the response back on the list.

    NOTE: Python 2 code (httplib import in this module).
    """

    def __init__(self, taskList):
        super(HTTPRequester, self).__init__()
        self.taskList = taskList  # shared HTTPRequestersSync instance
        self.stopFlag = False  # set by stop(); checked while waiting for work

    def stopRequested(self):
        return self.stopFlag

    def stop(self):
        # Only sets the flag; the thread exits the next time it wakes up with
        # an empty task list (stopAll() notifies the condition for that).
        self.stopFlag = True

    def run(self):
        while True:
            self.taskList.cond.acquire()
            # Sleep until a task is queued or a stop is requested.
            while not self.taskList.tasks:
                if self.stopFlag:
                    self.taskList.cond.release()
                    return
                self.taskList.cond.wait()
            # get next url to retrieve from taskList.tasks
            id, host, port, path, headers, data, method = self.taskList.tasks.pop()
            self.taskList.cond.release()
            req = RequestManager(host, port)
            try:
                # Dispatch on the HTTP verb: req.get / req.post / ...
                data = getattr(req, method.lower())(path, data, headers)
                self.taskList.setResponse(id, data)
            except http.BadStatusLine:
                self.taskList.setResponse(id, 'BadStatusLine Error')
class HTTPRequestersSync(object):
    """Fans HTTP requests out over a pool of HTTPRequester threads.

    Usage: addRequest(...) one or more times, then executeRequests() to
    block until all responses arrive, then getResponseByRequestId(id).
    """

    # Lifecycle states of a request batch.
    NOT_READY, IN_PROGRESS, READY = range(3)

    def __init__(self, threadCount=8):
        self.tasks = []  # pending request tuples, guarded by self.cond
        self.cond = Condition()  # wakes workers when tasks arrive
        self.threads = [HTTPRequester(self) for i in xrange(threadCount)]
        self.responses = {}  # request id -> response payload
        self.idCounter = 0  # monotonically increasing request id
        self.responsesLock = Lock()  # guards responses/pendingRequests
        self.pendingRequests = 0
        self.workDone = Event()  # pulsed each time a response lands
        self.prepare()
        self.start()

    def start(self):
        for thread in self.threads:
            thread.start()

    def stopAll(self):
        """Flag every worker to stop, wake them all, and join them."""
        for thread in self.threads:
            thread.stop()
        self.cond.acquire()
        self.cond.notifyAll()
        self.cond.release()
        for thread in self.threads:
            thread.join()

    def addRequest(self, host, port, path, headers={}, data=None, method="GET"):
        """Queue one request and return its id (used to fetch the response)."""
        self.cond.acquire()
        id = self.idCounter + 1
        self.idCounter = id
        self.tasks.append((id, host, port, path, headers, data, method))
        self.cond.notify(1)
        self.cond.release()
        self.responsesLock.acquire()
        self.pendingRequests += 1
        self.responsesLock.release()
        return id

    def setResponse(self, id, response):
        # Called from worker threads when a request completes.
        self.responsesLock.acquire()
        self.responses[id] = response
        self.pendingRequests -= 1
        self.responsesLock.release()
        self.workDone.set()

    def executeRequests(self):
        """Block (polling workDone) until every queued request has answered."""
        self.status = self.IN_PROGRESS
        if self.pendingRequests:
            while True:
                # 5s timeout so a lost wakeup cannot hang the caller forever.
                self.workDone.wait(5.0)
                self.workDone.clear()
                self.responsesLock.acquire()
                pendingRequests = self.pendingRequests
                self.responsesLock.release()
                if pendingRequests <= 0:
                    break
        self.status = self.READY

    def prepare(self):
        """Reset the batch: clear tasks, counters and stored responses."""
        self.status = self.NOT_READY
        self.tasks = []
        self.pendingRequests = 0
        self.responses.clear()

    def getResponseByRequestId(self, id):
        if self.status == HTTPRequestersSync.READY:
            return self.responses[id]
        if self.status == HTTPRequestersSync.IN_PROGRESS:
            raise RuntimeError("The request synchronization is still in progress")
        if self.status == HTTPRequestersSync.NOT_READY:
            raise RuntimeError("You should perform an .executeRequests() call first")
if __name__ == '__main__':
    # Smoke test: fetch the Google front page 5 times in parallel and time it.
    import time
    t0 = time.time()
    t = HTTPRequestersSync(8)
    requests = []
    for i in xrange(5):
        requests.append(t.addRequest("www.google.fr", 80, "/"))
    t.executeRequests()
    for id in requests:
        resp = t.getResponseByRequestId(id)
        print id, ":", "%s..." % resp[:10]
    t.stopAll()
    t0 = time.time() - t0
    print "done in %ss" % t0
|
import HypothesisFactory
import world
class Theory():
    """A theory over machine-activation hypotheses; `kind` selects which
    hypotheses receive support (see hypothesis_likelihood)."""

    def __init__(self, kind):
        self.kind = kind
        self.machines = world.machines
        self.hf = HypothesisFactory.HypothesisFactory()

    def unnormalized_posterior(self, data=None):
        """Prior times data likelihood; prior alone when no data is given."""
        if data is None:
            return self.prior()
        else:
            return self.data_likelihood(data) * self.prior()

    def data_likelihood(self, data=None):
        """Product over machines of the hypothesis-marginalised likelihood."""
        #hf=HypothesisFactory.HypothesisFactory()
        lik = 1
        for machine in self.machines:
            hyp_lik = 0
            allhyp = self.hf.create_all_hypotheses(machine)
            for hyp in allhyp:
                # Marginalise over hypotheses, weighted by this theory's support.
                hyp_lik += hyp.likelihood(data) * self.hypothesis_likelihood(hyp)
            lik *= hyp_lik
        return lik

    def prior(self):
        # Uniform prior over theories.
        return 1.0

    def hypothesis_likelihood(self, hypothesis):
        """Support this theory assigns to `hypothesis` — mostly 0/1 indicator
        weights; kinds 9-11 spread mass uniformly over the remaining options."""
        if self.kind == 0:
            return int(hypothesis.kind == 0)
        elif self.kind == 1:
            return int(hypothesis.kind == 1)
        elif self.kind == 2:  # color match
            if hypothesis.kind == 2 and hypothesis.color == hypothesis.machine[0]:
                return 1
            else:
                return 0
        elif self.kind == 3:  # shape match
            if hypothesis.kind == 3 and hypothesis.shape == hypothesis.machine[1]:
                return 1
            else:
                return 0
        elif self.kind == 4:  # color AND shape match
            if hypothesis.kind == 4 and hypothesis.color == hypothesis.machine[0]\
                    and hypothesis.shape == hypothesis.machine[1]:
                return 1
            else:
                return 0
        elif self.kind == 5:  # color OR shape match
            # NOTE(review): despite the "OR" label this uses `and`, so only
            # hypotheses matching BOTH get support — the original author
            # flagged this too ("check this and"); confirm intended semantics.
            if hypothesis.kind == 5 and (hypothesis.color == hypothesis.machine[0]\
                    and hypothesis.shape == hypothesis.machine[1]):  # check this and. only 1 gets support.
                return 1
            else:
                return 0
        elif self.kind == 6:  # specific color (ANY color)
            if hypothesis.kind == 2:
                return 1
            else:
                return 0
        elif self.kind == 7:  # specific shape (ANY shape)
            if hypothesis.kind == 3:
                return 1
            else:
                return 0
        elif self.kind == 8:  # specific shape AND color (ANY)
            if hypothesis.kind == 4:
                return 1
            else:
                return 0
        elif self.kind == 9:  # NOT match color
            if hypothesis.kind == 2 and hypothesis.color != hypothesis.machine[0]:
                return 1. / (world.n_colors - 1)
            else:
                return 0
        elif self.kind == 10:  # NOT match shape
            if hypothesis.kind == 3 and hypothesis.shape != hypothesis.machine[1]:
                return 1. / (world.n_shapes - 1)
            else:
                return 0
        elif self.kind == 11:  # INDEPENDENT
            return 1. / self.hf.n_hypotheses(hypothesis.machine)
        #CHECK CHECK CHECK THIS SALADDD
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2018 ARM Ltd.
# Copyright (c) 2023 Izuma Networks
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import os
import cbor2
import struct
from six import iteritems, text_type
from pyclibrary import CParser
from collections import namedtuple
import cryptography.hazmat.primitives.asymmetric.ec as ec
import cryptography.hazmat.backends as backends
from cryptography import x509
from cryptography.hazmat.primitives import serialization, hashes
import datetime
# C variable names whose values are DER-encoded certificates.
CERTIFICATE_KEYS = (
    "MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_CERTIFICATE",
    "MBED_CLOUD_DEV_BOOTSTRAP_SERVER_ROOT_CA_CERTIFICATE",
    "arm_uc_default_certificate",
)
# C variable names whose values are DER-encoded private keys.
# Fix: this was previously a bare string, so `key in KEY_KEYS` performed a
# SUBSTRING test instead of exact membership; a one-element tuple restores
# exact matching.
KEY_KEYS = ("MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_PRIVATE_KEY",)
# C variable names belonging to the firmware-update configuration.
UPDATE_KEYS = (
    "arm_uc_default_certificate",
    "arm_uc_class_id",
    "arm_uc_vendor_id",
)
# noqa - E501
# Maps C variable names to the item names stored in the CBOR blob.
KEY_MAP = {
    "MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_CERTIFICATE": "mbed.BootstrapDeviceCert",
    "MBED_CLOUD_DEV_BOOTSTRAP_SERVER_ROOT_CA_CERTIFICATE": "mbed.BootstrapServerCACert",
    "MBED_CLOUD_DEV_BOOTSTRAP_DEVICE_PRIVATE_KEY": "mbed.BootstrapDevicePrivateKey",
    "MBED_CLOUD_DEV_BOOTSTRAP_ENDPOINT_NAME": "mbed.EndpointName",
    "MBED_CLOUD_DEV_BOOTSTRAP_SERVER_URI": "mbed.BootstrapServerURI",
    "MBED_CLOUD_DEV_ACCOUNT_ID": "mbed.AccountID",
    "MBED_CLOUD_DEV_MANUFACTURER": "mbed.Manufacturer",
    "MBED_CLOUD_DEV_MODEL_NUMBER": "mbed.ModelNumber",
    "MBED_CLOUD_DEV_SERIAL_NUMBER": "mbed.SerialNumber",
    "MBED_CLOUD_DEV_DEVICE_TYPE": "mbed.DeviceType",
    "MBED_CLOUD_DEV_HARDWARE_VERSION": "mbed.HardwareVersion",
    "MBED_CLOUD_DEV_MEMORY_TOTAL_KB": "mbed.MemoryTotalKB",
    "arm_uc_default_certificate": "mbed.UpdateAuthCert",
    "arm_uc_class_id": "mbed.ClassId",
    "arm_uc_vendor_id": "mbed.VendorId",
}
# Record shapes serialised (via ._asdict()) into the CBOR document.
ConfigParam = namedtuple("ConfigParam", ["Data", "Name"])
Certificate = namedtuple("Certificate", ["Data", "Format", "Name"])
Key = namedtuple("Key", ["Data", "Format", "Name", "Type"])
class CBORConverter:
    """Convert Mbed/Izuma development-certificate C sources into a CBOR
    provisioning blob written to `cbor_file`."""

    def __init__(self, development_certificate, update_resource, cbor_file):
        self.development_certificate = development_certificate  # path to C source
        self.update_resource = update_resource  # path to C source
        self.cbor_file = cbor_file  # output path

    def __check_file_exists(self, path):
        # Reports and returns False instead of raising so callers can bail out.
        if not os.path.isfile(path):
            print(f"File {path} does not exist.")
            return False
        return True

    def parse_c_file(self):
        """Parse both C sources and return their variable definitions as a
        dict, or None when either input file is missing."""
        if not self.__check_file_exists(
            self.development_certificate
        ) or not self.__check_file_exists(self.update_resource):
            return None
        values = {}
        values.update(
            CParser([self.development_certificate]).defs.get("values")
        )
        values.update(
            CParser(
                [self.update_resource],
                # These macros enable the update-related sections of the header.
                macros={
                    "MBED_CLOUD_DEV_UPDATE_ID": 1,
                    "MBED_CLOUD_DEV_UPDATE_CERT": 1,
                },
            ).defs.get("values")
        )
        return values

    def create_cbor_data(self, vars):
        """Build the CBOR document (a plain dict) from the parsed C variables."""
        cbor_data = {
            "Certificates": [],
            "Keys": [],
            "ConfigParams": [],
            "SchemeVersion": "0.0.1",
        }
        # Presence of the bootstrap URI selects bootstrap mode.
        use_bootstrap = (
            1 if "MBED_CLOUD_DEV_BOOTSTRAP_SERVER_URI" in vars.keys() else 0
        )
        cbor_data["ConfigParams"].append(
            ConfigParam(use_bootstrap, "mbed.UseBootstrap")._asdict()
        )
        for key in vars.keys():
            var = vars.get(key)
            cbor_var_key = KEY_MAP.get(key, None)
            if cbor_var_key:
                if key in CERTIFICATE_KEYS:
                    # C int array -> raw bytes.
                    byte_data = struct.pack("%sB" % len(var), *var)
                    certificate = Certificate(
                        byte_data, "der", cbor_var_key
                    )._asdict()
                    cbor_data["Certificates"].append(certificate)
                elif key in KEY_KEYS:
                    byte_data = struct.pack("%sB" % len(var), *var)
                    private_key = Key(
                        byte_data, "der", cbor_var_key, "ECCPrivate"
                    )._asdict()
                    cbor_data["Keys"].append(private_key)
                elif key in UPDATE_KEYS:
                    byte_data = struct.pack("%sB" % len(var), *var)
                    config_param = ConfigParam(
                        byte_data, cbor_var_key
                    )._asdict()
                    cbor_data["ConfigParams"].append(config_param)
                else:
                    # Plain scalar/string parameter.
                    config_param = ConfigParam(var, cbor_var_key)._asdict()
                    cbor_data["ConfigParams"].append(config_param)
            else:
                print("Key %s not in KEY_MAP." % key)
        return cbor_data

    def convert_to_cbor(self):
        """Parse the inputs and write the CBOR blob to self.cbor_file."""
        vars = self.parse_c_file()
        if not vars:
            print("No variables parsed.")
        else:
            cbor_data = self.create_cbor_data(vars)
            with open(self.cbor_file, "wb") as out_file:
                cbor2.dump(cbor_data, out_file)
class CBORUtils:
    """Helpers for amending and inspecting an existing CBOR provisioning blob."""

    @staticmethod
    def add_custom_certificate(cbor_file, custom_cert_name):
        """Generate a P-256 key pair and a 1-year self-signed X.509 cert and
        append all three (private key, public key, cert) to the blob under
        `custom_cert_name`."""
        # Generate EC key pair
        privatekey = ec.generate_private_key(
            ec.SECP256R1(), backends.default_backend()
        )
        privatebytes = privatekey.private_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        )
        publickey = privatekey.public_key()
        publicbytes = publickey.public_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PublicFormat.SubjectPublicKeyInfo,
        )
        # Create X509 self-signed certificate (subject == issuer).
        subject = issuer = x509.Name(
            [
                x509.NameAttribute(x509.NameOID.COUNTRY_NAME, "FI"),
                x509.NameAttribute(
                    x509.NameOID.STATE_OR_PROVINCE_NAME, "Oulu"
                ),
                x509.NameAttribute(x509.NameOID.LOCALITY_NAME, "Oulu"),
                x509.NameAttribute(
                    x509.NameOID.ORGANIZATION_NAME, "Izuma Networks"
                ),
                x509.NameAttribute(
                    x509.NameOID.COMMON_NAME, text_type(custom_cert_name)
                ),
            ]
        )
        cert = (
            x509.CertificateBuilder()
            .subject_name(subject)
            .issuer_name(issuer)
            .public_key(publickey)
            .serial_number(x509.random_serial_number())
            .not_valid_before(datetime.datetime.utcnow())
            .not_valid_after(
                # Our certificate will be valid for 1 year
                datetime.datetime.utcnow()
                + datetime.timedelta(days=365)
            )
            .add_extension(
                x509.SubjectAlternativeName([x509.DNSName("localhost")]),
                critical=False,
                # Sign our certificate with our private key
            )
            .sign(privatekey, hashes.SHA256(), backends.default_backend())
        )
        certbytes = cert.public_bytes(serialization.Encoding.DER)
        # Read-modify-write the existing blob.
        cbor_data = None
        with open(cbor_file, "rb") as in_file:
            cbor_data = cbor2.load(in_file)
        privatekey_data = Key(
            privatebytes, "der", custom_cert_name, "ECCPrivate"
        )._asdict()
        publickey_data = Key(
            publicbytes, "der", custom_cert_name, "ECCPublic"
        )._asdict()
        cbor_data["Keys"].append(privatekey_data)
        cbor_data["Keys"].append(publickey_data)
        cert_data = Certificate(certbytes, "der", custom_cert_name)._asdict()
        cbor_data["Certificates"].append(cert_data)
        with open(cbor_file, "wb") as out_file:
            cbor2.dump(cbor_data, out_file)

    @staticmethod
    def print_cbor(cbor_file):
        """Dump the Keys/Certificates/ConfigParams sections of the blob."""
        cbor_data = None
        with open(cbor_file, "rb") as in_file:
            cbor_data = cbor2.load(in_file)
        for k in ["Keys", "Certificates", "ConfigParams"]:
            v = cbor_data.get(k)
            # NOTE(review): the section contents are printed before the
            # section name — confirm these two prints aren't swapped.
            print(v)
            print(k)
            if v is None:
                continue
            for item in v:
                for kk, vv in iteritems(item):
                    print("\t" + text_type(kk) + " : " + repr(vv))
                print("\t------------------------------")
            print("\n")
|
# Prompt the user for the input file name (extension included).
file_name = input("Enter the name of file with extension :")
|
import json
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt, csrf_protect
from django.http import HttpResponse
from user_activity.models import *
# Create your views here.
@csrf_exempt
def get_activity_periods_view(request):
    """Return every user's activity periods as a JSON payload.

    Response shape: {"ok": bool, "members": [{id, real_name, tz,
    activity_periods}, ...]}.
    NOTE(review): "ok" is initialised to False and never set to True, even
    on success — confirm whether that is intentional.
    """
    members = []
    # `members` is shared by reference, so appends below land in the response.
    response_json = {"ok": False, "members": members}
    all_activity_periods = ActivityPeriod.objects.all()
    for activity_period in all_activity_periods:
        user = activity_period.user
        members.append({'id': user.user_id, 'real_name': user.user_name,
                        "tz": activity_period.tz,
                        'activity_periods': json.loads(activity_period.activity_period)})
    return HttpResponse(json.dumps(response_json), status=200, content_type='application/json; charset=utf8')
|
# Hyper-parameter settings consumed elsewhere; values appear hand-tuned.
hparams = {
    'n_atts': 15,
    'epochs': 600,
    'hidden_size': 3,
    'batch_size': 1024,
    'pca_dims': 3,
    'n_clusters': 24,
}
|
import scrapy
from datetime import datetime
from json import load, dump
from io import UnsupportedOperation
from scrapy.http import Request, FormRequest
from scrapy.utils.response import open_in_browser
class BookSpider(scrapy.Spider):
    """Scrapy spider that batch-searches libgen.rs for module reading lists
    and records a download link for each matched book."""

    name = 'libgen_spider'
    start_urls = ['http://libgen.rs/batchsearchindex.php']
    # Accumulates matched books; dumped to JSON in closed().
    libgen_found_books = []

    def parse(self, response):
        """Build one batch-search query per module and submit it."""
        with open(r'C:\Users\Emmett\magi-spiders\data\ul_module_details.json', 'r', encoding="utf-8") as modules:
            module_details = load(modules)
        for module in module_details:
            dsk_query = ''
            for book in module['books']:
                # ~~ temporary fix for edition, bookofmodules.py failed to see editions in brackets e.g "(8th edition)"
                # code is from bookofmodules.py so is fixed for next semester
                book['title'] = book['title'].replace('(', '').replace(')', '').replace("\\n", "").replace("\\", "")
                book['edition'] = ''.join([word for word in book['title'].split(" ") if ('th' in word or 'rd' in word) and word[0] in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']])
                #~~~~ end of bug fix
                book['module_code'] = module['module_code']
                book['search_string'] = book['title']
                # Search on the title up to (excluding) the edition marker.
                if(book['edition'] != ''):
                    search_string = book['title'].split(book['edition'])[0]
                    book['search_string'] = search_string
                dsk_query += book['search_string']+'\r\n'
            print(dsk_query)
            yield FormRequest('http://libgen.rs/batchsearchindex.php', meta={'books': module['books']}, formdata={'dsk': dsk_query}, callback=self.results_per_book)

    def results_per_book(self, response):
        """Follow the per-book result pages for every query with hits.

        The result table alternates query text / hit count cells, hence the
        step-2 iteration; the incrementers keep links and books aligned.
        """
        books = response.meta['books']
        query_link = (response.xpath('/html/body/table/tr/td/a/@href')).getall()
        query_text = (response.xpath('/html/body/table/tr/td/text()')).getall()
        required_text_index = 0
        link_incrementer = 0
        book_incrementer = 0
        for i in range(0, len(query_text)-1, 2):
            if(int(query_text[i+1]) > 0):
                link = query_link[link_incrementer]
                print(books[book_incrementer])
                yield response.follow(link, meta={'query_link': query_link[link_incrementer], 'book': books[book_incrementer]}, callback=self.get_book)
            book_incrementer += 1
            required_text_index += 1
            link_incrementer += 1

    def get_book(self, response):
        """Scan a search-result page, verify author overlap, and follow the
        mirror of the largest matching file."""
        # open_in_browser(response)
        module_book = response.meta['book']
        module_book_author = module_book['author'].replace(',', '').replace('.', '').replace(';', '').replace('\'', '')
        module_book_title = module_book['title']
        print(module_book_title)
        i = 0
        result_count = 0
        lrg_sml_mb = []  # file sizes (Mb), later sorted large -> small
        result_sizes = []  # file sizes in result order (index-aligned below)
        result_mirrors = []
        result_authors = []
        result_years = []
        result_languages = []
        book_search_results = []
        # results_file_sizes = [int(x.xpath("text()").get().replace(' Mb', '')) for x in (response.xpath("/html/body/table[3]/*/*"))[11:] if 'Mb' in x.get()]
        # results_mirrors = [x.xpath('@href').get() for x in (response.xpath("/html/body/table[3]/*/*/*"))[11:] if '[1]' in x.get()]
        # print((response.xpath("/html/body/table[3]/*/*"))[11:])
        for result in (response.xpath("/html/body/table[3]/*/*"))[11:]:
            author_link = ''
            mirror = ''
            author_link = result.xpath('*/@href').get()
            mirror = result.xpath('*').get()
            try:
                if('author' in author_link):
                    try:
                        result_author = result.xpath('*/text()').get().replace(',', '').replace(';', '').replace('.', '').replace('\'', '')
                    except:
                        result_author = 'Null'
                    result_authors.append(result_author)
                elif('[1]' in mirror):
                    result_mirrors.append(result.xpath('*/@href').get())
            except TypeError as e:
                # Cells without links land here; classify by their text.
                item = result.xpath('text()').get()
                if(item == None):
                    break
                elif(' Mb' in item):
                    size = int(item.replace(' Mb', ''))
                    result_sizes.append(size)
                    lrg_sml_mb.append(size)
                elif(' Kb' in item):
                    size = int(item.replace(' Kb', ''))
                    result_sizes.append(size/1000)
                    lrg_sml_mb.append(size/1000)
                elif(item.isnumeric() and ('20' in item or '19' in item) and len(item) == 4):
                    # Four-digit 19xx/20xx number -> publication year.
                    result_years.append(item)
                elif(item.isalpha()):
                    result_languages.append(item)
        # print(result_sizes)
        # print(result_authors)
        # print(result_mirrors)
        lrg_sml_mb.sort(reverse=True)
        unique_languages = list(set(result_languages))
        for mb in lrg_sml_mb:
            working_index = result_sizes.index(mb)
            # print(module_required_text_author, '\n', result_authors[working_index])
            # Require at least one shared author-name token (both directions).
            author_verify = [x for x in module_book_author.split(" ") if x in result_authors[working_index] and len(x) > 1]
            double_author_verify = [x for x in result_authors[working_index].split(" ") if x in module_book_author and len(x) > 1]
            if(len(author_verify) > 0 or len(double_author_verify) > 0):
                print('mb, lrg_sml_mb', mb, lrg_sml_mb)
                final_mirror = result_mirrors[working_index]
                print(author_verify)
                print(final_mirror)
                try:
                    module_book['found_year'] = result_years[working_index]
                except IndexError as e:
                    module_book['found_year'] = 0
                try:
                    module_book['found_language'] = result_languages[working_index]
                except IndexError as e:
                    module_book['found_language'] = 'Null'
                module_book['found_author'] = result_authors[working_index]
                yield Request(final_mirror, meta={'module_book': module_book}, callback=self.get_download_link)
                break

    def get_download_link(self, response):
        """Extract the direct download link from the mirror page."""
        # open_in_browser(response)
        module_book = response.meta['module_book']
        download_link = (response.css("#download > h2:nth-child(1) > a:nth-child(1)")).xpath("@href").get()
        module_book['download_link'] = str(download_link)
        self.libgen_found_books.append(module_book)

    def closed(self, response):
        """Persist all matched books when the spider finishes."""
        with open(r'C:\Users\Emmett\magi-spiders\data\libgen_books.json', 'w+', encoding='utf-8') as output:
            records = self.libgen_found_books
            dump(records, output)
|
"""Identify the optimal coalition structure, that is, identify what
agents should be allocated to which coalition to maximise the sum of utility. The simplest
way to compute this involves computing the sum of utilities for every possible combination
of coalitions, and picking the highest one.
Implement a simple algorithm to compute the optimal coalition structure.
Your algorithm should simply generate all possible partitions and compute their value, finally
returning the maximal one. Keep track of the time taken to run for 1,2, . . . agents. At
what point does the algorithm start to take too long to run? """
import numpy as np
def generate_utilities(n):
    """Assign each agent 1..n a random integer utility in [1, 100)."""
    agents = range(1, n + 1)
    return dict(zip(agents, np.random.randint(1, 100, n)))
def combinations(nums: list, n_left):
    """Return all order-preserving combinations of ``n_left`` elements of
    ``nums``, each as a list.

    For ``n_left < 2`` this degenerates to one singleton list per element.
    (Fix: removed a leftover debug ``print`` that spammed stdout on every
    recursion step.)
    """
    if n_left < 2:
        return [[num] for num in nums]
    combos = []
    for i in range(len(nums) - n_left + 1):
        head = [nums[i]]
        tail = nums[i + 1:]
        # Extend each shorter combination of the tail with the head element.
        combos += [head + rest for rest in combinations(tail, n_left - 1)]
    return combos
def split_combinations(nums: list, n_left):
    """For ``n_left < 2``: return (element, remaining-elements) pairs.

    NOTE(review): the ``n_left >= 2`` branch looks broken — it recurses into
    ``combinations``, whose results are flat lists, so ``ni[0]`` is a scalar
    and ``head + ni[0]`` raises TypeError. It probably should recurse into
    ``split_combinations`` itself; confirm before relying on this path.
    """
    if n_left < 2:
        r = []
        for i in range(len(nums)):
            r.append((nums[i], nums[:i]+nums[i+1:]))
        return r
    c = []
    for i in range(len(nums)-n_left + 1):
        head = [nums[i]]
        tail = nums[i+1:]
        print(head, tail)  # leftover debug output
        for ni in combinations(tail, n_left-1):
            c.append((head + ni[0], ni[1]))
    return c
if __name__ == '__main__':
    # Demo: draw utilities for a tiny agent population and show them.
    N = 2  # number of agents
    utilities = generate_utilities(N)
    print(f"{N} agents with utilities: {utilities}")
|
'''Define a function that takes a list of words as its argument and
returns a new list containing the reverse of every element of that list.'''
def element_reverse(l):
    """Return a new list with each element of ``l`` reversed."""
    return [word[::-1] for word in l]
# Demo: the original list is left untouched by element_reverse.
l = ['jorge', 'toti', 'alma']
print(element_reverse(l))
print(l)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from model_utils import init_bn, init_layer
import models_extension
class Block(nn.Module):
    """Two 3x3 conv+BN+ReLU stages followed by a stride-1 average pool;
    spatial dimensions are preserved throughout (padding 1 everywhere)."""

    def __init__(self, in_channels, out_channels):
        super(Block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1), bias=False)
        self.conv2 = nn.Conv2d(in_channels=out_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=1,
                               padding=(1, 1), bias=False)
        self.avgpool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.init_weights()

    def init_weights(self):
        # Project-wide initialisation helpers from model_utils.
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_bn(self.bn1)
        init_bn(self.bn2)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        return self.avgpool(x)
class SEDencoder(nn.Module):
    """Stack of four conv Blocks plus a 1x1 class-scoring convolution.

    NOTE(review): seq_len, freq_bins and cuda are accepted but unused here —
    presumably kept for signature symmetry with sibling models; confirm.
    """

    def __init__(self, classes_num, seq_len, freq_bins, cuda):
        super(SEDencoder, self).__init__()
        self.cb1 = Block(in_channels=1, out_channels=32)
        self.cb2 = Block(in_channels=32, out_channels=64)
        self.cb3 = Block(in_channels=64, out_channels=128)
        self.cb4 = Block(in_channels=128, out_channels=256)
        # 1x1 conv mapping 256 feature maps to per-class score maps.
        self.class_conv = nn.Conv2d(in_channels=256, out_channels=classes_num,
                                    kernel_size=(1, 1), stride=(1, 1),
                                    padding=(0, 0), bias=True)
        self.init_weights()

    def init_weights(self):
        init_layer(self.class_conv)

    def forward(self, input):
        # input: (samples_num, time_steps, freq_bins) -> add channel dim.
        (_, seq_len, freq_bins) = input.shape
        x = input.view(-1, 1, seq_len, freq_bins)
        '''(samples_num, feature_maps, time_steps, freq_bins)'''
        x = self.cb1(x)
        x = self.cb2(x)
        x = self.cb3(x)
        x = self.cb4(x)
        class_x = self.class_conv(x)
        '''(samples_num, class_number, time_steps, freq_bins)'''
        return x, class_x
class AuxillaryDecoder(nn.Module):
    """Mirror of SEDencoder's Block stack (256 -> 1 channels) used to
    reconstruct the input spectrogram as an auxiliary task."""

    def __init__(self, classes_num, seq_len, freq_bins, cuda):
        super(AuxillaryDecoder, self).__init__()
        self.cb1 = Block(in_channels=256, out_channels=128)
        self.cb2 = Block(in_channels=128, out_channels=64)
        self.cb3 = Block(in_channels=64, out_channels=32)
        self.cb4 = Block(in_channels=32, out_channels=1)

    def forward(self, input):
        '''input is of shape (samples_num, feature_maps, time_steps, freq_bins)'''
        x = self.cb1(input)
        x = self.cb2(x)
        x = self.cb3(x)
        x = self.cb4(x)
        '''x is of (samples_num, 1, time_steps, freq_bins)'''
        # Drop the singleton channel dimension.
        dec = x.squeeze(1)
        '''dec is of shape (samples_num, time_steps, freq_bins)'''
        return dec
class DualStageAttention(nn.Module):
    """Attention pooling in two stages: first over frequency bins, then over
    time steps, reducing per-class score maps to per-class probabilities."""

    def __init__(self, seq_len, freq_bins):
        super(DualStageAttention, self).__init__()
        self.fc_mel_prob = nn.Linear(freq_bins, freq_bins)
        self.fc_mel_att = nn.Linear(freq_bins, freq_bins)
        self.fc_time_prob = nn.Linear(seq_len, seq_len)  # operates on (bs, time, class)
        self.fc_time_att = nn.Linear(seq_len, seq_len)  # operates on (bs, time, class)

    def forward(self, class_x):
        """
        Args:
            class_x: (batch_size, classes_num, time_steps, freq_bins)
        Returns:
            mel_attw, time_attw, mel_x, time_x, out (see inline shapes)
        """
        # Stage 1: attention-weighted sum over frequency bins.
        mel_probs = torch.sigmoid(self.fc_mel_prob(class_x))
        mel_attw = F.softmax(self.fc_mel_att(class_x), dim=-1)
        mel_x = (mel_probs * mel_attw).sum(dim=-1)
        mel_x = mel_x.squeeze(-1)
        '''
        mel_probs: (batch_size, classes_num, time_steps, freq_bins)
        mel_attw: (batch_size, classes_num, time_steps, freq_bins)
        mel_x: (batch_size, classes_num, time_steps)
        '''
        # Stage 2: attention-weighted sum over time steps.
        time_probs = torch.sigmoid(self.fc_time_prob(mel_x))
        time_attw = F.softmax(self.fc_time_att(mel_x), dim=-1)
        time_x = (time_probs * time_attw).sum(dim=-1)
        # Clamp to keep outputs valid probabilities.
        time_x = time_x.clamp(0, 1)
        out = time_x.squeeze(-1)
        '''
        time_probs: (batch_size, classes_num, time_steps)
        time_attw: (batch_size, classes_num, time_steps)
        time_x: (batch_size, classes_num)
        '''
        return mel_attw, time_attw, mel_x, time_x, out
class MTL_SEDNetwork(nn.Module):
    """Multi-task SED model: encoder -> (dual-stage attention head for class
    predictions) + (auxiliary decoder reconstructing the input)."""

    def __init__(self, classes_num, seq_len, freq_bins, cuda):
        super(MTL_SEDNetwork, self).__init__()
        self.enc = SEDencoder(classes_num, seq_len, freq_bins, cuda)
        self.dec = AuxillaryDecoder(classes_num, seq_len, freq_bins, cuda)
        self.dsa = DualStageAttention(seq_len, freq_bins)

    def forward(self, input):
        """
        Args:
            input: (batch_size, time_steps, freq_bins)
        Returns:
            output_dict: dictionary containing y_pred, the reconstruction
            x_rec, the class score maps and the attention weights
        """
        x, class_x = self.enc(input)
        '''
        x: (batch_size, filters, time_steps, freq_bins)
        class_x: (batch_size, classes_num, time_steps, freq_bins)
        '''
        input_x = self.dec(x)
        '''
        input_x: (batch_size, time_steps, freq_bins)
        '''
        mel_attw, time_attw, mel_x, time_x, out = self.dsa(class_x)
        output_dict = {'y_pred': out, 'x_rec': input_x, 'class_wise_input': class_x, \
                       'mel_attw': mel_attw, 'time_attw': time_attw, 'mel_x': mel_x, 'time_x': time_x}
        return output_dict
def get_model(model_type):
    """Map a model-type name to its class; raise for unknown names."""
    if model_type == 'MTL_SEDNetwork':
        return MTL_SEDNetwork
    # The remaining models live in models_extension; resolve the attribute
    # lazily so only the requested class is ever touched.
    extension_attrs = {
        'GMP': 'VggishGMP',
        'GAP': 'VggishGAP',
        'GWRP': 'VggishGWRP',
        'AttrousCNN_2DAttention': 'AttrousCNN_2DAttention',
    }
    if model_type in extension_attrs:
        return getattr(models_extension, extension_attrs[model_type])
    raise Exception('Incorrect model type!')
|
import unittest
from katas.kyu_7.counting_in_the_amazon import count_arara
class CountAraraTestCase(unittest.TestCase):
    """count_arara counts in Arara: 'anane' = one, 'adak' = two, larger
    numbers are space-joined sums of twos plus an optional one."""

    def test_equals(self):
        self.assertEqual(count_arara(1), 'anane')

    def test_equals_2(self):
        self.assertEqual(count_arara(2), 'adak')

    def test_equals_3(self):
        self.assertEqual(count_arara(3), 'adak anane')

    def test_equals_4(self):
        # 9 = 2+2+2+2+1
        self.assertEqual(count_arara(9), 'adak adak adak adak anane')
|
import sensor, image, time
from pyb import Pin

# Colour thresholds (LAB): first tuple = produce on the ground in front,
# second tuple = produce on the tree behind.
thresholds = [(100, 0, 73, 40, 37, 127), (14, 52, 8, 126, 14, 127)]

# Configure the camera sensor.
sensor.reset()  # initialise the sensor
sensor.set_pixformat(sensor.RGB565)  # pixel format: RGB565 = colour, GRAYSCALE = greyscale
sensor.set_framesize(sensor.QVGA)  # frame size; QVGA is 320x240
sensor.skip_frames(time = 2000)
sensor.set_auto_gain(False)  # auto gain must be off for colour tracking
sensor.set_auto_whitebal(False)  # auto white balance must be off for colour tracking
clock = time.clock()

p_out = Pin('P0', Pin.OUT_PP)  # output pin signalling "ground produce" detected
p_out1 = Pin('P1', Pin.OUT_PP)  # output pin signalling "tree produce" detected
# Idle state is high; a pin is driven low while a matching blob is visible.
p_out.high()
p_out1.high()

while(True):
    clock.tick()
    img = sensor.snapshot()  # grab one frame; img is an image object
    # image.find_blobs(thresholds, roi=Auto, x_stride=2, y_stride=1, invert=False, area_threshold=10, pixels_threshold=10, merge=False, margin=0, threshold_cb=None, merge_cb=None)
    # find_blobs takes many parameters; the first is the colour-threshold list
    # (and must be a list).
    # roi is the region of interest; without it the whole image is searched.
    # x_stride is the minimum x-direction pixel width to look for.
    # invert reverses the threshold match (off by default).
    # area_threshold filters out blobs whose bounding area is smaller
    # (background pixels inside the box included).
    # pixels_threshold filters on the number of matching pixels only.
    # margin controls whether/when overlapping boxes are merged.
    # The return value is a list of blobs.
    blobs = img.find_blobs([thresholds[0]], pixels_threshold=200, area_threshold=200, merge=True)
    blobs1 = img.find_blobs([thresholds[1]], pixels_threshold=200, area_threshold=200, merge=True)
    if len(blobs) != 0:
        print('helloworld')
        p_out.low()
    else:
        p_out.high()
    if len(blobs1) != 0:
        print('helloworld1')
        p_out1.low()
    else:
        p_out1.high()
    for blob in blobs:
        img.draw_rectangle(blob.rect())  # draw the blob's bounding rectangle on the image
        img.draw_cross(blob.cx(), blob.cy())  # draw a cross at the blob centre
    for blob in blobs1:
        img.draw_rectangle(blob.rect())  # draw the blob's bounding rectangle on the image
        img.draw_cross(blob.cx(), blob.cy())  # draw a cross at the blob centre
    print(clock.fps())
|
# Gross pay with time-and-a-half for hours worked beyond 40.
hrs = float(input("Enter Hours:"))
rate = float(input("Enter Rate:"))
pay = (40 * rate) + (hrs - 40) * rate * 1.5 if hrs > 40 else hrs * rate
print(pay)
|
# Generated by Django 3.2 on 2021-04-28 13:15
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
    """Rename tour_inquiry.departure_date to date, add tour_package.Tour_ID
    and convert tour_package.departure to a multi-select month field."""

    dependencies = [
        ('Tour_app', '0023_alter_tour_package_departure'),
    ]

    operations = [
        # tour_inquiry.departure_date -> tour_inquiry.date
        migrations.RenameField(
            model_name='tour_inquiry',
            old_name='departure_date',
            new_name='date',
        ),
        # New free-form identifier on tour packages.
        migrations.AddField(
            model_name='tour_package',
            name='Tour_ID',
            field=models.CharField(default=' ', max_length=100),
        ),
        # departure becomes a multi-select over months / whole year.
        migrations.AlterField(
            model_name='tour_package',
            name='departure',
            field=multiselectfield.db.fields.MultiSelectField(choices=[('Whole Year', 'Whole Year'), ('Jan', 'Jan'), ('Fab', 'Fab'), ('Mar', 'Mar'), ('April', 'April'), ('May', 'May'), ('Jun', 'Jun'), ('July', 'July'), ('Aug', 'Aug'), ('Sept', 'Sept'), ('Oct', 'Oct'), ('Nov', 'Nov'), ('Dec', 'Dec')], default=' ', max_length=62),
        ),
    ]
|
from netCDF4 import Dataset
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
# ===================================================
# Function: Gets data from each year and concatonates it into single array
def GetData(year_index, VariableDataSet, var, lat_north, lat_south, long_min, long_max, time_min, time_max, year, levels):
    """Load one year of one NCEP variable, slice it to the region and
    day-of-year window of interest, and stack it onto VariableDataSet.

    NOTE(review): the `year` argument is immediately overwritten below, and
    the incoming time_min/time_max are overwritten by the leap-year branch —
    confirm whether those parameters are still needed.
    """
    year = 1948 + year_index
    dataset = Dataset('C:/Users/user/Documents/Personal/Research/MachineLearningClimate19/original_dataset/' + var + '/' + var + '.' + str(year) + '.nc', 'r')
    # Leap Year: shift the day-of-year window by one.
    if (dataset.dimensions['time'].size) == 366:
        time_min = 136
        time_max = 274
    else:
        time_min = 135
        time_max = 273
    # Creating Dataset: slice order is [time, level, lat, lon].
    dataset = dataset.variables[var][time_min:time_max, 0:levels, lat_north:lat_south, long_min:long_max]
    dataset = np.asarray(dataset).astype(float)
    # Concatenating along the time axis (axis 0).
    VariableDataSet = np.vstack((VariableDataSet, dataset))
    return VariableDataSet
# ====================================================
# Main
def main():
    """Extract four NCEP variables over the region of interest for 67 years
    (1948 onwards) and save one raw 4-D array per variable."""
    variables = ['air', 'uwnd', 'hgt', 'vwnd']
    folders = variables
    # Indices that coorespond to lat/long in form: COOR(INDEX) -> 20S(45) to 40N(20) 60E(24) to 120E(49)
    lat_north = 20
    lat_south = 45
    long_min = 24
    long_max = 49
    time_min = 135
    time_max = 273
    years = 67
    levels = 17
    # Indices for extended region 10 deg to include madagascar
    # (these deliberately override the long_min/long_max set above).
    long_min = 20
    long_max = 53
    lat_dpoints = abs(lat_south - lat_north)
    long_dpoints = abs(long_max - long_min)
    time_dpoints = abs(time_max - time_min)
    for var in range(0, len(variables)):
        print('Starting ' + variables[var] + ' variable...\n')
        # Start from an empty stack; GetData vstacks each year onto it.
        VariableDataSet = np.zeros((0, levels, lat_dpoints, long_dpoints), dtype = float)
        output_file_name = variables[var] + '_raw_4d_dataset_ext'
        # Running the function for each year
        for year_index in range(0, years):
            year = year_index + 1948
            print(year)
            VariableDataSet = GetData(year_index, VariableDataSet, variables[var], lat_north, lat_south, long_min, long_max, time_min, time_max, year, levels)
        np.save(output_file_name, VariableDataSet)


if __name__ == "__main__":
    main()
from dagster import execute_pipeline
from zitarice.pipelines.cereals_pipeline import hello_cereal_pipeline
def test_hello_cereal_pipeline():
    """Smoke test: the pipeline completes and the solid emits 77 records."""
    result = execute_pipeline(hello_cereal_pipeline)
    assert result.success
    cereal_output = result.result_for_solid("hello_cereal").output_value()
    assert len(cereal_output) == 77
|
import datetime
import logging
from django.db.models import Sum
from django.utils import timezone
from apps.commcalc.models import Commission
from apps.jobs.models import Jobs
from apps.users.models import UserProfile
# Job type labels indexed by Jobs.jobtype (index 0 = not applicable).
JOB_TYPE=['N/A','Plumbing','Electrical','Furnishing','Construction']

class MetricManager(object):
    """Aggregates dashboard metrics for the current calendar year:
    job status/type breakdowns, monthly job & user growth, and revenue.
    """

    def __init__(self):
        # Cache the current year's [Jan 1, Dec 31] window for all queries.
        self.start_year, self.end_year = self.__get_start_end_date()

    def __convert_naive_to_aware(self, _date):
        # NOTE(review): despite the name, the returned datetime has *no*
        # tzinfo attached (it is still naive) — confirm against USE_TZ.
        return datetime.datetime.combine(_date,datetime.time(0,0))

    def __get_start_end_date(self,):
        # Return (Jan 1, Dec 31) datetimes for the current year.
        current_date=timezone.now().date()
        current_year=current_date.year
        start_year=self.__convert_naive_to_aware(datetime.datetime(current_year,1,1))
        end_year=self.__convert_naive_to_aware(datetime.datetime(current_year,12,31))
        return (start_year, end_year)

    def get_jobs_status_info(self, start_date=None, end_date=None):
        """Returns the total jobs status count for start_date and end_date

        Output: one dict per status with its percentage share of all jobs
        (name/y keys, pie-chart shaped).
        NOTE(review): the date-range branch is an unimplemented ``pass`` —
        all jobs are always counted; and ``total`` is 0 when there are no
        jobs, which raises ZeroDivisionError below.
        """
        status=['New','Inspection','Accepted','Completed','Rejected','Discarded']
        status_count=[0]*len(status)
        if start_date and end_date:
            pass
        else:
            jobs=Jobs.objects.all()
        total=float(len(jobs))
        for job in jobs:
            index=int(job.status)  # status must be int-convertible
            status_count[index] += 1
        return [dict(name=status[i],y=round(status_count[i]/total*100,2)) for i in range(len(status))]

    def get_job_type_info(self, start_date=None, end_date=None):
        """Returns the total jobs type count for start_date and end_date

        Output: one dict per JOB_TYPE with a single-element ``data`` list
        (bar-chart shaped). Date filtering is likewise not implemented.
        """
        type_count=[0]*len(JOB_TYPE)
        if start_date and end_date:
            pass
        else:
            jobs=Jobs.objects.all()
        for job in jobs:
            index=int(job.jobtype)
            type_count[index] += 1
        return [dict(name=JOB_TYPE[i],data=[type_count[i]]) for i in range(len(JOB_TYPE))]

    def get_user_jobs_count(self,):
        """Return the total jobs and users per month

        Two 12-element monthly series for the current year. user_type=2 is
        presumably the customer role — confirm against UserProfile.
        """
        # start_year, end_year = self.__get_start_end_date()
        jobs=Jobs.objects.filter(creation_date__range=[self.start_year, self.end_year])
        users=UserProfile.objects.filter(date_joined__range=[self.start_year, self.end_year] ,user_type=2)
        jobs_count=[0]*12
        users_count=[0]*12
        for job in jobs:
            jobs_count[job.creation_date.month - 1] += 1
        for user in users:
            users_count[user.date_joined.month - 1] += 1
        data=[{'name':'Jobs', 'data':jobs_count},{'name':'Users','data':users_count}]
        return data

    def get_revenue(self,):
        """Return the revenue stream

        Output list: [active users, active handymen, total revenue,
        revenue per distinct customer, revenue per job, commissions paid].
        NOTE(review): the aggregates return None when no rows match and
        ``jobs.count()`` can be zero — both would raise here; confirm
        upstream guarantees that completed jobs (status=3) exist.
        """
        jobs = Jobs.objects.filter(status=3)
        commissions = Commission.objects.filter(is_paid=True).aggregate(Sum('amount'))
        data=[]
        # users
        data.append(UserProfile.objects.filter(user_type=2, is_active=True).count())
        # handymen
        data.append(UserProfile.objects.filter(user_type=1, is_active=True).count())
        revenue=jobs.aggregate(Sum('fee'))['fee__sum']
        data.append("Rs.{:,.2f}".format(revenue))
        # revenue per user
        data.append("Rs.{:,.2f}".format(round(revenue/jobs.values('customer').distinct().count(), 2)))
        # revenue per job
        data.append("Rs.{:,.2f}".format(round(revenue/jobs.count(),2)))
        data.append("Rs.{:,.2f}".format(commissions['amount__sum']))
        return data

    def get_job_type_per_year_info(self):
        """Return job type per year

        12 monthly counts per JOB_TYPE for the current year.
        """
        jobs=Jobs.objects.filter(creation_date__range=[self.start_year, self.end_year])
        job_type_count={str(i):[0]*12 for i in range(len(JOB_TYPE))}
        for job in jobs:
            # NOTE(review): keys are str(i), so this assumes job.jobtype is a
            # *string* index ('0'..'4'), whereas get_job_type_info casts it
            # with int() — confirm the model field type.
            job_type_count[job.jobtype][job.creation_date.month - 1] += 1
        return [dict(name=JOB_TYPE[i],data=job_type_count[str(i)]) for i in range(len(JOB_TYPE))]

    def get_revenue_per_year(self):
        """Return revenue per year

        Monthly totals for overall revenue plus each job type; completed
        (status=3) jobs only, bucketed by completion month. ``job.fee`` is
        presumably a Money-like object exposing ``.amount`` — confirm.
        """
        REVENUE_STREAM = ['Total Revenue'] + JOB_TYPE[1:]
        revenue={str(i):[0]*12 for i in range(len(REVENUE_STREAM))}
        jobs=Jobs.objects.filter(creation_date__range=[self.start_year, self.end_year], status=3)
        for job in jobs:
            revenue['0'][job.completion_date.month - 1] += job.fee.amount
            revenue[job.jobtype][job.completion_date.month - 1] += job.fee.amount
        return [dict(name=REVENUE_STREAM[int(i)],data=revenue[i]) for i in sorted(revenue.keys())]
|
from django.conf.urls import url
from . import views
app_name = 'qa' # To use the name in templates as {% url 'qa:index' %}

# URL routes for the Q&A app: question CRUD, answer CRUD, and voting.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    # Question create / edit / delete
    url(r'^question/create/$', views.question_new, name='new_question'),
    url(r'^question/(?P<pk>[0-9]+)/edit/$', views.question_edit, name='question_edit'),
    url(r'^question/(?P<pk>[0-9]+)/delete/$', views.question_delete, name='question_delete'),
    # Question detail page (lists its answers)
    url(r'^question/(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='answers'),
    # Answer create / edit / delete (NOTE(review): for 'new_answer' the pk
    # presumably identifies the parent question — confirm in views.new_answer)
    url(r'^answer/(?P<pk>[0-9]+)/new/$', views.new_answer, name='new_answer'),
    url(r'^answer/(?P<pk>[0-9]+)/edit/$', views.answer_edit, name='answer_edit'),
    url(r'^answer/(?P<pk>[0-9]+)/delete/$', views.answer_delete, name='answer_delete'),
    # Voting endpoints
    url(r'^question/(?P<pk>[0-9]+)/vote/$', views.question_vote, name='question_vote'),
    url(r'^answer/(?P<pk>[0-9]+)/vote/$', views.answer_vote, name='answer_vote'),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: samuelololol
# Email: samuelololol at google mail dot company
# Website: https://github.com/samuelololol/pythonapps/tree/master/thread
import threading
import Queue
thread_numbers = 4  # number of worker threads to spawn

class mythread(threading.Thread):
    """Worker thread (Python 2): pulls items from the global FIFO queue
    ``Pool`` and processes them with work() until it receives the sentinel
    value 0, which tells the worker to shut down.
    """
    def run(self):
        global Pool
        tname = threading.currentThread().getName()
        while True:
            item = Pool.get()  # blocks until an item is available
            if item != None:
                if item == 0:
                    break  # sentinel: stop this worker
                work(item,tname)
                Pool.task_done()
        # Mark the sentinel itself as done so Pool.join() can return.
        Pool.task_done()
def work(item,name):
    # Print the item together with the worker's name; the global mutex
    # serializes stdout access across threads (Python 2 print statement).
    global mutex
    mutex.acquire()
    print item,name
    mutex.release()
def main():
    # Wire up the shared queue and lock, start the workers, enqueue the work
    # items, then enqueue one sentinel (0) per thread and wait for drain.
    global Pool, mutex, thread_numbers
    mutex = threading.Lock()
    threads = []
    Pool = Queue.Queue(0)  # maxsize 0 => unbounded FIFO queue
    # example data
    ha = [1,2,3,4,5,6,7,8,9]
    # create threads
    for x in range(thread_numbers):
        threads.append(mythread())
    # all threads start
    for t in threads:
        t.start()
    # push item in to FIFO queue: Pool
    for item in ha:
        Pool.put(item)
    # add the leave condition (sentinel 0) for each thread
    for idx in range(thread_numbers):
        Pool.put(0)
    # Blocks until task_done() has been called once per put().
    Pool.join()

if __name__ == '__main__':
    main()
|
# Defaults and configuration for TMBF
# Global defaults apply to every mode; the prefixed groups further down
# (TUNE_/AP_/FB_/T_) override individual settings per operating mode.
harmonic = 80
bunch = 450
# Settings for basic tune measurement.
tune_threshold = 0.3
min_block_sep = 20
min_block_len = 20
# Settings for peak tune measurement
peak_smoothing = '/16'
peak_fit_threshold = 0.3
peak_max_error = 1
peak_min_width = 0
peak_max_width = 1
# Default tune selection
tune_select = 'Peak Fit'
sweep_dwell_time = 100
blanking_interval = 10000
blanking_source = 'SCLK Input'
sweep_range = 0.05
alarm_range = 0.01
tune_direction = 'Forwards'
keep_feedback = 'No feedback'
dac_output = 0 # By default DAC output is off
bunch_mode = 'All Bunches' # By default detect on all bunches
detector_input = 'FIR'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Mode specific settings
# Multibunch tune measurement
TUNE_sweep_gain = '-42dB'
# Accelerator physics mode (detect on raw ADC, wider sweep around 0.25)
AP_sweep_gain = '0dB'
AP_tune = 0.25
AP_sweep_range = 0.245
AP_alarm_range = 0.245
AP_detector_input = 'ADC'
AP_min_block_len = 5
# Feedback on, single bunch tune measurement
FB_dac_output = 1 # Enable FIR output in this mode
FB_keep_feedback = 'Keep feedback'
FB_harmonic = 933
FB_sweep_gain = '-48dB'
# Lab setup
T_tune = 0.7885
T_harmonic = 37
T_detector_input = 'ADC'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-05 10:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds an optional free-text ``source`` column to DivisionGeography
    and OrganisationGeography (e.g. to record where a boundary came from).
    """

    dependencies = [("organisations", "0052_auto_20180703_1430")]

    operations = [
        migrations.AddField(
            model_name="divisiongeography",
            name="source",
            field=models.CharField(blank=True, max_length=255),
        ),
        migrations.AddField(
            model_name="organisationgeography",
            name="source",
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
|
# -*- coding: utf-8 -*-
class Solution:
    def minPartitions(self, n: str) -> int:
        """Minimum number of positive deci-binary numbers that sum to n.

        Each deci-binary number contributes at most 1 to every digit
        position, so the answer is simply the largest digit present in n.
        """
        largest = 0
        for digit in n:
            value = int(digit)
            if value > largest:
                largest = value
        return largest
if __name__ == "__main__":
    # Quick self-checks for minPartitions.
    solver = Solution()
    cases = [("32", 3), ("82374", 8), ("27346209830709182346", 9)]
    for digits, expected in cases:
        assert solver.minPartitions(digits) == expected
|
#train_model.py
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten,\
Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import numpy as np
from alexnet import alexnet80x60
#from keras.applications.mobilenet import MobileNet
# Training driver: fit the AlexNet-style model on pre-balanced frame data.
WIDTH = 80
HEIGHT = 60
# NOTE(review): LR is only interpolated into MODEL_NAME below; the 'adam'
# optimizer is used with its default learning rate.
LR = 1e-3
EPOCHS = 2
MODEL_NAME = f'tf2payload-heavy-{LR}-alexnetv2-{EPOCHS}-epochs.model'
# Each row of the balanced data is (frame, one-hot label).
train_data = np.load('training_data_balanced.npy', allow_pickle = True)
train = train_data[:-200]   # hold out the last 200 samples for validation
test = train_data[-200:]
# Assumes frames reshape to 80x60 RGB — confirm against the capture script.
train_x = np.array([i[0] for i in train]).reshape(-1,WIDTH,HEIGHT,3)
train_y = [i[1] for i in train]
test_x = np.array([i[0] for i in test]).reshape(-1,WIDTH,HEIGHT,3)
test_y = [i[1] for i in test]
model = alexnet80x60() #MobileNet(input_shape=(WIDTH,HEIGHT,1), include_top=False)
#opt = keras.optimizers.SGD(learning_rate=0.01, momentum=0.5, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#have to do this to avoid list error (labels arrive as Python lists)
train_x = np.array(train_x)
train_y = np.array(train_y)
test_x = np.array(test_x)
test_y = np.array(test_y)
model.fit(train_x, train_y, batch_size = 40, epochs = EPOCHS, verbose = 1)
val_loss, val_acc = model.evaluate(test_x, test_y)
print("Val loss: ", val_loss,"Val acc: ", val_acc)
model.save(MODEL_NAME)
# #Alexnet implementation from https://www.mydatahack.com/building-alexnet-with-keras/
# model = Sequential()
# #4728, 80, 60, 1 is the shape of the input data
# # 1st Convolutional Layer
# model.add(Conv2D(filters=96, input_shape=(80, 60, 1), kernel_size=(11,11), strides=(4,4), padding='same', data_format='channels_first'))
# model.add(Activation('relu'))
# # Pooling
# model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# # Batch Normalisation before passing it to the next layer
# model.add(BatchNormalization())
# # 2nd Convolutional Layer
# model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid', data_format='channels_first'))
# model.add(Activation('relu'))
# # Pooling
# model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# # Batch Normalisation
# model.add(BatchNormalization())
# # 3rd Convolutional Layer
# model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid', data_format='channels_first'))
# model.add(Activation('relu'))
# # Batch Normalisation
# model.add(BatchNormalization())
# # 4th Convolutional Layer
# model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid', data_format='channels_first'))
# model.add(Activation('relu'))
# # Batch Normalisation
# model.add(BatchNormalization())
# # 5th Convolutional Layer
# model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid', data_format='channels_first'))
# model.add(Activation('relu'))
# # Pooling
# model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# # Batch Normalisation
# model.add(BatchNormalization())
# # Passing it to a dense layer
# model.add(Flatten())
# # 1st Dense Layer
# model.add(Dense(4096, input_shape=(224*224*3,)))
# model.add(Activation('relu'))
# # Add Dropout to prevent overfitting
# model.add(Dropout(0.4))
# # Batch Normalisation
# model.add(BatchNormalization())
# # 2nd Dense Layer
# model.add(Dense(4096))
# model.add(Activation('relu'))
# # Add Dropout
# model.add(Dropout(0.7))
# # Batch Normalisation
# model.add(BatchNormalization())
# # 3rd Dense Layer
# model.add(Dense(1000))
# model.add(Activation('relu'))
# # Add Dropout
# model.add(Dropout(0.7))
# # Batch Normalisation
# model.add(BatchNormalization())
# # Output Layer
# model.add(Dense(17))
# model.add(Activation('softmax')) |
#! /usr/bin/env python
from findfiles import *
import math
import sys
def distribute_readsets(rfsets, nslots, nsequential=1, nnodes=None):
    '''Distribute read-file sets across compute nodes, balancing total size.

    The sets are sorted largest-first and dealt out in a boustrophedon
    (ascending/descending) order, i.e. 0,1,2,3,3,2,1,0,0,1,2..., so every
    node ends up with a similar total size without any size bookkeeping.

    Args:
        rfsets(list): objects exposing a size() method.
        nslots(int): number of simultaneous processes that can be run on a node. This is
            provided as -j to parallel
        nsequential(int): number of processes each slot runs one after another.
        nnodes(int): total number of nodes being requested. If None, it is
            derived from the process count and per-node capacity.

    Returns:
        list of lists: one list of read sets per node.
    '''
    nproc = len(rfsets)  # The number of processes that need to run
    if nnodes is None:
        # Maximum number of processes that will run on 1 node
        max_proc_per_node = nslots * nsequential
        # BUG FIX: ceiling division must use // — plain / is float division
        # on Python 3 (the original relied on Python 2 integer division).
        nnodes = (nproc // max_proc_per_node) + (nproc % max_proc_per_node > 0)
    else:
        if nnodes * nslots * nsequential < nproc:
            # Not enough capacity: raise nsequential to fit (ceiling div).
            nsequential = (nproc // (nnodes * nslots)) + ((nproc % (nnodes * nslots)) > 0)
            max_proc_per_node = nslots * nsequential
    # Largest sets first so the zig-zag deal balances node sizes.
    rfsets.sort(key=lambda x: -x.size())
    nodes = [list() for _ in range(nnodes)]
    for j, rfset in enumerate(rfsets):
        # Ascending on even passes, descending on odd passes (floor division
        # again — // keeps the pass index an int on Python 3).
        nn = len(nodes)
        destnode = (j % nn) if (j // nn) % 2 == 0 else (nn - (j % nn) - 1)
        # Add process to destination node
        nodes[destnode].append(rfset)
    return nodes
if __name__=='__main__':
    # Discover sequencing files, pair mates, and print a per-node work plan.
    # NOTE(review): Python 2 syntax (print >> statements); argparse and the
    # file helpers come in via ``from findfiles import *`` — confirm.
    parser = argparse.ArgumentParser(description='Run a test of file discovery scripts.')
    parser.add_argument('--include_files',
                        help='''Only include files that match the given search string. Can
                                be a regular expression.''')
    parser.add_argument('--exclude_files',
                        help='''Exclude files that match the given search string. Can be a
                                regular expression.''')
    parser.add_argument('--file_type', default="FASTQ", choices=FILE_EXTENSIONS.keys(),
                        help='''Types of files to find.''')
    parser.add_argument('--by_readname', action='store_true',
                        help='''Use readname instead of filename to find pairs. The
                                advantage here is that the files can have any random name,
                                as long as the read names match they will be paired. There
                                are some pitfalls that might cause this to fail: 1) If you
                                have multiple copies of the same file with different
                                filenames; and 2) If some file sets include both paired and
                                unpaired files.''')
    parser.add_argument('--no_check', action='store_true',
                        help='''Do not check the readnames when finding pairs by filename.''')
    parser.add_argument('--nslots', type=int, default=16,
                        help='''Number of simultaneous processes that can be run on a
                                node. For single-threaded programs, this is equal to the
                                number of CPUs on each node. Otherwise, nslots should be
                                the total number of CPUs divided by the number of CPUs
                                used by each process (rounded up). For example, if each
                                process uses 300% CPU and each node has 16 CPUs, nslots
                                should be set to 5''')
    parser.add_argument('--nnodes', type=int,
                        help='''Number of nodes to run on.''')
    parser.add_argument('--nsequential', type=int, default=1,
                        help='''Number of processes to run sequentially.''')
    parser.add_argument('infiles', nargs='*', default=".")
    args = parser.parse_args()
    # Collect candidate files (recursive walk when no explicit list given).
    if args.infiles == '.':
        biofiles = [BioFile(f) for f in rec_find_files('.') if os.path.isfile(f)]
    else:
        biofiles = [BioFile(f) for f in args.infiles if os.path.isfile(f)]
    if len(biofiles) == 0: sys.exit("No matching files were found.")
    biofiles = filter_files(biofiles, args.file_type, args.include_files, args.exclude_files)
    if len(biofiles) == 0: sys.exit("No matching files were found.")
    print >>sys.stderr, '%d %s files were found' % (len(biofiles), args.file_type)
    # Mate pairing only applies to sequence data; otherwise just list files.
    if args.file_type != 'FASTQ' and args.file_type != 'FASTA':
        for bf in biofiles:
            print >>sys.stderr, bf
        sys.exit()
    print >>sys.stderr, 'Finding mate files'
    if not args.by_readname:
        rfsets = find_mate_files_byfilename(biofiles, not args.no_check)
    else:
        rfsets = find_mate_files_byreadname(biofiles)
    print >>sys.stderr, '%d file sets were found:' % (len(rfsets))
    print >>sys.stderr, '\t%d paired sets' % sum(rfset.paired() for rfset in rfsets)
    print >>sys.stderr, '\t%d unpaired sets' % sum(rfset.unpaired() for rfset in rfsets)
    print >>sys.stderr, '\t%d paired+unpaired sets' % sum(rfset.both() for rfset in rfsets)
    # Balance the sets across nodes, then emit one banner + file list per node
    # (banner on stderr, file list on stdout).
    nodes = distribute_readsets(rfsets, args.nslots, args.nsequential, args.nnodes)
    for i,node in enumerate(nodes):
        print >>sys.stderr, '#################################### Node%02d ####################################' % i
        s = '# nproc: %d size: %s' % (len(node), sizeof_fmt(sum(_.size() for _ in node)))
        print >>sys.stderr, '%s#' % s.ljust(79)
        print >>sys.stderr, '################################################################################'
        print '\n'.join(str(rfset) for rfset in node)
|
from ncclient import manager
import json
from pprint import pprint
import xmltodict
import xml.dom.minidom
# Open a NETCONF session to the Cisco DevNet always-on IOS-XE sandbox.
# NOTE(review): credentials are the published sandbox defaults; host key
# verification is disabled deliberately for this lab device.
netconf_manager = manager.connect(host='ios-xe-mgmt-latest.cisco.com',
                                  port=10000,username='developer',
                                  password='C1sco12345',
                                  hostkey_verify=False,
                                  allow_agent=False,
                                  look_for_keys=False)
# Fetch the full running configuration (RPC reply as XML).
running_config = netconf_manager.get_config('running')
# pprint(xml.dom.minidom.parseString(str(running_config)).toprettyxml())
# Convert the XML reply into nested dicts and show just the interface tree.
responsejson = xmltodict.parse(str(running_config))
pprint(responsejson['rpc-reply']['data']['native']['interface'])
netconf_manager.close_session()
|
import turtle
# Draw a hexagon-based figure by replaying a fixed sequence of 100-unit
# moves and 60-degree turns (each left/right is one hexagon vertex angle).
turtle.shape("turtle")
# First hexagon ring: forward + left x6 brings the turtle back to start.
turtle.forward(100)
turtle.left(60)
turtle.forward(100)
turtle.left(60)
turtle.forward(100)
turtle.left(60)
turtle.forward(100)
turtle.left(60)
turtle.forward(100)
turtle.left(60)
turtle.forward(100)
turtle.left(60)
# Remaining hexagons traced clockwise (right turns), with occasional
# doubled turns / left turns to hop between adjacent cells.
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.left(60)
turtle.forward(100)
turtle.right(60)
turtle.forward(100)
turtle.right(60)
|
from scipy.io import wavfile
import math
import re
import numpy as np
import matplotlib.pyplot as plt
def moving_average(interval, window_size):
    """Smooth *interval* with a box (uniform) filter of *window_size* taps.

    Returns an array the same length as the input ('same' convolution).
    """
    taps = int(window_size)
    kernel = np.full(taps, 1.0 / float(taps))
    return np.convolve(interval, kernel, 'same')
# --- Input files and framing parameters --------------------------------------
file_no = '17'
# NOTE(review): Windows paths in non-raw strings; '\P', '\A', '\V' happen not
# to be escape sequences here, but raw strings (r'...') would be safer.
audio_file ='F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation PE V3\Analyze\Vowel_Evaluation_V3_I6_M12\\' + file_no + '.wav'
textgridFA = 'F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation PE V3\Analyze\Vowel_Evaluation_V3_I6_M12\\' + file_no + 'FA.TextGrid'
textgridPE = 'F:\Projects\Active Projects\Project Intern_IITB\Vowel Evaluation PE V3\Analyze\Vowel_Evaluation_V3_I6_M12\\' + file_no + 'PE.TextGrid'
window_dur = 50         # analysis window length in ms
hop_dur = 7             # hop between successive frames in ms
threshold_smooth =120   # minimum frame count before smoothing is applied
fs, data = wavfile.read(audio_file) # Reading data from wav file in an array
data = data / float(2 ** 15) # Normalizing it to [-1,1] range from [-2^15,2^15] (assumes 16-bit PCM)
window_size = int(window_dur * fs * 0.001) # Converting window length to samples
hop_size = int(hop_dur * fs * 0.001) # Converting hop length to samples
window_type = np.hanning(window_size) # Window type: Hanning (by default)
no_frames = int(math.ceil(len(data) / (float(hop_size)))) # Determining the number of frames
zero_array = np.zeros(window_size) # Appending appropriate number of zeros so the last frame is full
data = np.concatenate((data, zero_array))
length = len(data) # Finding length of the actual data
x_values = np.arange(0, len(data), 1) / float(fs)  # time axis in seconds, for plotting
#----------------------------------------------------------------------------------------------------------------------#
# Noise-floor estimate: mean squared amplitude over the first 800 samples,
# which are assumed to contain no speech.
noise_energy = 0 # Initializing noise energy
energy = [0] * length # Initializing list energy
for bit in range(length):
    energy[bit] = data[bit] * data[bit] # Squaring each point of the data to calculate noise energy
for ne in range(0, 800):
    noise_energy += energy[ne] # Taking the first 800 samples of the original sound file
noise_energy /= 800 # Averaging the square of the first 800 noise samples
#----------------------------------------------------------------------------------------------------------------------#
# Frame-wise short-term energy, normalized so the maximum is 1.
st_energy = []
for i in range(no_frames): # Calculating frame wise short term energy
    frame = data[i * hop_size:i * hop_size + window_size] * window_type # Multiplying each frame with a hamming window
    st_energy.append(sum(frame ** 2)) # Calculating the short term energy
max_st_energy = max(st_energy) # Maximum value of Short term energy curve
for i in range(no_frames):
    st_energy[i] = st_energy[i]/max_st_energy # Normalizing the curve
o_st_energy = st_energy  # NOTE(review): alias, not a copy — same list object
#----------------------------------------------------------------------------------------------------------------------#
# Smooth the curve with a 20-point moving average, but only for long signals.
if len(st_energy) < threshold_smooth:
    st_energy = st_energy
else:
    st_energy = moving_average(st_energy,20)
#----------------------------------------------------------------------------------------------------------------------#
# Local maxima of the (smoothed) short-term energy curve. ``peak`` keeps the
# curve's length: the energy value at peak positions, 0 elsewhere.
peak = [] # Initializing list
count_of_peaks = 0 # Initializing no of peaks
for p in range(len(st_energy)):
    if p == 0: # First element
        if st_energy[p] > st_energy[p + 1]: # If the first element is greater than the succeeding element it is a peak.
            peak.append(st_energy[p]) # Append the energy level of the peak
            count_of_peaks += 1 # Increment count
        else:
            peak.append(0) # Else append a zero
    elif p == len(st_energy) - 1: # Last element
        if st_energy[p] > st_energy[p - 1]: # If the last element is greater than the preceding element it is a peak.
            peak.append(st_energy[p]) # Append the energy level of the peak
            count_of_peaks += 1 # Increment count
        else:
            peak.append(0) # Else append a zero
    else: # All the other elements
        if st_energy[p] > st_energy[p + 1] and st_energy[p] > st_energy[p - 1]: # If the element is greater than the element preceding and succeeding it, it is a peak.
            peak.append(st_energy[p]) # Append the energy level of the peak
            count_of_peaks += 1 # Increment count
        else:
            peak.append(0) # Else append a zero
#----------------------------------------------------------------------------------------------------------------------#
# Adaptive threshold combining the noise floor with the mean peak height.
threshold = 0.01 + 0.04 * (noise_energy + (sum(peak) / count_of_peaks)) # The threshold which eliminates minor peaks.
#----------------------------------------------------------------------------------------------------------------------#
# Keep only the peaks above the threshold, remembering their frame indices.
count_of_peaks_threshold = 0
peak_threshold = []
location_peak = []
for p in range(len(peak)):
    if threshold < peak[p]: # If the peak value is greater than the threshold
        peak_threshold.append(peak[p]) # Append the energy level to a new list
        count_of_peaks_threshold += 1 # Increment count
        location_peak.append(p) # Make note of the location of the peak
    else:
        peak_threshold.append(0) # Else append zero
#----------------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------------#
valley = []
count_of_valleys = 0
location_valley = []
for p in range(len(st_energy)):
if p == 0: # For the first element
if st_energy[p] < st_energy[p + 1]: # If the first element is lesser than the succeeding element
valley.append(st_energy[p]) # Append the energy level of the valley
count_of_valleys += 1 # Increment the count
location_valley.append(p) # Make note of the position of the valley
else:
valley.append(0) # Else append zero
elif p == len(st_energy) - 1: # For the last element
if st_energy[p] < st_energy[p - 1]: # If the last element is lesser than the preceding element
valley.append(st_energy[p]) # Append the energy level of the valley
count_of_valleys += 1 # Increment the count
location_valley.append(p) # Make note of the position of the valley
else:
valley.append(0) # Else append zero
else:
if st_energy[p] < st_energy[p + 1] and st_energy[p] < st_energy[
p - 1]: # If the element is lesser than the element preceding and succeeding it
valley.append(st_energy[p]) # Append the energy level of the valley
count_of_valleys += 1 # Increment the count
location_valley.append(p) # Make note of the position of the valley
else:
valley.append(0) # Else append zero
#----------------------------------------------------------------------------------------------------------------------#
location = location_peak + location_valley # Combing the list of the location of all the peaks and valleys
location.sort() # Sorting it so that each peak has a valley to it's left and right
ripple_valley = []
ripple_peak = []
ripple = []
# What we need is only the valleys to the left and right of the peak. The other valleys are not important
for k in range(len(location_peak)):
q = location.index(location_peak[k]) # Extracting the location of the peak
if location_peak[k] == len(peak) - 1: # If the peak is the last element of the short term energy curve
ripple.append(location[q - 1]) # The location of the valley before the last peak is added
ripple_valley.append(location[q - 1]) # The location of the valley before the last peak is added
ripple.append(location[q]) # The location of the peak is added
ripple_peak.append(location[q]) # The location of the peak is added
ripple.append(location[q - 1]) # The location of the valley before the last peak is added, as there is no valley after it
ripple_valley.append(location[q - 1]) # The location of the valley before the last peak is added, as there is no valley after it
elif location_peak[k] == 0: # If the peak is the first element of the short term energy curve
ripple.append(location[q + 1]) # The location of the valley after the first peak is added
ripple_valley.append(location[q + 1]) # The location of the valley after the first peak is added
ripple.append(location[q]) # The location of the peak is added
ripple_peak.append(location[q]) # The location of the peak is added
ripple.append(location[q + 1]) # The location of the valley after the first peak is added, as there is no valley after it
ripple_valley.append(location[q + 1]) # The location of the valley after the first peak is added, as there is no valley after it
else: # For every other element
ripple.append(location[q - 1]) # The location of the valley before the peak is added
ripple_valley.append(location[q - 1]) # The location of the valley before the peak is added
ripple.append(location[q]) # The location of the peak is added
ripple_peak.append(location[q]) # The location of the peak is added
ripple.append(location[q + 1]) # The location of the valley after the peak is added
ripple_valley.append(location[q + 1]) # The location of the valley after the peak is added
#----------------------------------------------------------------------------------------------------------------------#
value_valley = []
for j in range(len(ripple_valley)):
value_valley.append(st_energy[ripple_valley[j]])
#----------------------------------------------------------------------------------------------------------------------#
ripple_value = []
for k in range(1, len(ripple), 3):
ripple_value.append(
(st_energy[ripple[k]] - st_energy[ripple[k + 1]]) / (st_energy[ripple[k]] - st_energy[ripple[k - 1]]))
loc = []
for k in range(len(ripple_value)):
loc.append(location_peak[ripple_value.index(ripple_value[k])])
#----------------------------------------------------------------------------------------------------------------------#
for k in range(len(ripple_value)):
if k != len(ripple_value) - 1:
if location_peak[ripple_value.index(ripple_value[k + 1])] - location_peak[ripple_value.index(ripple_value[k])] < 20:
if ripple_value[k] > 3.0 and ripple_value[k + 1] < 1.4 or ripple_value[k] > 1.02 and ripple_value[k + 1] < 0.3:
v1 = st_energy[location_peak[ripple_value.index(ripple_value[k])]]
v2 = st_energy[location_peak[ripple_value.index(ripple_value[k + 1])]]
if v1 >= v2:
loc.remove(location_peak[ripple_value.index(ripple_value[k + 1])])
else:
loc.remove(location_peak[ripple_value.index(ripple_value[k])])
else:
if ripple_value[k] > 3.0:
loc.remove(location_peak[ripple_value.index(ripple_value[k])])
#----------------------------------------------------------------------------------------------------------------------#
peak_threshold[:] = []
for j in range(no_frames):
if j in loc:
peak_threshold.append(st_energy[loc.index(j)])
else:
peak_threshold.append(0)
#----------------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------------#
# Sample ranges of the retained peaks: for every surviving peak frame,
# record the [start, end) sample indices of its 50 ms window.
mark = []
for p in range(len(peak_threshold)): # Extracting a 50 ms slice of the audio file based on the frame number
    # BUG FIX: the original used ``is not 0`` — an *identity* test that only
    # works for CPython's cached small ints, never matches a float 0.0, and
    # is a SyntaxWarning on Python 3.8+. A value comparison is intended.
    if peak_threshold[p] != 0:
        mark.append(p * hop_size)
        mark.append(p * hop_size + window_size)
#----------------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------------#
#----------------------------------------------------------------------------------------------------------------------#
text_grid_1 = open(textgridFA, 'r') # Open the FA TextGrid
text_grid_2 = open(textgridPE, 'r') # Open the TextGrid created by the script
data_1 = text_grid_1.read() # Read and assign the content of the FA TextGrid to data_1
data_2 = text_grid_2.read() # Read and assign the content of the created TextGrid to data_2
time_1 = [] # Creating an empty list to record time
time_2 = []
counter = 0
#----------------------------------------------------------------------------------------------------------------------#
for m in re.finditer('text = "', data_1):
if data_1[m.start() - 33] == '=':
time_1.append(float(
data_1[m.start() - 32] + data_1[m.start() - 31] + data_1[m.start() - 30] + data_1[m.start() - 29] +
data_1[m.start() - 28] + data_1[m.start() - 27] + data_1[m.start() - 26]))
time_1.append(float(
data_1[m.start() - 13] + data_1[m.start() - 12] + data_1[m.start() - 11] + data_1[m.start() - 10] +
data_1[m.start() - 9] + data_1[m.start() - 8] + data_1[m.start() - 7] + data_1[m.start() - 6] +
data_1[m.start() - 5]))
else:
time_1.append(float(
data_1[m.start() - 33] + data_1[m.start() - 32] + data_1[m.start() - 31] + data_1[m.start() - 30] +
data_1[m.start() - 29] + data_1[m.start() - 28] + data_1[m.start() - 27] + data_1[m.start() - 26]))
time_1.append(float(
data_1[m.start() - 13] + data_1[m.start() - 12] + data_1[m.start() - 11] + data_1[m.start() - 10] +
data_1[m.start() - 9] + data_1[m.start() - 8] + data_1[m.start() - 7] + data_1[m.start() - 6] +
data_1[m.start() - 5]))
#----------------------------------------------------------------------------------------------------------------------#
if data_1[m.start() + 9] == '"':
time_1.append((data_1[m.start() + 8]))
elif data_1[m.start() + 10] == '"':
time_1.append((data_1[m.start() + 8] + data_1[m.start() + 9]))
else:
time_1.append((data_1[m.start() + 8] + data_1[m.start() + 9] + data_1[m.start() + 10]))
time_1.append(counter)
counter += 1
#----------------------------------------------------------------------------------------------------------------------#
# time_2 gets flat [xmin, xmax] pairs for every interval labelled "Vowel" in
# the script-generated TextGrid, again via fixed character offsets.
for m in re.finditer('"Vowel"', data_2):
    time_2.append(float(
        data_2[m.start() - 34] + data_2[m.start() - 33] + data_2[m.start() - 32] + data_2[m.start() - 31] +
        data_2[m.start() - 30] + data_2[m.start() - 29]))
    time_2.append(float(
        data_2[m.start() - 17] + data_2[m.start() - 16] + data_2[m.start() - 15] + data_2[m.start() - 14] +
        data_2[m.start() - 13] + data_2[m.start() - 12]))
#----------------------------------------------------------------------------------------------------------------------#
# listing = []
# print time_1
# print time_2
# for outer in range(0, len(time_2), 2):
# for inner in range(0, len(time_1), 4):
# if time_1[inner] <= time_2[outer] < time_1[inner + 1] and time_1[inner] < time_2[outer + 1] <= time_1[
# inner + 1]:
# listing.append(time_1[inner + 2])
# listing.append(time_1[inner + 3])
#
# for outer in range(0, len(time_2), 2):
# for inner in range(0, len(time_1) - 4, 4):
# if time_1[inner] < time_2[outer] < time_1[inner + 1] and time_1[inner + 4] < time_2[outer + 1] < time_1[
# inner + 5]:
# listing.append(time_1[inner + 2])
# listing.append(time_1[inner + 3])
# listing.append(time_1[inner + 6])
# listing.append(time_1[inner + 7])
#
# count = 0
# vowel_data = ['aa', 'AA', 'ae', 'aw', 'ay', 'ee', 'ex', 'ii', 'II', 'oo', 'OO', 'oy', 'uu', 'UU']
#
# already_here = []
# for vowel_sound in range(0, len(listing), 2):
# if listing[vowel_sound] in vowel_data and listing[vowel_sound + 1] not in already_here:
# count += 1
# already_here.append(listing[vowel_sound + 1])
#---------------------------------------------------------------------------------------------------------------------#
# --- Figure 1: waveform with syllable/vowel boundaries, plus raw short-term energy ---
plt.subplot(211)
plt.plot(x_values, data) # The Original Data
plt.xlim(0,x_values[-1]) # Limiting it to fixed range for representational purposes
for j in range(0, len(time_1), 4):
    plt.vlines(time_1[j], min(data)+0.30*min(data), max(data), 'black') # Syllable Boundaries
for j in range(2, len(time_1), 4):
    plt.text(time_1[j - 2], min(data)+0.28*min(data), time_1[j], fontsize=15, color='green', rotation=0) # Syllable Labels
for j in range(len(time_2)):
    plt.vlines(time_2[j], min(data), max(data), 'red') # Vowel Boundaries
for j in range(0, len(time_2), 2):
    plt.text(time_2[j], max(data), 'Vowel', fontsize=12, color='red') # Vowel Label
for j in range(0,len(time_2),2): # Bounding arrows for Vowel
    plt.arrow(time_2[j], max(data), (time_2[j + 1] - time_2[j])-0.01, 0, head_width=0.005, head_length=0.01,color='red')
    plt.arrow(time_2[j+1], max(data), -(time_2[j + 1] - time_2[j]) + 0.01, 0, head_width=0.005, head_length=0.01,color='red')
for j in range(0,len(time_1),4): # Bounding arrows for Syllable
    plt.arrow(time_1[j], min(data)+0.30*min(data), (time_1[j + 1] - time_1[j])-0.01, 0, head_width=0.005, head_length=0.01)
    plt.arrow(time_1[j+1], min(data)+0.30*min(data), -(time_1[j + 1] - time_1[j]) + 0.01, 0, head_width=0.005, head_length=0.01)
plt.xlabel('Time (In seconds)')
plt.ylabel('Amplitude')
plt.title('Sound Waveform',color='blue')
plt.subplot(212)
plt.plot(o_st_energy,color='black')
plt.xlim(0,len(o_st_energy))
plt.xlabel('No. of frames')
plt.ylabel('Normalised Magnitude')
plt.title('Short Term Energy')
plt.show()
#---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------#
# --- Figure 2: waveform again, plus smoothed vs raw energy with detected peaks/valleys ---
plt.subplot(211)
plt.plot(x_values, data) # The Original Data
plt.xlim(0,x_values[-1]) # Limiting it to fixed range for representational purposes
for j in range(0, len(time_1), 4):
    plt.vlines(time_1[j], min(data)+0.50*min(data), max(data), 'black') # Syllable Boundaries
for j in range(2, len(time_1), 4):
    plt.text(time_1[j - 2], min(data)+0.5*min(data), time_1[j], fontsize=15, color='green', rotation=0) # Syllable Label
for j in range(len(time_2)):
    plt.vlines(time_2[j], min(data), max(data), 'red') # Vowel Boundaries
for j in range(0, len(time_2), 2):
    plt.text(time_2[j], max(data), 'Vowel', fontsize=12, color='red') # Vowel Label
plt.subplot(212)
plt.plot(st_energy) # Smoothed Short term energy
plt.plot(o_st_energy) # Original Short term energy
for i in range(len(location_peak)):
    plt.scatter(location_peak[i], st_energy[location_peak[i]], color='red', label='Peak')
plt.scatter(ripple_valley, value_valley, color='green', label='Valley')
for j in range(len(location_peak)):
    # Annotate each peak with its ripple value, rounded for readability.
    plt.text(location_peak[j], st_energy[location_peak[j]], str(round(ripple_value[j], 2)))
for j in range(len(loc)):
    plt.vlines(loc[j], min(o_st_energy), max(o_st_energy), 'black') # Vowel Centres
plt.xlim(0,len(st_energy)) # Limiting it to fixed range for representational purposes
plt.show()
|
import numpy as np
from environment import *
class Maze(Environment):
    '''
    T-maze environment for an animat. Five Inputs:
        1. Home
        2. Junction
        3. Maze End
        4. Reward
        5. Bias (always 1.0)
    Every query adds Gaussian sensor noise and presents the input to the
    animat n_think times, keeping only the final output.
    '''
    def __init__(self, noise_std = 0.001, n_think = 3):
        self.n_input = 5            # size of the input vector
        self.noise_std = noise_std  # std-dev of the additive sensor noise
        self.n_think = n_think      # presentations per query (must be >= 1)
        self.debug = False          # print raw outputs when True
    def home(self, animat):
        """True when the animat signals 'home' (first output within +/- 1/3)."""
        out = self.thinking(animat, np.array([1.0, 0.0, 0.0, 0.0, 1.0]))
        if self.debug: print("MS:" + str(out[0]))
        return bool(abs(out[0]) <= 1/3)
    def corridor(self, animat):
        """True when the animat signals 'corridor' (first output within +/- 1/3)."""
        out = self.thinking(animat, np.array([0.0, 0.0, 0.0, 0.0, 1.0]))
        if self.debug: print("CO:" + str(out[0]))
        return bool(abs(out[0]) <= 1/3)
    def junction(self, animat):
        """Raw first output at a junction (the caller interprets the sign)."""
        out = self.thinking(animat, np.array([0.0, 1.0, 0.0, 0.0, 1.0]))
        if self.debug: print("JN:" + str(out[0]))
        return out[0]
    def maze_end(self, animat, reward):
        """Raw first output at the maze end, with `reward` fed to input 4."""
        out = self.thinking(animat, np.array([0.0, 0.0, 1.0, reward, 1.0]))
        if self.debug: print("ME:" + str(out[0]))
        return out[0]
    def thinking(self, animat, input):
        """Present `input` (plus fresh noise each time) n_think times and
        return the last output.

        The original built a throwaway list of all n_think outputs just to
        index [-1]; a plain loop keeps only the last. Requires n_think >= 1
        (same as before, where [-1] raised on the empty list).
        """
        out = None
        for _ in range(self.n_think):
            out = animat.perform(input + self.noise())
        return out
    def noise(self):
        """Gaussian sensor noise, one sample per input channel."""
        # Use n_input instead of a hard-coded 5 so the two stay consistent.
        return np.random.randn(self.n_input) * self.noise_std
if __name__ == '__main__':
    '''
    Some tests
    '''
    def assert_maze(condition, msg = "NG in 'maze.py'"):
        assert condition, msg
    # Stand-in animat: echoes a fixed output followed by the input it was
    # shown, and records how often / with what it was called.
    class Mock:
        def __init__(self):
            self.count = 0          # number of perform() calls
            self.out = 0.0          # value returned as output[0]
            self.cmp = np.zeros(5)  # expected input vector
            self.flag = False       # True when the last input matched cmp
        def perform(self, input):
            self.count += 1
            self.flag = np.array_equal(input, self.cmp)
            return np.concatenate((np.array([self.out]), input))
    # noise_std=0 so the inputs reaching the mock are exactly the templates.
    m = Maze(noise_std = 0.0, n_think = 3)
    animat = Mock()
    # tests for home
    animat.count = 0
    animat.out = 0.0
    animat.cmp = np.array([1.0, 0.0, 0.0, 0.0, 1.0])
    assert_maze(m.home(animat))
    animat.out = 1.0
    assert_maze(not m.home(animat))
    animat.out = -1.0
    assert_maze(not m.home(animat))
    # three home() calls x n_think=3 perform() calls each
    assert_maze(animat.count == 9)
    assert_maze(animat.flag)
    # tests for corridor
    animat.count = 0
    animat.out = 0.0
    animat.cmp = np.array([0.0, 0.0, 0.0, 0.0, 1.0])
    assert_maze(m.corridor(animat))
    animat.out = 1.0
    assert_maze(not m.corridor(animat))
    animat.out = -1.0
    assert_maze(not m.corridor(animat))
    assert_maze(animat.count == 9)
    assert_maze(animat.flag)
    # tests for junction
    animat.count = 0
    animat.out = 0.0
    animat.cmp = np.array([0.0, 1.0, 0.0, 0.0, 1.0])
    assert_maze(m.junction(animat) == 0.0)
    animat.out = 1.0
    assert_maze(m.junction(animat) == 1.0)
    animat.out = -1.0
    assert_maze(m.junction(animat) == -1.0)
    assert_maze(animat.count == 9)
    assert_maze(animat.flag)
    # tests for maze_end
    animat.count = 0
    animat.out = 0.0
    animat.cmp = np.array([0.0, 0.0, 1.0, 0.5, 1.0])
    assert_maze(m.maze_end(animat, 0.5) == 0.0)
    assert_maze(animat.count == 3)
    assert_maze(animat.flag)
    # flag must go False because the input no longer matches cmp
    animat.cmp = np.array([0.0, 1.0, 0.0, 0.5, 1.0])
    assert_maze(m.maze_end(animat, 0.0) == 0.0)
    assert_maze(not animat.flag)
|
import numpy as np
import scipy as sp
import scipy.interpolate
import math
from datetime import datetime
import matplotlib
import matplotlib.pyplot as plt
import json
# (x, value) lookup tables read from text files of "(x,value)" tokens.
atm_d, atm_p, bal_cd = [], [], [] # Data for atmosphere, cd
for entry in open("data/atm_d.txt").read().split(): # Make a copy of the data folder and correct the path if this doesn't work for you
    alt, density = entry.strip(")").strip("(").split(',')
    atm_d.append((np.float32(alt), np.float32(density)))
for entry in open("data/atm_p.txt").read().split():
    alt, pressure = entry.strip(")").strip("(").split(',')
    atm_p.append((np.float32(alt), np.float32(pressure)))
for entry in open("data/cd.txt").read().split():
    mach, cd_val = entry.strip(")").strip("(").split(',')
    bal_cd.append((np.float32(mach), np.float32(cd_val)))
mu = 398600441800000 # Constants for gravity, atmosphere (Earth GM, m^3/s^2)
earth_r = 6370000    # mean Earth radius, m
gamma = 1.4          # ratio of specific heats for air
a_drop, v_drop = 150000, 0 # Second stage parameters
m_stage = 11
# Linear interpolators; outside the tabulated range density/pressure clamp to
# (surface value, 0) and the drag coefficient clamps to its endpoint values.
rho = sp.interpolate.interp1d(*zip(*atm_d), copy = False, bounds_error = False, fill_value = (atm_d[0][1], 0))
p = sp.interpolate.interp1d(*zip(*atm_p), copy = False, bounds_error = False, fill_value = (atm_p[0][1], 0))
cd = sp.interpolate.interp1d(*zip(*bal_cd), copy = False, bounds_error = False, fill_value = (bal_cd[0][1], bal_cd[-1][1]))
def c(altitude):
    '''Local speed of sound at any altitude (0 where the atmosphere tables
    give zero density or pressure, i.e. effectively vacuum).'''
    density = rho(altitude)
    pressure = p(altitude)
    if density == 0 or pressure == 0:
        return 0
    return np.sqrt(gamma*pressure / density)
def grav(altitude):
    '''Local gravitational acceleration at the given altitude above the
    surface (inverse-square law from the Earth's centre).'''
    radius = earth_r + altitude
    return mu / (radius**2)
def bal_drag(altitude, velocity, mach, radius, deployed):
    '''Ballute drag force; zero before deployment or at Mach 0.'''
    if not deployed or mach == 0:
        return 0
    # D = 1/2 * A * cd(M) * rho(h) * v^2 with A = pi * r^2
    return (0.5 * np.pi * (radius**2) * cd(mach) * rho(altitude) * (velocity**2))
def simulate(N, max_t, Q_deploy, radius, mass, drop_altitude, drop_velocity, detailed = True):
    '''Numerical integration, fourth order Runge-Kutta (twice)

    Drops the stage from drop_altitude at drop_velocity and integrates until
    max_t or ground impact; the ballute deploys once dynamic pressure first
    reaches Q_deploy.

    N             -- number of time steps
    max_t         -- total simulated time, s
    Q_deploy      -- dynamic-pressure deployment threshold
    radius, mass  -- ballute radius / vehicle mass
    detailed      -- full histories when True, summary list when False

    When detailed, returns (alt, -vel, -mach, -acc, drag, Q, t_list,
    index nearest 1000 m altitude, index_deploy); index_deploy is None if the
    ballute never deployed (previously this case raised NameError).
    '''
    alt, vel = [drop_altitude], [drop_velocity]
    mach, acc, drag, Q, deployed = [vel[0]/c(alt[0])], [-grav(alt[0])], [0.5 * rho(alt[0]) * (vel[0]**2)], [0], False
    # BUGFIX: bind index_deploy up front; it used to be assigned only on
    # deployment, so the detailed return crashed if Q never reached Q_deploy.
    index_deploy = None
    t_list, dt = np.linspace(0, max_t-1, N), max_t/N
    for t in range(1, len(t_list)):
        if ((Q[-1] >= Q_deploy) and (deployed == False)):
            deployed = True
            index_deploy = t
        # RK4 coefficients for dv/dt = -g(h) + D(h, v, M)/m
        kv1 = -grav(alt[-1]) + bal_drag(alt[-1], vel[-1], vel[-1]/c(alt[-1]), radius, deployed)/mass
        kv2 = -grav(alt[-1] + (0.5*vel[-1]*dt)) + bal_drag(alt[-1] + (0.5*vel[-1]*dt), vel[-1] + (0.5*kv1*dt), (vel[-1] + (0.5*kv1*dt))/c(alt[-1] + (0.5*vel[-1]*dt)), radius, deployed)/mass
        kv3 = -grav(alt[-1] + (0.5*vel[-1]*dt)) + bal_drag(alt[-1] + (0.5*vel[-1]*dt), vel[-1] + (0.5*kv2*dt), (vel[-1] + (0.5*kv2*dt))/c(alt[-1] + (0.5*vel[-1]*dt)), radius, deployed)/mass
        kv4 = -grav(alt[-1] + (vel[-1]*dt)) + bal_drag(alt[-1] + (vel[-1]*dt), vel[-1] + (kv3*dt), (vel[-1] + (kv3*dt))/c(alt[-1] + (vel[-1]*dt)), radius, deployed)/mass
        # Calculates coefficients for Runge-Kutta. Runge-Kutta is a set of high-order numerical integration methods
        # Error with this method reduces with (number of steps)^4, unlike Euler where error reduction is proportional to number of steps
        vel.append(vel[-1] + (dt*((kv1 + (2*kv2) +(2*kv3) + kv4)/6))) # Compute the new velocity with above coefficients
        acc.append((vel[-1]-vel[-2])/dt) # Determine the acceleration from change in velocity for graphing
        #kx1 = vel[-2]
        #kx2 = vel[-2] + (kv1*dt)/2
        #kx3 = vel[-2] + (kv2*dt)/2
        #kx4 = vel[-2] + (kv3*dt)
        #alt.append(alt[-1] + (dt*((kx1 + (2*kx2) +(2*kx3) + kx4)/6)))
        # Can substitute the above "kx" coefficients for the kv coefficients,as below, improving performance
        alt.append(alt[-1] + (dt*vel[-2]) + ((dt**2)*(kv1 + kv2 + kv3)/6)) # Compute the new altitude with above coefficients
        Q.append(0.5 * rho(alt[-1]) * (vel[-1]**2))
        mach.append(vel[-1]/c(alt[-1]))
        drag.append(bal_drag(alt[-1], vel[-1], mach[-1], radius, deployed))
        if (alt[-1] < 0):
            # Ground impact: truncate the time axis to the samples produced.
            t_list = np.linspace(0, t_list[t], len(alt))
            break
    if (detailed == True):
        return alt, np.negative(vel), np.negative(mach), np.negative(acc), drag, Q, t_list, list(t_list).index(sp.interpolate.interp1d(alt, t_list, kind="nearest", copy = False, bounds_error = False, fill_value = max_t)(1000)), index_deploy
    else:
        return [max(np.negative(vel), max(list(map(abs, np.negative(acc))))), max(drag), max(Q), t_list[-1], vel[list(t_list).index(sp.interpolate.interp1d(alt, t_list, kind="nearest", copy = False, bounds_error = False, fill_value = max_t)(1000))]]
|
# Submission judging states.
#   0-3   : in-progress states
#   10-18 : final verdicts
#   20-24 : judger-side / system failures
JUDGE_STATUS_WAITING = 0 # waiting
JUDGE_STATUS_COMPILING = 1 # compiling
JUDGE_STATUS_JUDGING = 2 # judging
JUDGE_STATUS_INPROCESS = 3 # doing (but the judger did not return judging or compiling yet)
JUDGE_STATUS_AC = 10 # Answer Correct
JUDGE_STATUS_PC = 11 # Partly Correct
JUDGE_STATUS_TLE = 12 # Time Limit Exceed
JUDGE_STATUS_MLE = 13 # Memory Limit Exceed
JUDGE_STATUS_RE = 14 # Runtime Error
JUDGE_STATUS_WA = 15 # Wrong Answer
JUDGE_STATUS_UKE = 16 # Unknown Error
JUDGE_STATUS_CE = 17 # Compile Error
JUDGE_STATUS_OLE = 18 # Output Limit Exceed
JUDGE_STATUS_SE = 20 # System Error
JUDGE_STATUS_FE = 21 # File Error
JUDGE_STATUS_CFGE = 22 # Configuration Error
JUDGE_STATUS_JRLE = 23 # Judger Resource Limit Exceed
JUDGE_STATUS_UL = 24 # Unsupported Language
|
#!/home/minori/.pyenv/shims/python
# coding:utf-8
# Python 2 CGI bookmark app: stores title/URL pairs in SQLite and renders
# them through a simple HTML template.
import sqlite3
connnector = sqlite3.connect("sqlite_test.db")  # NOTE(review): opened but never used (and the name is a typo)
text_factory = str
from string import Template
from os import path
from httphandler import Request, Response#, get_htmltemplate
import cgitb; cgitb.enable()  # show tracebacks in the browser while debugging
con = sqlite3.connect('./bookmark.dat')
cur = con.cursor()
# Create the table on first run; "already exists" is swallowed on later runs.
# NOTE(review): the bare except also hides every other sqlite error.
try:
    cur.execute("""CREATE TABLE bookmark (title text,url text);""")
except:
    pass
req = Request()
f = req.form
value_dic = {'message':'', 'title':'', 'url':'', 'bookmarks':''}
if f.has_key('post'):
    # Reject submissions missing a title or URL; refill the form instead.
    if not f.getvalue('title', '') or not f.getvalue('url', ''):
        value_dic['message'] = u'タイトルとURLは必須項目です'
        value_dic['title'] = unicode(f.getvalue('title', ''), 'utf-8', 'ignore')
        value_dic['url'] = f.getvalue('url', '')
    else:
        # Parameterised INSERT (safe against SQL injection).
        sql = """INSERT into bookmark(title, url) VALUES(?, ?)"""
        cur.execute(sql,(f.getvalue('title', ''), f.getvalue('url', '')))
        con.commit()
# Added section: render the form template with the collected values.
res = Response()
f = open(path.join(path.dirname(__file__), 'bookmarkform.html'))
t = Template(unicode(f.read(), 'utf-8', 'ignore'))
body = t.substitute(value_dic)
res.set_body(body)
print res
|
import os
from . import celery, session
from .s3client import upload_video
from .models import Video
@celery.task(name="pipeline.upload")
def upload(video_id, local_path, path, field):
    """Upload a local video file to object storage, then point `field` on the
    Video row at its CDN URL. The local temp file is always removed.

    Returns "success", or an error string if the DB update failed.
    NOTE(review): a failure inside upload_video() itself propagates to celery
    before any cleanup runs -- confirm that is the intended retry behaviour.
    """
    upload_video(local_path, path)
    try:
        video = Video.query.get(video_id)
        video.update(
            **{field: f"https://pvlhead.ams3.cdn.digitaloceanspaces.com/{path}"}
        )
    except Exception as e:
        # Roll back the failed DB transaction and report the error to celery.
        session.rollback()
        return f"Uploading failed: {e}"
    finally:
        # Remove the temp file whether or not the DB update succeeded.
        if os.path.exists(local_path):
            os.remove(local_path)
    return "success"
|
# Read a CSV file and write selected columns out to another CSV.
import csv
from itertools import islice

# Open the source file (Shift-JIS encoded statistics CSV).
with open('gaku-mg1642.csv','r',encoding='shift-jis') as csvFile:
    dataReader = csv.reader(csvFile)
    # Keep columns 1 and 2 of every row from row 8 onward (skip 7 header rows).
    datalist = [[row[0], row[1]] for row in islice(dataReader, 7, None)]

# BUGFIX: newline='' is required for files handed to csv.writer; without it,
# the '\r\n' lineterminator gets translated to '\r\r\n' on Windows.
with open('sample.csv', 'w', newline='') as outFile:
    writer = csv.writer(outFile, lineterminator='\r\n')
    writer.writerows(datalist)
|
# -*- coding: utf-8 -*-
"""
Created on 08.01.2013 12:55:29
@author: Oleksandr Poliatykin
"""
import requests
# Fetch the page with a 1 s timeout, then dump status, content type,
# detected encoding and the response body.
r = requests.get('http://ya.ru', timeout=1)
print(r.status_code)
print(r.headers['content-type'])
print(r.encoding)
print(r.text)
|
# Sum of two constants (trailing semicolons dropped; not needed in Python).
a = 10
b = 30
c = a + b
|
# -*- coding: UTF-8 -*-
#Plugin to switch only between Talk or Off speech modes
#Author: Alberto Buffolino
import addonHandler
import globalPluginHandler
import speech
import msg
import time
import win32api , win32con
import SendKeys
import win32clipboard
addonHandler.initTranslation()
# SCRCAT_SPEECH exists only in newer NVDA versions; fall back to "no category".
try:
    from globalCommands import SCRCAT_SPEECH
except:
    SCRCAT_SPEECH = None
def mousepos():
    """Return the current mouse cursor position as an (x, y) tuple."""
    return win32api.GetCursorPos()
def click2(x,y):
    """Synthesise a double left-click at screen coordinates (x, y)."""
    for _ in range(2):
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
def getdata():
    """Return the current clipboard contents.

    BUGFIX: the clipboard is now released in a finally block; previously an
    exception in GetClipboardData left the clipboard open (system-wide lock).
    """
    win32clipboard.OpenClipboard()
    try:
        return win32clipboard.GetClipboardData()
    finally:
        win32clipboard.CloseClipboard()
def emptydata():
    """Clear the clipboard.

    BUGFIX: release the clipboard in a finally block so a failure in
    EmptyClipboard cannot leave it locked open.
    """
    win32clipboard.OpenClipboard()
    try:
        win32clipboard.EmptyClipboard()
    finally:
        win32clipboard.CloseClipboard()
def ending():
    """If the mouse has been still for 0.5 s, double-click under it, copy the
    selection (Ctrl+C), speak the clipboard text, then clear the clipboard.

    NOTE(review): Python 2 code -- `unicode(...)` with a Big5 decode assumes a
    Big5-encoded clipboard; confirm for the target system.
    """
    pos1 = mousepos()
    time.sleep(0.5)
    pos2 = mousepos()
    if pos1 == pos2 :
        click2(pos1[0],pos1[1])
        SendKeys.SendKeys("^(c)")
        ## SendKeys.SendKeys("{CAP}(c)")
        xx = unicode(getdata(),"big5")
        msg.message(xx)
        emptydata()
    else:
        pass
class GlobalPlugin(globalPluginHandler.GlobalPlugin):
    """NVDA global plugin exposing two Calibre-related scripts."""
    scriptCategory = SCRCAT_SPEECH
    def script_CalibreNvdaMode(self, gesture):
        # NOTE(review): this loop never terminates, so the script blocks its
        # thread forever once triggered -- confirm this is intentional.
        while True:
            ending()
    def script_Calibretest(self,guesture):
        msg.message("boom boom boom boom boom boom")
    # Input-help descriptions shown by NVDA for the two gestures.
    script_CalibreNvdaMode.__doc__ = _("FOR CALIBRE")
    script_Calibretest.__doc__ = _("for test")
    # Key bindings (name-mangled attribute read by NVDA).
    __gestures = {
        "kb:NVDA+h": "Calibretest",
        "kb:NVDA+g": "CalibreNvdaMode",
    }
|
from __future__ import print_function
# import keras
# from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.models import model_from_json
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping
import pandas as pd
# Training hyper-parameters (only used by the commented-out training code below).
epochs = 10
batch_size = 16
train_df = pd.read_excel('data_sets/ex2_data_set.xlsx')
test_df = pd.read_excel('data_sets/ex2_test_set.xlsx')
# Creates the test arrays ('children' is the regression target throughout)
test_X = test_df.drop(columns=['children'])
test_y = test_df[['children']]
#create a dataframe with all training data except the target column
train_X = train_df.drop(columns=['children'])
#create a dataframe with only the target column
train_y = train_df[['children']]
#get number of columns in training data
n_cols = train_X.shape[1]
# BEGIN - create model
# #create model
# model = Sequential()
#
#
# #add model layers
# model.add(Dense(256, activation='relu', input_shape=(n_cols,)))
# model.add(Dense(64, activation='relu'))
# model.add(Dense(10, activation='relu'))
# model.add(Dense(1, activation='linear'))
#
# #compile model using mse as a measure of model performance
# model.compile(optimizer='adam', loss='logcosh', metrics=['accuracy'])
#
# #set early stopping monitor so the model stops training when it won't improve anymore
# # early_stopping_monitor = EarlyStopping(monitor='loss', patience=.8)
# #train model
# model.fit(train_X, train_y,
# batch_size=batch_size,
# epochs=epochs,
# verbose=1,
# validation_data=(test_X, test_y),
# # callbacks=[early_stopping_monitor]
# )
#
# test_y_predictions = model.predict(test_X)
#
# # for i in range(test_y_predictions)
# # print("Predicted: ", test_y_predictions[i], " actual: ", test_y[i])
# print("Predicted children: ", test_y_predictions)
# print("Actual children: ", test_y[['children']])
# END - create model
# BEGIN - save json and create model
# # serialize model to JSON
# model_json = model.to_json()
# with open("model.json", "w") as json_file:
# json_file.write(model_json)
#
# # serialize weights to HDF5
# model.save_weights("model.h5")
# print("Saved model to disk")
# END - save json and create model
# You can run just the following part as the model was already trained and saved locally
# BEGIN - load json and create model
# Rebuild the network architecture from the saved JSON description.
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='logcosh', optimizer='adam', metrics=['accuracy'])
# score = loaded_model.evaluate(test_X, test_y, verbose=0)
# print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))
print(test_X)
# Single hand-crafted sample; column order must match the training features.
# test_data = {'age': [26], 'married_for': [0], 'income': [9], 'zone': [1]}
test_data = {'age': [28], 'married_for': [0], 'income': [6], 'zone': [2]}
df = pd.DataFrame(test_data)
print(df)
test_y_predictions = loaded_model.predict(df)
print("Predicted children: ", test_y_predictions)
print("Actual children: ", 0)
# print("Actual children: ", test_y[['children']])
# END - load json and create model
# batch_size = 128
# num_classes = 10
# epochs = 1
#
#
# model = Sequential()
# model.add(Dense(128, activation='relu', input_shape=(784,)))
# model.add(Dropout(0.2))
# model.add(Dense(128, activation='relu'))
# model.add(Dropout(0.2))
# model.add(Dense(num_classes, activation='softmax'))
#
# model.summary()
#
# model.compile(loss='categorical_crossentropy',
# optimizer=RMSprop(),
# metrics=['accuracy'])
#
# history = model.fit(x_train, y_train,
# batch_size=batch_size,
# epochs=epochs,
# verbose=1,
# validation_data=(x_test, y_test))
# score = model.evaluate(x_test, y_test, verbose=0)
# print('Test loss:', score[0])
# print('Test accuracy:', score[1]) |
class Card(object):
    """A playing card.

    Cards compare by rank first (2 low, ace high); equal ranks break ties by
    suit in the order spades < clubs < diamonds < hearts.
    """

    # Rank order (low to high) and suit order (low to high), shared by
    # validation and the comparison operators (previously duplicated inline).
    RANKS = "23456789TJQKA"
    SUITS = ("spades", "clubs", "diamonds", "hearts")
    # Display names for the non-numeric ranks. BUGFIX: "J" previously printed
    # as "Joker" (a different card); "A" had no entry and printed as bare "A".
    conversion_dict = {"T": "10", "J": "Jack", "Q": "Queen", "K": "King", "A": "Ace"}

    def __init__(self, rank, suit):
        """Create a card; raises ValueError for an invalid rank or suit.

        BUGFIX: the original check `rank in "23456789TJQKA"` was a substring
        test, so "" and multi-character strings such as "JQ" slipped through;
        validate exactly one character.
        """
        if len(rank) == 1 and rank in self.RANKS:
            self._rank = rank
        else:
            raise ValueError("Invalid rank")
        if suit in self.SUITS:
            self._suit = suit
        else:
            raise ValueError("Invalid suit")

    @property
    def rank(self):
        return self._rank

    @property
    def suit(self):
        return self._suit

    def __gt__(self, other):
        # Rank decides unless ranks are equal; then suit breaks the tie.
        # (Same net ordering as the original three-branch version.)
        if self._rank == other._rank:
            return self.SUITS.index(self._suit) > self.SUITS.index(other._suit)
        return self.RANKS.index(self._rank) > self.RANKS.index(other._rank)

    def __lt__(self, other):
        if self._rank == other._rank:
            return self.SUITS.index(self._suit) < self.SUITS.index(other._suit)
        return self.RANKS.index(self._rank) < self.RANKS.index(other._rank)

    def __eq__(self, other):
        # BUGFIX: return NotImplemented (not None) for non-Cards so Python
        # can fall back to the other operand's comparison.
        if not isinstance(other, Card):
            return NotImplemented
        return self._rank == other._rank and self._suit == other._suit

    def __hash__(self):
        # Defining __eq__ alone made Card unhashable; keep hash consistent
        # with equality so cards work in sets and as dict keys.
        return hash((self._rank, self._suit))

    def __str__(self):
        if self._rank in self.conversion_dict:
            return "{} of {}".format(self.conversion_dict[self._rank], self._suit)
        return "{} of {}".format(self._rank, self._suit)
|
# -*- coding: utf-8 -*-
__author__ = 'soroosh'
import logging
from suds.client import Client
logging.basicConfig(level=logging.INFO)
logging.debug('creating client started')
# Build a SOAP client from the remote service's WSDL.
# NOTE(review): Python 2 script (print statement below).
c = Client('http://services.yaser.ir/Quran/?wsdl')
logging.debug('client created')
logging.info(c)
# Fetch one random verse from the service and print it.
result = c.service.GetRandomAyeh()
print result
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Image(models.Model):
    """Uploaded image with an optional caption."""
    pic = models.ImageField(null=False, blank=False)  # the image file (required)
    text = models.TextField(blank=True, max_length=200)  # optional caption
    def get_absolute_url(self):
        # Canonical URL of this image's detail page.
        # NOTE(review): consider django.urls.reverse() if a named URL
        # pattern exists for this route.
        return "/images/%s/" % self.id
|
import boto3
import json
def get_all_instance_desc():
    """
    Function that lists and describes all instance info

    Walks every AWS region, extracts selected fields for each EC2 instance
    and prints the collection as indented JSON. Requires configured boto3
    credentials with ec2:Describe* permissions.
    """
    # Connecting to aws
    conn = boto3.client('ec2')
    # List all regions
    regions = [region['RegionName'] for region in conn.describe_regions()['Regions']]
    instance_info = []
    # check for elastic Ip
    elastic_check = False
    # Iterating through all regions
    for region in regions:
        conn = boto3.client('ec2', region_name=region)
        # Describing all instances
        response = conn.describe_instances()
        # Selecting all required keys from the function
        # NOTE(review): 'SubnetId' appears twice, and of these keys only
        # 'Reservations' exists at the top level of the response dict, so the
        # loop below effectively just extracts response['Reservations'].
        req_info = ['Reservations', 'Placement', 'PublicIpAddress', 'SubnetId', 'SubnetId']
        req_info_list = []
        req_info_list_refined = []
        # Getting the values of keys of req_info
        for k, v in response.items():
            for i in range(0, len(req_info)):
                if k == req_info[i]:
                    req_info_list.append(response[k])
        # Since its stored as a list unwrapping the list
        req_info_list = req_info_list[0]
        # Refining more to get only instance information
        for ins in req_info_list:
            more_filters = ['Instances']
            for k in ins.keys():
                for i in range(0, len(more_filters)):
                    if k == more_filters[i]:
                        req_info_list_refined.append(ins[k])
        # Refining instance info to get only select information
        # iterating throught the refined list
        for j in req_info_list_refined:
            temp = j
            # NOTE(review): only the first instance of each reservation is
            # inspected here.
            temp = temp[0]
            # information requried from each instance
            req_keys = ['ImageId', 'InstanceId', 'InstanceType', 'LaunchTime', 'Placement', 'PublicIpAddress',
                        'SubnetId', 'VpcId',
                        'NetworkInterfaces',
                        ]
            # Unwrapping the list
            instance_state = temp['State']
            instance_state = instance_state['Name']
            req_info = []
            for k in temp.keys():
                for i in range(len(req_keys)):
                    if k == req_keys[i]:
                        req_info.append(temp[k])
            # Removing Network interface info and only checking for elastic IP
            if instance_state == 'stopped':
                del req_info[-1]
                req_info.append("Instance stopped")
            elif instance_state == 'running':
                temp = req_info[-1]
                temp = temp[0]
                # NOTE(review): 'Association' is absent when the interface has
                # no public IP -- this line raises KeyError for such instances.
                temp = temp['Association']
                if temp['IpOwnerId'] == 'amazon':
                    elastic_check = False
                else:
                    elastic_check = True
                del req_info[-1]
                req_info.append(elastic_check)
            # Appending final info to the req list
            instance_info.append(req_info)
    ec2_instances = []
    req_keys = ['ImageId', 'InstanceId', 'InstanceType', 'LaunchTime', 'Placement', 'PublicIpAddress',
                'SubnetId', 'VpcId', 'Elastic IP']
    for i in instance_info:
        if len(i) == 8:
            # Pad stopped instances so every record zips against req_keys.
            i.insert(5, "Instance Stopped")
        # NOTE(review): this zip relies on the values having been collected
        # in the same order as req_keys -- verify the alignment on real data.
        dictionary_inst = dict(zip(req_keys,i))
        ec2_instances.append(dictionary_inst)
    final_dict_inst = {"Instances":ec2_instances}
    json_inst = json.dumps(final_dict_inst, indent=4,default=str)
    print(json_inst)


get_all_instance_desc()
|
import pandas as pd
import matplotlib.pylab as plt
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 6
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from datetime import datetime

# Walk-forward ARIMA(5,1,0) forecast of daily NO2 means: train on the first
# 80% of the series, predict one step at a time, and feed each observed value
# back into the training window.
# BUGFIX: pd.datetime was removed in pandas 2.0 -- use datetime.strptime.
dateparse = lambda dates: datetime.strptime(dates, '%Y-%m-%d')
datas = pd.read_csv('state Data/Washington.csv', header=0, usecols=['Date Local', 'NO2 Mean'], parse_dates=['Date Local'], index_col='Date Local', date_parser=dateparse)
print(datas)
dataValues = datas.values
train, test = dataValues[0:int(len(dataValues) * 0.80)], dataValues[int(len(dataValues) * 0.80):len(dataValues)]
trainingData = [x for x in train]
predictions = []
for item in range(len(test)):
    model = ARIMA(trainingData, order=(5,1,0))
    model_fit = model.fit(disp=0)
    output = model_fit.forecast()
    predicted = output[0]
    predictions.append(predicted)
    observation = test[item]
    trainingData.append(observation)
    print('predicted=%f, expected=%f' % (predicted, observation))
# BUGFIX: score the whole test set; the hard-coded test[0:960] crashed with a
# length mismatch whenever len(test) != 960.
error = mean_squared_error(test, predictions)
print('Test MSE: %.3f' % error)
plt.plot(test , label='Actual')
plt.title('ARIMA Predictions DC')
plt.ylabel('NO')
plt.xlabel('Number of Months')
plt.plot(predictions, color='red',label='Prediction')
plt.legend(loc='best')
# BUGFIX: save BEFORE show() -- show() clears the figure, so saving after it
# wrote an empty image.
plt.savefig('Test_MSE.png')
plt.show()
|
print("Do nothing!!!ASF")
# Repeat the message ten times; the loop index itself is unused.
for _ in range(1, 11):
    print("GIT IS AWERSOME!!!")
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 15:24:49 2019
@author: HP
"""
class Node:
    """Graph vertex: key `k`, memoised path count `path`, enumerated simple
    paths `ways`, and adjacency list `adj`."""
    def __init__(self, key):
        self.k = key      # vertex label
        self.path = None  # number of simple paths to the target (memo)
        self.ways = []    # the enumerated paths themselves, as node lists
        self.adj = []     # outgoing neighbours
    def insert(self, node):
        """Add a directed edge self -> node."""
        self.adj.append(node)
class Graph:
    """Edge-list graph: `vertices` holds Node objects, `edges` holds
    [u, v, weight] triples (weight defaults to None)."""
    def __init__(self):
        self.vertices = []
        self.edges = []
    def add_edge(self, u, v, weight=None):
        """Record a (possibly weighted) directed edge from u to v."""
        self.edges.append([u, v, weight])
# Example DAG as an adjacency dict; keys are consecutive letters from 'a',
# so a node's index in g.vertices is ord(key) - ord('a').
gr = { 'a' : ['b','c','d'], 'b' : ['c','d'], 'c' : ['d'], 'd' : [] }
g=Graph()
# One Node per key, in dict insertion order.
for key in gr:
    new_node=Node(key)
    g.vertices.append(new_node)
# Wire up both the per-node adjacency lists and the graph's edge list.
for i in g.vertices:
    val=i.k
    for j in gr[val]:
        ind=ord(j)-ord('a')
        i.insert(g.vertices[ind])
        g.add_edge(i,g.vertices[ind])
def simple_path(u,v):
    """Count simple paths from u to v in a DAG, memoising the count in
    u.path and collecting the actual paths (lists of nodes) in u.ways.

    NOTE(review): results are cached on the nodes themselves, so a graph can
    only be queried for one target v; reuse requires resetting path/ways.
    """
    if u==v:
        return 1  # reached the target: exactly one (empty-suffix) path
    elif u.path!=None:
        return u.path  # already counted from this node
    else:
        u.path=0
        for w in u.adj:
            c=simple_path(w,v)
            u.path=u.path+c
            if c!=0:
                # Prepend u to every path recorded at the neighbour.
                for way in w.ways:
                    new_way=[]
                    new_way=new_way+way
                    new_way.insert(0,u)
                    u.ways.append(new_way)
        return u.path
def paths(u,v):
    """Print every simple path from u to v (one node key per line, a spacer
    line between paths) and return the number of such paths."""
    way=[]
    way.insert(0,v)
    v.ways.append(way)  # seed: the target's only "path" is itself
    d=simple_path(u,v)
    for way in u.ways:
        for node in way:
            print(node.k)
        print(' ')
    return d
print(paths(g.vertices[0],g.vertices[3]))
|
from scipy.misc import imread
from scipy.misc import imresize
def preprocess_input(images):
    """Scale pixel values from [0, 255] down to [0, 1]."""
    return images / 255.0
def _imread(image_name):
    # Thin wrapper so callers don't depend on scipy directly.
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 -- migrate to
    # imageio.imread (or PIL) when upgrading.
    return imread(image_name)
def _imresize(image_array, size):
    # NOTE(review): scipy.misc.imresize was likewise removed in SciPy 1.2.
    return imresize(image_array, size)
def get_class_to_arg(dataset_name):
    """Return the class-name -> label mapping for a dataset.

    Raises ValueError for unknown dataset names instead of silently falling
    through with an implicit None (which caused confusing TypeErrors later).
    """
    if dataset_name == 'german_open_2017':
        # 0 means fruit, 1 container
        return {'apple':0, 'pear':0, 'paper_bag':1}
    raise ValueError('Unknown dataset name: ' + dataset_name)
|
from django.shortcuts import render
# Create your views here.
def primeravista(request):
    """Render the user-list template (no extra context data)."""
    return render(request,'usuario/listarusuario.html')
import arcade
from models import World, Tower, Bullet
SCREEN_WIDTH = 750
SCREEN_HEIGHT = 550
class Model(arcade.Sprite):
    """Sprite that mirrors the position/angle of a separate model object.

    The model is supplied via the keyword argument `model`; any remaining
    arguments go to arcade.Sprite.
    """
    def __init__(self, *args, **kwargs):
        self.model = kwargs.pop('model', None)
        super().__init__(*args, **kwargs)
    def sync_with_model(self):
        # Copy position and heading from the attached model, if any.
        if self.model:
            self.set_position(self.model.x, self.model.y)
            self.angle = self.model.angle
    def draw(self):
        # Sync before every draw so the sprite tracks its model each frame.
        self.sync_with_model()
        super().draw()
class TowerDefense(arcade.Window):
    """Main game window.

    NOTE(review): several attributes used below are never created and some
    names are inconsistent, so this class cannot run as written:
      * self.all_sprites_list and self.monster_list are used but never
        initialised (only self.bullet_list is);
      * the sprite loops append to self.monster_sprites although the
        attributes created are self.monster_sprite / self.bullet_sprite, via
        an undefined ModelSprite class (the wrapper here is named Model);
      * on_draw() calls self.monster_sprite.draw() (a list) and
        self.bullet.draw() (never assigned).
    """
    def __init__(self, width, height):
        super().__init__(width, height, title="Tower Defense")
        self.bullet_list = arcade.SpriteList()
        arcade.set_background_color(arcade.color.WHITE)
        self.text_angle = 0
        self.time_elapsed = 0
        self.money = 0
        self.world = World(width, height)
        self.tower_sprite = Model("assets/Tower1.png", model=self.world.tower)
        self.monster_sprite = []
        for monster in self.world.monsters:
            self.monster_sprites.append(ModelSprite("assets/Monster_easy1.png", scale=0.5, model=monster))
        self.bullet_sprite = []
        for bullet in self.world.bullets:
            self.monster_sprites.append(ModelSprite("assets/bulletOne.png", scale=0.5, model=bullet))
        for i in range(100):
            self.monster = arcade.Sprite("assets/Monster_easy1.png")
            self.monster.center_x = 50
            self.monster.center_y = 175
            self.all_sprites_list.append(self.monster)
            self.monster_list.append(self.monster)
        bullet = Bullet("assets/bulletOne.png", 0.5 * 1.5)
        bullet.center_x = self.tower_sprite.center_x
        bullet.center_y = self.tower_sprite.center_y
        self.all_sprites_list.append(bullet)
        self.bullet_list.append(bullet)
        ##self.tower = Tower(300, 400)
        ##self.tower_sprite = arcade.Sprite("assets/Tower1.png")
        ##self.bulletOne = Bullet(165, 175)
        ##self.bulletOne_sprite = arcade.Sprite("assets/bulletOne.png")
        self.gameBoard = arcade.Sprite("assets/gameBoard.png")
        self.gameBoard.set_position(350, 300)
    def animate(self, delta):
        # Advance the world model, then resolve bullet/monster collisions.
        self.world.animate(delta)
        self.all_sprites_list.update()
        for bullet in self.bullet_list:
            hit_list = arcade.check_for_collision_with_list(bullet, self.monster_list)
            if len(hit_list) > 0:
                bullet.kill()
                # NOTE(review): kills EVERY monster on any hit, not just the
                # ones in hit_list -- confirm whether that is intended.
                for monster in self.monster_list:
                    monster.kill()
                    self.money += 20
        ## if bullet.bottom > SCREEN_HEIGHT:
        ##     bullet.kill()
    def on_draw(self):
        arcade.start_render()
        self.all_sprites_list.draw()
        self.gameBoard.draw()
        self.tower_sprite.draw()
        self.monster_sprite.draw()
        self.bullet.draw()
        for sprite in self.monster_sprites:
            sprite.draw()
        arcade.draw_text("Time: {:5.1f}".format(self.time_elapsed), 500, 30, arcade.color.BLACK, 20)
        arcade.draw_text("MONEY: {:5d}".format(self.money), 100, 75, arcade.color.YELLOW)
        ##if self.time_elapsed % 5 == 0:
        ##    self.sprite_list.append(self.easyMonster)
        # NOTE(review): mutating game state inside on_draw ties income to the
        # frame rate; this belongs in animate().
        self.money += 10
##class Tower:
class Monster:
    """Monster that walks a fixed path across the game board, one step per
    animate() call.

    NOTE(review): plain data class -- self.x / self.y must be set by the
    caller before animate() is used; there is no __init__.
    """
    def animate(self, delta):
        # BUGFIX: the original signature was `def animate(delta)` -- `self`
        # was missing, so every attribute access raised NameError. `delta`
        # is kept for interface compatibility but unused: movement is a
        # fixed step per call.
        if self.x <= 800:
            self.x += 5
            print(self.x, self.y)
        # Turn points along the board path (coordinates are board pixels).
        if self.x >= 175 and self.y < 265:
            self.y += 5
            self.x = 175
            print(self.x, self.y)
        if self.x > 265 and self.y != 365:
            self.y += 5
            self.x = 270
            print(self.x, self.y)
        if self.x <= 270 and self.x > 120 and self.y == 365:
            self.x -= 10
            print(self.x, self.y)
        if self.x > 120 and self.y > 365 and self.y <= 450 :
            self.y += 5
            self.x = 120
class Bullet(arcade.Sprite):
    """Projectile sprite; drifts 5 px up and to the right per update.

    NOTE(review): this shadows the Bullet imported from `models` at the top
    of the file -- confirm which one the game is meant to use.
    """
    def update(self):
        self.center_x += 5
        self.center_y += 5
if __name__ == '__main__':
    # Create the game window and hand control to arcade's event loop.
    window = TowerDefense(SCREEN_WIDTH, SCREEN_HEIGHT)
    arcade.run()
|
def Soma(a):
    """Return the sum of the integers 0 .. a-1 (0 when a <= 0)."""
    return sum(range(a))
# Demo: 0 + 1 + 2 + 3 = 6
s = Soma(4)
print(s)
# i = int(input("Início"))
# f = int(input("Fim"))
#
# for i in range(1, f+1,1):
#     print(i)
|
# Coffee-recipe calculator: per cup it takes 200 ml water, 50 ml milk and
# 15 g coffee beans.
num_cups = int(input("How many cups of coffee do you need?"))
water_in_ml = 200 * num_cups
milk_in_ml = 50 * num_cups
coffee_beans_in_gram = 15 * num_cups
# BUGFIX: the original output read "For 3cups of coffe" -- missing space
# after the count and a typo in "coffee". f-strings replace the manual
# str() + concatenation.
print(f"For {num_cups} cups of coffee, you need:")
print(f"{water_in_ml} ml of water,")
print(f"{milk_in_ml} ml of milk, and")
print(f"{coffee_beans_in_gram} grams of coffee beans.")
|
# --- stdin helpers for competitive programming ---
def getN():
    """Read one int from a line of input."""
    return int(input())
def getNM():
    """Read a line of space-separated ints as a map iterator."""
    return map(int, input().split())
def getList():
    """Read a line of space-separated ints into a list."""
    return list(map(int, input().split()))
def getArray(intn):
    """Read `intn` lines, one int per line."""
    return [int(input()) for i in range(intn)]
def input():
    # Shadows the builtin on purpose: all helpers above then read via
    # sys.stdin.readline (faster than builtin input for bulk reads).
    return sys.stdin.readline().rstrip()
def rand_N(ran1, ran2):
return random.randint(ran1, ran2)
def rand_List(ran1, ran2, rantime):
    """Return ``rantime`` uniform random integers from [ran1, ran2]."""
    return [random.randint(ran1, ran2) for _ in range(rantime)]
def rand_ints_nodup(ran1, ran2, rantime):
    """Return ``rantime`` distinct random ints from [ran1, ran2], sorted.

    Uses rejection sampling, exactly like the original, but accumulates
    into a set so the duplicate check is O(1).
    """
    picked = set()
    while len(picked) < rantime:
        picked.add(random.randint(ran1, ran2))
    return sorted(picked)
def rand_query(ran1, ran2, rantime):
    """Return ``rantime`` distinct random [l, r] pairs (l < r), sorted.

    Each pair is produced by rand_ints_nodup(ran1, ran2, 2); duplicates are
    rejected until enough distinct pairs exist.
    """
    queries = []
    while len(queries) < rantime:
        pair = rand_ints_nodup(ran1, ran2, 2)
        if pair not in queries:
            queries.append(pair)
    return sorted(queries)
from sys import exit
import sys
sys.setrecursionlimit(1000000000)
mod = 10 ** 9 + 7
dx = [1, 0, -1, 0]
dy = [0, 1, 0, -1]
#############
# Main Code #
#############
N = 4
inf = float('inf')
# Directed distance matrix; inf means "no edge".
d = [
    [0, 2, inf, inf],
    [inf, 0, 3, 9],
    [1, inf, 0, 6],
    [inf, inf, 4, 0]
]
# dp[mask][v]: cheapest cost to finish the tour given `mask` already visited
# and currently standing at v (-1 = not computed yet).
dp = [[-1] * N for _ in range(1 << N)]
def rec(s, v, dp):
    """Held-Karp TSP: minimal cost to visit the rest and return to node 0."""
    if dp[s][v] >= 0:
        return dp[s][v]
    # Everything visited and back at the start: tour complete.
    if s == (1 << N) - 1 and v == 0:
        dp[s][v] = 0
        return 0
    best = inf
    for u in range(N):
        if not s & (1 << u):
            best = min(best, rec(s | (1 << u), u, dp) + d[v][u])
    dp[s][v] = best
    return best
# Starting from 0 or 1 traverses the same cycle, so the answer is the same.
print(rec(0, 0, dp))
# ABC054 C - One-stroke Path: count Hamiltonian paths starting at vertex 0
# via BFS over (visited-mask, current-vertex) states.
# Fix: deque was used below but never imported anywhere in this file.
from collections import deque
N, M = getNM()
dist = [[] for i in range(N + 1)]
for i in range(M):
    a, b = getNM()
    # Store the undirected edge in 0-indexed form.
    dist[a - 1].append(b - 1)
    dist[b - 1].append(a - 1)
cnt = 0
pos = deque([[1 << 0, 0]])
while len(pos) > 0:
    s, v = pos.popleft()
    # Full mask means every vertex was visited exactly once.
    if s == (1 << N) - 1:
        cnt += 1
    for u in dist[v]:
        if s & (1 << u):
            continue
        pos.append([s | (1 << u), u])
print(cnt)
# Build the K x K distance matrix beforehand.
def counter(sta, K, G):
    """Cheapest Hamiltonian path over K nodes starting at ``sta``.

    G is a K x K cost matrix; returns the minimum cost of visiting every
    node exactly once (the end node is unconstrained).
    """
    INF = float('inf')
    # dp[mask][cur]: cheapest cost having visited `mask`, standing at `cur`.
    dp = [[INF] * K for _ in range(1 << K)]
    dp[1 << sta][sta] = 0
    for mask in range(1, 1 << K):
        # Any reachable mask must contain the start node.
        if not mask & (1 << sta):
            continue
        for cur in range(K):
            # Skip positions we have not actually visited in this mask.
            if not mask & (1 << cur):
                continue
            for nxt in range(K):
                # Extend to a node not yet visited.
                if mask & (1 << nxt):
                    continue
                cand = dp[mask][cur] + G[cur][nxt]
                if cand < dp[mask | (1 << nxt)][nxt]:
                    dp[mask | (1 << nxt)][nxt] = cand
    return min(dp[-1])
# Variant that may start from any node.
# NOTE: this redefines (shadows) the fixed-start counter defined just above.
def counter(K, G):
    """Cheapest Hamiltonian path over K nodes, free choice of start node."""
    INF = float('inf')
    # dp[mask][cur]: cheapest cost having visited `mask`, standing at `cur`.
    dp = [[INF] * K for _ in range(1 << K)]
    # Seed every single-node mask: any node may begin the path at cost 0.
    for start in range(K):
        dp[1 << start][start] = 0
    for mask in range(1, 1 << K):
        for cur in range(K):
            # Skip positions not visited in this mask.
            if not mask & (1 << cur):
                continue
            for nxt in range(K):
                # Extend to a node not yet visited.
                if mask & (1 << nxt):
                    continue
                cand = dp[mask][cur] + G[cur][nxt]
                if cand < dp[mask | (1 << nxt)][nxt]:
                    dp[mask | (1 << nxt)][nxt] = cand
    return min(dp[-1])
# ARC056 C - Dividing into departments (bitmask DP over subset partitions).
N, K = getNM()
V = [getList() for i in range(N)]
# Half the total pair-weight; subtracted at the end to normalize the score.
diff = sum([sum(v) for v in V]) // 2
dp = [K] * (1 << N)  # base score K per group ("eigenvalue" k)
dp[0] = 0
# For every subset, add the pairwise weights of all member pairs.
for bit in range(1 << N):
    o = [i for i in range(N) if bit & (1 << i)]
    n = len(o)
    for i in range(n):
        for j in range(i + 1, n):
            dp[bit] += V[o[i]][o[j]]
# Then split each subset into two groups, taking the best partition.
# The j loop enumerates all non-empty sub-masks of `bit`.
for bit in range(1 << N):
    j = bit  # e.g. 1010 (decimal 10)
    while j:
        # Combine the sub-mask with its complement within `bit`.
        dp[bit] = max(dp[bit], dp[j] + dp[bit ^ j])
        j -= 1   # 1010 -> 1001: decrement to move to the next candidate
        j &= bit # 1001 & 1010 -> 1000: keep only bits inside `bit`
print(dp[-1] - diff)
# For every index whose flag is NOT set, report the nearest set flag(s).
def close(bit, n):
    """For each index i in [0, n), list the nearest set-bit indices of ``bit``.

    An index whose bit is set gets an empty list. An unset index collects the
    closest set index to its left (appended first, if one exists) and the
    closest set index to its right (if one exists).
    """
    nearest = [[] for _ in range(n)]
    last_set = -1   # most recent set index seen while scanning left-to-right
    pending = []    # unset indices still waiting for a right-hand neighbor
    for i in range(n):
        if bit & (1 << i):
            last_set = i
            # Resolve the right-hand neighbor for all pending indices.
            while pending:
                nearest[pending.pop()].append(last_set)
        else:
            # Left-hand neighbor, if any set bit was seen already.
            if last_set >= 0:
                nearest[i].append(last_set)
            pending.append(i)
    return nearest
|
import socket, sys

# Target host: first CLI argument, normalized to a "www." host on port 80.
t_host = sys.argv[1]
t_port = 80
if "www." not in sys.argv[1]:
    t_host = "www."+sys.argv[1]
if t_host is not None:
    # Create an IPv4 (AF_INET) TCP (SOCK_STREAM) socket.
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((t_host, t_port))
    # Send a minimal HTTP/1.1 GET request.
    # Fix: the request bytes were assigned to `eq` but sent as `req`,
    # which raised NameError at runtime.
    req = str.encode("GET / HTTP/1.1\r\nHost: "+t_host+"\r\n\r\n")
    client.send(req)
    resp = client.recv(4096)
    print(resp.decode(errors="ignore"))
|
#B
# A string is a palindrome iff it equals its own reverse.
N5 = input()
print("yes" if N5 == N5[::-1] else "no")
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import math
import random
import gc
import platform
np.random.seed(123)
sysstr = platform.system()
print(sysstr)
# Import the FIFO queue module under one name on both Python 3 and Python 2.
# Fix: the original only handled 'Windows' and 'Darwin' platform strings,
# leaving `Queue` undefined (NameError) on Linux.
try:
    import queue as Queue  # Python 3
except ImportError:
    import Queue  # Python 2
# --- Simulation state for a two-link scheduling simulation ---
qm = [0 for col in range(2)]  # per-link FIFO of packet arrival timestamps (replaced by Queue objects below)
Ym = [0 for col in range(2)]  # per-link virtual queues driving admission control
count = 50000                 # number of simulated time slots
V = 1000                      # utility/backlog trade-off parameter
k = [1.0, 1.0]                # per-link delay weights
# alpha = 10000.0
Rm = [0 for col in range(2)]  # per-link admission decision (1 = admit arrivals this slot)
alp = 0.9
for n in range(0, 2):
    qm[n] = Queue.Queue(maxsize=count)
maxvalue = 0.0
max_last = 0.0
r_keep = [0.0 for col in range(2)]  # best rate pair found by the grid search
r = [0.0 for col in range(2)]       # grid-search iterate
channel_lambda = [0.3, 0.6]         # per-link channel ON probability
arrival_lambda = [0.5, 0.7]         # NOTE(review): declared but apparently unused below — confirm
channel = [0 for col in range(2)]   # per-slot channel state (1 = ON)
schedule = -1                       # index of the link scheduled this slot
delay_every_step = [[0 for col in range(count)] for row in range(2)]  # recorded HOL delay per slot
tran_count = [0 for col in range(2)]  # packets transmitted per link
arrival = [0 for col in range(2)]     # packets admitted per link
max_delay = [0 for col in range(2)]   # worst per-packet delay observed per link
def Channel():
    """Draw this slot's ON/OFF state for each link (Bernoulli(channel_lambda))."""
    for n in range(0, 2):
        tmp = np.random.binomial(1, channel_lambda[n], 1)
        channel[n] = tmp[0]
def Arrival(i):
    """Admit one packet (stamped with slot index ``i``) on each admitting link.

    A packet is enqueued only when the link's admission flag Rm[m] is set and
    its queue still has capacity.
    """
    for m in range(0, 2):
        if Rm[m] == 1 and qm[m].full() is False:
            qm[m].put(i)
            arrival[m] += 1
# def Clear():
# for m in range(0, 2):
# while qm[m].empty() is False:
# qm[m].get()
# Main simulation loop: one iteration per time slot.
for i in range(0, count):
    maxvalue = -100000000.0
    max_last = -100000000.0
    for m in range(0, 2):
        r_keep[m] = 0.0
    # Grid search (step 0.025) over admission rates (r[0], r[1]) maximizing
    # V * sum(log-utility) - Ym . r  (drift-plus-penalty objective).
    r[0] = -1.0
    while r[0] <= 1.0:
        r[1] = -1.0
        while r[1] <= 1.0 and (r[0] + r[1]) <= 2.0:
            # Utility is log(1 + r) for non-negative rates, linear otherwise.
            if r[0] < 0:
                tmp1 = r[0]
            else:
                tmp1 = math.log(1 + r[0])
            if r[1] < 0:
                tmp2 = r[1]
            else:
                tmp2 = math.log(1 + r[1])
            tmp3 = V * (tmp1 + tmp2)
            tmp4 = Ym[0] * r[0] + Ym[1] * r[1]
            maxvalue = tmp3 - tmp4
            if maxvalue > max_last:
                max_last = maxvalue
                r_keep[0] = r[0]
                r_keep[1] = r[1]
            r[1] += 0.025
        r[0] += 0.025
    # Draw this slot's channel states.
    Channel()
    # Channel-weighted head-of-line (HOL) delay per link; queue entries hold
    # arrival slot indices, so (i - head) is the head packet's waiting time.
    if qm[0].empty() is False:
        # tmp1 = qm[0].qsize() * channel[0]
        tmp1 = (i - qm[0].queue[0]) * channel[0]
    else:
        tmp1 = 0
    if qm[1].empty() is False:
        # tmp2 = qm[1].qsize() * channel[1]
        tmp2 = (i - qm[1].queue[0]) * channel[1]
    else:
        tmp2 = 0
    # Max-weight scheduling; ties broken uniformly at random.
    if tmp1 > tmp2:
        schedule = 0
    elif tmp1 == tmp2:
        tmp = [0, 1]
        tmp3 = random.sample(tmp, 1)
        schedule = tmp3[0]
    else:
        schedule = 1
    # Admission flags: admit while the real queue is below the virtual queue.
    for m in range(0, 2):
        tmp1 = qm[m].qsize()
        tmp = float(tmp1)
        if tmp < Ym[m]:
            Rm[m] = 1
        else:
            Rm[m] = 0
    # Serve the scheduled link if it has a packet and its channel is ON.
    if qm[schedule].empty() is False and channel[schedule] == 1:
        tmp = i - qm[schedule].get()
        tran_count[schedule] += 1
        if tmp > max_delay[schedule]:
            max_delay[schedule] = tmp
    # print(qm[0].qsize(), qm[1].qsize())
    # Record the weighted HOL delay for plotting.
    for m in range(0, 2):
        if qm[m].empty() is False:
            delay_every_step[m][i] = (i - qm[m].queue[0]) * k[m]
        else:
            delay_every_step[m][i] = 0.0
    # Virtual-queue update: Ym <- max(Ym - Rm + r_keep, 0).
    for m in range(0, 2):
        tmp = Ym[m] - Rm[m] + r_keep[m]
        if tmp > 0:
            Ym[m] = tmp
        else:
            Ym[m] = 0
    # New arrivals for the next slot.
    Arrival(i)
print(arrival)
# Plot the recorded HOL delays for both links over the whole run.
plt.figure(1)
x = np.linspace(0, count, count)
plt.xlabel('Time')
plt.ylabel('HOL delay')
plt.plot(x, delay_every_step[0], label='HOL delay, link 0, CL-HOL-MW')
plt.plot(x, delay_every_step[1], label='HOL delay, link 1, CL-HOL-MW')
plt.legend(loc='lower right')
plt.show()
# Release the large simulation state explicitly.
del count, V, maxvalue, max_last, r_keep, r
del channel, schedule, channel_lambda
del tran_count, arrival_lambda
gc.collect()
|
from flask import Flask
# Fix: the flask.ext.* namespace was removed in Flask 1.0; extensions are
# imported under their own package name now.
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_pyfile('../includes/flask.cfg', silent=True)
db = SQLAlchemy(app)
# Imported last (circular-import pattern): views/models need `app`/`db` above.
from mapyourcity import views, db, models
from flask import Blueprint
from flask_restful import Api
# Blueprint for the PM API, versioned under /pms/pm/v1.0.
pm = Blueprint("pm_view", __name__, url_prefix="/pms/pm/v1.0")
# pm_api = Api(pm)
# Imported last so route handlers can register on the `pm` blueprint above.
from .pm import *
|
import paho.mqtt.client as mqtt
from pyfirmata import Arduino,util
board = Arduino("/dev/ttyACM0")
led = board.get_pin('d:13:o')
fan = board.get_pin('d:12:o')
class IoT():
    """MQTT-driven controller for a fan and a light wired to an Arduino.

    Class attributes hold the last commanded state; MQTT callbacks write the
    hardware pins (module-level ``led`` / ``fan``) and then re-run __init__
    to record the new state.
    """
    new_fan = 0    # last commanded fan state ('a'/'b' payload once updated)
    new_light = 0  # last commanded light state
    def __init__(self, fan=0, light=0):
        # NOTE(review): parameters shadow the module-level pin objects here;
        # the pins are still reachable inside the on_message_* methods.
        self.fan = fan
        self.light = light
##########fan##########
##        if self.new_fan=='on':
##            print(client.publish("/Subscribe",'Fan on'))
##        elif self.new_fan=='off':
##            print(client.publish("/Subscribe",'Fan off'))
##        ##########Light##########
##        if self.new_light=='on':
##            print(client.publish("/Subscribe",'Light on'))
##        elif self.new_light=='off':
##            print(client.publish("/Subscribe",'Light off'))
##
##
##
##        print('fan state:',self.new_fan,'--- light state:',self.new_light)
##
        # Record only non-zero (actually commanded) states.
        if self.fan != 0:
            self.new_fan = fan
        if self.light != 0:
            self.new_light = light
        pass
    def on_connect(self, client, userdata, flags, rc):
        """MQTT connect callback: subscribe to both command topic trees."""
        print('Connected with result code:'+str(rc))
        client.subscribe('Publish/#')
        client.subscribe('Publish1/#')
    def on_message_light(self, client, userdata, msg):
        """Handle a light command: payload 'a' switches on, 'b' switches off."""
        command = str(msg.payload.decode('utf-8'))
        print(command,'Now light is: ')
        if command == 'a':
            led.write(1)
            print('on')
        elif command == 'b':
            led.write(0)
            print('off')
        # Re-run __init__ to store the latest light command.
        self.__init__(light=str(msg.payload.decode('utf-8')))
        pass
    def on_message_fan(self, client, userdata, msg):
        """Handle a fan command: payload 'a' switches on, 'b' switches off."""
        command = str(msg.payload.decode('utf-8'))
        print(command,'Now fan is: ')
        if command == 'a':
            fan.write(1)
            print('on')
        elif command == 'b':
            fan.write(0)
            print('off')
        # Re-run __init__ to store the latest fan command.
        self.__init__(fan=str(msg.payload.decode('utf-8')))
        pass
# Fix: the guard was mangled as `_name__=='__main_'`, which raised NameError
# (and would never match the real dunder anyway).
if __name__ == '__main__':
    # Fix: the original bound the instance to the name `IoT`, shadowing the
    # class; use a distinct lowercase name.
    iot = IoT()
    client = mqtt.Client()
    client.on_connect = iot.on_connect
    client.message_callback_add('Publish/#', iot.on_message_light)
    client.message_callback_add('Publish1/#', iot.on_message_fan)
    # Fix: credentials must be set BEFORE connect() so the CONNECT handshake
    # carries them (paho-mqtt reads them at connect time).
    client.username_pw_set("xilebdfu", "MknOzEMGsFs0")
    client.connect('m16.cloudmqtt.com', 12939, 60)
    client.loop_forever()
|
from QueenHeuristic import QueenHeuristic
from StatesCreator import StatesCreator
class ChessQueenWorld(object):
    """Min-conflicts solver for the N-queens board.

    NOTE(review): this is Python 2 source. The recurring idiom
    ``print('fmt %d') % value`` is the Python 2 print *statement* applied to
    an already-formatted string; under Python 3 it is a TypeError. The same
    goes for ``print row`` and ``dict.iteritems()`` below.
    """
    def __init__(self):
        # Heuristic object
        self.heuristic = QueenHeuristic()
        # States creator object
        self.states_creator = StatesCreator()
        # Max. number of iterations for searching a solution.
        self.max_iter_count = 500
    def solve_random_board(self):
        """Generate a random start board and try to solve it.

        Returns (solution_path or False, start_state, total_steps).
        """
        gen_state = self.states_creator.generate_random_start_state()
        (sol_path, total_steps) = self.solve_given_board(gen_state)
        solution = sol_path if sol_path else False
        return solution, gen_state, total_steps
    def solve_sample_board(self, num):
        """Solve bundled sample board ``num`` (the result is discarded)."""
        gen_state = self.states_creator.get_sample_start_state(num)[0]
        (sol_path, total_steps) = self.solve_given_board(gen_state)
        #self._show_solution(sol_path)
    def solve_given_board(self, start_state):
        """Run min-conflicts search from ``start_state``.

        Returns (solution_path, total_steps) on success, (False, 0) when
        the iteration budget runs out.
        """
        # Prepare variables
        solution_path = []
        current_state = start_state
        conf_count = self.heuristic.count_total_conflicts(current_state)
        solution_path.append(current_state)
        total_steps = 0
        # Show board
        self._show_board(start_state)
        print('Number of conflicts: %d') % conf_count
        # Repeat until there are no conflicts.
        iter_n = 0
        while conf_count > 0:
            # Check n. of performed iterations
            if iter_n > self.max_iter_count:
                return False, 0
            iter_n += 1
            #print ('========iter. %d========') % iter_n
            # Create a new state from current state.
            (new_state, performed_steps) = self.heuristic.choose_min_conflict_positions(current_state)
            # Check if the new state is alredy in solution path.
            if new_state in solution_path:
                continue # If yes, re-run the method.
            # If not, it's part of solution.
            conf_count = self.heuristic.count_total_conflicts(new_state)
            solution_path.append(new_state)
            current_state = new_state
            total_steps += len(performed_steps)
        # Show solution
        print # blank line
        if solution_path:
            print('>>>Solved in %d steps.') % total_steps
            self._show_board(solution_path[-1])
        else:
            print('<<<NOT solved after %d iterations.') % self.max_iter_count
        # result
        return solution_path, total_steps
    def bulk_solve(self, boards_n, csv = False):
        """Solve ``boards_n`` random boards and print aggregate statistics.

        When ``csv`` is true, the detailed stats are emitted as CSV rows.
        """
        # Prepare variables for statistics
        steps_sum = 0.0
        solved_n = 0
        unsolved_boards = {}   # start-conflict count -> list of unsolved boards
        solved_stats = {}      # start-conflict count -> [solved count, steps sum]
        # Generate and solve some boards
        for i in range(1, boards_n+1):
            print('======Board n. %d======') % i
            # Get and solve random board.
            (sol_path, start_state, total_steps) = self.solve_random_board()
            conf_count = self.heuristic.count_total_conflicts(start_state)
            #self._show_board(start_state)
            # Check if a solution was found.
            if sol_path:
                # Increment common values.
                solved_n += 1
                steps_sum += total_steps
                # Save values for initial conflicts count.
                if conf_count in solved_stats:
                    solved_stats[conf_count][0] += 1
                    solved_stats[conf_count][1] += total_steps
                else:
                    solved_stats[conf_count] = []
                    solved_stats[conf_count].append(1.0)
                    solved_stats[conf_count].append(total_steps)
            else:
                if conf_count in unsolved_boards:
                    unsolved_boards[conf_count].append(start_state)
                else:
                    unsolved_boards[conf_count] = [start_state]
        # blank line
        print
        # Show basic statistics
        avg_steps_c = round(steps_sum / boards_n, 2)
        percent_solved = (float(solved_n) / boards_n) * 100
        print('======BASIC STATS======')
        print('Number of generated boards: %d') % boards_n
        print('Number of solved boards: %d ... ' + str(round(percent_solved, 2)) + ' %%') % solved_n
        print('Average number of steps for solution: ' + str(avg_steps_c))
        # Show detailed statistics
        print('======DETAILED STATS======')
        for conf_count, (solved_count, steps_sum) in sorted(solved_stats.iteritems()):
            avg_steps_c = round(steps_sum / solved_count, 2)
            # NOTE(review): unsolved_boards is keyed by conf_count elsewhere,
            # but is looked up with solved_count here — confirm intent.
            total_boards = solved_count + len(unsolved_boards.get(solved_count, []))
            percent_solved = (float(solved_count) / total_boards) * 100
            if csv:
                print str(conf_count)+','+str(round(percent_solved, 2))+','+str(avg_steps_c)
            else:
                print('===%d conflicts===') % conf_count
                print('Generated boards: %d') % total_boards
                print('Solved boards: %d ... ' + str(round(percent_solved, 2)) + ' %%') % solved_count
                print('Average steps: ' + str(avg_steps_c))
    def _show_board(self, state):
        """Print the board, one row per line (Python 2 print statement)."""
        for row in state:
            print row
    def _show_solution(self, solution_path):
        """Print the final state of ``solution_path`` plus iteration count."""
        print('>>>>Solution<<<<')
        if not solution_path:
            exit('No solution found.')
        print('Number of iterations: %d') % (len(solution_path) - 1)
        print('Final state: ')
        self._show_board(solution_path[-1])
        print('{n. of conflists: %d}') % self.heuristic.count_total_conflicts(solution_path[-1])
|
from pyspark.sql import SparkSession, Row
from bigdata.utilities import split_df_column
class SparkTask:
    """Loads an Excel file into Spark and filters/splits coordinate columns.

    Requires the com.crealytics spark-excel connector on the Spark classpath.
    """
    def __init__(self, excel_file_path):
        # Creating the session here is a side effect of construction.
        self.excel_file_path = excel_file_path
        self.spark = SparkSession.builder.appName("forage-bigdata-task").getOrCreate()
        self.df = self.read_excel()
    def read_excel(self):
        """Read the Excel file into a DataFrame (header row, inferred schema)."""
        return self.spark.read.format("com.crealytics.spark.excel")\
            .option("Header", "true").option("inferSchema", "true")\
            .load(self.excel_file_path)
    def process_df(self):
        """DataFrame pipeline: keep authorized, card-not-present rows and
        split the two "long lat" string columns into numeric pairs."""
        print(f"DF processing of excel file {self.excel_file_path}")
        filtered_df = self.df.filter((self.df['status'] == "authorized") & (self.df['card_present_flag'] == 0))
        long_lat_split_df = split_df_column(
            filtered_df,
            "long_lat",
            "long",
            "lat"
        )
        merchant_long_lat_split_df = split_df_column(
            long_lat_split_df,
            "merchant_long_lat",
            "merchant_long",
            "merchant_lat"
        )
        # count() forces evaluation of the lazy pipeline here.
        print(f" Input data rows: before processing: {self.df.count()} "
              f"and after processing: {merchant_long_lat_split_df.count()}")
        return merchant_long_lat_split_df
    def process_rdd(self):
        """Same pipeline expressed over the underlying RDD of Rows."""
        print(f"\nRDD processing of excel file {self.excel_file_path}")
        rdd = self.df.rdd
        filtered_rdd = rdd.filter(lambda row: row.status == "authorized")\
            .filter(lambda row: row.card_present_flag == 0)
        # Each map appends parsed float columns while keeping the原 fields via asDict().
        long_lat_split_rdd = filtered_rdd.map(
            lambda k: Row(**k.asDict(),
                          long=float(k.long_lat.split(' ')[0]),
                          lat=float(k.long_lat.split(' ')[1])))
        merchant_long_lat_split_rdd = long_lat_split_rdd.map(
            lambda k: Row(**k.asDict(),
                          merch_long=float(k.merchant_long_lat.split(' ')[0]),
                          merch_lat=float(k.merchant_long_lat.split(' ')[1])))
        print(f" Input data rows: before processing: {rdd.count()} "
              f"and after processing: {merchant_long_lat_split_rdd.count()}")
        return merchant_long_lat_split_rdd
|
from django.apps import AppConfig
class DaruWheelConfig(AppConfig):
    """Django app configuration for the daru_wheel application."""
    name = "daru_wheel"
    def ready(self):
        # Imported for its side effect: registers the app's signal handlers.
        import daru_wheel.signals
|
import numpy as np
import theano
import theano.tensor as T
from theano import ifelse
from lasagne import init
from lasagne import nonlinearities
from lasagne import layers
__all__ = [
"BatchNormalizationLayer"
]
class BatchNormalizationLayer(layers.base.Layer):
    """
    Batch normalization Layer [1]
    The user is required to setup updates for the learned parameters (Gamma
    and Beta). The values nessesary for creating the updates can be
    obtained by passing a dict as the moving_avg_hooks keyword to
    get_output().
    REF:
     [1] http://arxiv.org/abs/1502.03167
    :parameters:
        - input_layer : `Layer` instance
            The layer from which this layer will obtain its input
        - nonlinearity : callable or None (default: lasagne.nonlinearities.rectify)
            The nonlinearity that is applied to the layer activations. If None
            is provided, the layer will be linear.
        - epsilon : scalar float. Stabilizing training. Setting this too
            close to zero will result in nans.
    :usage:
        >>> from lasagne.layers import InputLayer, BatchNormalizationLayer,
            DenseLayer
        >>> from lasagne.nonlinearities import linear, rectify
        >>> l_in = InputLayer((100, 20))
        l_dense = Denselayer(l_in, 50, nonlinearity=linear)
        >>> l_bn = BatchNormalizationLayer(l_dense, nonlinearity=rectify)
        >>> hooks, input, updates = {}, T.matrix, []
        >>> l_out = l_bn.get_output(
            input, deterministic=False, moving_avg_hooks=hooks)
        >>> mulfac = 1.0/100.0
        >>> batchnormparams = list(itertools.chain(
            *[i[1] for i in hooks['BatchNormalizationLayer:movingavg']]))
        >>> batchnormvalues = list(itertools.chain(
            *[i[0] for i in hooks['BatchNormalizationLayer:movingavg']]))
        >>> for tensor, param in zip(tensors, params):
            updates.append((param, (1.0-mulfac)*param + mulfac*tensor))
        # append updates to your normal update list
    """
    def __init__(self, incoming,
                 gamma = init.Uniform([0.95, 1.05]),
                 beta = init.Constant(0.),
                 nonlinearity=nonlinearities.rectify,
                 epsilon = 0.001,
                 **kwargs):
        super(BatchNormalizationLayer, self).__init__(incoming, **kwargs)
        # None means a linear layer (identity nonlinearity).
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity
        # Normalization is applied over the flattened feature dimension.
        self.num_units = int(np.prod(self.input_shape[1:]))
        # Learned scale (gamma) and shift (beta), one per unit.
        self.gamma = self.add_param(gamma, (self.num_units,),
                                    name="BatchNormalizationLayer:gamma",trainable=True)
        self.beta = self.add_param(beta, (self.num_units,),
                                   name="BatchNormalizationLayer:beta",trainable=True)
        self.epsilon = epsilon
        # Shared variables holding the moving-average statistics used at
        # inference time; the user updates them via the moving_avg_hooks dict.
        self.mean_inference = theano.shared(
            np.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.mean_inference.name = "shared:mean-" + self.name ####
        self.variance_inference = theano.shared(
            np.zeros((1, self.num_units), dtype=theano.config.floatX),
            borrow=True,
            broadcastable=(True, False))
        self.variance_inference.name = "shared:variance-" + self.name ####
    def get_output_shape_for(self, input_shape):
        # Batch normalization does not change the shape.
        return input_shape
    def get_output_for(self, input, moving_avg_hooks=None,
                       deterministic=False, *args, **kwargs):
        # Inputs with more than 2 dims are flattened to (batch, features)
        # and restored to their original shape at the end.
        reshape = False
        if input.ndim > 2:
            output_shape = input.shape
            reshape = True
            input = input.flatten(2)
        if deterministic is False:
            # Training mode: use batch statistics, and expose them through
            # moving_avg_hooks so the caller can build moving-average updates.
            m = T.mean(input, axis=0, keepdims=True)
            # NOTE: v is the standard deviation (sqrt of var + epsilon).
            v = T.sqrt(T.var(input, axis=0, keepdims=True)+self.epsilon)
            m.name = "tensor:mean-" + self.name
            v.name = "tensor:variance-" + self.name
            key = "BatchNormalizationLayer:movingavg"
            if key not in moving_avg_hooks:
                # moving_avg_hooks[key] = {}
                moving_avg_hooks[key] = []
            # moving_avg_hooks[key][self.name] = [[m,v], [self.mean_inference, self.variance_inference]]
            moving_avg_hooks[key].append([[m,v], [self.mean_inference, self.variance_inference]])
        else:
            # Inference mode: use the stored moving averages.
            m = self.mean_inference
            v = self.variance_inference
        input_hat = (input - m) / v # normalize
        y = self.gamma*input_hat + self.beta # scale and shift
        if reshape:#input.ndim > 2:
            y = T.reshape(y, output_shape)
        return self.nonlinearity(y)
|
# Print the odd numbers between 1 and 20 by stepping the range by 2
# (same output as filtering range(1, 21) for odd values).
for odd_value in range(1, 21, 2):
    print(odd_value)
|
from PyInstaller.utils.hooks import logger, get_module_file_attribute
import os
def pre_safe_import_module(psim_api):
    """PyInstaller pre-safe-import hook for PyMca5.PyMca.

    Registers every entry of the package's __path__ with PyInstaller so the
    (possibly extended) package path is searched during module discovery.
    """
    import PyMca5.PyMca as PyMca
    for p in PyMca.__path__:
        psim_api.append_package_path(p)
|
# Copyright (c) 2017-2023 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Conversion methods to Ledger API Protobuf-generated types from dazl/Pythonic types.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from ..._gen.com.daml.ledger.api import v1 as lapipb
from ...damlast.daml_lf_1 import TypeConName
from ...damlast.util import module_local_name, module_name, package_ref
from ...prim import ContractId, timedelta_to_duration
from ...values.protobuf import ProtobufEncoder, set_value
from ..serializers import AbstractSerializer
if TYPE_CHECKING:
from ...client.commands import CommandPayload
__all__ = ["ProtobufSerializer"]
class ProtobufSerializer(AbstractSerializer):
    """Serializes dazl commands into Ledger API v1 protobuf messages."""
    mapper = ProtobufEncoder()
    ################################################################################################
    # COMMAND serializers
    ################################################################################################
    def serialize_command_request(
        self, command_payload: "CommandPayload"
    ) -> "lapipb.SubmitAndWaitRequest":
        """Build a SubmitAndWaitRequest wrapping all commands in the payload.

        deduplication_time is optional; the two branches differ only in
        whether that field is set on the Commands message.
        """
        commands = [self.serialize_command(command) for command in command_payload.commands]
        if command_payload.deduplication_time is not None:
            return lapipb.SubmitAndWaitRequest(
                commands=lapipb.Commands(
                    ledger_id=command_payload.ledger_id,
                    workflow_id=command_payload.workflow_id,
                    application_id=command_payload.application_id,
                    command_id=command_payload.command_id,
                    party=command_payload.party,
                    commands=commands,
                    deduplication_time=timedelta_to_duration(command_payload.deduplication_time),
                )
            )
        else:
            return lapipb.SubmitAndWaitRequest(
                commands=lapipb.Commands(
                    ledger_id=command_payload.ledger_id,
                    workflow_id=command_payload.workflow_id,
                    application_id=command_payload.application_id,
                    command_id=command_payload.command_id,
                    party=command_payload.party,
                    commands=commands,
                )
            )
    def serialize_create_command(
        self, name: "TypeConName", template_args: "Any"
    ) -> "lapipb.Command":
        """Build a CreateCommand; template_args is a (ctor, value) pair and
        the ctor must be "record" (template payloads are records)."""
        create_ctor, create_value = template_args
        if create_ctor != "record":
            raise ValueError("Template values must resemble records")
        cmd = lapipb.CreateCommand()
        _set_template(cmd.template_id, name)
        cmd.create_arguments.MergeFrom(create_value)
        return lapipb.Command(create=cmd)
    def serialize_exercise_command(
        self, contract_id: "ContractId", choice_name: str, choice_args: "Any"
    ) -> lapipb.Command:
        """Build an ExerciseCommand targeting an existing contract id."""
        type_ref = contract_id.value_type
        ctor, value = choice_args
        cmd = lapipb.ExerciseCommand()
        _set_template(cmd.template_id, type_ref)
        cmd.contract_id = contract_id.value
        cmd.choice = choice_name
        set_value(cmd.choice_argument, ctor, value)
        return lapipb.Command(exercise=cmd)
    def serialize_exercise_by_key_command(
        self,
        template_name: "TypeConName",
        key_arguments: Any,
        choice_name: str,
        choice_arguments: Any,
    ) -> lapipb.Command:
        """Build an ExerciseByKeyCommand; key/choice args are (ctor, value) pairs."""
        key_ctor, key_value = key_arguments
        choice_ctor, choice_value = choice_arguments
        cmd = lapipb.ExerciseByKeyCommand()
        _set_template(cmd.template_id, template_name)
        set_value(cmd.contract_key, key_ctor, key_value)
        cmd.choice = choice_name
        set_value(cmd.choice_argument, choice_ctor, choice_value)
        return lapipb.Command(exerciseByKey=cmd)
    def serialize_create_and_exercise_command(
        self,
        template_name: "TypeConName",
        create_arguments: "Any",
        choice_name: str,
        choice_arguments: Any,
    ) -> lapipb.Command:
        """Build a CreateAndExerciseCommand; the create payload must be a record."""
        create_ctor, create_value = create_arguments
        if create_ctor != "record":
            raise ValueError("Template values must resemble records")
        choice_ctor, choice_value = choice_arguments
        cmd = lapipb.CreateAndExerciseCommand()
        _set_template(cmd.template_id, template_name)
        cmd.create_arguments.MergeFrom(create_value)
        cmd.choice = choice_name
        set_value(cmd.choice_argument, choice_ctor, choice_value)
        return lapipb.Command(createAndExercise=cmd)
def _set_template(message: "lapipb.Identifier", name: "TypeConName") -> None:
    """Fill a protobuf Identifier in place from a dazl type constructor name."""
    message.package_id = package_ref(name)
    message.module_name = str(module_name(name))
    message.entity_name = module_local_name(name)
|
from setuptools import setup, find_packages
setup(
    name="rocky-django",
    version="1.0.0-RC01-SNAPSHOT",
    author="copervan",
    author_email="copervan@163.com",
    description="rocky-django sdk",
    platforms='windows',
    # Discover every package containing an __init__.py.
    packages=find_packages(),
    # Static files etc. are handled via MANIFEST.in (the package_data
    # argument is unreliable).
    include_package_data=False,
    # Stand-alone scripts that do not live inside a package.
    scripts=["manage.py"],
    install_requires=['django==1.8.18'],
    # Required; otherwise uninstalling raises a Windows error.
    zip_safe=False
)
|
shop_link = 'http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/'
def test_should_see_add_to_basket(browser):
    """Smoke test: the product page must expose at least one add-to-basket button.

    ``browser`` is expected to be a fixture yielding a Selenium WebDriver.
    NOTE(review): find_elements_by_css_selector was removed in Selenium 4;
    migrate to find_elements(By.CSS_SELECTOR, ...) when upgrading.
    """
    browser.implicitly_wait(5)
    browser.get(shop_link)
    count_button = len(browser.find_elements_by_css_selector(".btn-add-to-basket"))
    assert count_button > 0, 'add to basket button is not found'
|
#!/usr/bin/env python3
#
# Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import argparse
import io
import json
import os
import subprocess
import sys
import time
import utils
import gn as gn_py
HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()
SCRIPT_DIR = os.path.dirname(sys.argv[0])
DART_ROOT = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
AVAILABLE_ARCHS = utils.ARCH_FAMILY.keys()
usage = """\
usage: %%prog [options] [targets]
This script invokes ninja to build Dart.
"""
def BuildOptions():
    """Build the argparse parser: shared GN config/option groups plus the
    ninja/goma knobs specific to this build driver."""
    parser = argparse.ArgumentParser(
        description='Runs GN (if necessary) followed by ninja',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    config_group = parser.add_argument_group('Configuration Related Arguments')
    gn_py.AddCommonConfigurationArgs(config_group)
    gn_group = parser.add_argument_group('GN Related Arguments')
    gn_py.AddCommonGnOptionArgs(gn_group)
    other_group = parser.add_argument_group('Other Arguments')
    gn_py.AddOtherArgs(other_group)
    other_group.add_argument("-j",
                             type=int,
                             help='Ninja -j option for Goma builds.',
                             default=1000)
    other_group.add_argument("-l",
                             type=int,
                             help='Ninja -l option for Goma builds.',
                             default=64)
    other_group.add_argument("--no-start-goma",
                             help="Don't try to start goma",
                             default=False,
                             action='store_true')
    other_group.add_argument(
        "--check-clean",
        help="Check that a second invocation of Ninja has nothing to do",
        default=False,
        action='store_true')
    # Remaining positional arguments are passed to ninja as build targets.
    parser.add_argument('build_targets', nargs='*')
    return parser
def NotifyBuildDone(build_config, success, start):
    """Report build completion: print failures, and optionally pop an OS
    notification when the build took longer than
    DART_BUILD_NOTIFICATION_DELAY seconds (env var; defaults to 'never')."""
    if not success:
        print("BUILD FAILED")
    sys.stdout.flush()
    # Display a notification if build time exceeded DART_BUILD_NOTIFICATION_DELAY.
    notification_delay = float(
        os.getenv('DART_BUILD_NOTIFICATION_DELAY', sys.float_info.max))
    if (time.time() - start) < notification_delay:
        return
    if success:
        message = 'Build succeeded.'
    else:
        message = 'Build failed.'
    title = build_config
    command = None
    if HOST_OS == 'macos':
        # Use AppleScript to display a UI non-modal notification.
        script = 'display notification "%s" with title "%s" sound name "Glass"' % (
            message, title)
        command = "osascript -e '%s' &" % script
    elif HOST_OS == 'linux':
        if success:
            icon = 'dialog-information'
        else:
            icon = 'dialog-error'
        command = "notify-send -i '%s' '%s' '%s' &" % (icon, message, title)
    elif HOST_OS == 'win32':
        if success:
            icon = 'info'
        else:
            icon = 'error'
        # Balloon notification via Windows Forms from PowerShell.
        command = (
            "powershell -command \""
            "[reflection.assembly]::loadwithpartialname('System.Windows.Forms')"
            "| Out-Null;"
            "[reflection.assembly]::loadwithpartialname('System.Drawing')"
            "| Out-Null;"
            "$n = new-object system.windows.forms.notifyicon;"
            "$n.icon = [system.drawing.systemicons]::information;"
            "$n.visible = $true;"
            "$n.showballoontip(%d, '%s', '%s', "
            "[system.windows.forms.tooltipicon]::%s);\"") % (
                5000, # Notification stays on for this many milliseconds
                message,
                title,
                icon)
    if command:
        # Ignore return code, if this command fails, it doesn't matter.
        os.system(command)
def UseGoma(out_dir):
    """Return True if the GN args for ``out_dir`` enable goma.

    Fix: the original opened args.gn without closing it, leaking the file
    handle until garbage collection; a context manager closes it promptly.
    """
    args_gn = os.path.join(out_dir, 'args.gn')
    with open(args_gn, 'r') as f:
        return 'use_goma = true' in f.read()
# Try to start goma, but don't bail out if we can't. Instead print an error
# message, and let the build fail with its own error messages as well.
goma_started = False  # process-wide flag so goma_ctl runs at most once
def EnsureGomaStarted(out_dir):
    """Start the goma daemon for the build in ``out_dir`` if not already done.

    Reads goma_dir out of args.gn and runs goma_ctl.py ensure_start.
    Returns True on success (or if goma was started earlier), False otherwise.
    """
    global goma_started
    if goma_started:
        return True
    args_gn_path = os.path.join(out_dir, 'args.gn')
    goma_dir = None
    with open(args_gn_path, 'r') as fp:
        for line in fp:
            if 'goma_dir' in line:
                words = line.split()
                goma_dir = words[2][1:-1]  # goma_dir = "/path/to/goma"
    if not goma_dir:
        print('Could not find goma for ' + out_dir)
        return False
    if not os.path.exists(goma_dir) or not os.path.isdir(goma_dir):
        print('Could not find goma at ' + goma_dir)
        return False
    goma_ctl = os.path.join(goma_dir, 'goma_ctl.py')
    goma_ctl_command = [
        'python3',
        goma_ctl,
        'ensure_start',
    ]
    process = subprocess.Popen(goma_ctl_command)
    process.wait()
    if process.returncode != 0:
        print(
            "Tried to run goma_ctl.py, but it failed. Try running it manually: "
            + "\n\t" + ' '.join(goma_ctl_command))
        return False
    goma_started = True
    return True
# Returns a tuple (build_config, command to run, whether goma is used)
def BuildOneConfig(options, targets, target_os, mode, arch, sanitizer):
    """Assemble the ninja command line for one (os, mode, arch, sanitizer)
    configuration, starting goma first when the config requests it."""
    build_config = utils.GetBuildConf(mode, arch, target_os, sanitizer)
    out_dir = utils.GetBuildRoot(HOST_OS, mode, arch, target_os, sanitizer)
    using_goma = False
    command = ['buildtools/ninja/ninja', '-C', out_dir]
    if options.verbose:
        command += ['-v']
    if UseGoma(out_dir):
        if options.no_start_goma or EnsureGomaStarted(out_dir):
            using_goma = True
            # High parallelism is safe with goma's remote execution.
            command += [('-j%s' % str(options.j))]
            command += [('-l%s' % str(options.l))]
        else:
            # If we couldn't ensure that goma is started, let the build start, but
            # slowly so we can see any helpful error messages that pop out.
            command += ['-j1']
    command += targets
    return (build_config, command, using_goma)
def RunOneBuildCommand(build_config, args, env):
    """Run one ninja invocation to completion; notify and return 0/1."""
    start_time = time.time()
    print(' '.join(args))
    process = subprocess.Popen(args, env=env, stdin=None)
    process.wait()
    if process.returncode != 0:
        NotifyBuildDone(build_config, success=False, start=start_time)
        return 1
    else:
        NotifyBuildDone(build_config, success=True, start=start_time)
        return 0
def CheckCleanBuild(build_config, args, env):
    """Dry-run ninja (-n -d explain) and verify it reports no pending work.

    Returns 0 when the build is clean, 1 otherwise (printing ninja's
    explanation of what it would rebuild).
    """
    args = args + ['-n', '-d', 'explain']
    print(' '.join(args))
    process = subprocess.Popen(args,
                               env=env,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               stdin=None)
    out, err = process.communicate()
    process.wait()
    if process.returncode != 0:
        return 1
    if 'ninja: no work to do' not in out.decode('utf-8'):
        print(err.decode('utf-8'))
        return 1
    return 0
def SanitizerEnvironmentVariables():
    """Build the sanitizer-related environment from tools/bots/test_matrix.json.

    Returns a dict of *SAN options plus absolute *_SYMBOLIZER_PATH entries
    when a symbolizer is configured for this host OS.
    """
    with io.open('tools/bots/test_matrix.json', encoding='utf-8') as fd:
        config = json.loads(fd.read())
        env = dict()
        for k, v in config['sanitizer_options'].items():
            env[str(k)] = str(v)
        symbolizer_path = config['sanitizer_symbolizer'].get(HOST_OS, None)
        if symbolizer_path:
            symbolizer_path = str(os.path.join(DART_ROOT, symbolizer_path))
            env['ASAN_SYMBOLIZER_PATH'] = symbolizer_path
            env['LSAN_SYMBOLIZER_PATH'] = symbolizer_path
            env['MSAN_SYMBOLIZER_PATH'] = symbolizer_path
            env['TSAN_SYMBOLIZER_PATH'] = symbolizer_path
            env['UBSAN_SYMBOLIZER_PATH'] = symbolizer_path
        return env
def Main():
    """Entry point: run GN, then ninja for every requested configuration.

    Non-goma builds run sequentially; goma builds run in parallel, with
    sibling builds terminated when one fails. Returns a process exit code.
    """
    starttime = time.time()
    # Parse the options.
    parser = BuildOptions()
    options = parser.parse_args()
    targets = options.build_targets
    if not gn_py.ProcessOptions(options):
        parser.print_help()
        return 1
    # If binaries are built with sanitizers we should use those flags.
    # If the binaries are not built with sanitizers the flag should have no
    # effect.
    env = dict(os.environ)
    env.update(SanitizerEnvironmentVariables())
    # macOS's python sets CPATH, LIBRARY_PATH, SDKROOT implicitly.
    #
    # See:
    #
    #   * https://openradar.appspot.com/radar?id=5608755232243712
    #   * https://github.com/dart-lang/sdk/issues/52411
    #
    # Remove these environment variables to avoid affecting clang's behaviors.
    if sys.platform == 'darwin':
        env.pop('CPATH', None)
        env.pop('LIBRARY_PATH', None)
        env.pop('SDKROOT', None)
    # Always run GN before building.
    gn_py.RunGnOnConfiguredConfigurations(options)
    # Build all targets for each requested configuration.
    configs = []
    for target_os in options.os:
        for mode in options.mode:
            for arch in options.arch:
                for sanitizer in options.sanitizer:
                    configs.append(
                        BuildOneConfig(options, targets, target_os, mode, arch,
                                       sanitizer))
    # Build regular configs.
    goma_builds = []
    for (build_config, args, goma) in configs:
        if args is None:
            return 1
        if goma:
            goma_builds.append([env, args])
        elif RunOneBuildCommand(build_config, args, env=env) != 0:
            return 1
    # Run goma builds in parallel.
    active_goma_builds = []
    for (env, args) in goma_builds:
        print(' '.join(args))
        process = subprocess.Popen(args, env=env)
        active_goma_builds.append([args, process])
    # Poll until all parallel builds finish; on failure, kill the rest.
    while active_goma_builds:
        time.sleep(0.1)
        for goma_build in active_goma_builds:
            (args, process) = goma_build
            if process.poll() is not None:
                print(' '.join(args) + " done.")
                active_goma_builds.remove(goma_build)
                if process.returncode != 0:
                    for (_, to_kill) in active_goma_builds:
                        to_kill.terminate()
                    return 1
    if options.check_clean:
        for (build_config, args, goma) in configs:
            if CheckCleanBuild(build_config, args, env=env) != 0:
                return 1
    endtime = time.time()
    print("The build took %.3f seconds" % (endtime - starttime))
    return 0
if __name__ == '__main__':
    # Propagate the build's status as the process exit code.
    sys.exit(Main())
|
import sqlite3 as sql
def create_db(db_name):
    """Create the actors table in the SQLite file *db_name* and seed it.

    Inserts three fixed rows (Bob, Alice, Christine). Raises
    sqlite3.OperationalError if the ``actors`` table already exists.
    """
    conn = sql.connect(db_name)
    try:
        # Using the connection itself as a context manager commits the
        # transaction on success (or rolls back on error), but it does
        # NOT close the connection -- hence the explicit try/finally.
        with conn:
            conn.execute('''CREATE TABLE actors (id integer primary key, name text, image text,
            alignment text, location integer)''')
            conn.executemany(
                '''INSERT INTO actors (name, image, alignment, location) VALUES (?, ?, ? , ?)''',
                (('Bob', 'N/A', 0, 0), ('Alice', 'N/A', 0, 0), ('Christine', 'N/A', 0, 0)))
    finally:
        conn.close()
create_db('database.db')
|
import vivisect.impemu.emufunc as emufunc
from vivisect.impemu.impmagic import *
import ntdll
class malloc(emufunc.EmuFunc):
    """Emulation hook for CRT ``malloc``: hand back a tracked heap chunk."""
    __callconv__ = "cdecl"
    __argt__ = [ntdll.DWORD,]
    __argn__ = ["dwSize",]

    def __call__(self, emu):
        # Single cdecl argument: requested allocation size in bytes.
        dwsize = self.getArgs(emu)[0]
        chunk = HeapChunk(dwsize)
        # NOTE(review): setMagic presumably registers the chunk with the
        # emulator's "magic value" tracking and yields the value callers
        # see -- confirm against the impemu docs.
        self.setReturn(emu, emu.setMagic(chunk))
#EMUFUNC:_CIacos
class _CIacos(emufunc.EmuFunc):
    # Autogenerated declaration stub for the MSVCRT `_CIacos` compiler
    # helper. The _CI* intrinsics pass their operand in the x87 FPU
    # stack rather than on the call stack, so no stack args are declared
    # and the base-class behavior suffices (no __call__ override).
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIasin
class _CIasin(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIatan
class _CIatan(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIatan2
class _CIatan2(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIcos
class _CIcos(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIcosh
class _CIcosh(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIexp
class _CIexp(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIfmod
class _CIfmod(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIlog
class _CIlog(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIlog10
class _CIlog10(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIpow
class _CIpow(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIsin
class _CIsin(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIsinh
class _CIsinh(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CIsqrt
class _CIsqrt(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CItan
class _CItan(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CItanh
class _CItanh(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_CxxThrowException
class _CxxThrowException(emufunc.EmuFunc):
    # Autogenerated stub for the MSVC C++ exception-raising runtime
    # entry. Two arguments (per MSVC: exception object pointer and
    # throw-info pointer); types unresolved by the generator, hence the
    # Unknown placeholders and None argument names.
    __callconv__ = 'stdcall'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_EH_prolog
class _EH_prolog(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_Getdays
class _Getdays(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_Getmonths
class _Getmonths(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_Gettnames
class _Gettnames(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_Strftime
class _Strftime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_XcptFilter
class _XcptFilter(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__CxxCallUnwindDtor
class __CxxCallUnwindDtor(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__CxxDetectRethrow
class __CxxDetectRethrow(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__CxxExceptionFilter
class __CxxExceptionFilter(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__CxxFrameHandler
class __CxxFrameHandler(emufunc.EmuFunc):
__callconv__ = 'unknown'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__CxxLongjmpUnwind
class __CxxLongjmpUnwind(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__CxxQueryExceptionSize
class __CxxQueryExceptionSize(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__CxxRegisterExceptionObject
class __CxxRegisterExceptionObject(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__CxxUnregisterExceptionObject
class __CxxUnregisterExceptionObject(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__DestructExceptionObject
class __DestructExceptionObject(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__RTCastToVoid
class __RTCastToVoid(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__RTDynamicCast
class __RTDynamicCast(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__RTtypeid
class __RTtypeid(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__STRINGTOLD
class __STRINGTOLD(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:___lc_codepage_func
class ___lc_codepage_func(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:___lc_handle_func
class ___lc_handle_func(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:___mb_cur_max_func
class ___mb_cur_max_func(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:___setlc_active_func
class ___setlc_active_func(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:___unguarded_readlc_active_add_func
class ___unguarded_readlc_active_add_func(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__crtCompareStringA
class __crtCompareStringA(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__crtCompareStringW
class __crtCompareStringW(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__crtGetLocaleInfoW
class __crtGetLocaleInfoW(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__crtGetStringTypeW
class __crtGetStringTypeW(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__crtLCMapStringA
class __crtLCMapStringA(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__crtLCMapStringW
class __crtLCMapStringW(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__dllonexit
class __dllonexit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__doserrno
class __doserrno(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__fpecode
class __fpecode(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__getmainargs
class __getmainargs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__iob_func
class __iob_func(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__isascii
class __isascii(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__iscsym
class __iscsym(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__iscsymf
class __iscsymf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__lconv_init
class __lconv_init(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p___argc
class __p___argc(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p___argv
class __p___argv(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p___initenv
class __p___initenv(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p___mb_cur_max
class __p___mb_cur_max(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p___wargv
class __p___wargv(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p___winitenv
class __p___winitenv(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__acmdln
class __p__acmdln(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__amblksiz
class __p__amblksiz(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__commode
class __p__commode(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__daylight
class __p__daylight(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__dstbias
class __p__dstbias(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__environ
class __p__environ(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__fileinfo
class __p__fileinfo(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__fmode
class __p__fmode(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__iob
class __p__iob(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__mbcasemap
class __p__mbcasemap(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__mbctype
class __p__mbctype(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__osver
class __p__osver(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__pctype
class __p__pctype(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__pgmptr
class __p__pgmptr(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__pwctype
class __p__pwctype(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__timezone
class __p__timezone(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__tzname
class __p__tzname(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__wcmdln
class __p__wcmdln(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__wenviron
class __p__wenviron(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__winmajor
class __p__winmajor(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__winminor
class __p__winminor(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__winver
class __p__winver(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__p__wpgmptr
class __p__wpgmptr(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__pctype_func
class __pctype_func(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__pxcptinfoptrs
class __pxcptinfoptrs(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__set_app_type
class __set_app_type(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__setusermatherr
class __setusermatherr(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__threadhandle
class __threadhandle(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__threadid
class __threadid(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__toascii
class __toascii(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__unDName
class __unDName(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__unDNameEx
class __unDNameEx(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__uncaught_exception
class __uncaught_exception(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:__wcserror
class __wcserror(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:__wgetmainargs
class __wgetmainargs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_abnormal_termination
class _abnormal_termination(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_access
class _access(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_adj_fdiv_m16i
class _adj_fdiv_m16i(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fdiv_m32
class _adj_fdiv_m32(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_adj_fdiv_m32i
class _adj_fdiv_m32i(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fdiv_m64
class _adj_fdiv_m64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_adj_fdiv_r
class _adj_fdiv_r(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fdivr_m16i
class _adj_fdivr_m16i(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fdivr_m32
class _adj_fdivr_m32(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_adj_fdivr_m32i
class _adj_fdivr_m32i(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fdivr_m64
class _adj_fdivr_m64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_adj_fpatan
class _adj_fpatan(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fprem
class _adj_fprem(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fprem1
class _adj_fprem1(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_adj_fptan
class _adj_fptan(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_aligned_free
class _aligned_free(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_aligned_malloc
class _aligned_malloc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_aligned_offset_malloc
class _aligned_offset_malloc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_aligned_offset_realloc
class _aligned_offset_realloc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_aligned_realloc
class _aligned_realloc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_amsg_exit
class _amsg_exit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_assert
class _assert(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_atodbl
class _atodbl(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_atoi64
class _atoi64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_atoldbl
class _atoldbl(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_beep
class _beep(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_beginthread
class _beginthread(emufunc.EmuFunc):
    # CRT thread-creation API: declared signature only, so the emulator
    # can name/type the arguments at the call site. No __call__
    # override -- the thread is not actually spawned during emulation.
    __callconv__ = 'cdecl'
    __argn__ = ["start_address", "stack_size", "arglist"]
    __argt__ = [FUNCPTR, UINT32, Pointer, ]
#EMUFUNCDONE
#EMUFUNC:_beginthreadex
class _beginthreadex(emufunc.EmuFunc):
    # Extended CRT thread-creation API (security attrs, init flag, and
    # an out-param for the thread id). Declaration-only stub: argument
    # names/types annotate the call site; no behavior is emulated.
    __callconv__ = 'cdecl'
    __argn__ = ["security", "stack_size", "start_address", "arglist", "initflag", "thrdaddr"]
    __argt__ = [Pointer, UINT32, FUNCPTR, Pointer, UINT32, UINT32, ]
#EMUFUNCDONE
#EMUFUNC:_c_exit
class _c_exit(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_cabs
class _cabs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_callnewh
class _callnewh(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cexit
class _cexit(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_cgets
class _cgets(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cgetws
class _cgetws(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_chdir
class _chdir(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_chdrive
class _chdrive(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_chgsign
class _chgsign(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_chkesp
class _chkesp(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_chmod
class _chmod(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_chsize
class _chsize(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_clearfp
class _clearfp(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_close
class _close(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_commit
class _commit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_control87
class _control87(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_controlfp
class _controlfp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_copysign
class _copysign(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cprintf
class _cprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cputs
class _cputs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cputws
class _cputws(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_creat
class _creat(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cscanf
class _cscanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ctime64
class _ctime64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cwait
class _cwait(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cwprintf
class _cwprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_cwscanf
class _cwscanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_dup
class _dup(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_dup2
class _dup2(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ecvt
class _ecvt(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_endthread
class _endthread(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_endthreadex
class _endthreadex(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_eof
class _eof(emufunc.EmuFunc):
__callconv__ = 'unknown'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_errno
class _errno(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_except_handler2
class _except_handler2(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_except_handler3
class _except_handler3(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execl
class _execl(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execle
class _execle(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execlp
class _execlp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execlpe
class _execlpe(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execv
class _execv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execve
class _execve(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execvp
class _execvp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_execvpe
class _execvpe(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_exit
class _exit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_expand
class _expand(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fcloseall
class _fcloseall(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_fcvt
class _fcvt(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fdopen
class _fdopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fgetchar
class _fgetchar(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_fgetwchar
class _fgetwchar(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_filbuf
class _filbuf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_filelength
class _filelength(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_filelengthi64
class _filelengthi64(emufunc.EmuFunc):
__callconv__ = 'unknown'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fileno
class _fileno(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_findclose
class _findclose(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_findfirst
class _findfirst(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_findfirst64
class _findfirst64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_findfirsti64
class _findfirsti64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_findnext
class _findnext(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_findnext64
class _findnext64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_findnexti64
class _findnexti64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_finite
class _finite(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_flsbuf
class _flsbuf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_flushall
class _flushall(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_fpclass
class _fpclass(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fpieee_flt
class _fpieee_flt(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fpreset
class _fpreset(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_fputchar
class _fputchar(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fputwchar
class _fputwchar(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fsopen
class _fsopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fstat
class _fstat(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fstat64
class _fstat64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_fstati64
class _fstati64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ftime
class _ftime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ftime64
class _ftime64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ftol
class _ftol(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_fullpath
class _fullpath(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_futime
class _futime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_futime64
class _futime64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_gcvt
class _gcvt(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_get_heap_handle
class _get_heap_handle(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:_get_osfhandle
class _get_osfhandle(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
# ---------------------------------------------------------------------------
# Auto-generated emulator stubs for Microsoft CRT exports.
#
# Each entry is bracketed by "#EMUFUNC:<name>" / "#EMUFUNCDONE" marker
# comments and declares one emufunc.EmuFunc subclass describing a single
# exported function's call signature:
#
#   __callconv__ -- calling convention string: 'cdecl', 'stdcall', or
#                   'unknown' (see _resetstkoflw below for the latter).
#   __argn__     -- one slot per argument; None means the argument name was
#                   not recovered.
#   __argt__     -- one slot per argument; Unknown (a name provided by the
#                   surrounding module) means the type was not recovered.
#
# NOTE(review): these entries look machine-generated (presumably recovered
# from binary analysis), so some argument counts may not match the documented
# CRT prototypes -- e.g. _waccess is listed with one argument where the CRT
# documents two -- TODO: confirm against the generator before hand-editing.
# The #EMUFUNC/#EMUFUNCDONE markers appear to be round-trip delimiters for
# that generator; do not remove or alter them.
# ---------------------------------------------------------------------------
#EMUFUNC:_get_sbh_threshold
class _get_sbh_threshold(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getch
class _getch(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getche
class _getche(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getcwd
class _getcwd(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_getdcwd
class _getdcwd(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_getdiskfree
class _getdiskfree(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_getdllprocaddr
class _getdllprocaddr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_getdrive
class _getdrive(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getdrives
class _getdrives(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getmaxstdio
class _getmaxstdio(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getmbcp
class _getmbcp(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getpid
class _getpid(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getsystime
class _getsystime(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_getw
class _getw(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_getwch
class _getwch(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getwche
class _getwche(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_getws
class _getws(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_global_unwind2
class _global_unwind2(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_gmtime64
class _gmtime64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_heapadd
class _heapadd(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_heapchk
class _heapchk(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_heapmin
class _heapmin(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_heapset
class _heapset(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_heapused
class _heapused(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_heapwalk
class _heapwalk(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_hypot
class _hypot(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_i64toa
class _i64toa(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_i64tow
class _i64tow(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_initterm
class _initterm(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_inp
class _inp(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_inpd
class _inpd(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_inpw
class _inpw(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_isatty
class _isatty(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_isctype
class _isctype(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbalnum
class _ismbbalnum(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbalpha
class _ismbbalpha(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbgraph
class _ismbbgraph(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbkalnum
class _ismbbkalnum(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbkana
class _ismbbkana(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbkprint
class _ismbbkprint(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbkpunct
class _ismbbkpunct(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbblead
class _ismbblead(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbprint
class _ismbbprint(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbpunct
class _ismbbpunct(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbbtrail
class _ismbbtrail(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcalnum
class _ismbcalnum(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcalpha
class _ismbcalpha(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcdigit
class _ismbcdigit(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcgraph
class _ismbcgraph(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbchira
class _ismbchira(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbckata
class _ismbckata(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcl0
class _ismbcl0(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcl1
class _ismbcl1(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcl2
class _ismbcl2(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbclegal
class _ismbclegal(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbclower
class _ismbclower(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcprint
class _ismbcprint(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcpunct
class _ismbcpunct(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcspace
class _ismbcspace(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcsymbol
class _ismbcsymbol(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbcupper
class _ismbcupper(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbslead
class _ismbslead(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ismbstrail
class _ismbstrail(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_isnan
class _isnan(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_itoa
class _itoa(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_itow
class _itow(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_j0
class _j0(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_j1
class _j1(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_jn
class _jn(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_kbhit
class _kbhit(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_lfind
class _lfind(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_loaddll
class _loaddll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_local_unwind2
class _local_unwind2(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_localtime64
class _localtime64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_lock
class _lock(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_locking
class _locking(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_logb
class _logb(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_longjmpex
class _longjmpex(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_lrotl
class _lrotl(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_lrotr
class _lrotr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_lsearch
class _lsearch(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_lseek
class _lseek(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_lseeki64
class _lseeki64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ltoa
class _ltoa(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ltow
class _ltow(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_makepath
class _makepath(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbbtombc
class _mbbtombc(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbbtype
class _mbbtype(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbccpy
class _mbccpy(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbcjistojms
class _mbcjistojms(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbcjmstojis
class _mbcjmstojis(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbclen
class _mbclen(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbctohira
class _mbctohira(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbctokata
class _mbctokata(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbctolower
class _mbctolower(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbctombb
class _mbctombb(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbctoupper
class _mbctoupper(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsbtype
class _mbsbtype(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbscat
class _mbscat(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbschr
class _mbschr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbscmp
class _mbscmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbscoll
class _mbscoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbscpy
class _mbscpy(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbscspn
class _mbscspn(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsdec
class _mbsdec(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsdup
class _mbsdup(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsicmp
class _mbsicmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsicoll
class _mbsicoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsinc
class _mbsinc(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbslen
class _mbslen(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbslwr
class _mbslwr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbcat
class _mbsnbcat(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbcmp
class _mbsnbcmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbcnt
class _mbsnbcnt(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbcoll
class _mbsnbcoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbcpy
class _mbsnbcpy(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbicmp
class _mbsnbicmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbicoll
class _mbsnbicoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnbset
class _mbsnbset(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsncat
class _mbsncat(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnccnt
class _mbsnccnt(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsncmp
class _mbsncmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsncoll
class _mbsncoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsncpy
class _mbsncpy(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnextc
class _mbsnextc(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnicmp
class _mbsnicmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnicoll
class _mbsnicoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsninc
class _mbsninc(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsnset
class _mbsnset(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbspbrk
class _mbspbrk(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsrchr
class _mbsrchr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsrev
class _mbsrev(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsset
class _mbsset(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsspn
class _mbsspn(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsspnp
class _mbsspnp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsstr
class _mbsstr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbstok
class _mbstok(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbstrlen
class _mbstrlen(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mbsupr
class _mbsupr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_memccpy
class _memccpy(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_memicmp
class _memicmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mkdir
class _mkdir(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mktemp
class _mktemp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_mktime64
class _mktime64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_msize
class _msize(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_nextafter
class _nextafter(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_onexit
class _onexit(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_open
class _open(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_open_osfhandle
class _open_osfhandle(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_outp
class _outp(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_outpd
class _outpd(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_outpw
class _outpw(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_pclose
class _pclose(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_pipe
class _pipe(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_popen
class _popen(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_purecall
class _purecall(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_putch
class _putch(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_putenv
class _putenv(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_putw
class _putw(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_putwch
class _putwch(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_putws
class _putws(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_read
class _read(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_resetstkoflw
class _resetstkoflw(emufunc.EmuFunc):
    __callconv__ = 'unknown'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_rmdir
class _rmdir(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_rmtmp
class _rmtmp(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_rotl
class _rotl(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_rotr
class _rotr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_safe_fdiv
class _safe_fdiv(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_safe_fdivr
class _safe_fdivr(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_safe_fprem
class _safe_fprem(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_safe_fprem1
class _safe_fprem1(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_scalb
class _scalb(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_scprintf
class _scprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_scwprintf
class _scwprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_searchenv
class _searchenv(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_seh_longjmp_unwind
class _seh_longjmp_unwind(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_set_SSE2_enable
class _set_SSE2_enable(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_set_error_mode
class _set_error_mode(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_set_sbh_threshold
class _set_sbh_threshold(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_seterrormode
class _seterrormode(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_setjmp
class _setjmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_setjmp3
class _setjmp3(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_setmaxstdio
class _setmaxstdio(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_setmbcp
class _setmbcp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_setmode
class _setmode(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_setsystime
class _setsystime(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_sleep
class _sleep(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_snprintf
class _snprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_snscanf
class _snscanf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_snwprintf
class _snwprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_snwscanf
class _snwscanf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_sopen
class _sopen(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnl
class _spawnl(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnle
class _spawnle(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnlp
class _spawnlp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnlpe
class _spawnlpe(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnv
class _spawnv(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnve
class _spawnve(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnvp
class _spawnvp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_spawnvpe
class _spawnvpe(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_splitpath
class _splitpath(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_stat
class _stat(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_stat64
class _stat64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_stati64
class _stati64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_statusfp
class _statusfp(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_strcmpi
class _strcmpi(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strdate
class _strdate(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strdup
class _strdup(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strerror
class _strerror(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_stricmp
class _stricmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_stricoll
class _stricoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strlwr
class _strlwr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strncoll
class _strncoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strnicmp
class _strnicmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strnicoll
class _strnicoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strnset
class _strnset(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strrev
class _strrev(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strset
class _strset(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strtime
class _strtime(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strtoi64
class _strtoi64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strtoui64
class _strtoui64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_strupr
class _strupr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_swab
class _swab(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_tell
class _tell(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_telli64
class _telli64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_tempnam
class _tempnam(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_time64
class _time64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_tolower
class _tolower(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_toupper
class _toupper(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_tzset
class _tzset(emufunc.EmuFunc):
    __callconv__ = 'stdcall'
    __argn__ = []
    __argt__ = []
#EMUFUNCDONE
#EMUFUNC:_ui64toa
class _ui64toa(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ui64tow
class _ui64tow(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ultoa
class _ultoa(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ultow
class _ultow(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_umask
class _umask(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ungetch
class _ungetch(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_ungetwch
class _ungetwch(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_unlink
class _unlink(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_unloaddll
class _unloaddll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_unlock
class _unlock(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_utime
class _utime(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_utime64
class _utime64(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_vscprintf
class _vscprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_vscwprintf
class _vscwprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_vsnprintf
class _vsnprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_vsnwprintf
class _vsnwprintf(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_waccess
class _waccess(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wasctime
class _wasctime(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wchdir
class _wchdir(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wchmod
class _wchmod(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcreat
class _wcreat(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsdup
class _wcsdup(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcserror
class _wcserror(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsicmp
class _wcsicmp(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsicoll
class _wcsicoll(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcslwr
class _wcslwr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsncoll
class _wcsncoll(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsnicmp
class _wcsnicmp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsnicoll
class _wcsnicoll(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsnset
class _wcsnset(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsrev
class _wcsrev(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsset
class _wcsset(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcstoi64
class _wcstoi64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcstoui64
class _wcstoui64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wcsupr
class _wcsupr(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wctime
class _wctime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wctime64
class _wctime64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexecl
class _wexecl(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexecle
class _wexecle(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexeclp
class _wexeclp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexeclpe
class _wexeclpe(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexecv
class _wexecv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexecve
class _wexecve(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexecvp
class _wexecvp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wexecvpe
class _wexecvpe(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfdopen
class _wfdopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfindfirst
class _wfindfirst(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfindfirst64
class _wfindfirst64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfindfirsti64
class _wfindfirsti64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfindnext
class _wfindnext(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfindnext64
class _wfindnext64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfindnexti64
class _wfindnexti64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfopen
class _wfopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfreopen
class _wfreopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfsopen
class _wfsopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wfullpath
class _wfullpath(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wgetcwd
class _wgetcwd(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wgetdcwd
class _wgetdcwd(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wgetenv
class _wgetenv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wmakepath
class _wmakepath(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wmkdir
class _wmkdir(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wmktemp
class _wmktemp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wopen
class _wopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wperror
class _wperror(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wpopen
class _wpopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wputenv
class _wputenv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wremove
class _wremove(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wrename
class _wrename(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_write
class _write(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wrmdir
class _wrmdir(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wsearchenv
class _wsearchenv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wsetlocale
class _wsetlocale(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wsopen
class _wsopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnl
class _wspawnl(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnle
class _wspawnle(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnlp
class _wspawnlp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnlpe
class _wspawnlpe(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnv
class _wspawnv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnve
class _wspawnve(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnvp
class _wspawnvp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wspawnvpe
class _wspawnvpe(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wsplitpath
class _wsplitpath(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wstat
class _wstat(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wstat64
class _wstat64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wstati64
class _wstati64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None, None, None, None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wstrdate
class _wstrdate(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wstrtime
class _wstrtime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wsystem
class _wsystem(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wtempnam
class _wtempnam(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wtmpnam
class _wtmpnam(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wtof
class _wtof(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wtoi
class _wtoi(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wtoi64
class _wtoi64(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wtol
class _wtol(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wunlink
class _wunlink(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wutime
class _wutime(emufunc.EmuFunc):
    """Generated emulation stub for the CRT `_wutime` import (two argument slots)."""
    # NOTE(review): calling convention is 'unknown' — the generator could not
    # recover it for this import, unlike the 'cdecl' stubs around it. Stack
    # cleanup behavior during emulation should be verified for this one.
    __callconv__ = 'unknown'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_wutime64
class _wutime64(emufunc.EmuFunc):
    """Generated emulation stub for the CRT `_wutime64` import (two argument slots)."""
    # NOTE(review): 'unknown' calling convention — recovery failed for this
    # import; most stubs in this file are 'cdecl'. Verify stack cleanup during
    # emulation if this stub is ever hit.
    __callconv__ = 'unknown'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_y0
class _y0(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_y1
class _y1(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:_yn
class _yn(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:acos
class acos(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:asctime
class asctime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:asin
class asin(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:atan
class atan(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:atan2
class atan2(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:atexit
class atexit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:atof
class atof(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:atoi
class atoi(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:atol
class atol(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:bsearch
class bsearch(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:calloc
class calloc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ceil
class ceil(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:clearerr
class clearerr(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:clock
class clock(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:cos
class cos(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:cosh
class cosh(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ctime
class ctime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:difftime
class difftime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:div
class div(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:exp
class exp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fabs
class fabs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fclose
class fclose(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:feof
class feof(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ferror
class ferror(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fflush
class fflush(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fgetc
class fgetc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fgetpos
class fgetpos(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fgets
class fgets(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fgetwc
class fgetwc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fgetws
class fgetws(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:floor
class floor(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fmod
class fmod(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fopen
class fopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fprintf
class fprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fputc
class fputc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fputs
class fputs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fputwc
class fputwc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fputws
class fputws(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fread
class fread(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:free
class free(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:freopen
class freopen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:frexp
class frexp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fscanf
class fscanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fseek
class fseek(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fsetpos
class fsetpos(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ftell
class ftell(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fwprintf
class fwprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fwrite
class fwrite(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:fwscanf
class fwscanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:getc
class getc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:getchar
class getchar(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:getenv
class getenv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:gets
class gets(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:getwc
class getwc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:getwchar
class getwchar(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:gmtime
class gmtime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:is_wctype
class is_wctype(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isalnum
class isalnum(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isalpha
class isalpha(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iscntrl
class iscntrl(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isdigit
class isdigit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isgraph
class isgraph(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isleadbyte
class isleadbyte(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:islower
class islower(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isprint
class isprint(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ispunct
class ispunct(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isspace
class isspace(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isupper
class isupper(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswalnum
class iswalnum(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswalpha
class iswalpha(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswascii
class iswascii(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:iswcntrl
class iswcntrl(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswctype
class iswctype(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswdigit
class iswdigit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswgraph
class iswgraph(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswlower
class iswlower(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswprint
class iswprint(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswpunct
class iswpunct(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswspace
class iswspace(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswupper
class iswupper(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:iswxdigit
class iswxdigit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:isxdigit
class isxdigit(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ldexp
class ldexp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ldiv
class ldiv(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:localeconv
class localeconv(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:localtime
class localtime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:log
class log(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:log10
class log10(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:main_entry
class main_entry(emufunc.EmuFunc):
    """Generated stub for `main_entry` with three recovered argument slots."""
    # NOTE(review): unlike the CRT-import stubs surrounding it, this name looks
    # like the analyzed binary's own entry point; its calling convention was
    # not recovered ('unknown') — confirm against the target binary.
    __callconv__ = 'unknown'
    __argn__ = [None, None, None]
    __argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:malloc
class malloc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:mblen
class mblen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:mbstowcs
class mbstowcs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:mbtowc
class mbtowc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:memchr
class memchr(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:memcmp
class memcmp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:memcpy
class memcpy(emufunc.EmuFunc):
    """Emulation stub for the `memcpy` import: cdecl, three untyped argument slots."""
    __callconv__ = 'cdecl'
    __argn__ = [None] * 3
    __argt__ = [Unknown] * 3
#EMUFUNCDONE
#EMUFUNC:memmove
class memmove(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:memset
class memset(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:mktime
class mktime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:modf
class modf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:perror
class perror(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:pow
class pow(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:printf
class printf(emufunc.EmuFunc):
    """Emulation stub for the `printf` import: cdecl, two recovered argument slots."""
    __callconv__ = 'cdecl'
    __argn__ = [None] * 2
    __argt__ = [Unknown] * 2
#EMUFUNCDONE
#EMUFUNC:putc
class putc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:putchar
class putchar(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:puts
class puts(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:putwc
class putwc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:putwchar
class putwchar(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:qsort
class qsort(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:raise
class _raise(emufunc.EmuFunc):
    """Generated emulation stub for the C `raise` import (one argument slot)."""
    # The Python class is named `_raise` because `raise` is a reserved word;
    # the #EMUFUNC:raise marker above carries the real import name.
    # NOTE(review): verify that the stub-lookup machinery maps the import
    # name 'raise' to this class despite the leading underscore.
    __callconv__ = 'cdecl'
    __argn__ = [None]
    __argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:rand
class rand(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:realloc
class realloc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:remove
class remove(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:rename
class rename(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:rewind
class rewind(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:scanf
class scanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:setbuf
class setbuf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:setlocale
class setlocale(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:setvbuf
class setvbuf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:signal
class signal(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:sin
class sin(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:sinh
class sinh(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:sprintf
class sprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:sqrt
class sqrt(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:srand
class srand(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:sscanf
class sscanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strcat
class strcat(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strchr
class strchr(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strcmp
class strcmp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strcoll
class strcoll(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strcpy
class strcpy(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strcspn
class strcspn(emufunc.EmuFunc):
    """Generated emulation stub for the `strcspn` import (two argument slots)."""
    # NOTE(review): tagged 'thiscall' although the CRT documents strcspn as
    # cdecl — presumably an artifact of whatever analysis produced this file.
    # Confirm before relying on callee stack cleanup here.
    __callconv__ = 'thiscall'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strerror
class strerror(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strftime
class strftime(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strlen
class strlen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strncat
class strncat(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strncmp
class strncmp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strncpy
class strncpy(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strpbrk
class strpbrk(emufunc.EmuFunc):
    """Generated emulation stub for the `strpbrk` import (two argument slots)."""
    # NOTE(review): 'thiscall' conflicts with the documented cdecl convention
    # of the CRT strpbrk; likely an analysis artifact — verify against the
    # target binary before trusting stack-cleanup emulation.
    __callconv__ = 'thiscall'
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strrchr
class strrchr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    # C strrchr(const char *s, int c) takes two arguments; the generated
    # stub declared only one.
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strspn
class strspn(emufunc.EmuFunc):
__callconv__ = 'thiscall'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strstr
class strstr(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strtod
class strtod(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strtok
class strtok(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strtol
class strtol(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strtoul
class strtoul(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:strxfrm
class strxfrm(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:swprintf
class swprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:swscanf
class swscanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:system
class system(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:tan
class tan(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:tanh
class tanh(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    # NOTE(review): C tanh(double) takes a single argument; the two entries
    # here presumably model the double occupying two 4-byte stack slots --
    # confirm against the generator's slot-counting convention before changing.
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:time
class time(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:tmpfile
class tmpfile(emufunc.EmuFunc):
__callconv__ = 'stdcall'
__argn__ = []
__argt__ = []
#EMUFUNCDONE
#EMUFUNC:tmpnam
class tmpnam(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:tolower
class tolower(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:toupper
class toupper(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:towlower
class towlower(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:towupper
class towupper(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ungetc
class ungetc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:ungetwc
class ungetwc(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:vfprintf
class vfprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:vfwprintf
class vfwprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:vprintf
class vprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:vsprintf
class vsprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:vswprintf
class vswprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:vwprintf
class vwprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcscat
class wcscat(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcschr
class wcschr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    # C wcschr(const wchar_t *s, wchar_t c) takes two arguments; the
    # generated stub declared only one.
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcscmp
class wcscmp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcscoll
class wcscoll(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcscpy
class wcscpy(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcscspn
class wcscspn(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsftime
class wcsftime(emufunc.EmuFunc):
__callconv__ = 'unknown'
__argn__ = [None, None, None, None]
__argt__ = [Unknown, Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcslen
class wcslen(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None]
__argt__ = [Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsncat
class wcsncat(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsncmp
class wcsncmp(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsncpy
class wcsncpy(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcspbrk
class wcspbrk(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsrchr
class wcsrchr(emufunc.EmuFunc):
    __callconv__ = 'cdecl'
    # C wcsrchr(const wchar_t *s, wchar_t c) takes two arguments; the
    # generated stub declared only one.
    __argn__ = [None, None]
    __argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsspn
class wcsspn(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsstr
class wcsstr(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcstod
class wcstod(emufunc.EmuFunc):
__callconv__ = 'unknown'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcstok
class wcstok(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcstol
class wcstol(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcstombs
class wcstombs(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcstoul
class wcstoul(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wcsxfrm
class wcsxfrm(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None, None]
__argt__ = [Unknown, Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wctomb
class wctomb(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wprintf
class wprintf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
#EMUFUNC:wscanf
class wscanf(emufunc.EmuFunc):
__callconv__ = 'cdecl'
__argn__ = [None, None]
__argt__ = [Unknown, Unknown, ]
#EMUFUNCDONE
|
import logging
from JoycontrolPlugin import JoycontrolPlugin
logger = logging.getLogger(__name__)
class BuyRemakeKit(JoycontrolPlugin):
    """Plugin that repeatedly confirms the purchase dialog to buy remake kits."""

    async def run(self):
        # Log message: "Buy remake kits like crazy!"
        logger.info('リメイクキットを買いまくる!')
        # Press A twice, pausing after each press so the dialog can advance.
        for pause in (0.7, 2.0):
            await self.button_push('a')
            await self.wait(pause)
|
import datetime
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
# XML namespaces used by the xCal output: the default xCal namespace and the
# Pentabarf extension namespace (serialized with the 'pentabarf' prefix).
XCAL_URN = 'urn:ietf:params:xml:ns:xcal'
PENTABARF_URN = 'http://pentabarf.org'
# Clark-notation prefix ('{uri}tag') for Pentabarf-namespaced elements.
PENTABARF_NS = '{' + PENTABARF_URN + '}'
NSMAP = {
    None: XCAL_URN,
    'pentabarf': PENTABARF_URN
}
# iCalendar basic-format timestamp, e.g. 20200101T093000.
DATE_FORMAT = '%Y%m%dT%H%M%S'
class XCalExporter:
    """Export a conference schedule as an xCal (iCalendar-as-XML) document."""

    extension = 'xcal'

    def write(self, fileobj, conf):
        """Write the xCal document for *conf* to a writable text file object.

        Args:
            fileobj: writable text file object.
            conf: conference object providing title, slug, get_domain()
                and a sortable iterable of events.
        """
        # lxml's Element accepts an nsmap keyword; the stdlib ElementTree
        # fallback does not -- it would silently store the dict as an XML
        # attribute and then crash during serialization.  Only pass nsmap
        # when lxml is actually in use; stdlib ElementTree auto-generates
        # namespace prefixes for the Clark-notation tags instead.
        if 'lxml' in getattr(etree, '__name__', ''):
            root = etree.Element('iCalendar', nsmap=NSMAP)
        else:
            root = etree.Element('iCalendar')
        vc = etree.SubElement(root, 'vcalendar')
        etree.SubElement(vc, 'version').text = '2.0'
        etree.SubElement(vc, 'prodid').text = '-//Pentabarf//Schedule//EN'
        etree.SubElement(vc, 'x-wr-caldesc').text = conf.title
        etree.SubElement(vc, 'x-wr-calname').text = conf.title
        domain = conf.get_domain()
        for event in sorted(conf.events):
            # Only export active events that have a room assigned.
            if not event.active or not event.room:
                continue
            xevent = etree.SubElement(vc, 'vevent')
            etree.SubElement(xevent, 'method').text = 'PUBLISH'
            etree.SubElement(xevent, 'uid').text = '{}@{}@{}'.format(
                conf.slug, event.guid, domain)
            etree.SubElement(xevent, PENTABARF_NS + 'event-id').text = str(event.guid)
            etree.SubElement(xevent, PENTABARF_NS + 'event-slug').text = event.slug
            etree.SubElement(xevent, PENTABARF_NS + 'title').text = event.title
            etree.SubElement(xevent, PENTABARF_NS + 'subtitle').text = event.subtitle
            etree.SubElement(xevent, PENTABARF_NS + 'language').text = event.language
            etree.SubElement(xevent, PENTABARF_NS + 'language-code').text = event.language
            duration = datetime.timedelta(minutes=event.duration)
            etree.SubElement(xevent, 'dtstart').text = event.start.strftime(DATE_FORMAT)
            etree.SubElement(xevent, 'dtend').text = (event.start + duration).strftime(DATE_FORMAT)
            # Duration is emitted in hours (event.duration is in minutes).
            etree.SubElement(xevent, 'duration').text = str(event.duration / 60.0)
            etree.SubElement(xevent, 'summary').text = event.title
            etree.SubElement(xevent, 'description').text = event.abstract or event.description or ''
            etree.SubElement(xevent, 'class').text = 'PUBLIC'
            etree.SubElement(xevent, 'status').text = 'CONFIRMED'
            etree.SubElement(xevent, 'category').text = 'Talk'
            etree.SubElement(xevent, 'url').text = event.url or ''
            etree.SubElement(xevent, 'location').text = event.room.name
            for sp in event.speakers:
                etree.SubElement(xevent, 'attendee').text = sp.name
        try:
            result = etree.tostring(root, encoding='unicode', pretty_print=True)
        except TypeError:
            # built-in ElementTree doesn't do pretty_print
            result = etree.tostring(root, encoding='unicode')
        fileobj.write("<?xml version='1.0' encoding='utf-8'?>\n")
        fileobj.write(result)
|
"""Brazil Data Cube Wtss Business"""
import json
import struct
from math import ceil
import gdal
import numpy as np
from werkzeug.exceptions import BadRequest, NotFound
from bdc_wtss.cache import CacheService, RedisStrategy
from bdc_wtss.coverages import manager
from bdc_wtss.services import GDALService, STACService
CACHE = CacheService(RedisStrategy())
class TimeSeriesParams:
    """Object wrapper for the WTSS Time Series request parameters."""

    def __init__(self, **properties):
        """Build a time series parameter object.

        Args:
            **properties (dict) - Request parameters
        """
        self.coverage = properties.get('coverage')
        self.attributes = properties.get('attributes').split(',')
        self.longitude = float(properties.get('longitude'))
        self.latitude = float(properties.get('latitude'))
        self.start_date = properties.get('start_date')
        self.end_date = properties.get('end_date')
        self._fragments = manager.get_coverage_fragments(self.coverage)

    def to_dict(self):
        """Export the public parameters (no leading underscore) as a dict."""
        return {key: value
                for key, value in vars(self).items()
                if not key.startswith('_')}

    @property
    def coverage_type(self):
        """Coverage type parsed from the coverage identifier ('' when absent)."""
        if len(self._fragments) == 1:
            return ''
        _unused, ctype = self._fragments
        return ctype

    @property
    def coverage_name(self):
        """Coverage name parsed from the coverage identifier."""
        if len(self._fragments) == 1:
            return self._fragments[0]
        name, _unused = self._fragments
        return name
class WTSSBusiness:
    """WTSS Business utility"""

    @classmethod
    def list_coverage(cls):
        """Retrieve the list of offered coverages (cached in Redis)."""
        cache_key = 'wtss:collections'
        collections = CACHE.get(cache_key)
        coverages = []
        # An empty cached list (b'[]') is treated as a miss so a transient
        # empty STAC answer is not cached forever.
        if collections is not None and collections != b'[]':
            coverages = json.loads(collections)
        else:
            coverages = STACService.collections()
            CACHE.add(cache_key, json.dumps(coverages))
        return coverages

    @classmethod
    def check_coverage(cls, coverage):
        """Raise NotFound when *coverage* is not an offered coverage."""
        if coverage not in WTSSBusiness.list_coverage():
            raise NotFound('Coverage "{}" not found'.format(coverage))

    @classmethod
    def get_collection(cls, name):
        """
        Get collection from STAC Provider and
        store the information in cache.

        Raises:
            BadRequest: when the STAC provider rejects the collection name.
        """
        cache_key = 'wtss:collections:{}'.format(name)
        collection = CACHE.get(cache_key)
        if not collection:
            try:
                collection = STACService.collection(name)
            except KeyError as e:
                raise BadRequest(str(e))
            CACHE.add(cache_key, json.dumps(collection))
        else:
            collection = json.loads(collection)
        return collection

    @classmethod
    def describe(cls, coverage):
        """Retrieve the WTSS coverage description document.

        Raises:
            NotFound: when the coverage is unknown or its STAC metadata
                cannot be parsed.
        """
        cls.check_coverage(coverage)
        try:
            collection = cls.get_collection(coverage)
            timeline = collection.get('properties', {}) \
                .get('bdc:timeline', [])
            spatial = collection.get('extent', {}) \
                .get('spatial', {}) \
                .get('bbox', [])[0]
            stac_crs = collection.get('properties', {}) \
                .get('bdc:crs', [])
            assert len(spatial) == 4
            # NOTE(review): xmin/ymin read spatial[1]/spatial[0] --
            # presumably the STAC bbox is lat/long ordered; confirm upstream.
            spatial_extent = dict(
                xmin=float(spatial[1]),
                ymin=float(spatial[0]),
                xmax=float(spatial[2]),
                ymax=float(spatial[3])
            )
            stac_attributes = collection.get('properties', {}) \
                .get('bdc:bands', {})
            attributes = []
            resolutions = []
            for attribute_title in cls.get_bands(collection):
                attribute = stac_attributes.get(attribute_title)
                res_x = attribute.get('resolution_x')
                res_y = attribute.get('resolution_y')
                resolutions.append((res_x, res_y))
                attributes.append({
                    "name": attribute_title,
                    "description": attribute.get('description')
                        or 'Description ' + attribute_title,
                    "datatype": attribute.get('data_type', ''),
                    "valid_range": {
                        "min": attribute.get('min', 0),
                        "max": attribute.get('max', 0)
                    },
                    "scale_factor": float(attribute.get('scale', 0)),
                    "missing_value": attribute.get('fill', 0)
                })
            # Report the finest resolution among all bands.
            res_x, res_y = min(resolutions)
            return {
                "name": collection.get('id'),
                "description": collection.get('description')
                    or 'Description ' + collection.get('id'),
                "detail": "some url",
                # NOTE(review): hard-coded grid dimensions -- presumably the
                # BDC grid size; confirm before relying on them.
                "dimensions": {
                    "x": {
                        "name": "col_id",
                        "min_idx": 1,
                        "max_idx": 4928
                    },
                    "y": {
                        "name": "row_id",
                        "min_idx": 1,
                        "max_idx": 3663
                    },
                    "t": {
                        "name": "time_id",
                        "min_idx": 1,
                        "max_idx": len(timeline)
                    }
                },
                "spatial_extent": spatial_extent,
                "spatial_resolution": {
                    "x": res_x,
                    "y": res_y
                },
                "crs": {
                    "proj4": stac_crs,
                    "wkt": ""
                },
                "timeline": timeline,
                "attributes": attributes
            }
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any metadata problem maps to 404.
            raise NotFound()

    @classmethod
    def get_bands(cls, collection):
        """Get bands from STAC collection"""
        if not collection.get('properties'):
            raise TypeError('Invalid collection')
        return list(
            collection.get('properties', {}).get('bdc:bands', {}).keys()
        )

    @staticmethod
    def get_features(ts_params):
        """Retrieves STAC features using WTSS time series params"""
        # Degenerate bbox: a single point (longitude, latitude).
        bbox = '{0},{1},{0},{1}'.format(ts_params.longitude, ts_params.latitude)
        try:
            return STACService.features(
                ts_params.coverage_name,
                bbox=bbox,
                collection_type=ts_params.coverage_type,
                start_date=ts_params.start_date,
                end_date=ts_params.end_date,
                limit=500000
            )
        except RuntimeError:
            raise BadRequest('No features found')

    @classmethod
    def time_series(cls, ts_params: 'TimeSeriesParams'):
        """
        Retrieves time series object

        Args:
            ts_params (TimeSeriesParams): WTSS Request parameters
        Returns:
            dict Time series object.
                See `json-schemas/time_series_response.json`
        """
        # Validate coverage existence
        cls.check_coverage(ts_params.coverage)
        # Retrieves STAC collection info
        collection = cls.get_collection(ts_params.coverage)
        # Retrieves collection bands provided
        bands = cls.get_bands(collection)
        # Retrieves the features that matches the WTSS Time Series arguments
        features = cls.get_features(ts_params)
        x, y = (0, 0)
        ts = []
        timeline = set()
        for attribute in ts_params.attributes:
            if attribute not in bands:
                raise BadRequest(
                    'Band "{}" does not exists'.format(attribute)
                )
            ts_attr = {
                "attribute": attribute,
                "values": []
            }
            for feature in features.get('features'):
                dataset = GDALService.open_remote(
                    feature.get('assets').get(attribute).get('href')
                )
                timeline.add(
                    feature.get('properties').get('datetime')
                )
                # Convert the requested lat/long to raster row/col once.
                # NOTE(review): 0 is also a legitimate pixel coordinate, so
                # this guard would recompute in that edge case -- confirm.
                if x == 0 or y == 0:
                    x, y = GDALService.transform_latlong_to_rowcol(
                        dataset, ts_params.latitude, ts_params.longitude
                    )
                # Find which block number belongs to X,Y
                band = dataset.GetRasterBand(1)
                block_x, block_y = band.GetBlockSize()
                x_size, y_size = band.XSize, band.YSize
                long_block = x // block_x
                lat_block = y // block_y
                long_lat_block = (
                    long_block if long_block > -1 else
                    long_block - ceil(x_size / block_x),
                    # Fixed: previously 'lat_block - - ceil(...)' (double
                    # negation), which ADDED the block count instead of
                    # subtracting it as the x branch above does.
                    lat_block if lat_block > -1 else
                    lat_block - ceil(y_size / block_y)
                )
                # Find block array in Redis
                BLOCK_KEY = 'wtss:{}:{}:block_{}_{}'.format(
                    feature['id'],
                    attribute,
                    long_lat_block[0],
                    long_lat_block[1]
                )
                encoded = CACHE.get(BLOCK_KEY)
                if encoded:
                    # Cached payload: 8-byte big-endian (height, width)
                    # header followed by the raw block bytes.
                    h, w = struct.unpack('>II', encoded[:8])
                    # NOTE(review): GDAL type names (e.g. 'Float32') are
                    # assumed to be valid numpy dtype strings -- confirm.
                    blk = np.frombuffer(
                        encoded,
                        dtype=gdal.GetDataTypeName(band.DataType),
                        offset=8
                    ).reshape(h, w)
                else:
                    # pylint: disable=unbalanced-tuple-unpacking
                    blocks = GDALService.get_raster_blocks(dataset,
                                                          [long_lat_block])
                    if not blocks:
                        continue  # skip
                    blk = blocks[0]
                    h, w = blk.shape
                    shape = struct.pack('>II', h, w)
                    encoded = shape + blk.tobytes()
                    CACHE.add(BLOCK_KEY, encoded)
                # Offset of the requested pixel inside the retrieved block.
                offset_x = (
                    x_size - (long_lat_block[0] * block_x)
                ) - (x_size - x)
                offset_y = (
                    y_size - (long_lat_block[1] * block_y)
                ) - (y_size - y)
                ts_attr['values'].append(
                    float(blk[offset_y, offset_x])
                )
            # NOTE(review): values are reversed to match the sorted
            # timeline -- presumably features arrive newest-first; confirm.
            ts_attr['values'].reverse()
            ts.append(ts_attr)
        return {
            "query": ts_params.to_dict(),
            "result": {
                "attributes": ts,
                "timeline": sorted(timeline),
                "coordinates": {
                    "latitude": ts_params.latitude,
                    "longitude": ts_params.longitude,
                    "col": x,
                    "row": y
                }
            }
        }
|
#! /usr/bin/env python
import sys
import os
import argparse
from array import *
import numpy as np
import ROOT
import yaml
from pyjetty.alice_analysis.analysis.user.substructure import run_analysis
# Prevent ROOT from stealing focus when plotting
ROOT.gROOT.SetBatch(True)
################################################################
class RunAnalysisJamesBase(run_analysis.RunAnalysis):
#---------------------------------------------------------------
# Constructor
#---------------------------------------------------------------
  def __init__(self, config_file='', **kwargs):
    """Initialize the base RunAnalysis, then load user-specific settings.

    Args:
      config_file (str): path to the analysis YAML configuration file.
    """
    super(RunAnalysisJamesBase, self).__init__(config_file, **kwargs)
    # Initialize yaml config
    self.initialize_user_config()
#---------------------------------------------------------------
# Initialize config file into class members
#---------------------------------------------------------------
def initialize_user_config(self):
# Read config file
with open(self.config_file, 'r') as stream:
config = yaml.safe_load(stream)
self.figure_approval_status = config['figure_approval_status']
self.plot_overlay_list = self.obs_config_dict['common_settings']['plot_overlay_list']
self.jet_matching_distance = config['jet_matching_distance']
if 'constituent_subtractor' in config:
self.max_distance = config['constituent_subtractor']['max_distance']
# Theory comparisons
if 'fPythia' in config:
self.fPythia_name = config['fPythia']
if 'fNLL' in config:
self.fNLL = config['fNLL']
else:
self.fNLL = False
if 'fNPcorrection_numerator' in config and 'fNPcorrection_denominator' in config:
self.NPcorrection = True
self.fNPcorrection_numerator = config['fNPcorrection_numerator']
self.fNPcorrection_denominator = config['fNPcorrection_denominator']
else:
self.NPcorrection = False
#----------------------------------------------------------------------
def plot_observable(self, jetR, obs_label, obs_setting, grooming_setting, min_pt_truth, max_pt_truth, plot_pythia=False):
name = 'cResult_R{}_{}_{}-{}'.format(jetR, obs_label, min_pt_truth, max_pt_truth)
c = ROOT.TCanvas(name, name, 600, 450)
c.Draw()
c.cd()
myPad = ROOT.TPad('myPad', 'The pad',0,0,1,1)
myPad.SetLeftMargin(0.2)
myPad.SetTopMargin(0.07)
myPad.SetRightMargin(0.04)
myPad.SetBottomMargin(0.13)
myPad.Draw()
myPad.cd()
xtitle = getattr(self, 'xtitle')
ytitle = getattr(self, 'ytitle')
color = 600-6
# Get histograms
name = 'hmain_{}_R{}_{}_{}-{}'.format(self.observable, jetR, obs_label, min_pt_truth, max_pt_truth)
h = getattr(self, name)
h.SetName(name)
h.SetMarkerSize(1.5)
h.SetMarkerStyle(20)
h.SetMarkerColor(color)
h.SetLineStyle(1)
h.SetLineWidth(2)
h.SetLineColor(color)
name = 'hResult_{}_systotal_R{}_{}_{}-{}'.format(self.observable, jetR, obs_label, min_pt_truth, max_pt_truth)
h_sys = getattr(self, name)
h_sys.SetName(name)
h_sys.SetLineColor(0)
h_sys.SetFillColor(color)
h_sys.SetFillColorAlpha(color, 0.3)
h_sys.SetFillStyle(1001)
h_sys.SetLineWidth(0)
n_obs_bins_truth = self.n_bins_truth(obs_label)
truth_bin_array = self.truth_bin_array(obs_label)
myBlankHisto = ROOT.TH1F('myBlankHisto','Blank Histogram', n_obs_bins_truth, truth_bin_array)
myBlankHisto.SetNdivisions(505)
myBlankHisto.SetXTitle(xtitle)
myBlankHisto.GetYaxis().SetTitleOffset(1.5)
myBlankHisto.SetYTitle(ytitle)
myBlankHisto.SetMaximum(3*h.GetMaximum())
if 'subjet_z' in self.observable or self.observable == 'jet_axis':
myBlankHisto.SetMaximum(1.7*h.GetMaximum())
myBlankHisto.SetMinimum(0.)
myBlankHisto.Draw("E")
if plot_pythia:
hPythia, fraction_tagged_pythia = self.pythia_prediction(jetR, obs_setting, grooming_setting, obs_label, min_pt_truth, max_pt_truth)
if hPythia:
hPythia.SetFillStyle(0)
hPythia.SetMarkerSize(1.5)
hPythia.SetMarkerStyle(21)
hPythia.SetMarkerColor(1)
hPythia.SetLineColor(1)
hPythia.SetLineWidth(1)
hPythia.Draw('E2 same')
else:
print('No PYTHIA prediction for {} {}'.format(self.observable, obs_label))
plot_pythia = False
h_sys.DrawCopy('E2 same')
h.DrawCopy('PE X0 same')
text_latex = ROOT.TLatex()
text_latex.SetNDC()
text = 'ALICE {}'.format(self.figure_approval_status)
text_latex.DrawLatex(0.57, 0.87, text)
text = 'pp #sqrt{#it{s}} = 5.02 TeV'
text_latex.SetTextSize(0.045)
text_latex.DrawLatex(0.57, 0.8, text)
text = str(min_pt_truth) + ' < #it{p}_{T, ch jet} < ' + str(max_pt_truth) + ' GeV/#it{c}'
text_latex.DrawLatex(0.57, 0.73, text)
text = '#it{R} = ' + str(jetR) + ' | #eta_{jet}| < 0.5'
text_latex.DrawLatex(0.57, 0.66, text)
subobs_label = self.utils.formatted_subobs_label(self.observable)
delta = 0.
if subobs_label:
text = '{} = {}'.format(subobs_label, obs_setting)
text_latex.DrawLatex(0.57, 0.59, text)
delta = 0.07
if grooming_setting:
text = self.utils.formatted_grooming_label(grooming_setting)
text_latex.DrawLatex(0.57, 0.59-delta, text)
if 'sd' in grooming_setting:
fraction_tagged = getattr(self, 'tagging_fraction_R{}_{}_{}-{}'.format(jetR, obs_label, min_pt_truth, max_pt_truth))
text_latex.SetTextSize(0.04)
text = '#it{f}_{tagged}^{data} = %3.3f' % fraction_tagged
text_latex.DrawLatex(0.57, 0.52-delta, text)
if plot_pythia:
text_latex.SetTextSize(0.04)
text = ('#it{f}_{tagged}^{data} = %3.3f' % fraction_tagged) + (', #it{f}_{tagged}^{pythia} = %3.3f' % fraction_tagged_pythia)
text_latex.DrawLatex(0.57, 0.52-delta, text)
myLegend = ROOT.TLegend(0.25,0.7,0.45,0.85)
self.utils.setup_legend(myLegend,0.035)
myLegend.AddEntry(h, 'ALICE pp', 'pe')
myLegend.AddEntry(h_sys, 'Sys. uncertainty', 'f')
if plot_pythia:
myLegend.AddEntry(hPythia, 'PYTHIA8 Monash2013', 'pe')
myLegend.Draw()
name = 'hUnfolded_R{}_{}_{}-{}{}'.format(self.utils.remove_periods(jetR), obs_label, int(min_pt_truth), int(max_pt_truth), self.file_format)
if plot_pythia:
name = 'hUnfolded_R{}_{}_{}-{}_Pythia{}'.format(self.utils.remove_periods(jetR), obs_label, int(min_pt_truth), int(max_pt_truth), self.file_format)
output_dir = getattr(self, 'output_dir_final_results')
output_dir_single = output_dir + '/single_results'
if not os.path.exists(output_dir_single):
os.mkdir(output_dir_single)
outputFilename = os.path.join(output_dir_single, name)
c.SaveAs(outputFilename)
c.Close()
# Write result to ROOT file
final_result_root_filename = os.path.join(output_dir, 'fFinalResults.root')
fFinalResults = ROOT.TFile(final_result_root_filename, 'UPDATE')
h.Write()
h_sys.Write()
hPythia.Write()
fFinalResults.Close()
#----------------------------------------------------------------------
def pythia_prediction(self, jetR, obs_setting, grooming_setting, obs_label, min_pt_truth, max_pt_truth):
plot_pythia_from_response = True
plot_pythia_from_mateusz = False
if plot_pythia_from_response:
hPythia = self.get_pythia_from_response(jetR, obs_label, min_pt_truth, max_pt_truth)
if grooming_setting and 'sd' in grooming_setting:
# If SD, the untagged jets are in the first bin
n_jets_inclusive = hPythia.Integral(1, hPythia.GetNbinsX())
n_jets_tagged = hPythia.Integral(hPythia.FindBin(self.truth_bin_array(obs_label)[0]), hPythia.GetNbinsX()+1)
else:
n_jets_inclusive = hPythia.Integral(1, hPythia.GetNbinsX())
n_jets_tagged = hPythia.Integral(hPythia.FindBin(self.truth_bin_array(obs_label)[0]), hPythia.GetNbinsX())
elif plot_pythia_from_mateusz:
fPythia_name = '/Users/jamesmulligan/Analysis_theta_g/Pythia_new/pythia.root'
fPythia = ROOT.TFile(fPythia_name, 'READ')
print(fPythia.ls())
hname = 'histogram_h_{}_B{}_{}-{}'.format(self.observable, obs_label, int(min_pt_truth), int(max_pt_truth))
hPythia = fPythia.Get(hname)
n_jets_inclusive = hPythia.Integral(0, hPythia.GetNbinsX())
n_jets_tagged = hPythia.Integral(hPythia2.FindBin(self.truth_bin_array(obs_label)[0]), hPythia2.GetNbinsX())
fraction_tagged_pythia = n_jets_tagged/n_jets_inclusive
hPythia.Scale(1./n_jets_inclusive, 'width')
return [hPythia, fraction_tagged_pythia]
#----------------------------------------------------------------------
def get_pythia_from_response(self, jetR, obs_label, min_pt_truth, max_pt_truth):
output_dir = getattr(self, 'output_dir_main')
file = os.path.join(output_dir, 'response.root')
f = ROOT.TFile(file, 'READ')
thn_name = 'hResponse_JetPt_{}_R{}_{}_rebinned'.format(self.observable, jetR, obs_label)
thn = f.Get(thn_name)
thn.GetAxis(1).SetRangeUser(min_pt_truth, max_pt_truth)
h = thn.Projection(3)
h.SetName('hPythia_{}_R{}_{}_{}-{}'.format(self.observable, jetR, obs_label, min_pt_truth, max_pt_truth))
h.SetDirectory(0)
for i in range(1, h.GetNbinsX() + 1):
h.SetBinError(i, 0)
return h
#----------------------------------------------------------------------
def plot_final_result_overlay(self, i_config, jetR, overlay_list):
print('Plotting overlay of {}'.format(overlay_list))
# Plot overlay of different subconfigs, for fixed pt bin
for bin in range(0, len(self.pt_bins_reported) - 1):
min_pt_truth = self.pt_bins_reported[bin]
max_pt_truth = self.pt_bins_reported[bin+1]
# Plot PYTHIA
self.plot_observable_overlay_subconfigs(i_config, jetR, overlay_list, min_pt_truth, max_pt_truth, plot_pythia=True, plot_ratio = True)
# Plot NLL
if self.fNLL:
self.plot_observable_overlay_subconfigs(i_config, jetR, overlay_list, min_pt_truth, max_pt_truth, plot_nll = True, plot_ratio = False)
#----------------------------------------------------------------------
def plot_observable_overlay_subconfigs(self, i_config, jetR, overlay_list, min_pt_truth, max_pt_truth, plot_pythia=False, plot_nll=False, plot_ratio=False):
name = 'cResult_overlay_R{}_allpt_{}-{}'.format(jetR, min_pt_truth, max_pt_truth)
if plot_ratio:
c = ROOT.TCanvas(name, name, 600, 650)
else:
c = ROOT.TCanvas(name, name, 600, 450)
c.Draw()
c.cd()
if plot_ratio:
pad1 = ROOT.TPad('myPad', 'The pad',0,0.3,1,1)
else:
pad1 = ROOT.TPad('myPad', 'The pad',0,0,1,1)
pad1.SetLeftMargin(0.2)
pad1.SetTopMargin(0.07)
pad1.SetRightMargin(0.04)
pad1.SetBottomMargin(0.13)
if plot_ratio:
pad1.SetBottomMargin(0.)
pad1.SetTicks(1,1)
pad1.Draw()
pad1.cd()
if self.observable == 'leading_subjet_z':
myLegend = ROOT.TLegend(0.45,0.3,0.61,0.54)
elif self.observable == 'inclusive_subjet_z':
myLegend = ROOT.TLegend(0.38,0.32,0.54,0.56)
else:
myLegend = ROOT.TLegend(0.45,0.33,0.61,0.57)
self.utils.setup_legend(myLegend,0.05, sep=-0.2)
if self.observable == 'leading_subjet_z':
myLegend.SetHeader('Leading anti-#it{k}_{T} subjets')
elif self.observable == 'inclusive_subjet_z':
myLegend.SetHeader('Inclusive anti-#it{k}_{T} subjets')
name = 'hmain_{}_R{}_{{}}_{}-{}'.format(self.observable, jetR, min_pt_truth, max_pt_truth)
ymax = 2*self.get_maximum(name, overlay_list)
ymin = 2e-4
ymin_ratio = 0.
ymax_ratio = 1.99
if self.observable == 'theta_g':
ymin_ratio = 0.5
ymax_ratio = 1.69
ymax*=1.1
if self.observable == 'leading_subjet_z':
ymax = 16.99
ymin = 1e-3
ymin_ratio = 0.5
ymax_ratio = 1.79
if self.observable == 'inclusive_subjet_z':
ymax = 2e4
ymin = 2e-1
pad1.SetLogy()
ymin_ratio = 0.5
ymax_ratio = 1.79
# Get xmin and xmax over all hists
xmin = 1
xmax = 0
for i, subconfig_name in enumerate(self.obs_subconfig_list):
if subconfig_name not in overlay_list:
continue
xmin_temp = self.obs_config_dict[subconfig_name]['obs_bins_truth'][0]
xmax_temp = self.obs_config_dict[subconfig_name]['obs_bins_truth'][-1]
if xmin_temp < xmin:
xmin = xmin_temp
if xmax_temp > xmax:
xmax = xmax_temp
# Loop through hists
for i, subconfig_name in enumerate(self.obs_subconfig_list):
if subconfig_name not in overlay_list:
continue
obs_setting = self.obs_settings[i]
grooming_setting = self.grooming_settings[i]
obs_label = self.utils.obs_label(obs_setting, grooming_setting)
if subconfig_name == overlay_list[0]:
marker = 20
marker_pythia = marker+4
color = 600-6
if subconfig_name == overlay_list[1]:
marker = 21
marker_pythia = marker+4
color = 632-4
if i > 1 and subconfig_name == overlay_list[2]:
marker = 33
marker_pythia = 27
color = 416-2
name = 'hmain_{}_R{}_{}_{}-{}'.format(self.observable, jetR, obs_label, min_pt_truth, max_pt_truth)
h = getattr(self, name)
h.SetMarkerSize(1.5)
h.SetMarkerStyle(marker)
h.SetMarkerColor(color)
h.SetLineStyle(1)
h.SetLineWidth(2)
h.SetLineColor(color)
h.GetXaxis().SetRangeUser(self.obs_config_dict[subconfig_name]['obs_bins_truth'][0], self.obs_config_dict[subconfig_name]['obs_bins_truth'][-1])
h_sys = getattr(self, 'hResult_{}_systotal_R{}_{}_{}-{}'.format(self.observable, jetR, obs_label, min_pt_truth, max_pt_truth))
h_sys.SetLineColor(0)
h_sys.SetFillColor(color)
h_sys.SetFillColorAlpha(color, 0.3)
h_sys.SetFillStyle(1001)
h_sys.SetLineWidth(0)
h_sys.GetXaxis().SetRangeUser(self.obs_config_dict[subconfig_name]['obs_bins_truth'][0], self.obs_config_dict[subconfig_name]['obs_bins_truth'][-1])
if subconfig_name == overlay_list[0]:
pad1.cd()
xtitle = getattr(self, 'xtitle')
ytitle = getattr(self, 'ytitle')
myBlankHisto = ROOT.TH1F('myBlankHisto','Blank Histogram', 1, xmin, xmax)
myBlankHisto.SetNdivisions(505)
myBlankHisto.GetXaxis().SetTitleSize(0.085)
myBlankHisto.SetXTitle(xtitle)
myBlankHisto.GetYaxis().SetTitleOffset(1.5)
myBlankHisto.SetYTitle(ytitle)
myBlankHisto.SetMaximum(ymax)
myBlankHisto.SetMinimum(ymin)
if plot_ratio:
myBlankHisto.SetMinimum(ymin) # Don't draw 0 on top panel
myBlankHisto.GetYaxis().SetTitleSize(0.075)
myBlankHisto.GetYaxis().SetTitleOffset(1.2)
myBlankHisto.GetYaxis().SetLabelSize(0.06)
myBlankHisto.Draw('E')
# Plot ratio
if plot_ratio:
c.cd()
pad2 = ROOT.TPad("pad2", "pad2", 0, 0.02, 1, 0.3)
pad2.SetTopMargin(0)
pad2.SetBottomMargin(0.4)
pad2.SetLeftMargin(0.2)
pad2.SetRightMargin(0.04)
pad2.SetTicks(0,1)
pad2.Draw()
pad2.cd()
myBlankHisto2 = myBlankHisto.Clone("myBlankHisto_C")
myBlankHisto2.SetYTitle("#frac{Data}{PYTHIA}")
myBlankHisto2.SetXTitle(xtitle)
myBlankHisto2.GetXaxis().SetTitleSize(30)
myBlankHisto2.GetXaxis().SetTitleFont(43)
myBlankHisto2.GetXaxis().SetTitleOffset(4.)
myBlankHisto2.GetXaxis().SetLabelFont(43)
myBlankHisto2.GetXaxis().SetLabelSize(25)
myBlankHisto2.GetXaxis().SetTickSize(0.07)
myBlankHisto2.GetYaxis().SetTitleSize(25)
myBlankHisto2.GetYaxis().SetTitleFont(43)
myBlankHisto2.GetYaxis().SetTitleOffset(2.2)
myBlankHisto2.GetYaxis().SetLabelFont(43)
myBlankHisto2.GetYaxis().SetLabelSize(25)
myBlankHisto2.GetYaxis().SetNdivisions(505)
myBlankHisto2.GetYaxis().SetTickSize(0.035)
myBlankHisto2.GetYaxis().SetRangeUser(ymin_ratio, ymax_ratio)
myBlankHisto2.Draw()
line = ROOT.TLine(xmin,1,xmax,1)
line.SetLineColor(920+2)
line.SetLineStyle(2)
line.Draw()
if plot_pythia:
hPythia, fraction_tagged_pythia = self.pythia_prediction(jetR, obs_setting, grooming_setting, obs_label, min_pt_truth, max_pt_truth)
plot_errors = False
if plot_errors:
hPythia.SetMarkerSize(0)
hPythia.SetMarkerStyle(0)
hPythia.SetMarkerColor(color)
hPythia.SetFillColor(color)
else:
hPythia.SetLineColor(color)
hPythia.SetLineColorAlpha(color, 0.5)
hPythia.SetLineWidth(4)
hPythia.GetXaxis().SetRangeUser(self.obs_config_dict[subconfig_name]['obs_bins_truth'][0], self.obs_config_dict[subconfig_name]['obs_bins_truth'][-1])
if plot_nll:
# Get parton-level prediction
attr_name = 'tgraph_NLL_{}_{}_{}-{}'.format(self.observable, obs_label, min_pt_truth, max_pt_truth)
if hasattr(self, attr_name):
g = getattr(self, attr_name)
else:
return
# Get correction
apply_nll_correction = False
if apply_nll_correction:
h_correction = getattr(self, 'hNPcorrection_{}_{}_{}-{}'.format(self.observable, obs_label, min_pt_truth, max_pt_truth))
# Apply correction
self.utils.multiply_tgraph(g, h_correction)
g.SetLineColor(color)
g.SetLineColorAlpha(color, 0.5)
g.SetLineWidth(4)
g.SetFillColor(color)
g.SetFillColorAlpha(color, 0.5)
# Scale inclusive subjets to N_jets rather than N_subjets -- fill in by hand for now
z_min = 0.71
if self.observable == 'leading_subjet_z':
integral_total = h.Integral(1, h.GetNbinsX(), 'width')
print(f'integral_total, leading_subjet_z, {obs_label}: {integral_total}')
h.Scale(1./integral_total)
h_sys.Scale(1./integral_total)
integral_total_pythia = hPythia.Integral(1, hPythia.GetNbinsX(), 'width')
print(f'integral_total_pythia, leading_subjet_z, {obs_label}: {integral_total_pythia}')
hPythia.Scale(1./integral_total_pythia)
integral = h.Integral(h.FindBin(z_min), h.GetNbinsX(), 'width')
print(f'integral, leading_subjet_z, {obs_label}: {integral}')
integral_pythia = hPythia.Integral(hPythia.FindBin(z_min), hPythia.GetNbinsX(), 'width')
print(f'integral_pythia, leading_subjet_z, {obs_label}: {integral_pythia}')
if self.observable == 'inclusive_subjet_z':
if np.isclose(float(obs_label), 0.1):
integral_leading_subjet = 0.7865922387180659 # R=0.4, r=0.1
integral_leading_subjet_pythia = 0.7441921938539977
elif np.isclose(float(obs_label), 0.2):
integral_leading_subjet = 0.9365660517984986 # R=0.4, r=0.2
integral_leading_subjet_pythia = 0.9296991675820692
integral = h.Integral(h.FindBin(z_min), h.GetNbinsX(), 'width')
print(f'integral, inclusive_subjet_z, {obs_label}: {integral}')
normalization = integral_leading_subjet / integral
print(f'normalization: {normalization}')
h.Scale(normalization)
h_sys.Scale(normalization)
integral_pythia = hPythia.Integral(hPythia.FindBin(z_min), hPythia.GetNbinsX(), 'width')
print(f'integral_pythia, inclusive_subjet_z, {obs_label}: {integral_pythia}')
normalization_pythia = integral_leading_subjet_pythia / integral_pythia
print(f'normalization_pythia: {normalization_pythia}')
hPythia.Scale(normalization_pythia)
# Compute <N_subjets>
n_subjets = h.Integral(1, h.GetNbinsX(), 'width')
print(f'<N_subjets>, {obs_label}: {n_subjets}')
# Compute z_loss for leading subjets
# Should come up with a better way to decide bin center
z_moment = 0.
if self.observable == 'leading_subjet_z':
for i in range(1, h.GetNbinsX()+1):
zr = h.GetBinCenter(i)
content = h.GetBinContent(i)
width = h.GetXaxis().GetBinWidth(i)
z_moment += zr*content*width
#print(f'bin: {i} (zr = {zr}, width = {width}): content = {content} -- {zr*content*width}')
z_loss = 1 - z_moment
#print(z_moment)
#print(f'z_loss for r={obs_label}: {1-z_moment}')
if plot_ratio:
hRatioSys = h_sys.Clone()
hRatioSys.SetName('{}_Ratio'.format(h_sys.GetName()))
if plot_pythia:
hRatioSys.Divide(hPythia)
hRatioSys.SetLineColor(0)
hRatioSys.SetFillColor(color)
hRatioSys.SetFillColorAlpha(color, 0.3)
hRatioSys.SetFillStyle(1001)
hRatioSys.SetLineWidth(0)
elif plot_nll:
gRatioSys = g.Clone()
gRatioSys.SetName('{}_{}_Ratio'.format(obs_label, g.GetName()))
self.utils.divide_tgraph(hRatioSys, gRatioSys, combine_errors=True)
gRatioSys.SetLineColor(0)
gRatioSys.SetFillColor(color)
gRatioSys.SetFillColorAlpha(color, 0.3)
gRatioSys.SetFillStyle(1001)
gRatioSys.SetLineWidth(0)
hRatioStat = h.Clone()
hRatioStat.SetName('{}_Ratio'.format(h.GetName()))
if plot_pythia:
hRatioStat.Divide(hPythia)
elif plot_nll:
self.utils.divide_tgraph(hRatioStat, g, combine_errors=False)
hRatioStat.SetMarkerSize(1.5)
hRatioStat.SetMarkerStyle(marker)
hRatioStat.SetMarkerColor(color)
hRatioStat.SetLineStyle(1)
hRatioStat.SetLineWidth(2)
hRatioStat.SetLineColor(color)
pad1.cd()
if plot_pythia:
plot_errors = False
if plot_errors:
hPythia.DrawCopy('E3 same')
else:
hPythia.DrawCopy('L hist same')
if plot_nll:
g.Draw('L3 same')
h_sys.DrawCopy('E2 same')
h.DrawCopy('PE X0 same')
if plot_ratio:
pad2.cd()
if plot_pythia:
hRatioSys.DrawCopy('E2 same')
elif plot_nll:
gRatioSys.Draw('L3 same')
hRatioStat.DrawCopy('PE X0 same')
subobs_label = self.utils.formatted_subobs_label(self.observable)
text = ''
if subobs_label:
text += '{} = {}'.format(subobs_label, obs_setting)
if grooming_setting:
text += self.utils.formatted_grooming_label(grooming_setting, verbose=True)
myLegend.AddEntry(h, '{}'.format(text), 'pe')
if self.observable == 'leading_subjet_z':
pad1.cd()
text_latex = ROOT.TLatex()
text_latex.SetNDC()
text_latex.SetTextSize(0.05)
x = 0.3
y = 0.3 - 0.9*float(obs_setting)
text = f'#LT#it{{z}}^{{loss}}_{{{subobs_label} = {obs_setting} }}#GT = {np.round(z_loss,2):.2f}'
text_latex.DrawLatex(x, y, text)
elif self.observable == 'inclusive_subjet_z':
pad1.cd()
text_latex = ROOT.TLatex()
text_latex.SetNDC()
text_latex.SetTextSize(0.045)
x = 0.47
y = 0.33 - 0.8*float(obs_setting)
text = f'#LT#it{{N}}^{{subjets }}_{{{subobs_label} = {obs_setting}}}#GT = {np.round(n_subjets,2):.1f}'
text_latex.DrawLatex(x, y, text)
pad1.cd()
myLegend.AddEntry(h_sys, 'Sys. uncertainty', 'f')
if plot_pythia:
myLegend.AddEntry(hPythia, 'PYTHIA8 Monash 2013', 'l')
if plot_nll:
myLegend.AddEntry(g, 'NLL', 'l')
text_latex = ROOT.TLatex()
text_latex.SetNDC()
text_latex.SetTextSize(0.065)
x = 0.25
y = 0.855
text = 'ALICE {}'.format(self.figure_approval_status)
text_latex.DrawLatex(x, y, text)
text_latex.SetTextSize(0.055)
text = 'pp #sqrt{#it{s}} = 5.02 TeV'
text_latex.DrawLatex(x, y-0.06, text)
text = 'Charged-particle anti-#it{k}_{T} jets'
text_latex.DrawLatex(x, y-0.12, text)
text = '#it{R} = ' + str(jetR) + ' | #it{{#eta}}_{{jet}}| < {}'.format(0.9-jetR)
text_latex.DrawLatex(x, y-0.18, text)
text = str(min_pt_truth) + ' < #it{p}_{T}^{ch jet} < ' + str(max_pt_truth) + ' GeV/#it{c}'
text_latex.DrawLatex(x, y-0.26, text)
myLegend.Draw()
if self.observable == 'theta_g':
rg_axis_tf1 = ROOT.TF1('rg_axis_tf1', 'x', 0, jetR-0.01)
rg_axis = ROOT.TGaxis(xmin, 2*ymax, xmax, 2*ymax, 'rg_axis_tf1', 505, '- S')
rg_axis.SetTitle('#it{R}_{g}')
rg_axis.SetTitleSize(25)
rg_axis.SetTitleFont(43)
rg_axis.SetTitleOffset(0.6)
rg_axis.SetLabelFont(43)
rg_axis.SetLabelSize(25)
rg_axis.SetTickSize(0.015)
rg_axis.SetLabelOffset(0.015)
rg_axis.Draw()
name = 'h_{}_R{}_{}-{}_{}{}'.format(self.observable, self.utils.remove_periods(jetR), int(min_pt_truth), int(max_pt_truth), i_config, self.file_format)
if plot_pythia:
name = 'h_{}_R{}_{}-{}_Pythia_{}{}'.format(self.observable, self.utils.remove_periods(jetR), int(min_pt_truth), int(max_pt_truth), i_config, self.file_format)
if plot_nll:
name = 'h_{}_R{}_{}-{}_NLL_{}{}'.format(self.observable, self.utils.remove_periods(jetR), int(min_pt_truth), int(max_pt_truth), i_config, self.file_format)
output_dir = getattr(self, 'output_dir_final_results') + '/all_results'
if not os.path.exists(output_dir):
os.mkdir(output_dir)
outputFilename = os.path.join(output_dir, name)
c.SaveAs(outputFilename)
c.Close()
#----------------------------------------------------------------------
# Return maximum y-value of unfolded results in a subconfig list
def get_maximum(self, name, overlay_list):
    """Return the maximum y-value among the unfolded result histograms of the
    subconfigs listed in overlay_list.

    :param name: attribute-name template, e.g. 'hmain_{}', filled with each
        subconfig's obs_label to look up the histogram on self.
    :param overlay_list: subconfig names to include in the scan.
    """
    # Renamed the accumulator from `max`, which shadowed the builtin of the
    # same name; we now use the builtin directly.
    best = 0.
    for i, subconfig_name in enumerate(self.obs_subconfig_list):
        if subconfig_name not in overlay_list:
            continue
        obs_setting = self.obs_settings[i]
        grooming_setting = self.grooming_settings[i]
        obs_label = self.utils.obs_label(obs_setting, grooming_setting)
        h = getattr(self, name.format(obs_label))
        best = max(best, h.GetMaximum())
    return best
|
'''
Variation on merge
Logic as below:
Intersection of two sorted lists
if A[i] < b[j], increment i
if B[j] < a[i], increment j
if A[i] == B[j]
while A[i] == B[j], increment j
append A[i] to C and increment i
'''
def merge(A, B):
    """Return the intersection of two sorted lists A and B (despite the name).

    Walks both lists with two cursors; on equality the B cursor is advanced
    one extra step before the match is recorded.
    """
    C = []
    i, j = 0, 0
    while i < len(A) and j < len(B):
        if A[i] < B[j]:          # head of A is smaller
            i += 1
        elif B[j] < A[i]:        # head of B is smaller
            j += 1
        else:                    # heads are equal: record the match
            j += 1
            C.append(A[i])
            i += 1
    return C
# Demo: intersect two overlapping even-number sequences.
A = [n for n in range(0, 20, 2)]
B = [n for n in range(10, 30, 2)]
print(A)
print(B)
print(merge(A, B))
|
from PIL import Image
from numpy import *
from scipy.ndimage import filters
def unsharp_masking(im, sigma=5):
    """Return the Gaussian-blurred image minus the original.

    im: numpy.array image (grayscale). cf. 1.4.1.gauss.py
    Note the sign: this is blurred - original, not the classic
    original + (original - blurred) sharpening.
    """
    blurred = filters.gaussian_filter(im, sigma)
    return blurred - im
def unsharp_masking_color(im, sigma=5):
    """Per-channel variant of unsharp_masking for 3-channel images.

    im: numpy.array image (H, W, 3). cf. 1.4.1.gauss_color.py
    Returns the uint8-cast blurred image minus the original.
    """
    im2 = zeros(im.shape)
    for i in range(3):
        # Bug fix: the blur width was hard-coded to 5, silently ignoring
        # the sigma parameter; it is now honored.
        im2[:,:,i] = filters.gaussian_filter(im[:,:,i], sigma)
    im2 = uint8(im2)
    return im2 - im
from pylab import *
def make_figure(im):
    """Open a new grayscale matplotlib figure showing *im* with axes hidden.

    Relies on pylab's star-imported figure/gray/axis/imshow.
    """
    figure()
    gray()
    axis('off')
    imshow(im)
# Demo: show the grayscale original and its unsharp mask, then the color
# original and its per-channel unsharp mask. Requires 'empire.jpg' in the cwd.
im = array(Image.open('empire.jpg').convert('L'))
make_figure(im)
make_figure(unsharp_masking(im))
im = array(Image.open('empire.jpg'))
make_figure(im)
make_figure(unsharp_masking_color(im))
show()
|
import current_stack
import image_processing
import db_query
import error_log
def check_is_call_valid(screen_area, hand_value, element, stack_collection, db):
    """Decide whether calling is justified by pot odds.

    Compares the current pot odds (bank / call size) against the required pot
    odds looked up in the database for this hand value and street.
    Returns True/False, or None if any step raised (the error is logged).
    """
    try:
        cur_stack = current_stack.search_current_stack(screen_area, stack_collection, db)
        bank_size = current_stack.search_bank_stack(screen_area, db)
        call_size = image_processing.search_last_opponent_action(screen_area, db)
        # Non-string result is presumably a recognized-action dict keyed by
        # 'alias'; a string result means the action was not recognized and the
        # opponent's all-in stack is used instead — TODO confirm against
        # image_processing.search_last_opponent_action.
        if not isinstance(call_size, str):
            call_size = call_size['alias']
        else:
            call_size = current_stack.search_allin_stack(screen_area, db)
        if call_size == '0.5':
            call_size = float(call_size)
        elif call_size == 'check':
            # 0.1 rather than 0 so the division below cannot raise ZeroDivisionError
            call_size = 0.1
        else:
            call_size = int(call_size)
        current_pot_odds = round(bank_size / call_size, 1)
        # If calling would put us (nearly) all-in, evaluate as a river decision.
        if cur_stack <= call_size:
            element = 'river'
        necessary_pot_odds = db_query.get_pot_odds(hand_value, element, db)
        if int(current_pot_odds) >= int(necessary_pot_odds):
            return True
        else:
            return False
    except Exception as e:
        error_log.error_log('check_is_call_valid', str(e))
def check_is_call_after_opponent_river_agression(screen_area, hand_value, stack_collection, action, db):
    """Decide whether to call after opponent aggression on the river.

    Returns True (call), False (fold), or None (situation not covered, or an
    exception occurred — logged).
    """
    try:
        cur_stack = current_stack.search_current_stack(screen_area, stack_collection, db)
        bank_size = current_stack.search_bank_stack(screen_area, db)
        call_size = image_processing.search_last_opponent_action(screen_area, db)
        if not isinstance(call_size, str):
            call_size = call_size['alias']
        else:
            call_size = 5
        if call_size == '0.5':
            call_size = float(call_size)
        elif call_size == 'check':
            call_size = 0
        else:
            call_size = int(call_size)
        # Bug fix: at this point call_size is numeric, but the original code
        # tested membership in a tuple of *strings* ('0.5', '1', ...), which
        # could never match — so the "small bet" branch was unreachable.
        small_bets = (0.5, 1, 2, 3, 4)
        if action == 'river_cbet' and cur_stack > bank_size and call_size not in small_bets \
                and hand_value in ('low_two_pairs', 'two_pairs', 'top_pair', 'low_top_pair'):
            return False
        elif action == 'river_cbet' and cur_stack > bank_size and call_size in small_bets \
                and hand_value in ('low_two_pairs', 'two_pairs', 'top_pair', 'low_top_pair'):
            return True
        else:
            return None
    except Exception as e:
        error_log.error_log('check_is_call_after_opponent_river_agression', str(e))
|
"""mod article
Revision ID: 46c42db38eb6
Revises: 46385a332abd
Create Date: 2015-11-24 16:59:46.068280
"""
# revision identifiers, used by Alembic.
revision = '46c42db38eb6'
down_revision = '46385a332abd'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Apply: replace articles.details (TEXT) with articles.body (UnicodeText)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('articles', sa.Column('body', sa.UnicodeText(), nullable=False))
    op.drop_column('articles', 'details')
    ### end Alembic commands ###
def downgrade():
    """Revert: restore articles.details (TEXT) and drop articles.body.

    Note: column contents are not migrated in either direction.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('articles', sa.Column('details', mysql.TEXT(), nullable=False))
    op.drop_column('articles', 'body')
    ### end Alembic commands ###
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/20 17:27
# @Author : OuYang
# @Site :
# @File : begin.py
# @Software: PyCharm
from scrapy import cmdline
# Launch the 'xiaohua' spider; equivalent to running `scrapy crawl xiaohua`
# from the shell, but runnable/debuggable from the IDE.
cmdline.execute("scrapy crawl xiaohua".split())
|
# 自定义过滤规则
# from rest_framework.filters import BaseFilterBackend
#
# class DrugFilter(BaseFilterBackend):
# def filter_queryset(self, request, queryset, view):
# # 真正的过滤规则
# # params=request.GET.get('teacher')
# # queryset.filter('''''')
# return queryset[:1]
#
import django_filters
from django_filters.filterset import FilterSet
from django_filters import filters
from . import models
class DrugFilterSet(FilterSet):
    """django-filter FilterSet for the Drug model.

    Exposes case-insensitive substring filters (icontains) on the drug's
    kind, OTC status and medical-insurance (yibao) fields.
    """
    drug_kind = django_filters.CharFilter(field_name='drug_kind', lookup_expr='icontains')
    drug_otc = django_filters.CharFilter(field_name='drug_otc', lookup_expr='icontains')
    drug_yibao = django_filters.CharFilter(field_name='drug_yibao', lookup_expr='icontains')
    class Meta:
        model=models.Drug
        fields=['drug_kind','drug_otc','drug_yibao']
from typing import List
import requests
from core.vk_event_processor import Audio
from repositories.vkCacheRepository import VkCacheRepository
from player import Player
import asyncio
from utils.execute_blocking import execute_blocking
import logging
from models.ExecutionContext import ExecutionContext
from messageFormatter import MessageType, createRichMediaPayload
import discord
class VkService:
    """Announces and enqueues VK audio attachments for Discord playback,
    serving audio from a local cache when possible."""
    _player: Player = None
    _vk_cache_repository: VkCacheRepository = None

    def __init__(self, player, vk_cache_repository, config_repo):
        self._player = player
        self._vk_cache_repository = vk_cache_repository
        self._config_repo = config_repo

    async def enqueue_audio(self, audios: List[Audio], ctx: ExecutionContext):
        """For each audio: post a rich-media announcement and enqueue a lazy
        loader that yields an FFmpegPCMAudio source (cache-first)."""
        # Idiom fix: compare to None with `is`, not `==`.
        if ctx.voice_channel() is None:
            return
        for audio in audios:
            # Bug fix: bind the loop variable as a default argument so each
            # callback keeps its own audio — a plain closure would late-bind
            # and every queued item would play the *last* audio in the batch.
            async def item_callback(audio=audio):
                id = audio.id
                cached_path = self._vk_cache_repository.try_get_v2(id)
                logging.info(f'Music cache for id {id} was found: ' + str(bool(cached_path)))
                if cached_path:
                    return discord.FFmpegPCMAudio(cached_path)
                # Cache miss: download off the event loop, signalling a
                # "loading" indicator that is cleared even on failure.
                loaded_event = asyncio.Event()
                ctx.loading_callback(loaded_event)
                try:
                    doc = await execute_blocking(requests.get, audio.url)
                finally:
                    loaded_event.set()
                file_path = self._vk_cache_repository.cache_v2(id, doc.content)
                return discord.FFmpegPCMAudio(file_path)
            author = ctx.author.display_name
            title = f'{audio.artist} - {audio.title}'
            payload = createRichMediaPayload(
                title = title,
                author = author,
                duration = audio.duration,
                user = ctx.author.display_name,
                avatar = str(ctx.author.avatar_url),
                source = ':regional_indicator_v::regional_indicator_k:',
                channel = ctx.voice_channel().name
            )
            await ctx.send_message(payload, MessageType.RichMedia)
            self._player.enqueue(item_callback, title, ctx)
# Напишите функцию-декоратор, оборачивающую результат другой функции в прямоугольник звездочек.
# Пояснение: если декорируемая функция возвращает “Привет”, то декоратор должен изменить вывод на:
# ********
# *Привет*
# ********
# ****
# *23*
# ****
# (кол-во звездочек зависит от длины возвращаемого значения)
def stars_decorator(func_to_decor):
    """Decorator: print the decorated function's return value framed in a
    rectangle of asterisks sized to the value's length, e.g.

        ****
        *23*
        ****
    """
    def wrapper(*args, **kwargs):
        # Bug fix: call the wrapped function exactly once (the original
        # called it three times, repeating any side effects) and pass
        # arguments through so functions with parameters also work.
        result = func_to_decor(*args, **kwargs)
        border = "*" * (len(result) + 2)
        print(border)
        print("*{}*".format(result))
        print(border)
    return wrapper
@stars_decorator
def print_hello():
    # Returns the greeting "Привет"; the decorator prints it in a star box.
    return "Привет"

# Demo: prints a 6-character word framed by 8 asterisks per border row.
print_hello()
#!/usr/bin/env python
import io
import os
import re
import sys
from subprocess import Popen, PIPE
from finter import *
from intervaltree import Interval, IntervalTree
def shellout(cmd):
    """Run *cmd* (an argv list) and return its (stdout, stderr) decoded as UTF-8."""
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    # communicate() already waits for the process; this extra wait is a no-op
    # kept for parity with the original behavior.
    proc.wait()
    return (out.decode("utf-8"), err.decode("utf-8"))
#------------------------------------------------------------------------------
# intervaltree stuff
#------------------------------------------------------------------------------
def intervals_from_text(lines):
    """Convert lines like "[0x0,0x10) type comment" to Interval objects.

    Bounds are parsed as hexadecimal; the interval's .data carries the
    (type, comment) pair. Blank lines are skipped.

    :raises ValueError: on a line that does not match the expected format.
    """
    intervals = []
    for lineno, line in enumerate(lines):
        if not line:
            continue
        m = re.match(r'\[(.*?),(.*?)\) (.*?) (.*)', line)
        if not m:
            # ValueError (an Exception subclass) instead of bare Exception.
            raise ValueError('MALFORMED: %s' % line)
        # Interval .begin .end .type .comment
        begin = int(m.group(1), 16)
        end = int(m.group(2), 16)
        type_ = m.group(3)
        comment = m.group(4)
        # Renamed from `i`, which shadowed the enumerate index above.
        interval = Interval(begin, end, (type_, comment))
        intervals.append(interval)
    return intervals
# minimum idea of "node"
class FinterNode():
    """Minimal tree node over a half-open byte range [begin, end)."""
    def __init__(self, begin, end, type_, comment):
        self.begin = begin
        self.end = end
        self.type_ = type_
        self.comment = comment
        self.children = []
        self.parent = None

    def __str__(self, depth=0):
        """Render this node and its children, indented one space per level."""
        parts = [' ' * depth + 'FinterNode' + '[%d, %d)\n' % (self.begin, self.end)]
        for child in sorted(self.children, key=lambda ch: ch.begin):
            parts.append(child.__str__(depth + 1))
        return ''.join(parts)
def sort_and_create_fragments(node, NodeClass=FinterNode):
    """Recursively sort each node's children by begin offset and insert
    'raw'/'fragment' filler nodes covering any gaps between siblings, so the
    children exactly tile the parent's [begin, end) range.

    Mutates the tree in place; returns None.
    """
    result = []
    if not node.children:
        return
    # fill gaps ahead of each child
    current = node.begin
    for child in sorted(node.children, key=lambda ch: ch.begin):
        if current < child.begin:
            frag = NodeClass(current, child.begin, 'raw', 'fragment')
            frag.parent = node
            result.append(frag)
        result.append(child)
        current = child.end
    # fill possible gap after last child
    if current != node.end:
        frag = NodeClass(current, node.end, 'raw', 'fragment')
        frag.parent = node
        result.append(frag)
    # replace children with list that includes gaps
    node.children = result
    # recur on children
    for child in node.children:
        sort_and_create_fragments(child, NodeClass)
def interval_tree_to_hierarchy(tree, NodeClass=FinterNode):
    """Convert a flat IntervalTree into a NodeClass hierarchy.

    Each interval becomes a node; an interval's parent is the smallest
    interval that strictly envelops it. A synthetic root spans the whole
    tree, and gap-filling fragments are inserted (sort_and_create_fragments).
    """
    # initialize interval -> node mapping
    child2parent = {i:None for i in tree}
    # consider every interval a possible parent
    for parent in tree:
        # whatever intervals they envelop are their possible children
        # (equal-length envelopes are excluded so an interval is not its own parent)
        children = tree.envelop(parent)
        children = list(filter(lambda c: c.length() != parent.length(), children))
        for c in children:
            # children without a parent are adopted immediately
            if not child2parent[c]:
                child2parent[c] = parent
            # else children select their smallest parents
            else:
                child2parent[c] = min(child2parent[c], parent, key=lambda x: x.length())
    # wrap the child2parent relationships
    hnRoot = NodeClass(tree.begin(), tree.end(), 'none', 'root')
    interval_to_node = { i:NodeClass(i.begin, i.end, i.data[0], i.data[1]) for i in tree }
    for (child, parent) in child2parent.items():
        hnChild = interval_to_node[child]
        if not parent:
            hnChild.parent = hnRoot
            hnRoot.children.append(hnChild)
        else:
            hnParent = interval_to_node[parent]
            hnChild.parent = hnParent
            hnParent.children.append(hnChild)
    # create fragments
    sort_and_create_fragments(hnRoot, NodeClass)
    # done
    return hnRoot
#------------------------------------------------------------------------------
# convenience stuff
#------------------------------------------------------------------------------
def find_dissector(fpath):
    """Given a file path, return a dissector (analyze) function or None.

    Detection order: signatures in `file(1)` output, then magic bytes from a
    32-byte sample, then file-name extension heuristics.
    """
    # first try if `file` will help us
    sig2dissector = [
        (r'GPG symmetrically encrypted data', gpg.analyze),
        (r'ELF 32-bit (LSB|MSB)', elf32.analyze),
        (r'ELF 64-bit (LSB|MSB)', elf64.analyze),
        (r'PE32 executable .* 80386', pe32.analyze),
        (r'PE32\+ executable .* x86-64', pe64.analyze),
        (r'Dalvik dex file', dex.analyze),
        (r'MS-DOS executable', exe.analyze),
        (r'Mach-O ', macho.analyze),
        (r'RIFF \(little-endian\) data, WAVE audio', wav.analyze),
        (r'^COMBO_BOOT', combo_boot.analyze)
    ]
    (file_str, _) = shellout(['file', fpath])
    analyze = None
    for (sig, dissector) in sig2dissector:
        if re.search(sig, file_str):
            analyze = dissector
            break
    # next see if a file sample might help us
    # (fix: close the sample handle instead of leaking it)
    with open(fpath, 'rb') as f:
        sample = f.read(32)
    if sample.startswith(b'COMBO_BOOT\x00\x00'):
        analyze = combo_boot.analyze
    if sample.startswith(b'AVB0'):
        analyze = avb.analyze
    # next guess based on file name or extension
    if not analyze:
        if fpath.endswith('.rel'):
            # fix: decode defensively — a binary .rel-named file previously
            # crashed detection with UnicodeDecodeError.
            if re.match(r'[XDQ][HL][234]\x0a', sample.decode('utf-8', 'replace')):
                analyze = rel.analyze
        elif fpath.endswith('.ihx'):
            analyze = ihx.analyze
        elif fpath.endswith('.mkv'):
            analyze = mkv.analyze
    return analyze
def dissect_file(fpath, populate_fragments=True):
    """Identify the file at *fpath*, run its dissector, and return an
    IntervalTree of the reported regions (None if no dissector matched).

    Dissectors print "[begin,end) type comment" lines; stdout is temporarily
    redirected to capture them.
    """
    analyze = find_dissector(fpath)
    if not analyze:
        return
    # capture stdout to StringIO
    buf = io.StringIO()
    old_stdout = sys.stdout
    sys.stdout = buf
    try:
        # call analyzer
        lines = ''
        with open(fpath, 'rb') as fp:
            analyze(fp)
        lines = buf.getvalue()
    finally:
        # Fix: always restore stdout and release the buffer, even if the
        # analyzer raises (the original leaked the redirection on error).
        sys.stdout = old_stdout
        buf.close()
    # lines to intervals
    lines = lines.split('\n')
    intervals = intervals_from_text(lines)
    # filter out null intervals
    intervals = [i for i in intervals if i.length()]
    return IntervalTree(intervals)
def finter_type_to_struct_fmt(type_):
    """Map a finter field-type code to a struct format string (currently 1:1).

    :raises ValueError: for unknown codes. (Was an assert, which silently
        disappears under ``python -O``.)
    """
    valid = {'B', '<B', '>B', 'H', '<H', '>H', 'W', '<W', '>W', 'I', '<I', '>I', 'Q', '<Q', '>Q'}
    if type_ not in valid:
        raise ValueError('unknown finter type: %r' % (type_,))
    return type_
|
# Print Fibonacci numbers in pairs, three pairs per row (42 numbers total).
a, b = 1, 1
for i in range(1, 22):
    print("%12ld %12ld" % (a, b), end=' ')
    # advance two steps: a becomes F(k+2), b becomes F(k+3)
    a = a + b
    b = a + b
    if i % 3 == 0:
        print('\n')
# Generated by Django 2.2.3 on 2019-08-06 11:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make the five answer-choice fields ('one'..'five') on
    the Gamer 'user' model nullable single-character CharFields."""
    dependencies = [
        ('Gamer', '0007_auto_20190806_1106'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='five',
            field=models.CharField(max_length=1, null=True, verbose_name='选项'),
        ),
        migrations.AlterField(
            model_name='user',
            name='four',
            field=models.CharField(max_length=1, null=True, verbose_name='选项'),
        ),
        migrations.AlterField(
            model_name='user',
            name='one',
            field=models.CharField(max_length=1, null=True, verbose_name='选项'),
        ),
        migrations.AlterField(
            model_name='user',
            name='three',
            field=models.CharField(max_length=1, null=True, verbose_name='选项'),
        ),
        migrations.AlterField(
            model_name='user',
            name='two',
            field=models.CharField(max_length=1, null=True, verbose_name='选项'),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: cp936 -*-
import sqlite3
import pandas as pd
from exceldoc import *
def importMarketPerToSQLite():
    """Import marketing-staff rows from the Excel sheet 'SQL Results' into the
    SQLite 'marketper' table (code, type, name, mobile), replacing existing
    rows, then print the distinct codes as a sanity check."""
    with sqlite3.connect(r'C:\sqlite\db\hxdata.db') as db:
        insert_template = "INSERT INTO marketper " \
            "(marketcode, markettype, marketname, marketmobile) " \
            "VALUES (?, ?, ?, ?);"
        # Clear rows left over from previous imports.
        db.execute('DELETE FROM marketper;')
        # Fix: the 'sheetname' keyword was removed in pandas 1.0; use sheet_name.
        # Raw strings keep the Windows paths free of invalid-escape warnings.
        df = pd.read_excel(r'..\input\营销人员和营业部列表.xlsx', sheet_name = 'SQL Results')
        print("df Column headings:")
        print(df.columns)
        df1 = df[['人员编号','人员类别','人员姓名','手机']]
        print("df1 Column headings:")
        print(df1.columns)
        print(df1)
        try:
            print('3')
            db.executemany(insert_template, df1.values)
        except sqlite3.Error as e:
            print('2')
            print(e)
            db.rollback()
        else:
            db.commit()
        # Verify that all the data was loaded.
        select_stmt = 'SELECT DISTINCT marketcode FROM marketper;'
        for row in db.execute(select_stmt).fetchall():
            print('marketPerson: 1')
            # Fix: join the row's columns — the original joined the
            # characters of the tuple's repr.
            print(';'.join(str(col) for col in row))
#importMarketPerToSQLite()
|
import random
# Demo of the random module: list its public API, then draw one sample each
# from a uniform float, a bounded int, and two stepped ranges.
print("Random:", dir(random))
print("Random:", random.random())
print("Ints:", random.randint(1, 100))
print("Range:", random.randrange(0, 100, 6)) #Provides multiples of 6
print("Range:", random.randrange(1, 100, 6)) #Provides multiples of 6 + 1
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
TODO: get this to work with python3
"""
import logging, copy
log = logging.getLogger(__name__)
import numpy as np, math
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Circle, Ellipse, PathPatch
import matplotlib.lines as mlines
import matplotlib.path as mpath
def ellipse_closest_approach_to_point( ex, ez, _c ):
    """
    Ellipse in its natural frame, semi-axes ex (x) and ez (z); _c is a point
    in that frame.

    :param ex: x semi-axis
    :param ez: z semi-axis
    :param _c: xz coordinates of the point
    :return p: point on the ellipse closest to _c (brute force over 1M
        parametric samples)

    Closest approach on the bulb ellipse to the center of the torus "circle"
    is a good point to target for a hype/cone/whatever neck, as we are aiming
    to eliminate the cylinder neck anyhow.
    """
    target = np.asarray( _c )   # e.g. center of RHS torus circle
    assert target.shape == (2,)
    t = np.linspace( 0, 2*np.pi, 1000000 )
    samples = np.column_stack( (ex*np.cos(t), ez*np.sin(t)) )   # 1M points on the ellipse
    d2 = np.sum( np.square(samples - target), 1 )
    return samples[d2.argmin()]
def ellipse_points( xy=[0,-5.], ex=254., ez=190., n=1000 ):
    """
    :param xy: center of ellipse
    :param ex: x radius of ellipse
    :param ez: z radius of ellipse
    :param n: number of points
    :return: array of shape (n,2) of points on the ellipse
    """
    t = np.linspace( 0, 2*np.pi, n )
    return np.column_stack( (ex*np.cos(t) + xy[0], ez*np.sin(t) + xy[1]) )
def circle_points( xy=[0,0], tr=80, n=1000 ):
    """
    :param xy: center of circle
    :param tr: radius of circle
    :param n: number of points
    :return: array of shape (n,2) of points on the circle
    """
    t = np.linspace( 0, 2*np.pi, n )
    return np.column_stack( (tr*np.cos(t) + xy[0], tr*np.sin(t) + xy[1]) )
def points_inside_circle(points, center, radius):
    """
    :param points: (n,2) array of points
    :param center: (2,) coordinates of circle center
    :param radius: circle radius
    :return: boolean array of length n — True where a point lies strictly
        inside the circle (points exactly on the circle are False)
    """
    dist = np.sqrt( np.sum( np.square(points - center), 1 ) )
    return dist - radius < 0.
def ellipse_points_inside_circle():
    # NOTE(review): torus_x, torus_z and m4_torus_r are not defined anywhere
    # in this module, and the computed `e` is never used or returned — this
    # looks like an unfinished scratch helper. Confirm before calling.
    tc = np.array([torus_x,torus_z])
    tr = m4_torus_r
    e = ellipse_points( xy=[0,-5.], ex=254., ez=190., n=1000000 )
class X(object):
    """Wrapper around the root of a Shape tree: search helpers plus the tree
    surgery that replaces the ellipsoid/torus neck construction with a cone."""
    def __init__(self, root):
        self.root = root
    def __repr__(self):
        return "\n".join( map(repr, self.constituents()))
    def find(self, shape):
        """Return the root tree's constituents of the given shape kind."""
        return self.root.find(shape)
    def find_one(self, shape):
        """Return the unique constituent of kind *shape*.

        NOTE(review): len() requires root.find to return a sequence, not a
        lazy iterator — verify under Python 3 (see the module-level TODO).
        """
        ff = self.root.find(shape)
        assert len(ff) == 1
        return ff[0]
    def constituents(self):
        return self.root.constituents()
    def replacement_cons(self):
        """Compute the SCons (cone) that replaces the torus-subtraction neck.

        Targets the point on the bulb ellipse closest to the center of the
        torus circle. Returns (cons, offset) where offset is the cone's local
        xz translation in the parent frame.
        """
        i = self.find_one("STorus")
        r = i.param[0]
        R = i.param[1]
        d = self.find_one("SEllipsoid")
        ex = d.param[0]
        ez = d.param[1]
        print("r %s R %s ex %s ez %s " % (r,R,ex,ez))
        print(" SEllipsoid d.xy %s " % repr(d.xy) )
        print(" STorus i.xy %s " % repr(i.xy) )
        z0 = i.xy[1] # torus z-plane in ellipsoid frame
        p = ellipse_closest_approach_to_point( ex, ez, [R,z0] ) # [R,z0] is center of torus circle
        pr, pz = p # at torus/ellipse closest point : no guarantee of intersection
        print(" ellipse closest approach to torus %s " % repr(p) )
        r2 = pr
        r1 = R - r
        mz = (z0 + pz)/2. # mid-z cone coordinate (ellipsoid frame)
        hz = (pz - z0)/2. # cons half height
        f = SCons( "f", [r1,r2,hz] )
        B = np.array( [0, mz] )
        print(" replacment SCons %s offset %s " % (repr(f),repr(B)))
        return f, B
    def spawn_rationalized(self):
        """Return a deep-copied tree with the Tubs-minus-Torus neck replaced
        by a cone (and, for x018/x019, the outer boolean peeled off)::

            UnionSolid
            /         \
           Ellipsoid   Subtraction
                       /         \
                     Tubs        Torus

            UnionSolid
            /         \
           Ellipsoid   Cons
        """
        name = self.__class__.__name__
        x = copy.deepcopy(self)
        # establish expectations for tree
        e = x.find_one("SEllipsoid")
        t = x.find_one("STorus")
        ss = t.parent
        assert ss is not None and ss.shape == "SSubtractionSolid"
        us = ss.parent
        assert us is not None and us.shape == "SUnionSolid"
        assert us.left is not None and us.left == e and us.right == ss and ss.right == t
        assert us.right is not None and us.right == ss
        if name == "x018": # cathode vacuum cap
            assert x.root.shape == "SIntersectionSolid"
            x.root = e
            e.parent = None
        elif name == "x019": # remainder vacuum
            assert x.root.shape == "SSubtractionSolid"
            left = x.root.left
            assert left.shape == "SUnionSolid"
            left.parent = None
            x.root = left
        else:
            pass
        pass
        if name in ["x019","x020","x021"]:
            # calculate the parameters of the replacement cons
            cons, offset = x.replacement_cons()
            # tree surgery : replacing the right child of UnionSolid
            us.right = cons
            cons.parent = us
            cons.ltransform = offset
            pass
        return x
class Shape(object):
    """
    Shape tree node (2D xz cross-section of a solid).

    matplotlib patches do not support deferred placement it seems, so do that
    here: each node stores a local translation (ltransform) and resolves its
    absolute position by walking parent links (see _get_xy).
    """
    KWA = dict(fill=False)
    dtype = np.float64
    PRIMITIVE = ["SEllipsoid","STubs","STorus", "SCons", "SHype", "SBox", "SPolycone"]
    COMPOSITE = ["SUnionSolid", "SSubtractionSolid", "SIntersectionSolid"]
    def __repr__(self):
        return "%s : %20s : %s : %s " % (
            self.name,
            self.shape,
            repr(self.ltransform),
            repr(self.param)
        )
    def __init__(self, name, param, **kwa ):
        """
        :param name: instance label
        :param param: primitive parameters, or for composites the triple
            (left, right, right-local-transform)
        :param kwa: matplotlib patch keyword overrides (merged over KWA)
        """
        shape = self.__class__.__name__
        assert shape in self.PRIMITIVE + self.COMPOSITE
        primitive = shape in self.PRIMITIVE
        composite = shape in self.COMPOSITE
        d = self.KWA.copy()
        d.update(kwa)
        self.kwa = d
        self.name = name
        self.shape = shape
        self.param = param
        self.parent = None
        self.ltransform = None
        self.left = None
        self.right = None
        if composite:
            left = self.param[0]
            right = self.param[1]
            right.ltransform = self.param[2]
            left.parent = self
            right.parent = self
            self.left = left
            self.right = right
        pass
    is_primitive = property(lambda self:self.left is None and self.right is None)
    is_composite = property(lambda self:self.left is not None and self.right is not None)
    def _get_xy(self):
        """
        Resolve the node's absolute xz position. Assumes only translations,
        adds the node.ltransform obtained by following parent links up the
        tree of shapes.

              a                      Intersection
             / \
            b   m(D)                 Union        m:Tubs
           / \
          c   k(C)                   Union        Tubs
         / \
        d   f(B)                     Ellipsoid    Subtraction
           / \
          g(B) i(B+A)                Tubs         Torus
        """
        xy = np.array([0,0], dtype=self.dtype )
        node = self
        while node is not None:
            if node.ltransform is not None:
                log.debug("adding ltransform %s " % node.ltransform)
                xy += node.ltransform
            pass
            node = node.parent
        pass
        return xy
    xy = property(_get_xy)
    def constituents(self):
        """Return the list of primitive leaves beneath (or at) this node."""
        if self.is_primitive:
            return [self]
        else:
            assert self.is_composite
            cts = []
            cts.extend( self.left.constituents() )
            cts.extend( self.right.constituents() )
            return cts
        pass
    def find(self, shape):
        """Return the list of constituent primitives of the given shape kind.

        Fix for Python 3: return a real list rather than the lazy filter
        object, so callers such as X.find_one can apply len() (the module
        header notes the code needs porting to python3).
        """
        cts = self.constituents()
        return [ct for ct in cts if ct.shape == shape]
    def patches(self):
        """
        Return matplotlib patches for this subtree. Positioning relies on
        self.xy of the primitives, with nothing being passed into composites.
        For composites self.param[2] is the local right transform.
        """
        if self.shape == "SEllipsoid":
            return self.make_ellipse( self.xy, self.param, **self.kwa )
        elif self.shape == "STubs":
            return self.make_rect( self.xy, self.param, **self.kwa)
        elif self.shape == "STorus":
            return self.make_torus( self.xy, self.param, **self.kwa)
        elif self.shape == "SCons":
            return self.make_cons( self.xy, self.param, **self.kwa)
        elif self.shape == "SHype":
            return self.make_hype( self.xy, self.param, **self.kwa)
        elif self.shape == "SBox":
            return self.make_rect( self.xy, self.param, **self.kwa)
        elif self.shape == "SPolycone":
            return self.make_polycone( self.xy, self.param, **self.kwa)
        else:
            assert self.is_composite
            pts = []
            pts.extend( self.left.patches() )
            pts.extend( self.right.patches() )
            return pts
        pass
    @classmethod
    def create(cls, pt ):
        # NOTE(review): unimplemented stub — confirm whether still needed.
        pass
    @classmethod
    def make_rect(cls, xy , wh, **kwa ):
        """
        :param xy: center of rectangle
        :param wh: halfwidth, halfheight
        """
        ll = ( xy[0] - wh[0], xy[1] - wh[1] )
        return [Rectangle( ll, 2.*wh[0], 2.*wh[1], **kwa )]
    @classmethod
    def make_ellipse(cls, xy , param, **kwa ):
        """:param param: (x semi-axis, z semi-axis)"""
        return [Ellipse( xy, width=2.*param[0], height=2.*param[1], **kwa )]
    @classmethod
    def make_circle(cls, xy , radius, **kwa ):
        return [Circle( xy, radius=radius, **kwa )]
    @classmethod
    def make_torus(cls, xy, param, **kwa ):
        """Cross-section of a torus: two circles of radius r at x = ±R."""
        r = param[0]
        R = param[1]
        pts = []
        lhs = cls.make_circle( xy + [-R,0], r, **kwa)
        rhs = cls.make_circle( xy + [+R,0], r, **kwa)
        pts.extend(lhs)
        pts.extend(rhs)
        return pts
    @classmethod
    def make_pathpatch(cls, xy, vtxs, **kwa ):
        """Closed polygon through vtxs offset by xy; see analytic/pathpatch.py"""
        Path = mpath.Path
        path_data = []
        for i, vtx in enumerate(vtxs):
            act = Path.MOVETO if i == 0 else Path.LINETO
            path_data.append( (act, (vtx[0]+xy[0], vtx[1]+xy[1])) )
        pass
        path_data.append( (Path.CLOSEPOLY, (vtxs[0,0]+xy[0], vtxs[0,1]+xy[1])) )
        pass
        codes, verts = zip(*path_data)
        path = Path(verts, codes)
        patch = PathPatch(path, **kwa)
        return [patch]
    @classmethod
    def make_cons(cls, xy , param, **kwa ):
        """
        Trapezoid cross-section of a cone frustum:

          (-r2,z2)      (r2,z2)
              1---------2
               \\       /
                0 ... 3
          (-r1,z1)      (r1,z1)

        :param param: (r1 bottom radius, r2 top radius, hz half-height)
        """
        r1 = param[0]
        r2 = param[1]
        hz = param[2]
        z2 = hz + xy[1]
        z1 = -hz + xy[1]
        vtxs = np.zeros( (4,2) )
        vtxs[0] = ( -r1, z1)
        vtxs[1] = ( -r2, z2)
        vtxs[2] = ( r2, z2)
        vtxs[3] = ( r1, z1)
        return cls.make_pathpatch( xy, vtxs, **kwa )
    @classmethod
    def make_polycone(cls, xy , param, **kwa ):
        """:param param: (nz,3) array of (rmin, rmax, z) planes; only rmax is drawn."""
        zp = param
        nz = len(zp)
        assert zp.shape == (nz, 3), zp
        assert nz > 1 , zp
        rmin = zp[:,0]
        rmax = zp[:,1]
        z = zp[:,2]
        vtxs = np.zeros( (2*nz,2) )
        for i in range(nz):
            vtxs[i] = ( -rmax[i], z[i] )
            vtxs[2*nz-i-1] = ( rmax[i], z[i] )
        pass
        log.debug(" xy : %r " % xy )
        return cls.make_pathpatch( xy, vtxs, **kwa )
    @classmethod
    def make_hype(cls, xy , param, **kwa ):
        """
        Hyperboloid cross-section, sampled at 20 z-levels per side::

            4----------- 5
             3          6
              2        7
             1          8
            0 ---------- 9

        sqrt(x^2+y^2) = r0 * np.sqrt( (z/zf)^2 + 1 )

        :param param: (r0 waist radius, stereo angle, hz half-height)
        """
        r0 = param[0]
        stereo = param[1]
        hz = param[2]
        zf = r0/np.tan(stereo)
        r_ = lambda z:r0*np.sqrt( np.square(z/zf) + 1. )
        nz = 20
        zlhs = np.linspace( -hz, hz, nz )
        zrhs = np.linspace( hz, -hz, nz )
        vtxs = np.zeros( (nz*2,2) )
        vtxs[:nz,0] = -r_(zlhs) + xy[0]
        vtxs[:nz,1] = zlhs + xy[1]
        vtxs[nz:,0] = r_(zrhs) + xy[0]
        vtxs[nz:,1] = zrhs + xy[1]
        return cls.make_pathpatch( xy, vtxs, **kwa )
# Concrete shape kinds. All behavior lives in Shape; these subclasses exist
# so Shape.__init__ can derive the kind string from the class name.
class SEllipsoid(Shape):pass
class STubs(Shape):pass
class STorus(Shape):pass
class SCons(Shape):pass
class SHype(Shape):pass
class SPolycone(Shape):pass
class SUnionSolid(Shape):pass
class SSubtractionSolid(Shape):pass
class SIntersectionSolid(Shape):pass
if __name__ == '__main__':
    # No standalone demo; this module is meant to be imported.
    pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.