import json
import os
import numpy as np
import tensorflow as tf
from flask import Flask, request
with open("dictionary-test.json", "r") as fopen:
dic = json.load(fopen)
cluster = tf.train.ClusterSpec({"worker": ["192.168.0.101:2222", "192.168.0.104:2223"]})
class Model:
def __init__(
self, size_layer, num_layers, embedded_size, dict_size, dimension_output, learning_rate
):
def cells(reuse=False):
return tf.nn.rnn_cell.BasicRNNCell(size_layer, reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.float32, [None, dimension_output])
encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, embedded_size], -1, 1))
with tf.device("/job:worker/task:0"):
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
rnn_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
outputs, _ = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype=tf.float32)
with tf.device("/job:worker/task:1"):
rnn_W = tf.Variable(tf.random_normal((size_layer, dimension_output)))
rnn_B = tf.Variable(tf.random_normal([dimension_output]))
self.logits = tf.add(tf.matmul(outputs[:, -1], rnn_W), rnn_B, name="logits")
sess = tf.InteractiveSession("grpc://192.168.0.101:2222")
# based on freeze-model.ipynb
model = Model(
size_layer=64,
num_layers=1,
embedded_size=64,
dict_size=3366,
dimension_output=2,
learning_rate=1e-3,
)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, os.getcwd() + "/checkpoint/test")
label = ["negative", "positive"]
UNK = 3
maxlen = 50
app = Flask(__name__)
@app.route("/", methods=["GET"])
def hello():
text = request.args.get("text")
x = np.zeros((1, maxlen))
for no, k in enumerate(text.split()[:maxlen][::-1]):
val = dic[k] if k in dic else UNK
x[0, -1 - no] = val
index = np.argmax(sess.run(model.logits, feed_dict={model.X: x})[0])
return label[index]
application = app
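# --- Usage sketch (not part of the service) ---
# A minimal client for the endpoint above; the host and port are assumptions,
# since the source only exposes the `application` WSGI object:
#
# import requests
# resp = requests.get("http://127.0.0.1:5000/", params={"text": "this is great"})
# print(resp.text)  # "negative" or "positive"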
|
from os.path import realpath, exists, join
from larch import Group
from larch.fitting import param_group, Parameter
from larch.io import read_xdi
from larch.xafs import feffit_dataset, feffit_transform, feffit, feffit_report, feffpath
from larch.wxlib import _newplot, _plot
def do_fit(self, which):
if which == 'testrun':
folder = self.testrun
else:
folder = self.baseline
data = read_xdi(join(self.path, 'NiO.chik'))
if hasattr(data, 'wavenumber'):
data.k = data.wavenumber
gds = param_group(amp = Parameter(1, vary=True),
enot = Parameter(0.01, vary=True),
alpha = Parameter(0.0001, vary=True),
sso = Parameter(0.003, vary=True),
ssni = Parameter(0.003, vary=True),
sso2 = Parameter(0.003, vary=True),
#sso3 = Parameter(0.003, vary=True),
ssni2 = Parameter(0.003, vary=True),
#ssni3 = Parameter(0.003, vary=True),
#ssni4 = Parameter(0.003, vary=True),
)
paths = list()
paths.append(feffpath(realpath(join(folder, "feff0001.dat")), # 1st shell O SS
s02 = 'amp',
e0 = 'enot',
sigma2 = 'sso',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0002.dat")), # 2nd shell Ni SS
s02 = 'amp',
e0 = 'enot',
sigma2 = 'ssni',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0003.dat")), # O-O triangle
s02 = 'amp',
e0 = 'enot',
sigma2 = '1.5*sso',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0004.dat")), # O-Ni triangle
s02 = 'amp',
e0 = 'enot',
sigma2 = 'sso+ssni',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0005.dat")), # 3rd shell O SS
s02 = 'amp',
e0 = 'enot',
sigma2 = 'sso2',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0006.dat")), # 4th shell Ni SS
s02 = 'amp',
e0 = 'enot',
sigma2 = 'ssni2',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0007.dat")), # O-O non-forward linear
s02 = 'amp',
e0 = 'enot',
sigma2 = 'sso*2',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0008.dat")), # O-Ni forward scattering
s02 = 'amp',
e0 = 'enot',
sigma2 = 'ssni2',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0009.dat")), # O-O forward through absorber
s02 = 'amp',
e0 = 'enot',
sigma2 = 'sso*2',
deltar = 'alpha*reff'))
paths.append(feffpath(realpath(join(folder, "feff0010.dat")), # O-Ni-O double forward
s02 = 'amp',
e0 = 'enot',
sigma2 = 'ssni2',
deltar = 'alpha*reff'))
trans = feffit_transform(kmin=3, kmax=15.938, kw=(2,1,3), dk=1, window='hanning', rmin=1.5, rmax=4.2)
dset = feffit_dataset(data=data, pathlist=paths, transform=trans)
fit = feffit(gds, dset)
if self.doplot:
offset = 0.6*max(dset.data.chir_mag)
_newplot(dset.data.r, dset.data.chir_mag+offset, xmax=8, win=2,
xlabel=r'$R \rm\,(\AA)$', label='data',
ylabel=r'$|\chi(R)| \rm\,(\AA^{-3})$',
title='Fit to '+self.folder, show_legend=True)
_plot(dset.model.r, dset.model.chir_mag+offset, label='fit', win=2)
_plot(dset.data.r, dset.data.chir_re, label='data', win=2)
_plot(dset.model.r, dset.model.chir_re, label='fit', win=2)
#end if
if self.verbose:
print(feffit_report(fit))
#end if
return fit
#end def
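# --- Usage sketch ---
# `do_fit` reads several attributes from its first argument; a minimal,
# hypothetical driver object (all paths and names are placeholders) might be:
#
# from types import SimpleNamespace
# driver = SimpleNamespace(path='.', testrun='testrun', baseline='baseline',
#                          folder='NiO', doplot=False, verbose=True)
# fit = do_fit(driver, 'baseline')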
|
#!/usr/bin/env python
"""
update_dreqs_0243.py
Determine which MPI coupled variables need the external_variables global
attribute setting.
"""
import argparse
import json
import logging.config
import os
import pprint
import sys
import django
django.setup()
from pdata_app.models import DataRequest, VariableRequest
__version__ = '0.1.0b1'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'
logger = logging.getLogger(__name__)
TABLES_DIR = '/home/h04/jseddon/primavera/cmip6-cmor-tables/Tables'
def process_cell_measures(var_req, cell_measures, output_dict):
"""
Add the table and variable name to the appropriate cell measures entry.
"""
if not cell_measures:
# If blank then don't do anything.
return
# correct for typos in the data request
if cell_measures == 'area: areacello OR areacella':
cell_measures = 'area: areacella'
if cell_measures in output_dict:
if var_req.table_name in output_dict[cell_measures]:
if (var_req.cmor_name not in
output_dict[cell_measures][var_req.table_name]):
(output_dict[cell_measures][var_req.table_name].
append(var_req.cmor_name))
else:
output_dict[cell_measures][var_req.table_name] = [var_req.cmor_name,]
else:
output_dict[cell_measures] = {var_req.table_name:[var_req.cmor_name,]}
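# Illustrative shape of `output_dict` after a few calls (values are made up):
# {'area: areacella': {'Amon': ['tas', 'pr'], 'day': ['tas']},
#  'area: areacello': {'Omon': ['tos']}}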
def parse_args():
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Add additional data requests')
parser.add_argument('-l', '--log-level', help='set logging level to one of '
'debug, info, warn (the default), or error')
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(__version__))
args = parser.parse_args()
return args
def main(args):
"""
Main entry point
"""
drs = DataRequest.objects.filter(
institute__short_name='MPI-M',
experiment__short_name__in=['control-1950', 'hist-1950',
'spinup-1950'],
datafile__isnull=False
).distinct()
tables = (drs.values_list('variable_request__table_name', flat=True).
distinct().order_by('variable_request__table_name'))
output_dict = {}
for tab_name in tables:
if tab_name.startswith('Prim'):
for dr in (drs.filter(variable_request__table_name=tab_name).
order_by('variable_request__cmor_name')):
cell_measures = (dr.variable_request.cell_measures)
process_cell_measures(dr.variable_request, cell_measures,
output_dict)
else:
json_file = os.path.join(TABLES_DIR, f'CMIP6_{tab_name}.json')
with open(json_file) as fh:
mip_table = json.load(fh)
for dr in (drs.filter(variable_request__table_name=tab_name).
order_by('variable_request__cmor_name')):
try:
cell_measures = (mip_table['variable_entry']
[dr.variable_request.cmor_name]['cell_measures'])
except KeyError:
continue
process_cell_measures(dr.variable_request, cell_measures,
output_dict)
print(pprint.pformat(output_dict))
if __name__ == "__main__":
cmd_args = parse_args()
# determine the log level
if cmd_args.log_level:
try:
log_level = getattr(logging, cmd_args.log_level.upper())
except AttributeError:
logger.setLevel(logging.WARNING)
logger.error('log-level must be one of: debug, info, warn or error')
sys.exit(1)
else:
log_level = DEFAULT_LOG_LEVEL
# configure the logger
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': DEFAULT_LOG_FORMAT,
},
},
'handlers': {
'default': {
'level': log_level,
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': log_level,
'propagate': True
}
}
})
# run the code
main(cmd_args)
|
"""
Main module which defines ApplicationWindow,
the main window of PythonTurtle.
"""
import multiprocessing
import os
try:
import wx
import wx.adv
import wx.aui
import wx.lib.buttons
import wx.lib.scrolledpanel
except ModuleNotFoundError:
print("wxPython doesn't seem to be installed. You need to install "
"the appropriate prerequisites for your operating system.")
print("Please consult the installation instructions in the README at "
"https://github.com/cool-RR/PythonTurtle#installation")
import sys
sys.exit(255)
import pythonturtle
from . import helppages
from . import shelltoprocess
from . import turtleprocess
from . import turtlewidget
from .misc.helpers import resource_filename, resource_string
class ApplicationWindow(wx.Frame):
"""
The main window of PythonTurtle.
"""
def __init__(self, *args, **keywords):
wx.Frame.__init__(self, *args, **keywords)
self.SetDoubleBuffered(True)
self.SetIcon(wx.Icon(resource_filename("icon.ico"),
wx.BITMAP_TYPE_ICO))
self.init_help_screen()
self.turtle_process = turtleprocess.TurtleProcess()
self.turtle_process.start()
self.turtle_queue = self.turtle_process.turtle_queue
self.init_menu_bar()
self.init_about_dialog_info()
self.splitter = wx.SplitterWindow(self, style=wx.SP_LIVE_UPDATE)
self.turtle_widget = turtlewidget.TurtleWidget(self.splitter,
self.turtle_queue)
self.bottom_sizer_panel = wx.Panel(self.splitter)
self.shell = \
shelltoprocess.Shell(self.bottom_sizer_panel,
queue_pack=self.turtle_process.queue_pack)
self.help_open_button_panel = \
wx.Panel(parent=self.bottom_sizer_panel)
help_open_button_bitmap = wx.Bitmap(resource_filename("teach_me.png"))
self.help_open_button = wx.lib.buttons.GenBitmapButton(
self.help_open_button_panel, -1, help_open_button_bitmap)
self.help_open_button_sizer = wx.BoxSizer(wx.VERTICAL)
self.help_open_button_sizer.Add(
self.help_open_button, 1, wx.EXPAND | wx.ALL, 5)
self.help_open_button_panel.SetSizer(self.help_open_button_sizer)
self.Bind(wx.EVT_BUTTON, self.show_help, self.help_open_button)
self.bottom_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.bottom_sizer.Add(self.shell, 1, wx.EXPAND)
self.bottom_sizer.Add(self.help_open_button_panel, 0, wx.EXPAND)
self.bottom_sizer_panel.SetSizer(self.bottom_sizer)
desired_shell_height = 210
self.splitter.SplitHorizontally(self.turtle_widget,
self.bottom_sizer_panel,
-desired_shell_height)
self.splitter.SetSashGravity(1)
self.sizer = wx.BoxSizer(wx.HORIZONTAL)
self.sizer.Add(self.splitter, 1, wx.EXPAND)
self.sizer.Add(self.help_screen, 1, wx.EXPAND)
self.hide_help()
self.SetSizer(self.sizer)
self.Centre()
self.Maximize()
self.Show()
self.Layout()
self.splitter.SetSashPosition(-desired_shell_height)
self.shell.setFocus()
def init_menu_bar(self):
"""
Initialize the menu bar.
"""
self.menu_bar = wx.MenuBar()
self.file_menu = wx.Menu()
self.exit_menu_item = wx.MenuItem(self.file_menu, -1, 'E&xit')
self.file_menu.Append(self.exit_menu_item)
self.Bind(wx.EVT_MENU, self.on_exit, source=self.exit_menu_item)
self.help_menu = wx.Menu()
self.help_menu_item = \
wx.MenuItem(self.help_menu, -1, '&Help\tF1', kind=wx.ITEM_CHECK)
self.help_menu.Append(self.help_menu_item)
self.Bind(wx.EVT_MENU, self.toggle_help, source=self.help_menu_item)
self.help_menu.AppendSeparator()
self.about_menu_item = wx.MenuItem(self.help_menu, -1, "&About...")
self.help_menu.Append(self.about_menu_item)
self.Bind(wx.EVT_MENU, self.on_about, source=self.about_menu_item)
self.menu_bar.Append(self.file_menu, '&File')
self.menu_bar.Append(self.help_menu, '&Help')
self.SetMenuBar(self.menu_bar)
def init_help_screen(self):
"""
Initializes the help screen.
"""
self.help_screen = wx.Panel(parent=self, size=(-1, -1))
self.help_notebook = \
wx.aui.AuiNotebook(parent=self.help_screen,
style=wx.aui.AUI_NB_TOP)
def give_focus_to_selected_page(event=None):
selected_page_number = self.help_notebook.GetSelection()
selected_page = self.help_notebook.GetPage(selected_page_number)
if self.FindFocus() != selected_page:
selected_page.SetFocus()
self.help_notebook.Bind(wx.EVT_SET_FOCUS,
give_focus_to_selected_page)
self.help_notebook.Bind(wx.EVT_CHILD_FOCUS,
give_focus_to_selected_page)
for page in helppages.page_list(parent=self.help_notebook):
self.help_notebook.AddPage(page, caption=page.caption)
self.help_close_button_panel = wx.Panel(parent=self.help_screen)
self.help_screen_sizer = wx.BoxSizer(wx.VERTICAL)
self.help_screen_sizer.Add(self.help_notebook, 1, wx.EXPAND)
self.help_screen_sizer.Add(self.help_close_button_panel, 0, wx.EXPAND)
self.help_screen.SetSizer(self.help_screen_sizer)
help_close_button_bitmap = wx.Bitmap(
resource_filename("lets_code.png"))
self.help_close_button = wx.lib.buttons.GenBitmapButton(
self.help_close_button_panel, -1, help_close_button_bitmap)
self.help_close_button_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.help_close_button_sizer.Add(self.help_close_button, 1,
wx.EXPAND | wx.ALL, 5)
self.help_close_button_panel.SetSizer(self.help_close_button_sizer)
self.Bind(wx.EVT_BUTTON, self.hide_help, self.help_close_button)
def show_help(self, event=None):
self.help_shown = True
self.help_menu_item.Check()
self.sizer.Show(self.help_screen)
self.sizer.Hide(self.splitter)
self.help_notebook.SetFocus()
self.sizer.Layout()
def hide_help(self, event=None):
self.help_shown = False
self.help_menu_item.Check(False)
self.sizer.Hide(self.help_screen)
self.sizer.Show(self.splitter)
self.shell.setFocus()
self.sizer.Layout()
def toggle_help(self, event=None):
if self.help_shown:
self.hide_help()
else:
self.show_help()
def on_exit(self, event=None):
return self.Close()
def init_about_dialog_info(self):
info = self.about_dialog_info = wx.adv.AboutDialogInfo()
description = resource_string('about.txt')
license_terms = resource_string('license.txt')
license_copyright = license_terms.split(os.linesep)[0]
developer_list = resource_string('developers.txt').split(os.linesep)
info.SetDescription(description)
info.SetLicence(license_terms)
info.SetCopyright(license_copyright)
info.SetName(pythonturtle.name)
info.SetVersion(pythonturtle.__version__)
info.SetWebSite(pythonturtle.__url__)
info.SetDevelopers(developer_list)
info.SetIcon(wx.Icon(resource_filename("turtle.png")))
def on_about(self, event=None):
wx.adv.AboutBox(self.about_dialog_info, self)
def run():
multiprocessing.freeze_support()
app = wx.App()
ApplicationWindow(None, -1, pythonturtle.name, size=(600, 600))
# import cProfile; cProfile.run("app.MainLoop()")
app.MainLoop()
|
from torch import nn
class CNN_CIFAR(nn.Module):
def __init__(self):
super(CNN_CIFAR, self).__init__()
self.conv2d_1 = nn.Conv2d(3, 32, kernel_size=5, padding=2)
self.max_pooling = nn.MaxPool2d(2, stride=2)
self.conv2d_2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
self.flatten = nn.Flatten()
self.fc_1 = nn.Linear(4096, 512)
self.fc_2 = nn.Linear(512, 10)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv2d_1(x)
x = self.relu(x)
x = self.max_pooling(x)
x = self.conv2d_2(x)
x = self.relu(x)
x = self.max_pooling(x)
x = self.flatten(x)
x = self.relu(self.fc_1(x))
x = self.fc_2(x)
return x
class CNN_MNIST(nn.Module):
def __init__(self):
super(CNN_MNIST, self).__init__()
self.conv2d_1 = nn.Conv2d(1, 4, kernel_size=5, stride=1)
self.max_pooling = nn.MaxPool2d(2, stride=2)
self.conv2d_2 = nn.Conv2d(4, 10, kernel_size=5, stride=1)
self.flatten = nn.Flatten()
self.fc_1 = nn.Linear(160, 100)
self.fc_2 = nn.Linear(100, 10)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv2d_1(x)
x = self.relu(x)
x = self.max_pooling(x)
x = self.conv2d_2(x)
x = self.relu(x)
x = self.max_pooling(x)
x = self.flatten(x)
x = self.relu(self.fc_1(x))
x = self.fc_2(x)
return x
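# --- Shape-check sketch ---
# Feeding dummy batches through both networks confirms the hard-coded
# fully-connected input sizes (64*8*8 = 4096 for CIFAR, 10*4*4 = 160 for MNIST).
if __name__ == "__main__":
    import torch
    assert CNN_CIFAR()(torch.randn(2, 3, 32, 32)).shape == (2, 10)
    assert CNN_MNIST()(torch.randn(2, 1, 28, 28)).shape == (2, 10)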
|
import numpy as np
def check_loss(result_name,train_loss,val_loss):
'''
Returns explanations for loss plot if the model overfits
'''
f = open("./Results/" + result_name+"_sugg.txt","a+")
g = open("./Results/" + result_name+"_explain.txt","a+")
tloss = np.load(train_loss)
vloss = np.load(val_loss)
a = vloss - tloss
length = len(vloss)//2
flag = 0
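# Overfitting heuristic: the val-train gap grows monotonically over the
# second half of training AND the final validation loss exceeds the
# mid-training training loss by more than 5%.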
res = all(j>i for i, j in zip(a[length:], a[length+1:])) and (vloss[-1] > 1.05*tloss[length])
if(res):
flag = 1
g.write("The model is having poor generalisation on the validation dataset that is having variance and hence in this context it is overfitting.\n")
f.write("Measures to reduce overfitting and improve its performance are as follows:\n")
f.write("If you can change the network then try using weight regularisation and/or dropouts to reduce the variance of the model thereby reducing its complexity.\n")
f.write("If you can't change the network, but only the dataset, try applying augmentations to artificially increase the size of the dataset or use SMOTE. This would decrease the generalisation error by adding diversity to the dataset.\n")
if not flag:
g.write("The validaiton and training losses are improving with epochs\n")
g.close()
f.close()
return
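# --- Usage sketch (filenames are hypothetical) ---
# check_loss expects .npy files holding per-epoch loss arrays of equal length,
# and a ./Results/ directory must already exist:
# np.save("train_loss.npy", np.array([0.9, 0.5, 0.3, 0.2]))
# np.save("val_loss.npy", np.array([0.95, 0.6, 0.55, 0.6]))
# check_loss("run1", "train_loss.npy", "val_loss.npy")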
|
#coding:utf-8
import re
import pandas as pd
import jieba
import jieba.posseg as pseg # part-of-speech tagging
#from jieba.analyse import ChineseAnalyzer
import numpy as np
import json
from jieba.analyse import extract_tags
class extractkey:
def __init__(self):
# Load the user dictionary for word segmentation preprocessing
jieba.load_userdict("./dict/expendword.txt")
# Build a key-value dictionary of category mappings
relation=pd.read_csv(u'./dict/relation.csv')
rela_group=relation.groupby([u'三级品类',u'二级品类'])
rela_dict={}
for g in rela_group:
#print(g[0])
rela_dict[g[0][0]]=g[0][1]
self.rela_dict=rela_dict
def keywords(self,proname):
proname=''.join(re.findall(u'[a-zA-Z\d\u4e00-\u9fff]+', proname))
'''
seg_list=pseg.cut(proname)#,cut_all=False)
for word in seg_list:
if word.flag in list(seg_obj.keys()):
seg_obj[word.flag].append(word.word)
else:
seg_obj[word.flag]=[]
seg_obj[word.flag].append(word.word)
return seg_obj
'''
seg_obj={}
seg_obj=','.join(jieba.cut(proname))#,cut_all=False)
seg_list=seg_obj.split(',')
return seg_list
def group_count(self,seg_obj,seg_list,weight):
"""
Grouped counting: accumulate each item's weight.
"""
for item in seg_list:
if item in list(seg_obj.keys()):
seg_obj[item]+=weight
else:
seg_obj[item]=weight
return seg_obj
def progress(self,proname):
seg_obj={}
second_type=[]
proname=''.join(re.findall(u'[a-zA-Z\d\u4e00-\u9fff]+', proname))
seg_list=pseg.cut(proname)#,cut_all=False)
# Get keyword weights, used later for score calculation
weight_arr=extract_tags(''.join(re.findall(u'[a-zA-Z\u4e00-\u9fff]+', proname)), topK=3, withWeight=True, allowPOS=())
feature=""
for item in weight_arr:
feature+=item[0]
#print(feature)
for word in seg_list:
#if word.flag.find("n")>-1:
#if word.flag=="n":
try:
if self.rela_dict[word.word]!=None:
second_type.append(self.rela_dict[word.word])
except Exception:
pass
if word.flag in list(seg_obj.keys()):
seg_obj[word.flag].append(word.word)
else:
seg_obj[word.flag]=[]
seg_obj[word.flag].append(word.word)
#else:
# pass
#seg_arr.append(seg_obj)
#second_arr.append(second_type)
type_sum={}
# Count occurrences
for stype in second_type:
if stype in list(type_sum.keys()):
type_sum[stype]+=1
else:
type_sum[stype]=1
target_name=""
target_number=1
# Pick the most frequent second-level category
for item in type_sum.keys():
if type_sum[item]>=target_number:
target_name=item
target_number=type_sum[item]
else:
pass
return '-'.join(second_type),target_name,feature,'-'.join(seg_obj["t"]) if 't' in seg_obj.keys() else ""
#json.dumps(seg_obj,encoding='utf-8'),
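# --- Usage sketch (the title string is illustrative data) ---
# ek = extractkey()
# second_types, best_type, feature, time_words = ek.progress(u'2019春季新款连衣裙')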
if __name__ == "__main__":
data=pd.read_excel(u'./data/源数据.xlsx')
jieba.load_userdict("./dict/expendword.txt")
'''
# Build a key-value dictionary of category mappings
relation=pd.read_csv(u'./dict/relation.csv')
rela_group=relation.groupby([u'三级品类',u'二级品类'])
rela_dict={}
for g in rela_group:
#print(g[0])
rela_dict[g[0][0]]=g[0][1]
#print(g[0][0],g[0][1])
#print(rela_dict)
# For each title, group words by part of speech
seg_arr=[]
second_arr=[]
target_arr=[]
extract_obj=extractkey()
for seg in data[u'产品标题']:
seg_obj,second_type,target_name=extract_obj.progress(seg,rela_dict)
seg_arr.append(seg_obj)
second_arr.append(second_type)
target_arr.append(target_name)
#print('Accurate mode:','/'.join(seg_list))
#print(seg_arr)
data[u"三级分词"]=seg_arr
data[u"二级分词"]=second_arr
data[u"二级品类结果"]=target_arr
data.to_csv('./data/品类关联.csv')
'''
'''
# Word frequency count
seg_arr=[]
seg_obj={}
for seg in data[u'产品标题']:
#seg = seg.decode("utf8")
seg=''.join(re.findall(u'[\d\u4e00-\u9fff]+', seg))
seg_list=pseg.cut(seg)#,cut_all=False)
for word in seg_list:
#print(seg_obj)
#print(word.word,word.flag)
if word in list(seg_obj.keys()):
seg_obj[word]=seg_obj[word]+1
else:
seg_obj[word]=1
#print('Accurate mode:','/'.join(seg_list))
print(sorted(seg_obj.items(), key=lambda seg:seg[1], reverse=True))
'''
# print(jieba.analyse.extract_tags("2019春季新款韩版大码女装减龄钉珠修身显瘦拼接网纱连衣裙", topK=100, withWeight=False, allowPOS=()))
# seg_list=jieba.cut(sent)
# print('Default mode:','/'.join(seg_list))
# seg_list=jieba.cut_for_search(sent)
# print('Search engine mode:','/'.join(seg_list))
# seg_list=jieba.cut(sent,cut_all=True)
# print('Full mode:','/'.join(seg_list))
'''
from sklearn import datasets
iris = datasets.load_iris()
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
y_pred = gnb.fit(['aa','bb','cc'], [1,2,3]).predict(['aa'])
print(y_pred)
'''
|
# Generated by Django 3.2.7 on 2021-10-01 19:51
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('medico', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Horario',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('horario', models.TimeField()),
],
),
migrations.CreateModel(
name='Agenda',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('dia', models.DateField()),
('horarios', django.contrib.postgres.fields.ArrayField(base_field=models.TimeField(), size=None)),
('medico', models.ForeignKey(help_text='Selecione um médico', on_delete=django.db.models.deletion.PROTECT, to='medico.medico')),
],
options={
'verbose_name': 'Agenda',
'verbose_name_plural': 'Agendas',
'ordering': ['dia'],
'unique_together': {('medico', 'dia')},
},
),
]
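# Apply sketch: run `python manage.py migrate` from the project root. Note
# that the Agenda model requires PostgreSQL because of ArrayField.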
|
import copy
from typing import Tuple, Union
import control as _ctrl
import numpy as _np
from numpy import inf
from .linalg import block, eye, matrix, norm, zeros, eig_max
from .quantizer import DynamicQuantizer as _DynamicQuantizer
from .quantizer import StaticQuantizer as _StaticQuantizer
from .quantizer import _ConnectionType
_ctrl.use_numpy_matrix(False)
__all__ = [
'IdealSystem',
'Controller',
'Plant',
]
# TODO: support control.input_output_response()
# TODO: define function returns control.InputOutputSystem
class Controller(object):
"""
An instance of `Controller` represents the state-space model
of controller K given by
K : { x(t+1) = A x(t) + B1 r(t) + B2 y(t)
{ u(t) = C x(t) + D1 r(t) + D2 y(t)
"""
def __init__(self, A, B1, B2, C, D1, D2):
"""
Initializes an instance of `Controller`. You can create an
instance of `IdealSystem` with `Controller` [5]_.
Parameters
----------
A : array_like
Interpreted as a square matrix.
`n` is defined as the number of rows in matrix `A`.
B1 : array_like
Interpreted as a (`n` x `l`) matrix.
`l` is defined as the number of columns in matrix `B1`.
B2 : array_like
Interpreted as a (`n` x `p`) matrix.
`p` is defined as the number of columns in matrix `B2`.
C : array_like
Interpreted as a (`m` x `n`) matrix.
`m` is defined as the number of rows in matrix `C`.
D1 : array_like
Interpreted as a (`m` x `l`) matrix.
D2 : array_like
Interpreted as a (`m` x `p`) matrix.
Notes
-----
An instance of `Controller` represents the state-space model
of controller K given by
K : { x(t+1) = A x(t) + B1 r(t) + B2 y(t)
{ u(t) = C x(t) + D1 r(t) + D2 y(t)
References
----------
.. [5] Y. Minami and T. Muromaki: Differential evolution-based
synthesis of dynamic quantizers with fixed-structures; International
Journal of Computational Intelligence and Applications, Vol. 15,
No. 2, 1650008 (2016)
"""
try:
A_mat = matrix(A)
B1_mat = matrix(B1)
B2_mat = matrix(B2)
C_mat = matrix(C)
D1_mat = matrix(D1)
D2_mat = matrix(D2)
except:
raise TypeError(
"`A`, `B1`, `B2`, `C`, `D1`, and "
"`D2` must be interpreted as matrices."
)
self.n = A_mat.shape[0]
self.l = B1_mat.shape[1]
self.p = B2_mat.shape[1]
self.m = C_mat.shape[0]
if A_mat.shape != (self.n, self.n):
raise ValueError("A must be a square matrix.")
if B1_mat.shape != (self.n, self.l):
raise ValueError(
"The number of rows in matrices "
"`A` and `B1` must be the same."
)
if B2_mat.shape != (self.n, self.p):
raise ValueError(
"The number of rows in matrices "
"`A` and `B2` must be the same."
)
if C_mat.shape != (self.m, self.n):
raise ValueError(
"The number of columns in matrices "
"`A` and `C` must be the same."
)
if D1_mat.shape[0] != self.m:
raise ValueError(
"The number of rows in matrices "
"`C` and `D1` must be the same."
)
if D1_mat.shape[1] != self.l:
raise ValueError(
"The number of columns in matrices "
"`B1` and `D1` must be the same."
)
if D2_mat.shape[0] != self.m:
raise ValueError(
"The number of rows in matrices "
"`C` and `D2` must be the same."
)
if D2_mat.shape[1] != self.p:
raise ValueError(
"The number of columns in matrices "
"`B2` and `D2` must be the same."
)
self.A = A_mat
self.B1 = B1_mat
self.B2 = B2_mat
self.C = C_mat
self.D1 = D1_mat
self.D2 = D2_mat
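# --- Construction sketch ---
# A scalar controller with n = l = p = m = 1 (illustrative values; assumes
# the local `matrix` helper accepts nested lists):
# K = Controller(A=[[1.0]], B1=[[1.0]], B2=[[-1.0]],
#                C=[[0.1]], D1=[[0.5]], D2=[[-0.5]])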
class Plant(object):
"""
An instance of `Plant` represents the state-space model
of plant P given by
P : { x(t+1) = A x(t) + B u(t)
{ z(t) = C1 x(t)
{ y(t) = C2 x(t)
"""
def __init__(self, A, B, C1, C2):
"""
Initializes an instance of `Plant`. You can create an
instance of `IdealSystem` with `Plant`.
Parameters
----------
A : array_like
Interpreted as a square matrix.
`n` is defined as the number of rows in matrix `A`.
B : array_like
Interpreted as a (`n` x `m`) matrix.
`m` is defined as the number of columns in matrix `B`.
C1 : array_like
Interpreted as a (`l1` x `n`) matrix.
`l1` is defined as the number of rows in matrix `C1`.
C2 : array_like
Interpreted as a (`l2` x `n`) matrix.
`l2` is defined as the number of rows in matrix `C2`.
Notes
-----
An instance of `Plant` represents the state-space model
of plant P given by
P : { x(t+1) = A x(t) + B u(t)
{ z(t) = C1 x(t)
{ y(t) = C2 x(t)
References
----------
.. [5] Y. Minami and T. Muromaki: Differential evolution-based
synthesis of dynamic quantizers with fixed-structures; International
Journal of Computational Intelligence and Applications, Vol. 15,
No. 2, 1650008 (2016)
"""
try:
A_mat = matrix(A)
B_mat = matrix(B)
C1_mat = matrix(C1)
C2_mat = matrix(C2)
except:
raise TypeError(
"`A`, `B`, `C1` and `C2` must be interpreted as "
"matrices."
)
self.n = A_mat.shape[0]
self.m = B_mat.shape[1]
self.l1 = C1_mat.shape[0]
self.l2 = C2_mat.shape[0]
if A_mat.shape != (self.n, self.n):
raise ValueError("A must be a square matrix.")
if B_mat.shape != (self.n, self.m):
raise ValueError(
"The number of rows in matrices "
"`A` and `B` must be the same."
)
if C1_mat.shape != (self.l1, self.n):
raise ValueError(
"The number of columns in matrices "
"`A` and `C1` must be the same."
)
if C2_mat.shape != (self.l2, self.n):
raise ValueError(
"The number of columns in matrices "
"`A` and `C2` must be the same."
)
self.A = A_mat
self.B = B_mat
self.C1 = C1_mat
self.C2 = C2_mat
ss1 = _ctrl.ss(
self.A, self.B, self.C1, zeros((self.C1.shape[0], self.B.shape[1]))
)
ss2 = _ctrl.ss(
self.A, self.B, self.C2, zeros((self.C2.shape[0], self.B.shape[1]))
)
self.tf1 = _ctrl.ss2tf(ss1)
self.tf2 = _ctrl.ss2tf(ss2)
@staticmethod
def from_TF(tf: _ctrl.TransferFunction) -> "Plant":
"""
Creates an instance of `Plant` from a transfer function. Note
that `C2` becomes 0.
Parameters
----------
tf : control.TransferFunction
Transfer function from input u to output z.
Returns
-------
Plant
Notes
-----
An instance of `Plant` represents a plant
P given by
P : { x(t+1) = A x(t) + B u(t)
{ z(t) = C1 x(t)
{ y(t) = C2 x(t)
But if you use this method, `C2` becomes `0`.
"""
ss = _ctrl.tf2ss(tf)
ret = Plant(
A=matrix(ss.A),
B=matrix(ss.B),
C1=matrix(ss.C),
C2=matrix(zeros(ss.C.shape)),
)
ret.tf1 = tf
ret.tf2 = tf*0
return ret
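# --- Construction sketch ---
# A scalar plant (n = m = l1 = l2 = 1; illustrative values):
# P = Plant(A=[[0.9]], B=[[1.0]], C1=[[1.0]], C2=[[1.0]])
# or, for the z-output only, from a discrete transfer function
# (control.tf is the python-control API):
# P = Plant.from_TF(_ctrl.tf([1.0], [1.0, -0.9], True))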
class IdealSystem():
"""
Represents an ideal system. 'Ideal' means that this system doesn't
contain a quantizer.
"""
def __str__(self) -> str:
def equation_with_matrix(left_side: str, matrix: _np.ndarray):
line_matrix = str(matrix).split("\n")
ret = left_side + " = "
indent_size = len(ret)
ret += line_matrix[0] + "\n"
for _ in range(1, len(line_matrix)):
ret += (" " * indent_size) + line_matrix[_] + "\n"
return ret
ret = "System given by\n"
ret += " { x(k+1) = A x(k) + B1 r(k) + B2 v(k)\n"
ret += "G { z(k) = C1 x(k) + D1 r(k)\n"
ret += " { u(k) = C2 x(k) + D2 r(k)\n"
ret += "where\n"
ret += "v = u\n"
ret += equation_with_matrix(" A", self. A)
ret += equation_with_matrix("B1", self.B1)
ret += equation_with_matrix("B2", self.B2)
ret += equation_with_matrix("C1", self.C1)
ret += equation_with_matrix("C2", self.C2)
ret += equation_with_matrix("D1", self.D1)
ret += equation_with_matrix("D2", self.D2)
ret += "shown in the following figure\n"
ret += " +-----------+ \n"
ret += " r -->| |--> z \n"
ret += " | G | \n"
ret += " +-->| |---+ \n"
ret += " | +-----------+ | \n"
ret += " +-------------------+ \n"
ret += " u = v \n"
return ret
def response_with_quantizer(self,
quantizer: Union[_DynamicQuantizer, _StaticQuantizer],
input,
x_0) -> Tuple[_np.ndarray]:
"""
Performs a simulation with `quantizer` and returns results.
Parameters
----------
quantizer : Union[DynamicQuantizer, StaticQuantizer]
input : array_like
x_0 : array_like
Returns
-------
(t, u, v, z): Tuple[np.ndarray]
Time, input, quantized input, and output.
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
# TODO: support xP_0, xK_0
# TODO: support time
r = matrix(input)
length = r.shape[1]
k = matrix(range(0, length))
z = zeros((self.l, length))
u = zeros((self.m, length))
v = copy.deepcopy(u)
x = zeros((len(x_0), length))
xi = zeros((len(quantizer.A), length))
x[:, 0:1] = matrix(x_0)
for i in range(length):
u[:, i:i+1] = matrix(self.C2 @ x[:, i:i+1] + self.D2 @ r[:, i:i+1])
v[:, i:i+1] = matrix(quantizer.q(quantizer.C @ xi[:, i:i+1] + u[:, i:i+1]))
z[:, i:i+1] = matrix(self.C1 @ x[:, i:i+1] + self.D1 @ r[:, i:i+1])
if i < length - 1:
xi[:, i+1:i+2] = matrix(quantizer.A @ xi[:, i:i+1] + quantizer.B @ (v[:, i:i+1] - u[:, i:i+1]))
x[:, i+1:i+2] = matrix(self.A @ x[:, i:i+1] + self.B1 @ r[:, i:i+1] + self.B2 @ v[:, i:i+1])
return k, u, v, z
def response(self, input, x_0) -> Tuple[_np.ndarray]:
"""
Performs a simulation and returns results.
Parameters
----------
input : array_like
x_0 : array_like
Returns
-------
(t, u, z): Tuple[np.ndarray]
Time, input, and output.
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
# TODO: support xP_0, xK_0
# TODO: support time
r = matrix(input)
length = r.shape[1]
k = matrix(range(0, length))
z = zeros((self.l, length))
u = zeros((self.m, length))
x = zeros((len(x_0), length))
x[:, 0:1] = matrix(x_0)
for i in range(length):
u[:, i:i+1] = matrix(self.C2 @ x[:, i:i+1] + self.D2 @ r[:, i:i+1])
z[:, i:i+1] = matrix(self.C1 @ x[:, i:i+1] + self.D1 @ r[:, i:i+1])
if i < length - 1:
x[:, i+1:i+2] = matrix(
self.A @ x[:, i:i+1] + self.B1 @ r[:, i:i+1] + self.B2 @ u[:, i:i+1]
)
return k, u, z
def __init__(self, A, B1, B2, C1, C2, D1, D2):
"""
Initializes an instance of `IdealSystem`. 'Ideal' means that
this system doesn't contain a quantizer.
Parameters
----------
A : array_like
Interpreted as a square matrix.
`n` is defined as the number of rows in matrix `A`.
B1 : array_like
Interpreted as a (`n` x `p`) matrix.
`p` is defined as the number of columns in matrix `B1`.
B2 : array_like
Interpreted as a (`n` x `m`) matrix.
`m` is defined as the number of columns in matrix `B2`.
C1 : array_like
Interpreted as a (`l` x `n`) matrix.
`l` is defined as the number of rows in matrix `C1`.
C2 : array_like
Interpreted as a (`m` x `n`) matrix.
D1 : array_like
Interpreted as a (`l` x `p`) matrix.
D2 : array_like
Interpreted as a (`m` x `p`) matrix.
Notes
-----
An instance of `IdealSystem` represents ideal system
G given by
G : { x(t+1) = A x(t) + B1 r(t) + B2 v(t)
{ z(t) = C1 x(t) + D1 r(t)
{ u(t) = C2 x(t) + D2 r(t)
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
try:
A_mat = matrix(A)
B1_mat = matrix(B1)
B2_mat = matrix(B2)
C1_mat = matrix(C1)
C2_mat = matrix(C2)
D1_mat = matrix(D1)
D2_mat = matrix(D2)
except:
raise TypeError(
"`A`, `B1`, `B2`, `C1`, `C2`, `D1`, and "
"`D2` must be interpreted as matrices."
)
self.n = A_mat.shape[0]
self.p = B1_mat.shape[1]
self.m = B2_mat.shape[1]
self.l = C1_mat.shape[0]
if A_mat.shape != (self.n, self.n):
raise ValueError("A must be a square matrix.")
if B1_mat.shape != (self.n, self.p):
raise ValueError(
"The number of rows in matrices "
"`A` and `B1` must be the same."
)
if B2_mat.shape != (self.n, self.m):
raise ValueError(
"The number of rows in matrices "
"`A` and `B2` must be the same."
)
if C1_mat.shape != (self.l, self.n):
raise ValueError(
"The number of columns in matrices "
"`A` and `C1` must be the same."
)
if C2_mat.shape != (self.m, self.n):
raise ValueError(
"The sizes of `B2` and the transpose of `C2` must be"
"the same."
)
if D1_mat.shape[0] != self.l:
raise ValueError(
"The number of rows in matrices "
"`C1` and `D1` must be the same."
)
if D1_mat.shape[1] != self.p:
raise ValueError(
"The number of columns in matrices "
"`B1` and `D1` must be the same."
)
if D2_mat.shape[0] != self.m:
raise ValueError(
"The number of rows in matrices "
"`C2` and `D2` must be the same."
)
if D2_mat.shape[1] != self.p:
raise ValueError(
"The number of columns in matrices "
"`B1` and `D2` must be the same."
)
self.A = A_mat
self.B1 = B1_mat
self.B2 = B2_mat
self.C1 = C1_mat
self.C2 = C2_mat
self.D1 = D1_mat
self.D2 = D2_mat
# Store the internal systems and how they are connected, for use by the analytical solutions
self.type = _ConnectionType.ELSE
self.P = None
self.K = None
@staticmethod
def from_FF(P: Plant) -> "IdealSystem":
"""
Creates an instance of `IdealSystem` from plant `P`.
'from_FF' means that a quantizer is inserted as shown in the
following figure[1]_.
' +-----+ v +-----+ '
' u --->| Q |---->| P |---> z '
' +-----+ +-----+ '
Parameters
----------
P : Plant
Returns
-------
IdealSystem
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
n = P.A.shape[0]
m = P.B.shape[1]
l = P.C1.shape[0]
ret = IdealSystem(
A=P.A,
B1=zeros(shape=(n, m)),
B2=P.B,
C1=P.C1,
C2=zeros(shape=(m, n)),
D1=zeros(shape=(l, m)),
D2=eye(m, m),
)
ret.type = _ConnectionType.FF
ret.P = P
return ret
@staticmethod
def from_FB_connection_with_input_quantizer(
P: Plant,
K: Controller
) -> "IdealSystem":
"""
Creates an instance of `IdealSystem` from plant `P` and
controller `K`.
'from_FB_connection_with_input_quantizer' means that a
quantizer is inserted as shown in the following figure[1]_.
' +-------+ +-------+ +-------+ '
' r --->| | u | | v | |---> z '
' | K |---->| Q |---->| P | '
' +->| | | | | |--+ '
' | +-------+ +-------+ +-------+ | y '
' +-----------------------------------------+ '
Parameters
----------
P : Plant
K : Controller
Returns
-------
IdealSystem
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
if P.l2 != K.p:
raise ValueError(
"The number of columns in matrix `P.C2` and the "
"number of rows in matrix `K.B2` must be the same."
)
A = block([
[P.A, zeros(shape=(P.A.shape[0], K.A.shape[1]))],
[K.B2 @ P.C2, K.A],
])
B1 = block([
[zeros(shape=(P.A.shape[0], K.B1.shape[1]))],
[K.B1],
])
B2 = block([
[P.B],
[zeros(shape=(K.A.shape[0], P.B.shape[1]))],
])
C1 = block([
[P.C1, zeros(shape=(P.C1.shape[0], K.A.shape[1]))],
])
C2 = block([
[K.D2 @ P.C2, K.C]
])
D1 = zeros(shape=(C1.shape[0], B1.shape[1]))
D2 = K.D1
ret = IdealSystem(A, B1, B2, C1, C2, D1, D2)
ret.type = _ConnectionType.FB_WITH_INPUT_QUANTIZER
ret.P = P
ret.K = K
return ret
@staticmethod
def from_FB_connection_with_output_quantizer(
P: Plant,
K: Controller
) -> "IdealSystem":
"""
Creates an instance of `IdealSystem` from plant `P` and
controller `K`.
'from_FB_connection_with_output_quantizer' means that a
quantizer is inserted as shown in the following figure[1]_.
' +-------+ +-------+ '
' r --->| | | |---> z '
' | K |---------->| P | '
' +->| | | |--+ '
' v | +-------+ +-----+ +-------+ | u '
' +-------------| Q |<------------+ '
' +-----+ '
Parameters
----------
P : Plant
K : Controller
Returns
-------
IdealSystem
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
if P.m != K.m:
raise ValueError(
"The number of columns in matrix `K.C` and the "
"number of rows in matrix `P.B` must be the same."
)
A = block([
[P.A, P.B @ K.C],
[zeros(shape=(K.A.shape[0], P.A.shape[1])), K.A],
])
B1 = block([
[P.B @ K.D1],
[K.B1],
])
B2 = block([
[P.B @ K.D2],
[K.B2],
])
C1 = block([
P.C1, zeros(shape=(P.C1.shape[0], K.A.shape[1]))
])
C2 = block([
P.C2, zeros(shape=(P.C2.shape[0], K.A.shape[1]))
])
D1 = zeros(shape=(C1.shape[0], B1.shape[1]))
D2 = zeros(shape=(B2.shape[1], B1.shape[1]))
ret = IdealSystem(
A,
B1,
B2,
C1,
C2,
D1,
D2,
)
ret.type = _ConnectionType.FB_WITH_OUTPUT_QUANTIZER
ret.P = P
ret.K = K
return ret
@staticmethod
def from_FBIQ(
P: Plant,
K: Controller
) -> "IdealSystem":
"""
A shortened form of `from_FB_connection_with_input_quantizer`.
Creates an instance of `IdealSystem` from plant `P` and
controller `K`.
'from_FB_connection_with_input_quantizer' means that a
quantizer is inserted as shown in the following figure[1]_.
' +-------+ +-------+ +-------+ '
' r --->| | u | | v | |---> z '
' | K |---->| Q |---->| P | '
' +->| | | | | |--+ '
' | +-------+ +-------+ +-------+ | y '
' +-----------------------------------------+ '
Parameters
----------
P : Plant
K : Controller
Returns
-------
IdealSystem
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
return IdealSystem.from_FB_connection_with_input_quantizer(P, K)
@staticmethod
def from_FBOQ(
P: Plant,
K: Controller
) -> "IdealSystem":
"""
A shortened form of `from_FB_connection_with_output_quantizer`.
Creates an instance of `IdealSystem` from plant `P` and
controller `K`.
'from_FB_connection_with_output_quantizer' means that a
quantizer is inserted as shown in the following figure[1]_.
' +-------+ +-------+ '
' r --->| | | |---> z '
' | K |---------->| P | '
' +->| | | |--+ '
' v | +-------+ +-----+ +-------+ | u '
' +-------------| Q |<------------+ '
' +-----+ '
Parameters
----------
P : Plant
K : Controller
Returns
-------
IdealSystem
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
return IdealSystem.from_FB_connection_with_output_quantizer(P, K)
@property
def is_stable(self) -> bool:
"""
Returns stability of this system.
Returns
-------
bool
`True` if stable, `False` if not.
"""
A_tilde = self.A + self.B2 @ self.C2 # convert to closed loop
if eig_max(A_tilde) > 1:
return False
else:
return True
def E(self,
Q: _DynamicQuantizer,
steptime: Union[int, None] = None,
_check_stability: bool = True) -> float:
"""
Returns estimation of E(Q).
Parameters
----------
Q : DynamicQuantizer
steptime : int or None, optional
Evaluation time. Must be a natural number.
(The default is `None`, which implies that this function
calculates until convergence.)
_check_stability : bool, optional
This shouldn't be changed.
`(steptime is not None or _check_stability)` must be `True`.
(The default is `True`.)
Returns
-------
float
Estimation of E(Q) in `steptime`.
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
if (steptime is not None) and steptime <= 0:
raise ValueError("steptime must be a natural number.")
if not _check_stability and steptime is None:
raise ValueError(
"`(steptime is not None or _check_stability)` must be `True`."
)
A_tilde = self.A + self.B2@self.C2 # convert to closed loop (only G)
A_bar = block([
[A_tilde, self.B2@Q.C],
[zeros((Q.A.shape[0], A_tilde.shape[0])), Q.A+Q.B@Q.C],
])
B_bar = block([
[self.B2],
[Q.B],
])
C_bar = block([
[self.C1, zeros((self.C1.shape[0], Q.A.shape[0]))]
])
# E = infinity
if _check_stability:
Qcl = _ctrl.ss(A_bar, B_bar, C_bar, C_bar@B_bar*0, True)
Qcl_minreal = Qcl.minreal()
if eig_max(Qcl_minreal.A) > 1:
return inf
k = 0
A_bar_k = eye(*(A_bar.shape))
sum_CAB = zeros((self.C1.shape[0], self.B2.shape[1]))
E_current = 0
if steptime is None:
k_max = inf
# smallest k that satisfies
# `eig_max(A_bar)**k / eig_max(A_bar) < 1e-8`
eig_max_ = eig_max(A_bar)
if eig_max_ <= 1e-8:
k_min = 1
elif 0.9999 <= eig_max_: # TODO: fix this
k_min = 1000
else:
k_min = 1 - 8 / _np.log10(eig_max_)
else:
k_max = steptime
k_min = 1
while k <= k_max:
E_past = E_current
sum_CAB = sum_CAB + abs(C_bar @ A_bar_k @ B_bar)
E_current = norm(sum_CAB) * Q.delta
if k >= k_min:
if abs(E_current-E_past)/E_current < 1e-8:
break
k = k + 1
A_bar_k = A_bar_k @ A_bar
return E_current
def is_stable_with_quantizer(self, Q) -> bool:
"""
Returns stability of this system with quantizer `Q`[1]_.
Returns
-------
bool
`True` if stable, `False` if not.
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
return self.is_stable and Q.is_stable
def is_stable_with(self, Q) -> bool:
"""
A shortened form of `is_stable_with_quantizer`.
Returns stability of this system with quantizer `Q`[1]_.
Returns
-------
bool
`True` if stable, `False` if not.
References
----------
.. [1] S. Azuma and T. Sugie: Synthesis of optimal dynamic
quantizers for discrete-valued input control; IEEE Transactions
on Automatic Control, Vol. 53, pp. 2064–2075 (2008)
"""
return self.is_stable_with_quantizer(Q)
|
import argparse
import csv
from os.path import abspath, dirname
from unicodedata import normalize
from urllib.parse import urlparse
from jinja2 import Environment, FileSystemLoader
def load_template(spider_data):
env = Environment(loader=FileSystemLoader("."))
template = env.get_template("spider_template.py")
return template.render(
spider_class_base=spider_data["spider_class_base"],
spider_class_name=spider_data["spider_class_name"],
spider_name=spider_data["spider_name"],
allowed_domain=spider_data["allowed_domain"],
start_year=spider_data["start_year"],
start_month=spider_data["start_month"],
start_day=spider_data["start_day"],
base_url=spider_data["base_url"],
territory_id=spider_data["territory_id"],
)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Generate a spider structure from CSV.")
parser.add_argument("file")
args = parser.parse_args()
project_root = dirname(dirname(abspath(__file__)))
with open(args.file) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
normalized_city_name = (
normalize("NFKD", row["city"]).encode("ASCII", "ignore").decode("utf8")
)
split_city_name = normalized_city_name.replace("-", " ").split()
split_city_name = [word.title() for word in split_city_name]
domain = urlparse(row["url"]).netloc
spider_data = {
"spider_class_base": row["base_class"],
"spider_class_name": f"{row['state'].title()}{''.join(split_city_name)}",
"spider_name": f"{row['state'].lower()}_{'_'.join(split_city_name).lower()}",
"allowed_domain": domain,
"start_year": row["start_year"],
"start_month": row["start_month"],
"start_day": row["start_day"],
"base_url": row["url"],
"territory_id": row["territory_id"],
}
spider_path = f"{project_root}/data_collection/gazette/spiders/"
filepath = f"{spider_path}{row['state'].lower()}_{'_'.join(split_city_name).lower()}.py"
with open(filepath, "w") as spiderfile:
spiderfile.write(load_template(spider_data))
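# Usage sketch (script and CSV names are hypothetical):
#   python generate_spider.py cities.csv
# The CSV must provide the columns read above: city, state, url, base_class,
# start_year, start_month, start_day and territory_id.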
|
# @Author: George Onoufriou <georgeraven>
# @Date: 2018-11-04
# @Filename: argz.py
# @Last modified by: georgeraven
# @Last modified time: 2018-11-04
# @License: Please see LICENSE in project root.
# @Copyright: George Onoufriou
import os, sys, argparse, configparser
def argz(argv=None, description=None):
# creating parser object with description
description = description if description is not None else "Serverus generalised Python server"
parser = argparse.ArgumentParser(description=description)
# creating argument groups
optional = parser._action_groups.pop() # popping -h off
required = parser.add_argument_group('required arguments')
optional.add_argument("-S", "--toStartServer",
default=False,
action="store_true",
help="flag to start web server and its associated components")
optional.add_argument("-s", "--toStopServer",
default=False,
action="store_true",
help="flag to stop web server and its associated components")
# adding optional params at end again
parser._action_groups.append(optional) # pushing -h back on with extras
return vars(parser.parse_args(argv))
def getDefaultArg(conf, section, key, fallbackVal, isBool=False):
#TODO: this is very awkward because it might be called where conf is None
try:
val = ""
if(isBool == False):
val = conf[str(section)][str(key)]
else:
val = conf.getboolean(str(section), str(key))
if(val == ""):
return fallbackVal
else:
return val
except:
return fallbackVal
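# --- Usage sketch ---
# argz() returns the parsed arguments as a plain dict:
# args = argz(["-S"])
# assert args == {"toStartServer": True, "toStopServer": False}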
|
__all__ = (
"Result",
"AlwaysSuccess",
"AlwaysFailure",
"ResultType",
"NoResultType",
"unsafe",
"success",
"failure",
"unwrap_success",
"unwrap_failure",
"NoResult",
)
# Annotations
from .result import (
Result,
AlwaysSuccess,
AlwaysFailure,
)
# Types
from .result import (
ResultType,
NoResultType,
)
# Concretes
from .result import (
unsafe,
success,
failure,
unwrap_success,
unwrap_failure,
NoResult,
)
|
from markdown import Extension
from markdown.preprocessors import Preprocessor
from prosecode.chunk import Chunk
import re
class CodeChunkExtension(Extension):
def __init__(self, execute = False, executepath = ''):
super().__init__()
self.execute = execute
self.executepath = executepath
self.chunks = {}
def extendMarkdown(self, md):
""" Add CodeChunkPreprocessor to the Markdown instance. """
md.registerExtension(self)
preprocessor = CodeChunkPreprocessor(md, self.chunks, self.execute,
self.executepath)
md.preprocessors.register(preprocessor, 'code_chunk', 26)
class CodeChunkPreprocessor(Preprocessor):
CODE_CHUNK_RE = re.compile(r'''
(?P<fence>^(?:`{3,}))[ ]* # opening ```
(?P<lang>[\w#.+-]*)[ ]* # language
(?P<chunkoptions>\{.*?\})?[ ]*\n # code chunk options
(?P<code>.*?)
(?<=\n)(?P=fence)[ ]*$''', re.MULTILINE | re.DOTALL | re.VERBOSE)
CODE_WRAP = '<pre class="prettyprint"><code%s>%s</code></pre>'
LANG_TAG = ' class="lang-%s"'
OUTPUT_CLASS = ' class="verbatim"'
def __init__(self, md, chunks, execute, executepath = ''):
super().__init__(md)
self.chunks = chunks
self.execute = execute
self.executepath = executepath
def run(self, lines):
""" Match code chunks and store them in the HtmlStash. """
text = "\n".join(lines)
while True:
m = self.CODE_CHUNK_RE.search(text)
if not m: break # Break if there is no match.
# Extract lang, code, and chunkoptions from the match.
lang = m.group('lang')
if not lang: # the regex group yields '' (not None) when no language is given
lang = 'verbatim'
code = m.group('code')
chunkoptions = m.group('chunkoptions')
# Create the chunk, register its predecessor and store it.
chunk = Chunk(lang, chunkoptions, code)
chunk.setcontinue(self.chunks.get(chunk.cont_id))
self.chunks[chunk.id] = chunk
# If it's a chunk we want to see, stash it.
if chunk.hide:
placeholder = ''
else:
codehtml = self._codehtml(lang, code)
placeholder = self.md.htmlStash.store(codehtml)
output = ''
if self.execute and chunk.cmd:
stdout, stderr = chunk.execute(self.executepath)
if len(stdout) + len(stderr) > 0:
if chunk.error_expected:
rawoutput = self._escape(stdout + '\n' + stderr)
else:
if stderr:
pass
# This is where we would log any errors.
rawoutput = self._escape(stdout)
if chunk.figure:
imgfilename = chunk.id.replace('.', '/') + '.svg'
chunkoutput = '<img src=\"{}\"></img>'.format(imgfilename)
output = self.md.htmlStash.store(chunkoutput)
elif chunk.output != 'none':
chunkoutput = self._codehtml('verbatim', rawoutput)
output = self.md.htmlStash.store(chunkoutput)
text = '{}\n{}\n{}\n{}'.format(text[:m.start()],
placeholder,
output,
text[m.end():])
return text.split("\n")
def _codehtml(self, lang, code):
langhtml = self.LANG_TAG % lang if lang else ''
codehtml = self.CODE_WRAP % (langhtml,
self._escape(code))
return codehtml
def _escape(self, txt):
""" basic html escaping """
txt = txt.replace('&', '&')
txt = txt.replace('<', '<')
txt = txt.replace('>', '>')
txt = txt.replace('"', '"')
return txt
def makeExtension(**kwargs): # pragma: no cover
return CodeChunkExtension(**kwargs)
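# --- Usage sketch ---
# Registering the extension with python-markdown; the chunk-option syntax
# inside `{...}` is defined by prosecode.chunk.Chunk and is assumed here:
#
# import markdown
# md = markdown.Markdown(extensions=[CodeChunkExtension()])
# html = md.convert("```python\nprint('hi')\n```")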
|
from setuptools import find_packages, setup
# find_packages() gets all the relevant Python packages; for non-Python
# files, you need to add them to MANIFEST.in.
VERSION = "0.2.0"
setup(
name='YTReviewsAPI',
version=VERSION,
author="Porter Hunley",
author_email="porterhunley@gatech.edu",
license="MIT",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'flask',
],
)
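# Install sketch: from the directory containing this file,
#   pip install .
# or build a source distribution with `python setup.py sdist`.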
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import paramiko
import Mobigen.Common.Log as Log; Log.Init()
class SftpClient :
def __init__(self, host, port, user, passwd) :
self.host = host
self.port = port
self.user = user
self.passwd = passwd
self._connect()
def _connect(self) :
try :
sftpHosts = self.host.split(';')
__LOG__.Trace('SFTP host: {}'.format(sftpHosts))
for oneHost in sftpHosts :
try :
# self.transport = paramiko.Transport((oneHost, int(self.port)))
# self.transport.connect(username = self.user, password = self.passwd)
# self.transport.connect(username = '11', password = self.passwd)
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(oneHost, port = int(self.port), username = self.user, password = self.passwd, timeout = 10)
__LOG__.Trace('SSH Connected HOST({})'.format(oneHost))
break
except :
__LOG__.Trace('SFTP Connection failed. HOST({})'.format(oneHost))
# self.sftp = paramiko.SFTPClient.from_transport(self.transport)
self.sftp = self.ssh.open_sftp()
except :
__LOG__.Trace('SFTP Connection error. HOST({})/PORT({})'.format(self.host, self.port))
raise
def download(self, remoteFilepath, remoteFilename, localFilepath) :
try :
self.sftp.get(os.path.join(remoteFilepath, remoteFilename), os.path.join(localFilepath, remoteFilename))
__LOG__.Trace('{} -> {} file download succeed'.format(os.path.join(remoteFilepath, remoteFilename), os.path.join(localFilepath, remoteFilename)))
except :
__LOG__.Trace('SFTP file download failed.')
raise
def mkdirs(self, remoteDir) :
if not remoteDir :
return
if remoteDir == '/' :
self.sftp.chdir(remoteDir)
return
try :
self.sftp.chdir(remoteDir)
except IOError :
dirname, basename = os.path.split(remoteDir.rstrip('/'))
self.mkdirs(dirname)
self.sftp.mkdir(basename)
self.sftp.chdir(basename)
return
def upload(self, sourceFilepath, destinationFilepath) :
try :
self.sftp.put(sourceFilepath, destinationFilepath)
__LOG__.Trace('{} -> {} file upload succeed'.format(sourceFilepath, destinationFilepath))
except :
__LOG__.Trace('SFTP file upload failed.')
raise
def close(self) :
try :
if self.sftp :
self.sftp.close()
__LOG__.Trace('sftp closed')
# if self.transport :
# self.transport.close()
if self.ssh :
self.ssh.close()
__LOG__.Trace('ssh closed')
except :
__LOG__.Trace('SFTP Connection close failed.')
raise
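# --- Usage sketch (hostnames and credentials are placeholders) ---
# client = SftpClient('host1;host2', 22, 'user', 'secret')
# client.mkdirs('/data/incoming')
# client.upload('./local.csv', '/data/incoming/local.csv')
# client.close()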
|
import unittest
import ciw
class TimeDependentBatches(ciw.dists.Distribution):
def sample(self, t, ind=None):
if t < 11.0:
return 5
return 1
class TestArrivalNode(unittest.TestCase):
def test_init_method(self):
ciw.seed(5)
Q = ciw.Simulation(ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params.yml'))
N = ciw.ArrivalNode(Q)
self.assertEqual(round(N.next_event_date, 5), 0.00440)
self.assertEqual(N.number_of_individuals, 0)
dates_dict = {1: {0: 0.2110410999, 1: 0.1415614623, 2: 0.3923690877},
2: {0: 0.1218825551, 1: 0.0044003133, 2: 0.2442775601},
3: {0: 0.0819463473, 1: 0.4135097542, 2: 0.7256307839},
4: {0: 0.1738823223, 1: 0.3988184145, 2: 0.2987813628}}
self.assertEqual({nd: {obs: round(N.event_dates_dict[nd][obs], 10)
for obs in N.event_dates_dict[nd]} for nd in N.event_dates_dict},
dates_dict)
def test_initialise_event_dates_dict_method(self):
ciw.seed(6)
Q = ciw.Simulation(ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params.yml'))
N = ciw.ArrivalNode(Q)
dates_dict_1 = {1: {0: 0.4362282541, 1: 0.2672232406, 2: 0.3864256273},
2: {0: 0.1636952311, 1: 0.0714709565, 2: 0.8065738414},
3: {0: 0.4088480190, 1: 0.0514323248, 2: 0.8132038176},
4: {0: 1.1573751438, 1: 0.4649276714, 2: 0.8176876727}}
dates_dict_2 = {1: {0: 0.0325870775, 1: 0.8054262558, 2: 0.8168179515},
2: {0: 0.0841671381, 1: 0.0328245299, 2: 0.2196023847},
3: {0: 0.2519089068, 1: 0.0573597814, 2: 1.5117882121},
4: {0: 0.8881158889, 1: 0.0560592622, 2: 2.1307650868}}
self.assertEqual({nd: {obs: round(N.event_dates_dict[nd][obs], 10)
for obs in N.event_dates_dict[nd]} for nd in N.event_dates_dict},
dates_dict_1)
N.initialise_event_dates_dict()
self.assertEqual({nd: {obs: round(N.event_dates_dict[nd][obs], 10)
for obs in N.event_dates_dict[nd]} for nd in N.event_dates_dict},
dates_dict_2)
def test_repr_method(self):
Q = ciw.Simulation(ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params.yml'))
N = ciw.ArrivalNode(Q)
self.assertEqual(str(N), 'Arrival Node')
def test_find_next_event_date_method(self):
ciw.seed(1)
Q = ciw.Simulation(ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params.yml'))
N = ciw.ArrivalNode(Q)
self.assertEqual(round(N.next_event_date, 5), 0.00105)
N.find_next_event_date()
self.assertEqual(round(N.next_event_date, 5), 0.00105)
self.assertEqual(N.next_node, 1)
self.assertEqual(N.next_class, 1)
N.have_event()
self.assertEqual(round(N.next_event_date, 5), 0.00518)
self.assertEqual(N.next_node, 3)
self.assertEqual(N.next_class, 1)
def test_have_event_method(self):
ciw.seed(1)
Q = ciw.Simulation(ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params.yml'))
N = ciw.ArrivalNode(Q)
self.assertEqual(Q.transitive_nodes[0].all_individuals, [])
self.assertEqual(Q.transitive_nodes[0].individuals, [[]])
self.assertEqual(Q.transitive_nodes[1].all_individuals, [])
self.assertEqual(Q.transitive_nodes[1].individuals, [[]])
self.assertEqual(Q.transitive_nodes[2].all_individuals, [])
self.assertEqual(Q.transitive_nodes[2].individuals, [[]])
self.assertEqual(Q.transitive_nodes[3].all_individuals, [])
self.assertEqual(Q.transitive_nodes[3].individuals, [[]])
self.assertEqual(round(N.next_event_date, 5), 0.00105)
self.assertEqual(N.next_node, 1)
N.have_event()
self.assertEqual([str(obj) for obj
in Q.transitive_nodes[0].all_individuals],
['Individual 1'])
self.assertEqual([str(obj) for pr_cls in
Q.transitive_nodes[0].individuals for obj in pr_cls],
['Individual 1'])
self.assertEqual(Q.transitive_nodes[1].all_individuals, [])
self.assertEqual(Q.transitive_nodes[1].individuals, [[]])
self.assertEqual(Q.transitive_nodes[2].all_individuals, [])
self.assertEqual(Q.transitive_nodes[2].individuals, [[]])
self.assertEqual(Q.transitive_nodes[3].all_individuals, [])
self.assertEqual(Q.transitive_nodes[3].individuals, [[]])
self.assertEqual(round(N.next_event_date, 5), 0.00518)
self.assertEqual(N.next_node, 3)
ciw.seed(12)
Q = ciw.Simulation(ciw.create_network_from_yml(
'ciw/tests/testing_parameters/params.yml'))
N = ciw.ArrivalNode(Q)
self.assertEqual(Q.transitive_nodes[0].all_individuals, [])
self.assertEqual(Q.transitive_nodes[0].individuals, [[]])
self.assertEqual(Q.transitive_nodes[1].all_individuals, [])
self.assertEqual(Q.transitive_nodes[1].individuals, [[]])
self.assertEqual(Q.transitive_nodes[2].all_individuals, [])
self.assertEqual(Q.transitive_nodes[2].individuals, [[]])
self.assertEqual(Q.transitive_nodes[3].all_individuals, [])
self.assertEqual(Q.transitive_nodes[3].individuals, [[]])
self.assertEqual(round(N.next_event_date, 5), 0.01938)
self.assertEqual(N.next_node, 3)
N.have_event()
self.assertEqual(Q.transitive_nodes[0].all_individuals, [])
self.assertEqual(Q.transitive_nodes[0].individuals, [[]])
self.assertEqual(Q.transitive_nodes[1].all_individuals, [])
self.assertEqual(Q.transitive_nodes[1].individuals, [[]])
self.assertEqual([str(obj) for obj
in Q.transitive_nodes[2].all_individuals],
['Individual 1'])
self.assertEqual([str(obj) for pr_cls
in Q.transitive_nodes[2].individuals for obj in pr_cls],
['Individual 1'])
self.assertEqual(Q.transitive_nodes[3].all_individuals, [])
self.assertEqual(Q.transitive_nodes[3].individuals, [[]])
self.assertEqual(round(N.next_event_date, 5), 0.02021)
self.assertEqual(N.next_node, 2)
def test_no_arrivals_example(self):
N = ciw.create_network(
arrival_distributions=[ciw.dists.NoArrivals(), ciw.dists.Exponential(1)],
service_distributions=[ciw.dists.Exponential(4), ciw.dists.Exponential(4)],
routing=[[0.5, 0.1], [0.1, 0.1]],
number_of_servers=[1, 2])
Q = ciw.Simulation(N)
AN = Q.nodes[0]
self.assertEqual(
str(AN.simulation.network.customer_classes[0].arrival_distributions[0]),
'NoArrivals')
self.assertEqual(AN.inter_arrival(1, 0), float('Inf'))
def test_rejection_dict(self):
params = {'arrival_distributions':[ciw.dists.Deterministic(3.0),
ciw.dists.Deterministic(4.0)],
'service_distributions':[ciw.dists.Deterministic(10.0),
ciw.dists.Deterministic(10.0)],
'routing':[[0.0, 1.0], [0.0, 0.0]],
'number_of_servers':[1, 1],
'queue_capacities':[1, 1]}
Q = ciw.Simulation(ciw.create_network(**params))
self.assertEqual(Q.rejection_dict, {1: {0: []}, 2: {0:[]}})
Q.simulate_until_max_time(20)
self.assertEqual(Q.rejection_dict,
{1: {0: [9.0, 12.0, 18.0]}, 2: {0: [12.0, 16.0]}})
def test_send_individual(self):
params = {'arrival_distributions':[ciw.dists.Exponential(3.0)],
'service_distributions':[ciw.dists.Exponential(10.0)],
'routing':[[0.5]],
'number_of_servers':[1]}
Q = ciw.Simulation(ciw.create_network(**params))
AN = Q.nodes[0]
ind1 = ciw.Individual(555)
ind2 = ciw.Individual(666)
self.assertEqual(Q.nodes[1].all_individuals, [])
self.assertEqual(Q.nodes[1].individuals, [[]])
AN.send_individual(Q.nodes[1], ind1)
self.assertEqual(Q.nodes[1].all_individuals, [ind1])
self.assertEqual(Q.nodes[1].individuals, [[ind1]])
AN.send_individual(Q.nodes[1], ind2)
self.assertEqual(Q.nodes[1].all_individuals, [ind1, ind2])
self.assertEqual(Q.nodes[1].individuals, [[ind1, ind2]])
    def test_record_rejection(self):
params = {'arrival_distributions':[ciw.dists.Exponential(3.0)],
'service_distributions':[ciw.dists.Exponential(10.0)],
'routing':[[0.5]],
'number_of_servers':[1]}
Q = ciw.Simulation(ciw.create_network(**params))
AN = Q.nodes[0]
AN.next_event_date = 3.33
self.assertEqual(AN.rejection_dict, {1: {0: []}})
AN.record_rejection(Q.nodes[1])
self.assertEqual(AN.rejection_dict, {1: {0: [3.33]}})
AN.next_event_date = 4.44
AN.record_rejection(Q.nodes[1])
self.assertEqual(AN.rejection_dict, {1: {0: [3.33, 4.44]}})
def test_update_next_event_date_passes(self):
params = {'arrival_distributions':[ciw.dists.Exponential(3.0)],
'service_distributions':[ciw.dists.Exponential(10.0)],
'routing':[[0.5]],
'number_of_servers':[1]}
Q = ciw.Simulation(ciw.create_network(**params))
AN = Q.nodes[0]
AN.next_event_date = 3.33
AN.update_next_event_date()
self.assertEqual(AN.next_event_date, 3.33)
AN.update_next_event_date()
self.assertEqual(AN.next_event_date, 3.33)
def test_batching(self):
# Test that 2 arrivals occur at a time
N = ciw.create_network(
arrival_distributions=[ciw.dists.Sequential([5.0, 5.0, 100.0])],
service_distributions=[ciw.dists.Sequential([2.0, 3.0])],
number_of_servers=[1],
batching_distributions=[ciw.dists.Deterministic(2)]
)
Q = ciw.Simulation(N)
N = Q.transitive_nodes[0]
self.assertEqual(len(N.all_individuals), 0)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 2)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 4)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 6)
# Test that batched individuals have same arrival date
N = ciw.create_network(
arrival_distributions=[ciw.dists.Sequential([5.0, 5.0, 2.0, 1.0, 5.0, 100.0])],
service_distributions=[ciw.dists.Sequential([2.0, 3.0])],
number_of_servers=[1],
batching_distributions=[ciw.dists.Deterministic(2)]
)
Q = ciw.Simulation(N)
Q.simulate_until_max_time(70.0)
recs = Q.get_all_records()
self.assertEqual(
[r.arrival_date for r in recs],
[5.0, 5.0, 10.0, 10.0, 12.0, 12.0, 13.0, 13.0, 18.0, 18.0])
self.assertEqual(
[r.service_start_date for r in recs],
[5.0, 7.0, 10.0, 12.0, 15.0, 17.0, 20.0, 22.0, 25.0, 27.0])
self.assertEqual(
[r.service_time for r in recs],
[2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0, 2.0, 3.0])
self.assertEqual(
[r.service_end_date for r in recs],
[7.0, 10.0, 12.0, 15.0, 17.0, 20.0, 22.0, 25.0, 27.0, 30.0])
self.assertEqual(
[r.waiting_time for r in recs],
[0.0, 2.0, 0.0, 2.0, 3.0, 5.0, 7.0, 9.0, 7.0, 9.0])
def test_batching_sequential(self):
# Test sequential batches
N = ciw.create_network(
arrival_distributions=[ciw.dists.Sequential([5.0, 5.0, 100.0])],
service_distributions=[ciw.dists.Sequential([2.0, 3.0])],
number_of_servers=[1],
batching_distributions=[ciw.dists.Sequential([1, 1, 4, 2, 1, 5])]
)
Q = ciw.Simulation(N)
N = Q.transitive_nodes[0]
self.assertEqual(len(N.all_individuals), 0)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 1)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 2)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 6)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 8)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 9)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 14)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 15)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 16)
Q.nodes[0].have_event()
self.assertEqual(len(N.all_individuals), 20)
def test_batching_custom(self):
# Test custom batches
N = ciw.create_network(
arrival_distributions=[ciw.dists.Sequential([5.0, 5.0, 100.0])],
service_distributions=[ciw.dists.Sequential([2.0, 3.0])],
number_of_servers=[1],
batching_distributions=[ciw.dists.Pmf([1, 5], [0.5, 0.5])]
)
ciw.seed(12)
Q = ciw.Simulation(N)
N = Q.transitive_nodes[0]
        observed_inds = []
        for _ in range(20):
            observed_inds.append(len(N.all_individuals))
            Q.nodes[0].have_event()
        # Numbers of individuals should only increase by 1 or by 5
        self.assertEqual(observed_inds,
[0, 1, 6, 11, 12, 13, 14, 15, 20, 25, 30, 35, 40, 41, 42, 43, 48, 49, 54, 55])
def test_batching_multi_node(self):
N = ciw.create_network(
arrival_distributions=[ciw.dists.Deterministic(20),
ciw.dists.Deterministic(23),
ciw.dists.Deterministic(25)],
service_distributions=[ciw.dists.Deterministic(1),
ciw.dists.Deterministic(1),
ciw.dists.Deterministic(1)],
number_of_servers=[10, 10, 10],
batching_distributions=[ciw.dists.Deterministic(3),
ciw.dists.Deterministic(2),
ciw.dists.Deterministic(1)],
routing=[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
)
ciw.seed(12)
Q = ciw.Simulation(N)
Q.simulate_until_max_time(28)
recs = Q.get_all_records()
arrivals = [r.arrival_date for r in recs]
nodes = [r.node for r in recs]
classes = [r.customer_class for r in recs]
self.assertEqual(arrivals, [20, 20, 20, 23, 23, 25])
self.assertEqual(nodes, [1, 1, 1, 2, 2, 3])
self.assertEqual(classes, [0, 0, 0, 0, 0, 0])
def test_batching_multi_classes(self):
N = ciw.create_network(
arrival_distributions={'Class 0': [ciw.dists.Deterministic(20)],
'Class 1': [ciw.dists.Deterministic(23)],
'Class 2': [ciw.dists.Deterministic(25)]},
service_distributions={'Class 0': [ciw.dists.Deterministic(1)],
'Class 1': [ciw.dists.Deterministic(1)],
'Class 2': [ciw.dists.Deterministic(1)]},
number_of_servers=[10],
batching_distributions={'Class 0': [ciw.dists.Deterministic(3)],
'Class 1': [ciw.dists.Deterministic(2)],
'Class 2': [ciw.dists.Deterministic(1)]}
)
ciw.seed(12)
Q = ciw.Simulation(N)
Q.simulate_until_max_time(28)
recs = Q.get_all_records()
arrivals = [r.arrival_date for r in recs]
nodes = [r.node for r in recs]
classes = [r.customer_class for r in recs]
self.assertEqual(arrivals, [20, 20, 20, 23, 23, 25])
self.assertEqual(nodes, [1, 1, 1, 1, 1, 1])
self.assertEqual(classes, [0, 0, 0, 1, 1, 2])
def test_batching_time_dependent(self):
N = ciw.create_network(
arrival_distributions=[ciw.dists.Sequential([5.0, 5.0, 2.0, 1.0, 1000.0])],
service_distributions=[ciw.dists.Deterministic(2)],
number_of_servers=[1],
batching_distributions=[TimeDependentBatches()]
)
Q = ciw.Simulation(N)
Q.simulate_until_max_time(30.0)
recs = Q.get_all_records()
self.assertEqual(len(Q.nodes[-1].all_individuals), 12)
self.assertEqual(len(Q.nodes[1].all_individuals), 0)
recs = Q.get_all_records()
self.assertEqual([r.arrival_date for r in recs], [5.0, 5.0, 5.0, 5.0, 5.0, 10.0, 10.0, 10.0, 10.0, 10.0, 12.0, 13.0])
self.assertEqual([r.exit_date for r in recs], [7.0, 9.0, 11.0, 13.0, 15.0, 17.0, 19.0, 21.0, 23.0, 25.0, 27.0, 29.0])
|
#!/usr/bin/env python3
"""\
Stream g-code to grbl controller
This script differs from the simple_stream.py script by
tracking the number of characters in grbl's serial read
buffer. This allows grbl to fetch the next line directly
from the serial buffer and does not have to wait for a
response from the computer. This effectively adds another
buffer layer to prevent buffer starvation.
CHANGELOG:
- 20140714: Updated baud rate to 115200. Added a settings
write mode via simple streaming method. MIT-licensed.
TODO:
- Add runtime command capabilities
---------------------
The MIT License (MIT)
Copyright (c) 2012-2014 Sungeun K. Jeon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---------------------
"""
import serial
import time
import sys
import argparse
from grbl_link.interface import Grbl
from grbl_link.protocol import SimpleProtocol
from grbl_link.messages import *
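# A minimal sketch (illustrative, unused by this script) of the
# character-counting idea described in the module docstring, written against
# plain pySerial. RX_BUFFER_SIZE is an assumption: common grbl builds reserve
# 128 bytes for the serial receive buffer; adjust it for your firmware.
RX_BUFFER_SIZE = 128

def stream_with_character_counting(port, gcode_lines):
    conn = serial.Serial(port, 115200)
    pending = []  # byte counts of lines sent but not yet acknowledged by grbl
    for raw in gcode_lines:
        data = (raw.strip() + '\n').encode()
        # Block until this line fits in grbl's serial receive buffer.
        while sum(pending) + len(data) > RX_BUFFER_SIZE:
            resp = conn.readline().strip()
            if (resp.startswith(b'ok') or resp.startswith(b'error')) and pending:
                pending.pop(0)  # grbl consumed one buffered line
        conn.write(data)
        pending.append(len(data))
    # Drain the remaining acknowledgements before closing the port.
    while pending:
        resp = conn.readline().strip()
        if resp.startswith(b'ok') or resp.startswith(b'error'):
            pending.pop(0)
    conn.close()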
# Define command line argument interface
parser = argparse.ArgumentParser(description='Stream g-code file to grbl. (pySerial and argparse libraries required)')
parser.add_argument('gcode_file', type=argparse.FileType('r'),
help='g-code filename to be streamed')
parser.add_argument('device_file',
help='serial device path')
parser.add_argument('-q','--quiet',action='store_true', default=False,
help='suppress output text')
parser.add_argument('-s','--settings',action='store_true', default=False,
help='settings write mode')
args = parser.parse_args()
# Periodic timer to query for status reports
# TODO: Need to track down why this doesn't restart consistently before a release.
# def periodic():
# s.write('?')
# t = threading.Timer(0.1, periodic) # In seconds
# t.start()
# Initialize
print(args.device_file)
s = serial.Serial(args.device_file,115200)
f = args.gcode_file
verbose = not args.quiet
settings_mode = args.settings
# Wake up grbl
print("Initializing grbl...")
grbl = Grbl(s, protocol=SimpleProtocol, debug=True)
# def message_handler(message, grbl):
# if isinstance(message, WelcomeMessage)
while not grbl.version:
pass
grbl.query_status()
while not grbl.status['mode']:
pass
if grbl.status['mode'] == 'Alarm':
grbl.unlock()
# The entire file will be enqueued
for line in f:
line = line.strip()
#print(repr(line))
grbl.send(line)
print("The program has been queued and will be run. Pause with CTRL-C")
# Wait until every queued line has been sent
while True:
    try:
        if grbl.protocol.send_queue.empty():
            break
        time.sleep(0.1)  # avoid a busy-wait while the queue drains
    except KeyboardInterrupt:
        # use CTRL-C to pause Grbl
        #grbl.stop() # stop processing send queue; stop receiving messages
        grbl.send(b"!") # feed hold
        print("Press ENTER to resume the program. Press CTRL-D to quit the program.")
        input("")
        grbl.send(b"~") # cycle start / resume
        #grbl.start()
# Wait for user input after streaming is completed
print("G-code streaming finished!\n")
print("WARNING: Wait until grbl completes buffered g-code blocks before exiting.")
input(" Press <Enter> to exit and disable grbl.")
# Close file and serial port
f.close()
s.close()
|
import random
ans = []  # ans[t] holds every stock price at depth t of the binomial tree

def generate_binomial_pricing_of_stock(S0, u, d, n):
    # Expects ans to be seeded with the root layer [S0] before the first call.
    if n == 0:
        return
    curr = []
    for val in ans[-1]:
        curr.append(val * u)
        curr.append(val * d)
    ans.append(curr)
    generate_binomial_pricing_of_stock(S0, u, d, n - 1)
def call_option_pricing(rate, strike_price, answer=ans):
    call_option_price = []
    delta = []
    B = []
    for i in range(len(answer) - 1, -1, -1):
        if i == len(answer) - 1:
            # Terminal layer: the call pays max(0, S - K).
            current = []
            for val in answer[i]:
                current.append(max(0, val - strike_price))
            call_option_price.append(current)
        else:
            current_borrow = []
            current_delta = []
            current_value = []
            for j in range(len(answer[i])):
                s_now = answer[i][j]
                su = answer[i + 1][2 * j]
                sd = answer[i + 1][2 * j + 1]
                pu = call_option_price[-1][2 * j]      # option value after an up-move
                pd = call_option_price[-1][2 * j + 1]  # option value after a down-move
                # Replicating portfolio: d_j shares plus a bond b_j such that
                # d_j*su + b_j*(1+rate) = pu and d_j*sd + b_j*(1+rate) = pd.
                d_j = (pu - pd) / (su - sd)
                b_j = (pu + pd - d_j * (su + sd)) / (2 * (1 + rate))
                current_borrow.append(b_j)
                current_delta.append(d_j)
                current_value.append(d_j * s_now + b_j)
            B.append(current_borrow)
            call_option_price.append(current_value)
            delta.append(current_delta)
    return B, call_option_price, delta
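# One-period check (illustrative): with su=110, sd=90, pu=10, pd=0 and rate=0.05,
# d_j = (10-0)/(110-90) = 0.5 and b_j = (10+0 - 0.5*200)/(2*1.05) ≈ -42.857, so the
# option value at s_now=100 is 0.5*100 - 42.857 ≈ 7.143, matching the risk-neutral
# price (0.75*10 + 0.25*0)/1.05 with p = (1.05-0.9)/(1.1-0.9) = 0.75.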
def monte_carlo_simulation(s0, u, d, strike_price, rate, n, s, N):
    random.seed(s)
    avg_price = 0
    p = (1 + rate - d) / (u - d)  # risk-neutral probability of an up-move
    for i in range(N):
        price = s0
        for j in range(n):
            if random.random() < p:  # an up-move occurs with probability p
                price *= u
            else:
                price *= d
        price = max(price - strike_price, 0)
        avg_price += price
    avg_price /= N
    avg_price /= pow(1 + rate, n)  # discount the expected payoff back to t=0
    return avg_price
s0,u,d,k,r,n,s,N = map(float, input().split())
n = int(n)
N = int(N)
ans.append([s0])
generate_binomial_pricing_of_stock(s0, u, d, n)
avg_price = monte_carlo_simulation(s0,u,d,k,r,n,s,N)
B, call_option_price, delta = call_option_pricing(r,k)
print("{0:.6f}".format(call_option_price[-1][0]),end=' ')
print("{0:.6f}".format(avg_price))
|
# Exact copy of global values in extensions.tg_filters
SPA = "spa"
WEBAPP = "webapp"
ALPINE = "alpine"
DEBIAN = "debian"
YES = 'yes'
NO = 'no'
S3 = 'S3'
GCS = 'GCS'
|
limit = {
'temperature': {'min': 0, 'max': 45},
'soc': {'min': 20, 'max': 80},
'charge_rate': {'min': 0, 'max': 0.8}
}
threshold = {
'temperature': 4,
'soc': 4,
'charge_rate': 0.04
}
warning_message = {
'EN':{
        'temperature': {'min': 'Warning: Approaching low temperature', 'max': 'Warning: Approaching high temperature', 'out_of_range': 'Temperature is out of range!'},
        'soc': {'min': 'Warning: Approaching discharge', 'max': 'Warning: Approaching charge-peak', 'out_of_range': 'State of Charge is out of range!'},
        'charge_rate': {'min': 'Warning: Approaching low charge-rate', 'max': 'Warning: Approaching high charge-rate', 'out_of_range': 'Charge rate is out of range!'}
},
'DE':{
'temperature': {'min': 'Warnung: Annäherung an weniger Temperatur', 'max': 'Warnung: Annähernd hohe Temperatur', 'out_of_range': 'Die Temperatur liegt außerhalb des Bereichs!'},
'soc': {'min': 'Warnung: Nahende Entladung', 'max': 'Warnung: Annäherung an den Ladespitzenwert', 'out_of_range': 'Der Ladezustand ist außer Reichweite!'},
'charge_rate': {'min': 'Warnung: Annäherung an weniger Ladungsrate', 'max': 'Warnung: Annäherung an hohe Ladegeschwindigkeit', 'out_of_range': 'Die Laderate liegt außerhalb des Bereichs!'}
}
}
scaling_factor = {
'temperature': {'celsius': 1, 'fahrenheit': 5/9, 'kelvin': 1},
'soc': {'percent': 1, 'one': 100},
'charge_rate': {'percent': 1/100, 'one': 1}
}
offset = {
'temperature': {'celsius': 0, 'fahrenheit': -32, 'kelvin': -273.15},
'soc': {'percent': 0, 'one': 0},
'charge_rate': {'percent': 0, 'one': 0}
}
def print_warning(warning_text):
if warning_text == 'No Warning':
return None
print(warning_text)
def is_lower_state_of_parameter_tolerant(parameter, value, language):
if value >= limit[parameter]['min'] and value <= (limit[parameter]['min'] + threshold[parameter]):
warning_text = warning_message[language][parameter]['min']
return warning_text
return 'No Warning'
def is_higher_state_of_parameter_tolerant(parameter, value, language):
if value <= limit[parameter]['max'] and value >= (limit[parameter]['max'] - threshold[parameter]):
warning_text = warning_message[language][parameter]['max']
return warning_text
return 'No Warning'
def convert_to_calculation_value(parameter, value, unit):
    # Normalise to the unit the limits are defined in: (value + offset) * scale.
    return (value + offset[parameter][unit]) * scaling_factor[parameter][unit]
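# Example (illustrative): 98.6 in 'fahrenheit' -> (98.6 + (-32)) * 5/9 = 37.0 celsius,
# and an soc of 0.7 with unit 'one' -> (0.7 + 0) * 100 = 70 percent.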
def is_battery_parameter_ok(parameter, value, unit, language):
value = convert_to_calculation_value(parameter, value, unit)
warning_text = is_lower_state_of_parameter_tolerant(parameter, value, language)
print_warning(warning_text)
warning_text = is_higher_state_of_parameter_tolerant(parameter, value, language)
print_warning(warning_text)
if value < limit[parameter]['min'] or value > limit[parameter]['max']:
warning_text = warning_message[language][parameter]['out_of_range']
print_warning(warning_text)
return False
return True
def battery_is_ok(parameter, language):
parameter_verdict = {}
for attribute in parameter:
        parameter_verdict[attribute] = is_battery_parameter_ok(attribute, parameter[attribute]['value'], parameter[attribute]['unit'], language)
if False in [parameter_verdict['temperature'], parameter_verdict['soc'], parameter_verdict['charge_rate']]:
return False
return True
if __name__ == '__main__':
assert(battery_is_ok({'temperature': {'value': 25, 'unit': 'celsius'}, 'soc': {'value': 70, 'unit': 'percent'}, 'charge_rate': {'value': 0.7, 'unit': 'one'}}, 'EN') is True)
assert(battery_is_ok({'temperature': {'value': 50, 'unit': 'celsius'}, 'soc': {'value':85, 'unit': 'percent'}, 'charge_rate': {'value':0, 'unit': 'one'}}, 'DE') is False)
|
print("Hello World")
x = input("your name?" )
print("Hello ", x)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Ocean(pulumi.CustomResource):
associate_public_ip_address: pulumi.Output[bool]
"""
Configure public IP address allocation.
"""
autoscaler: pulumi.Output[dict]
"""
Describes the Ocean ECS autoscaler.
* `cooldown` (`float`) - Cooldown period between scaling actions.
* `down` (`dict`) - Auto Scaling scale down operations.
    * `maxScaleDownPercentage` (`float`) - The maximum percentage to scale down per action; a number between 1 and 100.
* `headroom` (`dict`) - Spare resource capacity management enabling fast assignment of tasks without waiting for new resources to launch.
* `cpuPerUnit` (`float`) - Optionally configure the number of CPUs to allocate the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
* `memoryPerUnit` (`float`) - Optionally configure the amount of memory (MB) to allocate the headroom.
* `numOfUnits` (`float`) - The number of units to retain as headroom, where each unit has the defined headroom CPU and memory.
* `isAutoConfig` (`bool`) - Automatically configure and optimize headroom resources.
* `isEnabled` (`bool`) - Enable the Ocean ECS autoscaler.
* `resourceLimits` (`dict`) - Optionally set upper and lower bounds on the resource usage of the cluster.
* `maxMemoryGib` (`float`) - The maximum memory in GiB units that can be allocated to the cluster.
* `maxVcpu` (`float`) - The maximum cpu in vCPU units that can be allocated to the cluster.
"""
cluster_name: pulumi.Output[str]
"""
The ocean cluster name.
"""
desired_capacity: pulumi.Output[float]
"""
The number of instances to launch and maintain in the cluster.
"""
draining_timeout: pulumi.Output[float]
"""
    The time in seconds an instance is allowed to run while detached from the ELB, giving it time to drain incoming TCP connections before it is terminated during a scale-down operation.
"""
ebs_optimized: pulumi.Output[bool]
"""
    Enable EBS optimization for the cluster. The flag enables optimized capacity for high-bandwidth connectivity to the EBS service for non-EBS-optimized instance types. For instances that are EBS optimized this flag will be ignored.
"""
iam_instance_profile: pulumi.Output[str]
"""
The instance profile iam role.
"""
image_id: pulumi.Output[str]
"""
ID of the image used to launch the instances.
"""
key_pair: pulumi.Output[str]
"""
The key pair to attach the instances.
"""
max_size: pulumi.Output[float]
"""
The upper limit of instances the cluster can scale up to.
"""
min_size: pulumi.Output[float]
"""
The lower limit of instances the cluster can scale down to.
"""
monitoring: pulumi.Output[bool]
"""
    Enable detailed monitoring for the cluster. The flag enables CloudWatch detailed monitoring (one-minute increments). Note: there are additional hourly costs for this service based on the region used.
"""
name: pulumi.Output[str]
"""
The Ocean cluster name.
"""
region: pulumi.Output[str]
"""
The region the cluster will run in.
"""
security_group_ids: pulumi.Output[list]
"""
One or more security group ids.
"""
subnet_ids: pulumi.Output[list]
"""
A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto assign public ip.
"""
tags: pulumi.Output[list]
"""
Optionally adds tags to instances launched in an Ocean cluster.
* `key` (`str`) - The tag key.
* `value` (`str`) - The tag value.
"""
update_policy: pulumi.Output[dict]
user_data: pulumi.Output[str]
"""
Base64-encoded MIME user data to make available to the instances.
"""
utilize_reserved_instances: pulumi.Output[bool]
whitelists: pulumi.Output[list]
"""
Instance types allowed in the Ocean cluster.
"""
def __init__(__self__, resource_name, opts=None, associate_public_ip_address=None, autoscaler=None, cluster_name=None, desired_capacity=None, draining_timeout=None, ebs_optimized=None, iam_instance_profile=None, image_id=None, key_pair=None, max_size=None, min_size=None, monitoring=None, name=None, region=None, security_group_ids=None, subnet_ids=None, tags=None, update_policy=None, user_data=None, utilize_reserved_instances=None, whitelists=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Spotinst Ocean ECS resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] associate_public_ip_address: Configure public IP address allocation.
:param pulumi.Input[dict] autoscaler: Describes the Ocean ECS autoscaler.
:param pulumi.Input[str] cluster_name: The ocean cluster name.
:param pulumi.Input[float] desired_capacity: The number of instances to launch and maintain in the cluster.
        :param pulumi.Input[float] draining_timeout: The time in seconds an instance is allowed to run while detached from the ELB, giving it time to drain incoming TCP connections before it is terminated during a scale-down operation.
        :param pulumi.Input[bool] ebs_optimized: Enable EBS optimization for the cluster. The flag enables optimized capacity for high-bandwidth connectivity to the EBS service for non-EBS-optimized instance types. For instances that are EBS optimized this flag will be ignored.
:param pulumi.Input[str] iam_instance_profile: The instance profile iam role.
:param pulumi.Input[str] image_id: ID of the image used to launch the instances.
:param pulumi.Input[str] key_pair: The key pair to attach the instances.
:param pulumi.Input[float] max_size: The upper limit of instances the cluster can scale up to.
:param pulumi.Input[float] min_size: The lower limit of instances the cluster can scale down to.
        :param pulumi.Input[bool] monitoring: Enable detailed monitoring for the cluster. The flag enables CloudWatch detailed monitoring (one-minute increments). Note: there are additional hourly costs for this service based on the region used.
:param pulumi.Input[str] name: The Ocean cluster name.
:param pulumi.Input[str] region: The region the cluster will run in.
:param pulumi.Input[list] security_group_ids: One or more security group ids.
:param pulumi.Input[list] subnet_ids: A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto assign public ip.
:param pulumi.Input[list] tags: Optionally adds tags to instances launched in an Ocean cluster.
:param pulumi.Input[str] user_data: Base64-encoded MIME user data to make available to the instances.
:param pulumi.Input[list] whitelists: Instance types allowed in the Ocean cluster.
The **autoscaler** object supports the following:
* `cooldown` (`pulumi.Input[float]`) - Cooldown period between scaling actions.
* `down` (`pulumi.Input[dict]`) - Auto Scaling scale down operations.
        * `maxScaleDownPercentage` (`pulumi.Input[float]`) - The maximum percentage to scale down per action; a number between 1 and 100.
* `headroom` (`pulumi.Input[dict]`) - Spare resource capacity management enabling fast assignment of tasks without waiting for new resources to launch.
* `cpuPerUnit` (`pulumi.Input[float]`) - Optionally configure the number of CPUs to allocate the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
* `memoryPerUnit` (`pulumi.Input[float]`) - Optionally configure the amount of memory (MB) to allocate the headroom.
* `numOfUnits` (`pulumi.Input[float]`) - The number of units to retain as headroom, where each unit has the defined headroom CPU and memory.
* `isAutoConfig` (`pulumi.Input[bool]`) - Automatically configure and optimize headroom resources.
* `isEnabled` (`pulumi.Input[bool]`) - Enable the Ocean ECS autoscaler.
* `resourceLimits` (`pulumi.Input[dict]`) - Optionally set upper and lower bounds on the resource usage of the cluster.
* `maxMemoryGib` (`pulumi.Input[float]`) - The maximum memory in GiB units that can be allocated to the cluster.
* `maxVcpu` (`pulumi.Input[float]`) - The maximum cpu in vCPU units that can be allocated to the cluster.
The **tags** object supports the following:
* `key` (`pulumi.Input[str]`) - The tag key.
* `value` (`pulumi.Input[str]`) - The tag value.
The **update_policy** object supports the following:
* `rollConfig` (`pulumi.Input[dict]`)
* `batchSizePercentage` (`pulumi.Input[float]`)
* `shouldRoll` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-spotinst/blob/master/website/docs/r/ocean_ecs.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['associate_public_ip_address'] = associate_public_ip_address
__props__['autoscaler'] = autoscaler
if cluster_name is None:
raise TypeError("Missing required property 'cluster_name'")
__props__['cluster_name'] = cluster_name
__props__['desired_capacity'] = desired_capacity
__props__['draining_timeout'] = draining_timeout
__props__['ebs_optimized'] = ebs_optimized
__props__['iam_instance_profile'] = iam_instance_profile
__props__['image_id'] = image_id
__props__['key_pair'] = key_pair
__props__['max_size'] = max_size
__props__['min_size'] = min_size
__props__['monitoring'] = monitoring
__props__['name'] = name
if region is None:
raise TypeError("Missing required property 'region'")
__props__['region'] = region
if security_group_ids is None:
raise TypeError("Missing required property 'security_group_ids'")
__props__['security_group_ids'] = security_group_ids
if subnet_ids is None:
raise TypeError("Missing required property 'subnet_ids'")
__props__['subnet_ids'] = subnet_ids
__props__['tags'] = tags
__props__['update_policy'] = update_policy
__props__['user_data'] = user_data
__props__['utilize_reserved_instances'] = utilize_reserved_instances
__props__['whitelists'] = whitelists
super(Ocean, __self__).__init__(
'spotinst:ecs/ocean:Ocean',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, associate_public_ip_address=None, autoscaler=None, cluster_name=None, desired_capacity=None, draining_timeout=None, ebs_optimized=None, iam_instance_profile=None, image_id=None, key_pair=None, max_size=None, min_size=None, monitoring=None, name=None, region=None, security_group_ids=None, subnet_ids=None, tags=None, update_policy=None, user_data=None, utilize_reserved_instances=None, whitelists=None):
"""
Get an existing Ocean resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] associate_public_ip_address: Configure public IP address allocation.
:param pulumi.Input[dict] autoscaler: Describes the Ocean ECS autoscaler.
:param pulumi.Input[str] cluster_name: The ocean cluster name.
:param pulumi.Input[float] desired_capacity: The number of instances to launch and maintain in the cluster.
        :param pulumi.Input[float] draining_timeout: The time in seconds an instance is allowed to run while detached from the ELB, giving it time to drain incoming TCP connections before it is terminated during a scale-down operation.
        :param pulumi.Input[bool] ebs_optimized: Enable EBS optimization for the cluster. The flag enables optimized capacity for high-bandwidth connectivity to the EBS service for non-EBS-optimized instance types. For instances that are EBS optimized this flag will be ignored.
:param pulumi.Input[str] iam_instance_profile: The instance profile iam role.
:param pulumi.Input[str] image_id: ID of the image used to launch the instances.
:param pulumi.Input[str] key_pair: The key pair to attach the instances.
:param pulumi.Input[float] max_size: The upper limit of instances the cluster can scale up to.
:param pulumi.Input[float] min_size: The lower limit of instances the cluster can scale down to.
        :param pulumi.Input[bool] monitoring: Enable detailed monitoring for the cluster. The flag enables CloudWatch detailed monitoring (one-minute increments). Note: there are additional hourly costs for this service based on the region used.
:param pulumi.Input[str] name: The Ocean cluster name.
:param pulumi.Input[str] region: The region the cluster will run in.
:param pulumi.Input[list] security_group_ids: One or more security group ids.
:param pulumi.Input[list] subnet_ids: A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto assign public ip.
:param pulumi.Input[list] tags: Optionally adds tags to instances launched in an Ocean cluster.
:param pulumi.Input[str] user_data: Base64-encoded MIME user data to make available to the instances.
:param pulumi.Input[list] whitelists: Instance types allowed in the Ocean cluster.
The **autoscaler** object supports the following:
* `cooldown` (`pulumi.Input[float]`) - Cooldown period between scaling actions.
* `down` (`pulumi.Input[dict]`) - Auto Scaling scale down operations.
        * `maxScaleDownPercentage` (`pulumi.Input[float]`) - The maximum percentage to scale down per action; a number between 1 and 100.
* `headroom` (`pulumi.Input[dict]`) - Spare resource capacity management enabling fast assignment of tasks without waiting for new resources to launch.
* `cpuPerUnit` (`pulumi.Input[float]`) - Optionally configure the number of CPUs to allocate the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
* `memoryPerUnit` (`pulumi.Input[float]`) - Optionally configure the amount of memory (MB) to allocate the headroom.
* `numOfUnits` (`pulumi.Input[float]`) - The number of units to retain as headroom, where each unit has the defined headroom CPU and memory.
* `isAutoConfig` (`pulumi.Input[bool]`) - Automatically configure and optimize headroom resources.
* `isEnabled` (`pulumi.Input[bool]`) - Enable the Ocean ECS autoscaler.
* `resourceLimits` (`pulumi.Input[dict]`) - Optionally set upper and lower bounds on the resource usage of the cluster.
* `maxMemoryGib` (`pulumi.Input[float]`) - The maximum memory in GiB units that can be allocated to the cluster.
* `maxVcpu` (`pulumi.Input[float]`) - The maximum cpu in vCPU units that can be allocated to the cluster.
The **tags** object supports the following:
* `key` (`pulumi.Input[str]`) - The tag key.
* `value` (`pulumi.Input[str]`) - The tag value.
The **update_policy** object supports the following:
* `rollConfig` (`pulumi.Input[dict]`)
* `batchSizePercentage` (`pulumi.Input[float]`)
* `shouldRoll` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-spotinst/blob/master/website/docs/r/ocean_ecs.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["associate_public_ip_address"] = associate_public_ip_address
__props__["autoscaler"] = autoscaler
__props__["cluster_name"] = cluster_name
__props__["desired_capacity"] = desired_capacity
__props__["draining_timeout"] = draining_timeout
__props__["ebs_optimized"] = ebs_optimized
__props__["iam_instance_profile"] = iam_instance_profile
__props__["image_id"] = image_id
__props__["key_pair"] = key_pair
__props__["max_size"] = max_size
__props__["min_size"] = min_size
__props__["monitoring"] = monitoring
__props__["name"] = name
__props__["region"] = region
__props__["security_group_ids"] = security_group_ids
__props__["subnet_ids"] = subnet_ids
__props__["tags"] = tags
__props__["update_policy"] = update_policy
__props__["user_data"] = user_data
__props__["utilize_reserved_instances"] = utilize_reserved_instances
__props__["whitelists"] = whitelists
return Ocean(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
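# Example (illustrative sketch; all values below are placeholders, not defaults):
#
#     ocean = Ocean("example-ocean",
#                   cluster_name="my-ecs-cluster",
#                   region="us-west-2",
#                   security_group_ids=["sg-0123456789abcdef0"],
#                   subnet_ids=["subnet-0123456789abcdef0"],
#                   autoscaler={"isEnabled": True, "isAutoConfig": True})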
|
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
import numpy as np
### Create our BC Model ###
class HumanData(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return torch.FloatTensor(self.data[idx])
class BC(nn.Module):
def __init__(self, state_dim, action_dim, hidden_dim):
super(BC, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.linear1 = nn.Linear(state_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, action_dim)
self.loss_func = nn.MSELoss()
def encoder(self, state):
h1 = torch.tanh(self.linear1(state))
h2 = torch.tanh(self.linear2(h1))
return self.linear3(h2)
def forward(self, x):
state = x[:, :self.state_dim]
a_target = x[:, -self.action_dim:]
a_predicted = self.encoder(state)
loss = self.loss(a_predicted, a_target)
return loss
def loss(self, a_predicted, a_target):
return self.loss_func(a_predicted, a_target)
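# Quick shape check (illustrative, not part of the pipeline): each dataset row
# below is [state (4 values) | action (2 values)], so a random batch of width 6
# yields a scalar MSE loss:
#   _model = BC(4, 2, 32)
#   _loss = _model(torch.rand(8, 6))  # scalar tensor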
### Collect the human demonstrations ###
N = 100 # number of demonstrations
sigma_h = 0.1 # amount of noise in the demonstration
T = 10 # each demonstration has T timesteps
D = [] # dataset of state-action pairs
for iter in range(N):
xi = np.zeros((T, 2))
p_robot = np.random.rand(2)
p_goal = np.random.rand(2)
for timestep in range(T):
a = np.random.normal((p_goal - p_robot) / 5.0, sigma_h)
xi[timestep, :] = np.copy(p_robot)
D.append(p_robot.tolist() + p_goal.tolist() + a.tolist())
p_robot += a
plt.plot(p_goal[0], p_goal[1], 'ko')
plt.plot(xi[:,0], xi[:,1], 'bo-')
plt.axis([0, 1, 0, 1])
plt.show()
### Train the BC Model ###
# arguments: state dimension, action dimension, hidden size
model = BC(4, 2, 32)
EPOCH = 1001
BATCH_SIZE_TRAIN = 100
LR = 0.01
LR_STEP_SIZE = 360
LR_GAMMA = 0.1
train_data = HumanData(D)
train_set = DataLoader(dataset=train_data, batch_size=BATCH_SIZE_TRAIN, shuffle=True)
optimizer = optim.Adam(model.parameters(), lr=LR)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=LR_STEP_SIZE, gamma=LR_GAMMA)
for epoch in range(EPOCH):
for batch, x in enumerate(train_set):
optimizer.zero_grad()
loss = model(x)
loss.backward()
optimizer.step()
scheduler.step()
if epoch % 100 == 0:
print(epoch, loss.item())
torch.save(model.state_dict(), "bc_weights")
### Rollout the trained model ###
model = BC(4, 2, 32)
model.load_state_dict(torch.load("bc_weights"))
N = 10 # number of rollouts
T = 20 # each one has T timesteps
for iter in range(N):
xi = np.zeros((T, 2))
p_robot = np.random.rand(2)
p_goal = np.random.rand(2)
for timestep in range(T):
context = np.concatenate((p_robot, p_goal))
a = model.encoder(torch.Tensor(context)).detach().numpy()
xi[timestep, :] = np.copy(p_robot)
p_robot += a
plt.plot(p_goal[0], p_goal[1], 'ko')
plt.plot(xi[:,0], xi[:,1], 'bo-')
plt.axis([0, 1, 0, 1])
plt.show()
|
'''
Configuration File:
-------------------
config_name: The name of the config file to look at for the sim parameters to use
See the template directory for predefined config files. Run ::
$ peptidesim --config <configuration file name>
to generate a config file in the current directory based on the config templates provided, or use
``default`` to generate the default configuration.
'''
# expose the package version at the top level
from .version import __version__
from peptidesim.utilities import cs_validity, load_eds_restart, plot_couplings
def main():
generate_config()
if __name__ == '__main__':
main()
else:
from peptidesim.peptidesim import SimulationInfo, PeptideSim
|
from django.shortcuts import render, redirect
from .models import Message
from .forms import MessageForm
def board(request):
messages = Message.objects.order_by('-date')
if request.method == "POST":
form = MessageForm(request.POST)
if form.is_valid():
form.save()
return redirect('board')
else:
form = MessageForm()
return render(request, 'msgboard/board.html', {
'messages': messages,
'form': form,
})
|
from .auth import Auth, new_oauth_client
from .blueprint import AuthBlueprint
from .config import AuthConfig
from .decorator import requires_auth, requires_role
__all__ = [
"Auth",
"AuthConfig",
"requires_auth",
"requires_role",
"AuthBlueprint",
"new_oauth_client",
]
|
import json
from pyjetbrainsdevecosystem.data_import_utils import unpack_csv_data
questions_dict = {}
with open('survey_data/2020/DevEcosystem 2020 questions_outside.csv',
encoding='utf8') as questions_text:
questions_reader = unpack_csv_data(questions_text)
questions_fieldnames = questions_reader.fieldnames
    for column in questions_reader:
        # Keep the question metadata keyed by shortname (a labelled dict,
        # so none of the fields are lost if two values happen to coincide).
        questions_dict.update(
            {
                column['shortname']: {
                    'question_title': column['question_title'],
                    'type': column['type'],
                    'page': column['page'],
                    'place': column['place'],
                }
            }
        )
question_column_map = {}
with open('survey_data/2020/2020_sharing_data_outside.csv',
newline='',
encoding='utf8') as survey_text:
survey_reader = unpack_csv_data(survey_text)
survey_fieldnames = survey_reader.fieldnames
for question in questions_dict.keys():
field_list_with_position = {}
        for field_name in survey_fieldnames:
            if field_name.startswith(question):
                column_number = survey_fieldnames.index(field_name)
                field_list_with_position.update({field_name: column_number})
question_column_map.update({question: field_list_with_position})
entry_count = {}
for response in survey_reader:
response_data = {}
question_row = question_column_map.items()
for parent_question, column_name_dict in question_row:
temp_dict = {}
sub_entry_count = entry_count.get(parent_question, {})
            for column_name in column_name_dict:
                temp_dict.update({column_name: response[column_name]})
                sub_entry_count[response[column_name]] = sub_entry_count.get(response[column_name], 0) + 1
response_data.update({parent_question: temp_dict})
entry_count.update({parent_question: sub_entry_count})
print(json.dumps(entry_count, indent=4))
#print(json.dumps(question_column_map, indent=4))
# {
# "os_devenv": {
# "os_devenv.Windows": 19,
# "os_devenv.Linux": 20,
# "os_devenv.macOS": 21,
# "os_devenv.Other": 22
# },
# "mobile_target_os": {
# "mobile_target_os.Android": 1475,
# "mobile_target_os.iOS": 1476,
# "mobile_target_os.Other": 1477
# },
# "db_adopt": {
# "db_adopt.No, I'm not planning to adopt / migrate to any": 242,
# "db_adopt.Yes, I'm planning to adopt / migrate to other database(s) - Write In": 243,
# "db_adopt.DB2": 244,
# "db_adopt.MS SQL Server": 245,
# "db_adopt.MySQL": 246,
# "db_adopt.Oracle Database": 247,
# "db_adopt.PostgreSQL": 248,
# "db_adopt.SQLite": 249,
# "db_adopt.Cassandra": 250,
# "db_adopt.Couchbase": 251,
# "db_adopt.HBase": 252,
# "db_adopt.MongoDB": 253,
# "db_adopt.Neo4j": 254,
# "db_adopt.Redis": 255,
# "db_adopt.Amazon Redshift": 256,
# "db_adopt.H2": 257,
# "db_adopt.MariaDB": 258,
# "db_adopt.ClickHouse": 259,
# "db_adopt.Other": 260
# },
# "proglang": {
# "proglang.I don't use programming languages": 78,
# "proglang.Java": 79,
# "proglang.C": 80,
# "proglang.C++": 81,
# "proglang.Python": 82,
# "proglang.C#": 83,
# "proglang.PHP": 84,
# "proglang.JavaScript": 85,
# "proglang.Ruby": 86,
# "proglang.Elixir": 87,
# "proglang.Kotlin": 88,
# "proglang.Swift": 89,
# "proglang.Objective-C": 90,
# "proglang.Visual Basic": 91,
# "proglang.Scala": 92,
# "proglang.Go": 93,
# "proglang.HTML / CSS": 94,
# "proglang.Haskell": 95,
# "proglang.R": 96,
# "proglang.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 97,
# "proglang.TypeScript": 98,
# "proglang.Dart": 99,
# "proglang.Clojure / ClojureScript": 100,
# "proglang.Delphi": 101,
# "proglang.Groovy": 102,
# "proglang.Rust": 103,
# "proglang.Perl": 104,
# "proglang.Assembly": 105,
# "proglang.Matlab": 106,
# "proglang.Lua": 107,
# "proglang.Shell scripting languages(bash/shell/powershell)": 108,
# "proglang.Julia": 109,
# "proglang.F#": 110,
# "proglang.Other": 111,
# "proglang_rank.Java": 183,
# "proglang_rank.C": 184,
# "proglang_rank.C++": 185,
# "proglang_rank.Python": 186,
# "proglang_rank.C#": 187,
# "proglang_rank.PHP": 188,
# "proglang_rank.JavaScript": 189,
# "proglang_rank.Ruby": 190,
# "proglang_rank.Kotlin": 191,
# "proglang_rank.Swift": 192,
# "proglang_rank.Objective-C": 193,
# "proglang_rank.Scala": 194,
# "proglang_rank.Go": 195,
# "proglang_rank.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 196,
# "proglang_rank.Rust": 197,
# "proglang_rank.Haskell": 198,
# "proglang_rank.HTML / CSS": 199,
# "proglang_rank.Elixir": 200,
# "proglang_rank.Visual Basic": 201,
# "proglang_rank.R": 202,
# "proglang_rank.TypeScript": 203,
# "proglang_rank.Dart": 204,
# "proglang_rank.Clojure / ClojureScript": 205,
# "proglang_rank.Delphi": 206,
# "proglang_rank.Groovy": 207,
# "proglang_rank.Perl": 208,
# "proglang_rank.Assembly": 209,
# "proglang_rank.Matlab": 210,
# "proglang_rank.Lua": 211,
# "proglang_rank.Shell scripting languages(bash/shell/powershell)": 212,
# "proglang_rank.Julia": 213,
# "proglang_rank.F#": 214,
# "proglang_rank.Other": 215
# },
# "tools_ci": {
# "tools_ci.Jenkins / Hudson": 305,
# "tools_ci.TeamCity": 306,
# "tools_ci.Bamboo": 307,
# "tools_ci.Microsoft Team Foundation Build": 308,
# "tools_ci.Travis CI": 309,
# "tools_ci.Codeship": 310,
# "tools_ci.CircleCI": 311,
# "tools_ci.CruiseControl": 312,
# "tools_ci.GoCD": 313,
# "tools_ci.Gitlab CI": 314,
# "tools_ci.AppVeyor": 315,
# "tools_ci.Drone": 316,
# "tools_ci.Semaphore CI": 317,
# "tools_ci.GitHub Actions": 318,
# "tools_ci.Azure DevOps (former Microsoft TFS / Visual Studio Team Services)": 319,
# "tools_ci.AWS CodePipeline / AWS CodeStar": 320,
# "tools_ci.Google Cloud Build": 321,
# "tools_ci.Bitbucket Pipelines": 322,
# "tools_ci.Custom tool": 323,
# "tools_ci.Other": 324
# },
# "tools_it": {
# "tools_it.Jira": 325,
# "tools_it.YouTrack": 326,
# "tools_it.Redmine": 327,
# "tools_it.GitLab Issue Board": 328,
# "tools_it.Asana": 329,
# "tools_it.Wrike": 330,
# "tools_it.Microsoft TFS / Visual Studio Team Services": 331,
# "tools_it.Trello": 332,
# "tools_it.GitHub Issues": 333,
# "tools_it.Other": 334
# },
# "tools_vcs": {
# "tools_vcs.None": 362,
# "tools_vcs.Concurrent Versions System (CVS)": 363,
# "tools_vcs.Apache Subversion (SVN)": 364,
# "tools_vcs.Git": 365,
# "tools_vcs.Mercurial": 366,
# "tools_vcs.Perforce": 367,
# "tools_vcs.Plastic SCM": 368,
# "tools_vcs.Visual Studio Team Services (VSTS)": 369,
# "tools_vcs.Microsoft TFS": 370,
# "tools_vcs.Other": 371
# },
# "contribute_os": {
# "contribute_os": 1754
# },
# "hours_code_job": {
# "hours_code_job": 1752
# },
# "tools_adopted": {
# "tools_adopted.Source code collaboration tool (e_g_ GitHub, GitLab, Bitbucket)": 407,
# "tools_adopted.Issue tracker (e_g_ Jira, YouTrack)": 408,
# "tools_adopted.Team collaboration / task management / project / workflow management tools": 409,
# "tools_adopted.Code review tool (e_g_ Crucible, Upsource)": 410,
# "tools_adopted.Continuous Integration (CI) or Continuous Delivery (CD) tool (e_g_ Jenkins, TeamCity)": 411,
# "tools_adopted.Service desk / helpdesk automation solutions (Zendesk)": 412,
# "tools_adopted.None": 413,
# "tools_adopted.Static analysis tool (e_g_ CodeClimate)": 414,
# "tools_adopted.Standalone IDE (e_g_\u00a0 Visual Studio, Eclipse, IntelliJ IDEA)": 415,
# "tools_adopted.Desktop Editor (e_g_ Sublime Text, Atom, VS Code, Vim)": 416,
# "tools_adopted.In-cloud Editor or IDE": 417
# },
# "unittests_how": {
# "unittests_how": 222
# },
# "team_size": {
# "team_size": 2126
# },
# "advocate": {
# "advocate": 2125
# },
# "team_distributed": {
# "team_distributed": 2131
# },
# "java_version": {
# "java_version.Java 13": 451,
# "java_version.Java 12": 452,
# "java_version.Java 11": 453,
# "java_version.Java 10": 454,
# "java_version.Java 9": 455,
# "java_version.Java 8": 456,
# "java_version.Java 7": 457,
# "java_version.Java 6": 458,
# "java_version.Other": 459
# },
# "java_app_server": {
# "java_app_server.None": 460,
# "java_app_server.Apache Tomcat": 461,
# "java_app_server.Jetty": 462,
# "java_app_server.WildFly": 463,
# "java_app_server.JBoss EAP": 464,
# "java_app_server.WebLogic": 465,
# "java_app_server.WebSphere": 466,
# "java_app_server.Liberty": 467,
# "java_app_server.GlassFish": 468,
# "java_app_server.Payara": 469,
# "java_app_server.Other": 470
# },
# "java_app_frameworks": {
# "java_app_frameworks.None": 471,
# "java_app_frameworks.Netty": 472,
# "java_app_frameworks.Undertow": 473,
# "java_app_frameworks.Vert_x": 474,
# "java_app_frameworks.Spark Java": 475,
# "java_app_frameworks.Spring Boot": 476,
# "java_app_frameworks.Other": 477
# },
# "java_package": {
# "java_package.As artifacts (e_g_ WAR)": 478,
# "java_package.I use an embedded server (e_g_ JAR)": 479,
# "java_package.I'm not sure": 480
# },
# "java_web_frameworks": {
# "java_web_frameworks.None": 491,
# "java_web_frameworks.Spring MVC": 492,
# "java_web_frameworks.GWT": 493,
# "java_web_frameworks.Vaadin": 494,
# "java_web_frameworks.Play Framework": 495,
# "java_web_frameworks.Grails 2": 496,
# "java_web_frameworks.Grails 3": 497,
# "java_web_frameworks.Spring Boot": 498,
# "java_web_frameworks.JSF": 499,
# "java_web_frameworks.Struts 1": 500,
# "java_web_frameworks.Struts 2": 501,
# "java_web_frameworks.Wicket": 502,
# "java_web_frameworks.Dropwizard": 503,
# "java_web_frameworks.Other": 504
# },
# "java_buildsystem": {
# "java_buildsystem.None": 505,
# "java_buildsystem.Maven": 506,
# "java_buildsystem.sbt": 507,
# "java_buildsystem.Gradle": 508,
# "java_buildsystem.Ant": 509,
# "java_buildsystem.Bazel": 510,
# "java_buildsystem.Other": 511
# },
# "company_size": {
# "company_size": 2069
# },
# "job_role": {
# "job_role.Developer / Programmer / Software Engineer": 1,
# "job_role.DevOps Engineer / Infrastructure Developer": 2,
# "job_role.DBA": 3,
# "job_role.Architect": 4,
# "job_role.Tester / QA Engineer": 5,
# "job_role.Technical Support": 6,
# "job_role.Data Analyst / Data Engineer/ Data Scientist": 7,
# "job_role.Business Analyst": 8,
# "job_role.Team Lead": 9,
# "job_role.Systems Analyst": 10,
# "job_role.Product Manager": 11,
# "job_role.UX / UI Designer": 12,
# "job_role.CIO / CEO / CTO": 13,
# "job_role.Marketing Manager": 14,
# "job_role.Developer Advocate": 15,
# "job_role.Instructor / Teacher / Tutor": 16,
# "job_role.Other": 17
# },
# "age_range": {
# "age_range": 76
# },
# "adopt_proglang": {
# "adopt_proglang.No, I'm not planning to adopt / migrate": 145,
# "adopt_proglang.Planning to adopt / migrate to other language(s) - Write In": 146,
# "adopt_proglang.Java": 147,
# "adopt_proglang.C": 148,
# "adopt_proglang.C++": 149,
# "adopt_proglang.Python": 150,
# "adopt_proglang.C#": 151,
# "adopt_proglang.PHP": 152,
# "adopt_proglang.JavaScript": 153,
# "adopt_proglang.Ruby": 154,
# "adopt_proglang.Elixir": 155,
# "adopt_proglang.Crystal": 156,
# "adopt_proglang.Kotlin": 157,
# "adopt_proglang.Swift": 158,
# "adopt_proglang.Objective-C": 159,
# "adopt_proglang.Visual Basic": 160,
# "adopt_proglang.Scala": 161,
# "adopt_proglang.Go": 162,
# "adopt_proglang.HTML / CSS": 163,
# "adopt_proglang.Haskell": 164,
# "adopt_proglang.R": 165,
# "adopt_proglang.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 166,
# "adopt_proglang.TypeScript": 167,
# "adopt_proglang.Dart": 168,
# "adopt_proglang.CoffeeScript": 169,
# "adopt_proglang.Clojure / ClojureScript": 170,
# "adopt_proglang.Delphi": 171,
# "adopt_proglang.COBOL": 172,
# "adopt_proglang.Groovy": 173,
# "adopt_proglang.Rust": 174,
# "adopt_proglang.Perl": 175,
# "adopt_proglang.Assembly": 176,
# "adopt_proglang.Matlab": 177,
# "adopt_proglang.Lua": 178,
# "adopt_proglang.Shell scripting languages(bash/shell/powershell)": 179,
# "adopt_proglang.Julia": 180,
# "adopt_proglang.F#": 181,
# "adopt_proglang.Other": 182
# },
# "cloud_services": {
# "cloud_services.Amazon Web Services": 1731,
# "cloud_services.Microsoft Azure": 1732,
# "cloud_services.Google Cloud Platform": 1733,
# "cloud_services.Rackspace": 1734,
# "cloud_services.RedHat OpenShift": 1735,
# "cloud_services.IBM SoftLayer": 1736,
# "cloud_services.Cloud Foundry": 1737,
# "cloud_services.Heroku": 1738,
# "cloud_services.Other": 1739
# },
# "tools_cloud": {
# "tools_cloud.None": 437,
# "tools_cloud.Continuous Integration tool": 438,
# "tools_cloud.Continuous Delivery tool": 439,
# "tools_cloud.Code Review tool": 440,
# "tools_cloud.Issue Tracker": 441
# },
# "where_sources": {
# "where_sources.Version control as a service (e_g_ GitHub, Bitbucket)": 442,
# "where_sources.Manually deployed version control (e_g_ GitHub Enterprise, GitLab)": 443,
# "where_sources.Other": 444
# },
# "vc_service": {
# "vc_service.None": 372,
# "vc_service.GitHub": 373,
# "vc_service.GitLab": 374,
# "vc_service.Bitbucket": 375,
# "vc_service.Gitcolony": 376,
# "vc_service.Perforce": 377,
# "vc_service.Amazon CodeCommit": 378,
# "vc_service.RhodeCode": 379,
# "vc_service.SourceForge": 380,
# "vc_service.Azure DevOps (former Microsoft TFS / Visual Studio Team Services)": 381,
# "vc_service.Assembla": 382,
# "vc_service.Helix Core Version Control": 383,
# "vc_service.TeamForge SCM": 384,
# "vc_service.Phabricator": 385,
# "vc_service.Custom tool": 386,
# "vc_service.Other": 387
# },
# "java_ee": {
# "java_ee.None": 512,
# "java_ee.Java EE 8 / Jakarta EE 8": 513,
# "java_ee.Java EE 7": 514,
# "java_ee.Java EE 6": 515,
# "java_ee.Java EE 5": 516,
# "java_ee.J2EE": 517,
# "java_ee.Other": 518
# },
# "java_profiler": {
# "java_profiler.None": 519,
# "java_profiler.VisualVM": 520,
# "java_profiler.JProfiler": 521,
# "java_profiler.Java Mission Control": 522,
# "java_profiler.YourKit": 523,
# "java_profiler.NetBeans profiler": 524,
# "java_profiler.Honest profiler": 525,
# "java_profiler.async-profiler": 526,
# "java_profiler.Own custom tools": 527,
# "java_profiler.Other": 528
# },
# "java_ide": {
# "java_ide": 529
# },
# "c_standart": {
# "c_standart.C99": 549,
# "c_standart.C11": 550,
# "c_standart.Embedded C": 551,
# "c_standart.Other": 552
# },
# "c_ide": {
# "c_ide": 553
# },
# "c_unittesting": {
# "c_unittesting.I don\u2019t write unit tests for C": 554,
# "c_unittesting.I write unit tests, but don\u2019t use any frameworks": 555,
# "c_unittesting.Catch": 556,
# "c_unittesting.Google Test": 557,
# "c_unittesting.CppUnit": 558,
# "c_unittesting.CUnit": 559,
# "c_unittesting.Unity": 560,
# "c_unittesting.Other": 561
# },
# "c_projectmodels": {
# "c_projectmodels.None": 562,
# "c_projectmodels.Visual Studio project": 563,
# "c_projectmodels.Xcode project": 564,
# "c_projectmodels.Autotools": 565,
# "c_projectmodels.Makefiles": 566,
# "c_projectmodels.CMake": 567,
# "c_projectmodels.Qmake": 568,
# "c_projectmodels.Custom build system": 569,
# "c_projectmodels.Other": 570
# },
# "c_compilers": {
# "c_compilers.GCC": 578,
# "c_compilers.Clang": 579,
# "c_compilers.MSVC": 580,
# "c_compilers.Intel": 581,
# "c_compilers.Compiler for microcontrollers (like Keil, C51 C Compiler, IAR, etc_)": 582,
# "c_compilers.Custom": 583,
# "c_compilers.Other": 584
# },
# "cpp_standart": {
# "cpp_standart.C++98 / C++03": 601,
# "cpp_standart.C++11": 602,
# "cpp_standart.C++14": 603,
# "cpp_standart.C++17": 604,
# "cpp_standart.C++20": 605
# },
# "cpp_ide": {
# "cpp_ide": 611
# },
# "cpp_unittesting": {
# "cpp_unittesting.I don\u2019t write unit tests for C++": 612,
# "cpp_unittesting.I write unit tests, but don\u2019t use any frameworks": 613,
# "cpp_unittesting.Boost_Test": 614,
# "cpp_unittesting.Google Test": 615,
# "cpp_unittesting.CppUnit": 616,
# "cpp_unittesting.Catch": 617,
# "cpp_unittesting.doctest": 618,
# "cpp_unittesting.Other": 619
# },
# "cpp_projectmodels": {
# "cpp_projectmodels.None": 631,
# "cpp_projectmodels.Visual Studio project": 632,
# "cpp_projectmodels.Xcode project": 633,
# "cpp_projectmodels.Autotools": 634,
# "cpp_projectmodels.Makefiles": 635,
# "cpp_projectmodels.CMake": 636,
# "cpp_projectmodels.Qmake": 637,
# "cpp_projectmodels.SCons": 638,
# "cpp_projectmodels.Boost_Build": 639,
# "cpp_projectmodels.Bazel": 640,
# "cpp_projectmodels.Custom build system": 641,
# "cpp_projectmodels.Other": 642
# },
# "cpp_compilers": {
# "cpp_compilers.GCC": 643,
# "cpp_compilers.Clang": 644,
# "cpp_compilers.MSVC": 645,
# "cpp_compilers.Intel": 646,
# "cpp_compilers.Custom": 647,
# "cpp_compilers.Other": 648
# },
# "cpp_cli": {
# "cpp_cli": 610
# },
# "cpp_project_size": {
# "cpp_project_size": 695
# },
# "python_vesion": {
# "python_vesion": 696
# },
# "python_ide": {
# "python_ide": 758
# },
# "csharp_version": {
# "csharp_version.C# 5 (async / await, caller info attributes)": 759,
# "csharp_version.C# 6 (? and nameof operators, static imports, exception filters, Roslyn)": 760,
# "csharp_version.C# 7 (pattern matching, local functions, ref locals and returns, out variables)": 761,
# "csharp_version. C# 8 (static local functions, nullable reference types, default interface methods)": 762,
# "csharp_version.An earlier version": 763,
# "csharp_version.I'm not sure": 764
# },
# "csharp_runtimes": {
# "csharp_runtimes._NET Framework": 765,
# "csharp_runtimes.Mono": 766,
# "csharp_runtimes._NET Core": 767
# },
# "csharp_frameworks": {
# "csharp_frameworks.None": 768,
# "csharp_frameworks.SharePoint": 769,
# "csharp_frameworks.ASP_NET MVC": 770,
# "csharp_frameworks.ASP_NET Web Forms": 771,
# "csharp_frameworks.ASP_NET Core": 772,
# "csharp_frameworks.Windows Presentation Foundation (WPF)": 773,
# "csharp_frameworks.Windows Forms": 774,
# "csharp_frameworks.Windows Communication Foundation (WCF)": 775,
# "csharp_frameworks.Entity Framework": 776,
# "csharp_frameworks.Unity3d": 777,
# "csharp_frameworks.Xamarin": 778,
# "csharp_frameworks.UWP": 779,
# "csharp_frameworks.Azure": 780,
# "csharp_frameworks.Other": 781
# },
# "csharp_ide": {
# "csharp_ide": 782
# },
# "csharp_vsversion": {
# "csharp_vsversion": 806
# },
# "csharp_unittesting": {
# "csharp_unittesting.I don\u2019t write unit tests for C#": 808,
# "csharp_unittesting.I write unit tests, but don\u2019t use any frameworks": 809,
# "csharp_unittesting.MSTest/Visual Studio Unit Testing Framework": 810,
# "csharp_unittesting.MSTest V2": 811,
# "csharp_unittesting.NUnit": 812,
# "csharp_unittesting.xUnit": 813,
# "csharp_unittesting.Other": 814
# },
# "csharp_performance": {
# "csharp_performance.None": 815,
# "csharp_performance.YourKit Profiler": 816,
# "csharp_performance.PerfView": 817,
# "csharp_performance.Intel VTune Amplifier": 818,
# "csharp_performance.SciTech _NET memory profiler": 819,
# "csharp_performance.Windows Performance Toolkit": 820,
# "csharp_performance.Visual Studio's built-in performance and diagnostic tools": 821,
# "csharp_performance.dotTrace": 822,
# "csharp_performance.dotMemory": 823,
# "csharp_performance.ANTS Profiler": 824,
# "csharp_performance.Other": 825
# },
# "php_version": {
# "php_version.PHP 7_4": 831,
# "php_version.PHP 7_3": 832,
# "php_version.PHP 7_2": 833,
# "php_version.PHP 7_1": 834,
# "php_version.PHP 7_0": 835,
# "php_version.PHP 5_6": 836,
# "php_version.PHP 5_5 or older": 837,
# "php_version.Other": 838
# },
# "php_devenviron": {
# "php_devenviron.Local": 839,
# "php_devenviron.Remote (SFTP, SSH, Remote desktop, etc_)": 840,
# "php_devenviron.Virtualized (Vagrant, Otto, etc_)": 841,
# "php_devenviron.Containerized (Docker, Rocket)": 842,
# "php_devenviron.Other": 843
# },
# "php_ide": {
# "php_ide": 869
# },
# "php_testing": {
# "php_testing.I don\u2019t write tests for PHP": 870,
# "php_testing.I write tests, but don\u2019t use any frameworks": 871,
# "php_testing.PHPUnit": 872,
# "php_testing.Behat": 873,
# "php_testing.PHPSpec": 874,
# "php_testing.Codeception": 875,
# "php_testing.SimpleTest": 876,
# "php_testing.Infection": 877,
# "php_testing.Other": 878
# },
# "js_frameworks": {
# "js_frameworks.None": 1197,
# "js_frameworks.AngularJS": 1198,
# "js_frameworks.Angular": 1199,
# "js_frameworks.React": 1200,
# "js_frameworks.React Native": 1201,
# "js_frameworks.Cordova / PhoneGap": 1202,
# "js_frameworks.Express": 1203,
# "js_frameworks.Vue_js": 1204,
# "js_frameworks.Meteor": 1205,
# "js_frameworks.Ember": 1206,
# "js_frameworks.Backbone": 1207,
# "js_frameworks.Polymer": 1208,
# "js_frameworks.Electron": 1209,
# "js_frameworks.Svelte": 1210,
# "js_frameworks.Other": 1211
# },
# "js_ide": {
# "js_ide": 1212
# },
# "js_unittesting": {
# "js_unittesting.I don\u2019t write unit tests for JavaScript": 1213,
# "js_unittesting.I write unit tests, but don\u2019t use any frameworks": 1214,
# "js_unittesting.Mocha": 1215,
# "js_unittesting.Jest": 1216,
# "js_unittesting.Ava": 1217,
# "js_unittesting.Karma": 1218,
# "js_unittesting.Jasmine": 1219,
# "js_unittesting.Other": 1220
# },
# "js_moduleloader": {
# "js_moduleloader.None": 1221,
# "js_moduleloader.Browserify": 1222,
# "js_moduleloader.Webpack": 1223,
# "js_moduleloader.RequireJS": 1224,
# "js_moduleloader.SystemJS": 1225,
# "js_moduleloader.Rollup": 1226,
# "js_moduleloader.Parcel": 1227,
# "js_moduleloader.Other": 1228
# },
# "ruby_version": {
# "ruby_version.Ruby 2_7 preview": 1021,
# "ruby_version.Ruby 2_6": 1022,
# "ruby_version.Ruby 2_5": 1023,
# "ruby_version.Ruby 2_4": 1024,
# "ruby_version.Ruby 2_3": 1025,
# "ruby_version.Ruby 2_2": 1026,
# "ruby_version.Ruby 2_1 and older": 1027,
# "ruby_version.Other": 1029,
# "ruby_version_management.None": 1030,
# "ruby_version_management.RVM": 1031,
# "ruby_version_management.Rbenv": 1032,
# "ruby_version_management.Asdf": 1033,
# "ruby_version_management.Chruby": 1034,
# "ruby_version_management.Docker": 1035,
# "ruby_version_management.Other": 1036
# },
# "ruby_gemmanagement": {
# "ruby_gemmanagement.None": 1037,
# "ruby_gemmanagement.Bundler": 1038,
# "ruby_gemmanagement.RVM gemsets": 1039,
# "ruby_gemmanagement.Rbenv gemsets": 1040,
# "ruby_gemmanagement.Other": 1041
# },
# "ruby_gems_count": {
# "ruby_gems_count": 1042
# },
# "ruby_frameworks": {
# "ruby_frameworks.None": 1043,
# "ruby_frameworks.Ruby on Rails": 1044,
# "ruby_frameworks.Rack": 1045,
# "ruby_frameworks.Sinatra": 1046,
# "ruby_frameworks.Hanami": 1047,
# "ruby_frameworks.Grape": 1048,
# "ruby_frameworks.Other": 1049
# },
# "ruby_rails_version": {
# "ruby_rails_version": 1050,
# "ruby_rails_version_migrate": 1051
# },
# "ruby_servers": {
# "ruby_servers.None": 1052,
# "ruby_servers.Unicorn": 1053,
# "ruby_servers.Puma": 1054,
# "ruby_servers.Passenger": 1055,
# "ruby_servers.Thin": 1056,
# "ruby_servers.Other": 1057
# },
# "ruby_ide": {
# "ruby_ide": 1058
# },
# "ruby_unittesting": {
# "ruby_unittesting.I don\u2019t write unit tests for Ruby": 1059,
# "ruby_unittesting.Shoulda": 1060,
# "ruby_unittesting.RSpec": 1061,
# "ruby_unittesting.Cucumber": 1062,
# "ruby_unittesting.MiniTest": 1063,
# "ruby_unittesting.TestUnit": 1064,
# "ruby_unittesting.Other": 1065
# },
# "swiftoc_unittesting": {
# "swiftoc_unittesting.I don\u2019t write unit tests for Swift or Objective-C development": 1080,
# "swiftoc_unittesting.I write unit tests, but don\u2019t use any frameworks": 1081,
# "swiftoc_unittesting.XCTest": 1082,
# "swiftoc_unittesting.Quick + Nimble": 1083,
# "swiftoc_unittesting.Kiwi": 1084,
# "swiftoc_unittesting.Other": 1085
# },
# "swiftoc_ui_tests": {
# "swiftoc_ui_tests": 1093
# },
# "swiftoc_dependecymanager": {
# "swiftoc_dependecymanager.None": 1100,
# "swiftoc_dependecymanager.CocoaPods": 1101,
# "swiftoc_dependecymanager.Carthage": 1102,
# "swiftoc_dependecymanager.Swift Package Manager": 1103,
# "swiftoc_dependecymanager.Other": 1104
# },
# "swiftoc_db_engine": {
# "swiftoc_db_engine.None": 1106,
# "swiftoc_db_engine.SQLite with my own wrapper": 1107,
# "swiftoc_db_engine.CoreData": 1108,
# "swiftoc_db_engine.Realm": 1109,
# "swiftoc_db_engine.Firebase": 1110,
# "swiftoc_db_engine.Other": 1111
# },
# "swiftoc_build": {
# "swiftoc_build.I build my project from the IDE": 1114,
# "swiftoc_build.I use Fastlane": 1115,
# "swiftoc_build.I build on CI": 1116,
# "swiftoc_build.Other": 1117
# },
# "swiftoc_linux": {
# "swiftoc_linux": 1118
# },
# "sql_tool": {
# "sql_tool.None": 1447,
# "sql_tool.MySQL Workbench": 1448,
# "sql_tool.pgAdmin": 1449,
# "sql_tool.Oracle SQL Developer": 1450,
# "sql_tool.SQL Server Management Studio": 1451,
# "sql_tool.DataGrip": 1452,
# "sql_tool.phpMyAdmin": 1453,
# "sql_tool.Navicat": 1454,
# "sql_tool.Toad": 1455,
# "sql_tool.EMS SQL Manager": 1456,
# "sql_tool.dbForge Studio": 1457,
# "sql_tool.HeidiSQL": 1458,
# "sql_tool.DbVisualizer": 1459,
# "sql_tool.DBeaver": 1460,
# "sql_tool.Sequel Pro": 1461,
# "sql_tool.SQuirreL SQL": 1462,
# "sql_tool.Command Line": 1463,
# "sql_tool.JetBrains IDE(s) (IntelliJ IDEA, PhpStorm, etc_) with the Database plugin": 1464,
# "sql_tool.Robo 3T": 1465,
# "sql_tool.PL / SQL Developer": 1466,
# "sql_tool.Other": 1467
# },
# "use_static_analysis": {
# "use_static_analysis": 1751
# },
# "regularly_tools": {
# "regularly_tools.Source code collaboration tool (e_g_ GitHub, GitLab, Bitbucket)": 261,
# "regularly_tools.Issue tracker (e_g_ Jira, YouTrack)": 262,
# "regularly_tools.Team collaboration / task management / project / workflow management tools": 263,
# "regularly_tools.Code review tool (e_g_ Crucible, Upsource)": 264,
# "regularly_tools.Continuous Integration (CI) or Continuous Delivery (CD) tool (e_g_ Jenkins, TeamCity)": 265,
# "regularly_tools.Service desk / helpdesk automation solutions (Zendesk)": 266,
# "regularly_tools.Static analysis tool (e_g_ CodeClimate)": 267,
# "regularly_tools.Standalone IDE (e_g_\u00a0 Visual Studio, Eclipse, IntelliJ IDEA)": 268,
# "regularly_tools.Desktop Editor (e_g_ Sublime Text, Atom, VS Code, Vim)": 269,
# "regularly_tools.In-cloud Editor or IDE": 270,
# "regularly_tools.None": 271
# },
# "visit_meetups": {
# "visit_meetups": 2222
# },
# "ruby_rails_version_migrate": {
# "ruby_rails_version_migrate": 1051
# },
# "scala_java_version": {
# "scala_java_version.Java 11": 1130,
# "scala_java_version.Java 10": 1131,
# "scala_java_version.Java 9": 1132,
# "scala_java_version.Java 8": 1133,
# "scala_java_version.Other": 1134
# },
# "scala_frameworks_web": {
# "scala_frameworks_web.None": 1142,
# "scala_frameworks_web.Akka-http": 1143,
# "scala_frameworks_web.Netty": 1144,
# "scala_frameworks_web.Spark Java": 1145,
# "scala_frameworks_web.Play": 1146,
# "scala_frameworks_web.Spray": 1147,
# "scala_frameworks_web.Spring": 1148,
# "scala_frameworks_web.sttp": 1149,
# "scala_frameworks_web.Http4s": 1150,
# "scala_frameworks_web.Other": 1151
# },
# "scala_ide": {
# "scala_ide": 1165,
# "scala_ide_additional": 1166
# },
# "scala_buildsystem": {
# "scala_buildsystem.Maven": 1167,
# "scala_buildsystem.Gradle": 1168,
# "scala_buildsystem.sbt": 1169,
# "scala_buildsystem.Bloop": 1170,
# "scala_buildsystem.Other": 1171
# },
# "scala_macros": {
# "scala_macros": 1187
# },
# "target_os": {
# "target_os.Windows": 71,
# "target_os.Linux": 72,
# "target_os.macOS": 73,
# "target_os.Other": 74
# },
# "php_frameworks": {
# "php_frameworks.None": 845,
# "php_frameworks.Symfony": 846,
# "php_frameworks.Drupal": 847,
# "php_frameworks.WordPress": 848,
# "php_frameworks.Zend": 849,
# "php_frameworks.Magento": 850,
# "php_frameworks.Laravel": 851,
# "php_frameworks.Joomla!": 852,
# "php_frameworks.Yii": 853,
# "php_frameworks.CakePHP": 854,
# "php_frameworks.CodeIgniter": 855,
# "php_frameworks.Slim": 856,
# "php_frameworks.Other": 857
# },
# "devops_conf_management": {
# "devops_conf_management.None": 1666,
# "devops_conf_management.Puppet": 1667,
# "devops_conf_management.Chef": 1668,
# "devops_conf_management.Ansible": 1669,
# "devops_conf_management.Salt": 1670,
# "devops_conf_management.Custom solution": 1671,
# "devops_conf_management.Other": 1672
# },
# "ruby_version_management": {
# "ruby_version_management.None": 1030,
# "ruby_version_management.RVM": 1031,
# "ruby_version_management.Rbenv": 1032,
# "ruby_version_management.Asdf": 1033,
# "ruby_version_management.Chruby": 1034,
# "ruby_version_management.Docker": 1035,
# "ruby_version_management.Other": 1036
# },
# "agile_framework": {
# "agile_framework": 2127
# },
# "hours_code_hobby": {
# "hours_code_hobby": 1753
# },
# "ides": {
# "ides.RStudio": 272,
# "ides.IntelliJ IDEA": 273,
# "ides.Android Studio": 274,
# "ides.Visual Studio": 275,
# "ides.Xcode": 276,
# "ides.PhpStorm": 277,
# "ides.WebStorm": 278,
# "ides.RubyMine": 279,
# "ides.PyCharm": 280,
# "ides.Vim": 281,
# "ides.Sublime Text": 282,
# "ides.Atom": 283,
# "ides.VS Code (Visual Studio Code)": 284,
# "ides.Notepad++": 285,
# "ides.AppCode": 286,
# "ides.CLion": 287,
# "ides.Eclipse": 288,
# "ides.NetBeans": 289,
# "ides.QtCreator": 290,
# "ides.Emacs": 291,
# "ides.JetBrains Rider": 292,
# "ides.Gedit": 293,
# "ides.IPython/Jupyter Notebook": 294,
# "ides.DataGrip": 295,
# "ides.GoLand": 296,
# "ides.Other": 297
# },
# "mobile_os_how": {
# "mobile_os_how.I use native tools (Swift / Objective-C for iOS, Kotlin / Android, etc_)": 1478,
# "mobile_os_how.I use cross-platform technologies / frameworks (Xamarin, Apache Cordova, Ionic, etc_)": 1479
# },
# "mobile_crossplatform_frmwrk": {
# "mobile_crossplatform_frmwrk.Apache Flex": 1480,
# "mobile_crossplatform_frmwrk.Corona": 1481,
# "mobile_crossplatform_frmwrk.Ionic": 1482,
# "mobile_crossplatform_frmwrk.Kivy": 1483,
# "mobile_crossplatform_frmwrk.Kendo UI": 1484,
# "mobile_crossplatform_frmwrk.Xamarin": 1485,
# "mobile_crossplatform_frmwrk.Cordova": 1486,
# "mobile_crossplatform_frmwrk.Unity": 1487,
# "mobile_crossplatform_frmwrk.React Native": 1488,
# "mobile_crossplatform_frmwrk.Flutter": 1489,
# "mobile_crossplatform_frmwrk.PhoneGap": 1490,
# "mobile_crossplatform_frmwrk.NativeScript": 1491,
# "mobile_crossplatform_frmwrk.Kotlin Multiplatform": 1492,
# "mobile_crossplatform_frmwrk.Other": 1493
# },
# "python_for": {
# "python_for.Educational purposes": 697,
# "python_for.Data analysis": 698,
# "python_for.System administration / Writing automation scripts / Infrastructure configuration (DevOps)": 699,
# "python_for.Software testing / writing automated tests": 700,
# "python_for.Software prototyping": 701,
# "python_for.Web development": 702,
# "python_for.Programming of web parsers / scrapers / crawlers": 703,
# "python_for.Machine learning": 704,
# "python_for.Network programming": 705,
# "python_for.Desktop development": 706,
# "python_for.Computer graphics": 707,
# "python_for.Game development": 708,
# "python_for.Mobile development": 709,
# "python_for.Embedded development": 710,
# "python_for.Other": 711
# },
# "csharp_os": {
# "csharp_os.Windows": 790,
# "csharp_os.Unix / Linux": 791,
# "csharp_os.macOS": 792,
# "csharp_os.Other": 793
# },
# "csharp_vsc_plugins": {
# "csharp_vsc_plugins.None": 794,
# "csharp_vsc_plugins.C# for Visual Studio Code (powered by OmniSharp)": 795,
# "csharp_vsc_plugins.NuGet Package Manager": 796,
# "csharp_vsc_plugins.C# FixFormat": 797,
# "csharp_vsc_plugins.C# Extensions": 798,
# "csharp_vsc_plugins.C# XML Documentation Comments": 799,
# "csharp_vsc_plugins.Code Runner": 800,
# "csharp_vsc_plugins.ESLint": 801,
# "csharp_vsc_plugins.TSLint": 802,
# "csharp_vsc_plugins.ASP_NET Helper": 803,
# "csharp_vsc_plugins.C# snippets": 804,
# "csharp_vsc_plugins.Other": 805
# },
# "csharp_msdn": {
# "csharp_msdn": 826,
# "csharp_msdn_type": 827
# },
# "csharp_tfs": {
# "csharp_tfs.No": 828,
# "csharp_tfs.TFS": 829,
# "csharp_tfs.VSTS": 830
# },
# "scala_version": {
# "scala_version.2_13": 1125,
# "scala_version.2_12": 1126,
# "scala_version.2_11": 1127
# },
# "scala_compilationtarget": {
# "scala_compilationtarget.JVM": 1128,
# "scala_compilationtarget.scala_js": 1129
# },
# "scala_unittesting": {
# "scala_unittesting.I don\u2019t write unit tests for Scala": 1135,
# "scala_unittesting.ScalaTest": 1136,
# "scala_unittesting.ScalaMock": 1137,
# "scala_unittesting.JUnit": 1138,
# "scala_unittesting.ScalaCheck": 1139,
# "scala_unittesting.specs2": 1140,
# "scala_unittesting.Other": 1141
# },
# "proglang_rank": {
# "proglang_rank.Java": 183,
# "proglang_rank.C": 184,
# "proglang_rank.C++": 185,
# "proglang_rank.Python": 186,
# "proglang_rank.C#": 187,
# "proglang_rank.PHP": 188,
# "proglang_rank.JavaScript": 189,
# "proglang_rank.Ruby": 190,
# "proglang_rank.Kotlin": 191,
# "proglang_rank.Swift": 192,
# "proglang_rank.Objective-C": 193,
# "proglang_rank.Scala": 194,
# "proglang_rank.Go": 195,
# "proglang_rank.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 196,
# "proglang_rank.Rust": 197,
# "proglang_rank.Haskell": 198,
# "proglang_rank.HTML / CSS": 199,
# "proglang_rank.Elixir": 200,
# "proglang_rank.Visual Basic": 201,
# "proglang_rank.R": 202,
# "proglang_rank.TypeScript": 203,
# "proglang_rank.Dart": 204,
# "proglang_rank.Clojure / ClojureScript": 205,
# "proglang_rank.Delphi": 206,
# "proglang_rank.Groovy": 207,
# "proglang_rank.Perl": 208,
# "proglang_rank.Assembly": 209,
# "proglang_rank.Matlab": 210,
# "proglang_rank.Lua": 211,
# "proglang_rank.Shell scripting languages(bash/shell/powershell)": 212,
# "proglang_rank.Julia": 213,
# "proglang_rank.F#": 214,
# "proglang_rank.Other": 215
# },
# "python_other_techs": {
# "python_other_techs.None": 753,
# "python_other_techs.Sphinx": 754,
# "python_other_techs.Buildout": 755,
# "python_other_techs.ORM": 756,
# "python_other_techs.Other": 757
# },
# "kotlin_how_long": {
# "kotlin_how_long": 1000
# },
# "scala_frameworks": {
# "scala_frameworks_web.None": 1142,
# "scala_frameworks_web.Akka-http": 1143,
# "scala_frameworks_web.Netty": 1144,
# "scala_frameworks_web.Spark Java": 1145,
# "scala_frameworks_web.Play": 1146,
# "scala_frameworks_web.Spray": 1147,
# "scala_frameworks_web.Spring": 1148,
# "scala_frameworks_web.sttp": 1149,
# "scala_frameworks_web.Http4s": 1150,
# "scala_frameworks_web.Other": 1151,
# "scala_frameworks.None": 1152,
# "scala_frameworks.Twitter Util": 1153,
# "scala_frameworks.Akka": 1154,
# "scala_frameworks.Spark": 1155,
# "scala_frameworks.Scalaz": 1156,
# "scala_frameworks.Shapeless": 1157,
# "scala_frameworks.Finagle": 1158,
# "scala_frameworks.Cats": 1159,
# "scala_frameworks.Slick": 1160,
# "scala_frameworks.FS2": 1161,
# "scala_frameworks.Monix": 1162,
# "scala_frameworks.ZIO": 1163,
# "scala_frameworks.Other": 1164
# },
# "scala_sbt": {
# "scala_sbt.1_0": 1172,
# "scala_sbt.0_13 or older": 1275
# },
# "scala_interactive": {
# "scala_interactive.None": 1173,
# "scala_interactive.Scala REPL": 1174,
# "scala_interactive.sbt console": 1175,
# "scala_interactive.Ammonite REPL": 1176,
# "scala_interactive.Scastie": 1177,
# "scala_interactive.IntelliJ IDEA Worksheet": 1178,
# "scala_interactive.Scala IDE Worksheet": 1179,
# "scala_interactive.Apache Zeppelin Notebook": 1180,
# "scala_interactive.Jupyter Notebook": 1181,
# "scala_interactive.Other": 1182
# },
# "scala_compiler_plugins": {
# "scala_compiler_plugins.None": 1183,
# "scala_compiler_plugins.Scalamacros/Scalameta Paradise": 1184,
# "scala_compiler_plugins.Kind Projector": 1185,
# "scala_compiler_plugins.Other": 1186
# },
# "go_multipleversions": {
# "go_multipleversions": 1321
# },
# "go_gopath": {
# "go_gopath": 1322
# },
# "go_multipleprojects": {
# "go_multipleprojects": 1323
# },
# "go_packagemanager": {
# "go_packagemanager.None": 1330,
# "go_packagemanager.dep": 1331,
# "go_packagemanager.godep": 1332,
# "go_packagemanager.glide": 1333,
# "go_packagemanager.govendor": 1334,
# "go_packagemanager.Go Modules": 1335,
# "go_packagemanager.gom": 1336,
# "go_packagemanager.gpm": 1337,
# "go_packagemanager.Other": 1338,
# "go_packagemanager_migrate.No, I don't plan to": 1357,
# "go_packagemanager_migrate.Go Modules": 1358
# },
# "go_packagemanager_migrate": {
# "go_packagemanager_migrate.No, I don't plan to": 1357,
# "go_packagemanager_migrate.Go Modules": 1358
# },
# "go_frameworks": {
# "go_frameworks.None": 1359,
# "go_frameworks.Buffalo": 1360,
# "go_frameworks.Gin": 1361,
# "go_frameworks.Echo": 1362,
# "go_frameworks.Beego": 1363,
# "go_frameworks.Revel": 1364,
# "go_frameworks.Other": 1365
# },
# "go_router": {
# "go_router.None": 1366,
# "go_router.Standard library": 1367,
# "go_router.gorilla / mux": 1368,
# "go_router.go-chi / chi": 1369,
# "go_router.julienschmidt / httproute": 1370,
# "go_router.gocraft / web": 1371,
# "go_router.Other": 1372
# },
# "go_testing": {
# "go_testing.I don\u2019t write unit tests for Go": 1373,
# "go_testing.I write unit tests, but don\u2019t use any frameworks": 1374,
# "go_testing.built-in testing": 1375,
# "go_testing.gocheck": 1376,
# "go_testing.testify": 1377,
# "go_testing.ginkgo": 1378,
# "go_testing.gomega": 1379,
# "go_testing.goconvey": 1380,
# "go_testing.gomock": 1381,
# "go_testing.go-sqlmock": 1382,
# "go_testing.Other": 1383
# },
# "go_external_deps": {
# "go_external_deps": 1384
# },
# "go_code_size": {
# "go_code_size": 1385
# },
# "primary_proglang": {
# "primary_proglang.Java": 112,
# "primary_proglang.C": 113,
# "primary_proglang.C++": 114,
# "primary_proglang.Python": 115,
# "primary_proglang.C#": 116,
# "primary_proglang.PHP": 117,
# "primary_proglang.JavaScript": 118,
# "primary_proglang.Ruby": 119,
# "primary_proglang.Kotlin": 120,
# "primary_proglang.Swift": 121,
# "primary_proglang.Objective-C": 122,
# "primary_proglang.Scala": 123,
# "primary_proglang.Go": 124,
# "primary_proglang.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 125,
# "primary_proglang.Rust": 126,
# "primary_proglang.Haskell": 127,
# "primary_proglang.HTML / CSS": 128,
# "primary_proglang.Elixir": 129,
# "primary_proglang.Visual Basic": 130,
# "primary_proglang.R": 131,
# "primary_proglang.TypeScript": 132,
# "primary_proglang.Dart": 133,
# "primary_proglang.Clojure / ClojureScript": 134,
# "primary_proglang.Delphi": 135,
# "primary_proglang.Groovy": 136,
# "primary_proglang.Perl": 137,
# "primary_proglang.Assembly": 138,
# "primary_proglang.Matlab": 139,
# "primary_proglang.Lua": 140,
# "primary_proglang.Shell scripting languages(bash/shell/powershell)": 141,
# "primary_proglang.Julia": 142,
# "primary_proglang.F#": 143,
# "primary_proglang.Other": 144
# },
# "kotlin_languages_before": {
# "kotlin_languages_before.Java": 1008,
# "kotlin_languages_before.JavaScript/TypeScript": 1009,
# "kotlin_languages_before.C/C++": 1010,
# "kotlin_languages_before.C#": 1011,
# "kotlin_languages_before.PHP": 1012,
# "kotlin_languages_before.Ruby": 1013,
# "kotlin_languages_before.Scala": 1014,
# "kotlin_languages_before.Go": 1015,
# "kotlin_languages_before.Groovy": 1016,
# "kotlin_languages_before.Python": 1017,
# "kotlin_languages_before.Swift": 1018,
# "kotlin_languages_before.Other": 1019
# },
# "scala_ide_additional": {
# "scala_ide_additional": 1166
# },
# "devops_server_templating": {
# "devops_server_templating.None": 1673,
# "devops_server_templating.Docker": 1674,
# "devops_server_templating.Vagrant": 1675,
# "devops_server_templating.Packer": 1676,
# "devops_server_templating.CoreOS rkt": 1677,
# "devops_server_templating.Other": 1678
# },
# "devops_container_orchestration": {
# "devops_container_orchestration.None": 1690,
# "devops_container_orchestration.Amazon ECS / Fargate": 1691,
# "devops_container_orchestration.Amazon EKS": 1692,
# "devops_container_orchestration.Mesos or DC / OS": 1693,
# "devops_container_orchestration.Kubernetes (self-managed or fully managed)": 1694,
# "devops_container_orchestration.Hashicorp Nomad": 1695,
# "devops_container_orchestration.Docker Swarm": 1696,
# "devops_container_orchestration.CoreOS Tectonic": 1697,
# "devops_container_orchestration.Other": 1698
# },
# "devops_deploy_docker_repo": {
# "devops_deploy_docker_repo.I do not deploy": 1703,
# "devops_deploy_docker_repo.I use only the command line": 1704,
# "devops_deploy_docker_repo.I use a configuration management tool (Chef, Puppet, Ansible, etc_)": 1705,
# "devops_deploy_docker_repo.I deploy from CI / CD": 1706,
# "devops_deploy_docker_repo.I deploy with custom / in-house tools": 1707,
# "devops_deploy_docker_repo.Other": 1708
# },
# "devops_keep_artifacts": {
# "devops_keep_artifacts.I don't keep artifacts": 1709,
# "devops_keep_artifacts.Pulp": 1710,
# "devops_keep_artifacts.Amazon S3": 1711,
# "devops_keep_artifacts.Archiva": 1712,
# "devops_keep_artifacts.NuGet": 1713,
# "devops_keep_artifacts.Nexus": 1714,
# "devops_keep_artifacts.JFrog Artifactory": 1715,
# "devops_keep_artifacts.MyGet": 1716,
# "devops_keep_artifacts.npm": 1717,
# "devops_keep_artifacts.Docker Hub (private or public)": 1718,
# "devops_keep_artifacts.Custom tool": 1719,
# "devops_keep_artifacts.Other": 1720
# },
# "accounts": {
# "accounts.None of the above": 2206,
# "accounts.Facebook": 2207,
# "accounts.Twitter": 2208,
# "accounts.LinkedIn": 2209,
# "accounts.QQ": 2210,
# "accounts.Qzone": 2211,
# "accounts.Baidu Tieba": 2212,
# "accounts.Quora": 2213,
# "accounts.Zhihu (\u77e5\u4e4e)": 2214,
# "accounts.XING": 2215,
# "accounts.Instagram": 2216,
# "accounts.VKontakte": 2217,
# "accounts.GitHub": 2218,
# "accounts.Stack Overflow": 2219,
# "accounts.Reddit": 2220,
# "accounts.Other": 2221
# },
# "learn_pl": {
# "learn_pl.I am not learning any programming languages": 1981,
# "learn_pl.Java": 1982,
# "learn_pl.\u0421": 1983,
# "learn_pl.C++": 1984,
# "learn_pl.Python": 1985,
# "learn_pl.C#": 1986,
# "learn_pl.PHP": 1987,
# "learn_pl.JavaScript": 1988,
# "learn_pl.Ruby": 1989,
# "learn_pl.Kotlin": 1990,
# "learn_pl.Swift": 1991,
# "learn_pl.Scala": 1992,
# "learn_pl.Go": 1993,
# "learn_pl.R": 1994,
# "learn_pl.TypeScript": 1995,
# "learn_pl.Haskell": 1996,
# "learn_pl.Elixir": 1997,
# "learn_pl.Clojure": 1998,
# "learn_pl.Rust": 1999,
# "learn_pl.Other": 2000
# },
# "learn_what": {
# "learn_what.I have not tried to learn anything new in the last 12 months": 2001,
# "learn_what.Offline educational organizations": 2002,
# "learn_what.Books": 2003,
# "learn_what.Personal teacher / consultant": 2004,
# "learn_what.Online coding schools": 2005,
# "learn_what.MOOCs (Coursera, edX, Udacity, etc_)": 2006,
# "learn_what.Blogs / forums": 2007,
# "learn_what.Documentation & APIs": 2008,
# "learn_what.Other": 2009
# },
# "it_core": {
# "it_core": 2070
# },
# "sectors_it": {
# "sectors_it.Telecom": 2071,
# "sectors_it.Game Development (including mobile games)": 2072,
# "sectors_it.Mobile Development": 2073,
# "sectors_it.IoT / Embedded": 2074,
# "sectors_it.IT Services": 2075,
# "sectors_it.Cloud Computing / Platform": 2076,
# "sectors_it.Big Data / Data Analysis": 2077,
# "sectors_it.Hardware": 2078,
# "sectors_it.Data center Services": 2079,
# "sectors_it.Software Development Tools": 2080,
# "sectors_it.Internet / Search Engines": 2081,
# "sectors_it.Semiconductors": 2082,
# "sectors_it.E-learning": 2083,
# "sectors_it.Fintech": 2084,
# "sectors_it.Healthcare IT": 2085,
# "sectors_it.Cybersecurity": 2086,
# "sectors_it.BPO Services": 2087,
# "sectors_it.Other Software (all other types of software)": 2088,
# "sectors_it.Other": 2089
# },
# "sectors_nonit": {
# "sectors_nonit.Government and Defense": 2090,
# "sectors_nonit.Administration / Management / Business Development": 2091,
# "sectors_nonit.Banking / Real Estate / Mortgage Financing / Accounting / Finance / Insurance": 2092,
# "sectors_nonit.Business / Strategic Management": 2093,
# "sectors_nonit.Construction / Architecture": 2094,
# "sectors_nonit.Customer Support": 2095,
# "sectors_nonit.Design": 2096,
# "sectors_nonit.Education / Training": 2097,
# "sectors_nonit.Human Resources": 2098,
# "sectors_nonit.Law": 2099,
# "sectors_nonit.Logistics/ Transportation": 2100,
# "sectors_nonit.Machinery": 2101,
# "sectors_nonit.Aerospace": 2102,
# "sectors_nonit.Automotive and Boating": 2103,
# "sectors_nonit.Manufacturing": 2104,
# "sectors_nonit.Marketing": 2105,
# "sectors_nonit.Medicine / Health": 2106,
# "sectors_nonit.Non-profit": 2107,
# "sectors_nonit.Entertainment / Mass Media and Information / Publishing": 2108,
# "sectors_nonit.Restaurants / Hospitality / Tourism": 2109,
# "sectors_nonit.Sales / Distribution / Retail": 2110,
# "sectors_nonit.Food / Agriculture": 2111,
# "sectors_nonit.Science": 2112,
# "sectors_nonit.Security": 2113,
# "sectors_nonit.Service / Maintenance": 2114,
# "sectors_nonit.Energy": 2115,
# "sectors_nonit.Other": 2116
# },
# "pairprog_do": {
# "pairprog_do.Yes, remote pair programming": 2128,
# "pairprog_do.Yes, face-to-face pair programming": 2129,
# "pairprog_do.No": 2130
# },
# "devops_infr_provisioning": {
# "devops_infr_provisioning.None": 1679,
# "devops_infr_provisioning.Terraform": 1680,
# "devops_infr_provisioning.AWS CloudFormation": 1681,
# "devops_infr_provisioning.AWS CDK": 1682,
# "devops_infr_provisioning.TOSCA/Cloudify": 1683,
# "devops_infr_provisioning.OpenStack Heat": 1684,
# "devops_infr_provisioning.Other": 1685
# },
# "devops_involved": {
# "devops_involved": 1665
# },
# "devops_deploy_cloud": {
# "devops_deploy_cloud.Run scripts on your local workstation / VM": 1726,
# "devops_deploy_cloud.Use Continuous Integration / Continuous Delivery": 1727,
# "devops_deploy_cloud.Use your cloud provider's web interface": 1728,
# "devops_deploy_cloud.Other": 1729
# },
# "kind_of_dev": {
# "kind_of_dev.Product development": 2117,
# "kind_of_dev.Outsourcing": 2118,
# "kind_of_dev.Custom-tailored software / websites / applications": 2119,
# "kind_of_dev.In-house development": 2120,
# "kind_of_dev.Internal deployment and maintenance of third-party tools": 2121,
# "kind_of_dev.Customer services development (websites, mobile apps, etc_)": 2122,
# "kind_of_dev.Open source projects": 2123,
# "kind_of_dev.Other": 2124
# },
# "java_unittesting": {
# "java_unittesting.I don\u2019t write unit tests for Java": 481,
# "java_unittesting.I write unit tests, but don\u2019t use any frameworks": 482,
# "java_unittesting.JUnit": 483,
# "java_unittesting.TestNG": 484,
# "java_unittesting.Mockito": 485,
# "java_unittesting.PowerMock": 486,
# "java_unittesting.Spock": 487,
# "java_unittesting.EasyMock": 488,
# "java_unittesting.JMockit": 489,
# "java_unittesting.Other": 490
# },
# "tools_use": {
# "tools_use.Team collaboration / task management / project / workflow management tools.Free": 418,
# "tools_use.Team collaboration / task management / project / workflow management tools.Paid": 419,
# "tools_use.Team collaboration / task management / project / workflow management tools.Not sure": 420,
# "tools_use.In-cloud Editor or IDE.Not sure": 421,
# "tools_use.Service desk / helpdesk automation solutions (Zendesk).Not sure": 422,
# "tools_use.Service desk / helpdesk automation solutions (Zendesk).Paid": 423,
# "tools_use.In-cloud Editor or IDE.Free": 424,
# "tools_use.Service desk / helpdesk automation solutions (Zendesk).Free": 425,
# "tools_use.In-cloud Editor or IDE.Paid": 426
# },
# "swiftoc_platforms": {
# "swiftoc_platforms.iOS": 1072,
# "swiftoc_platforms.tvOS": 1073,
# "swiftoc_platforms.watchOS": 1074,
# "swiftoc_platforms.macOS": 1075,
# "swiftoc_platforms.I don\u2019t develop for Apple platforms": 1076
# },
# "swiftoc_cpp_libs": {
# "swiftoc_cpp_libs": 1079
# },
# "swiftoc_ui_frameworks": {
# "swiftoc_ui_frameworks.None": 1094,
# "swiftoc_ui_frameworks.XCTest": 1095,
# "swiftoc_ui_frameworks.KIF": 1096,
# "swiftoc_ui_frameworks.EarlGrey": 1097,
# "swiftoc_ui_frameworks.iOSSnapshotTestCase (FBSnapshotTestCase)": 1098,
# "swiftoc_ui_frameworks.Other": 1099
# },
# "swiftoc_db_viewer_do": {
# "swiftoc_db_viewer_do": 1112
# },
# "swiftoc_together": {
# "swiftoc_together": 1078
# },
# "employment_status": {
# "employment_status": 0
# },
# "test_types": {
# "test_types.None": 216,
# "test_types.Unit": 217,
# "test_types.Integration": 218,
# "test_types.End-to-End": 219,
# "test_types.Performance ": 220,
# "test_types.Other": 221
# },
# "db": {
# "db.None": 223,
# "db.DB2": 224,
# "db.MS SQL Server": 225,
# "db.MySQL": 226,
# "db.Oracle Database": 227,
# "db.PostgreSQL": 228,
# "db.SQLite": 229,
# "db.Cassandra": 230,
# "db.Couchbase": 231,
# "db.HBase": 232,
# "db.MongoDB": 233,
# "db.Neo4j": 234,
# "db.Redis": 235,
# "db.Amazon Redshift": 236,
# "db.H2": 237,
# "db.MariaDB": 238,
# "db.Exasol": 239,
# "db.ClickHouse": 240,
# "db.Other": 241,
# "db_adopt.No, I'm not planning to adopt / migrate to any": 242,
# "db_adopt.Yes, I'm planning to adopt / migrate to other database(s) - Write In": 243,
# "db_adopt.DB2": 244,
# "db_adopt.MS SQL Server": 245,
# "db_adopt.MySQL": 246,
# "db_adopt.Oracle Database": 247,
# "db_adopt.PostgreSQL": 248,
# "db_adopt.SQLite": 249,
# "db_adopt.Cassandra": 250,
# "db_adopt.Couchbase": 251,
# "db_adopt.HBase": 252,
# "db_adopt.MongoDB": 253,
# "db_adopt.Neo4j": 254,
# "db_adopt.Redis": 255,
# "db_adopt.Amazon Redshift": 256,
# "db_adopt.H2": 257,
# "db_adopt.MariaDB": 258,
# "db_adopt.ClickHouse": 259,
# "db_adopt.Other": 260,
# "db_how_long": 1468,
# "db_debug_stored_proc": 1469,
# "db_have_tests": 1470,
# "db_keep_scripts_vcs": 1471,
# "db_connections": 1472,
# "db_do_comm": 1473,
# "db_n_rows": 1474
# },
# "c_dependencymanager": {
# "c_dependencymanager.None": 571,
# "c_dependencymanager.build2": 572,
# "c_dependencymanager.Conan": 573,
# "c_dependencymanager.Nuget": 574,
# "c_dependencymanager.vcpkg": 575,
# "c_dependencymanager.I rely on a system package manager": 576,
# "c_dependencymanager.Other": 577
# },
# "cpp_dependencymanager": {
# "cpp_dependencymanager.None": 620,
# "cpp_dependencymanager.build2": 621,
# "cpp_dependencymanager.Conan": 622,
# "cpp_dependencymanager.Hunter": 623,
# "cpp_dependencymanager.Nuget": 624,
# "cpp_dependencymanager.vcpkg": 625,
# "cpp_dependencymanager.I rely on a system package manager": 626,
# "cpp_dependencymanager.Other": 627
# },
# "cpp_guidelines_tools": {
# "cpp_guidelines_tools.None": 668,
# "cpp_guidelines_tools.Clang-analyzer / Clang Static Analyzer": 669,
# "cpp_guidelines_tools.Clang-tidy": 670,
# "cpp_guidelines_tools.Cppcheck": 671,
# "cpp_guidelines_tools.Coverity": 672,
# "cpp_guidelines_tools.Cpplint": 673,
# "cpp_guidelines_tools.PVS-Studio": 674,
# "cpp_guidelines_tools.Klocwork": 675,
# "cpp_guidelines_tools.PC-lint / Flexelint": 676,
# "cpp_guidelines_tools.Parasoft C/C++test": 677,
# "cpp_guidelines_tools.Stack": 678,
# "cpp_guidelines_tools.Tool provided by my IDE (Visual Studio, ReSharper C++, CLion, etc_)": 679,
# "cpp_guidelines_tools.Other": 680
# },
# "cpp_guidelines_sources": {
# "cpp_guidelines_sources.None": 681,
# "cpp_guidelines_sources.Effective C++ series (books by Scott Meyers)": 682,
# "cpp_guidelines_sources.C++ Core Guidelines \u2013 main project (github_com/isocpp/CppCoreGuidelines)": 683,
# "cpp_guidelines_sources.Guru of the Week / Exceptional C++ series (blog/books by Herb Sutter)": 684,
# "cpp_guidelines_sources.C++ Coding Standards (book by Herb Sutter and Andrei Alexandrescu)": 685,
# "cpp_guidelines_sources.Abseil tips of the week": 686,
# "cpp_guidelines_sources.Google C++ Style Guide": 687,
# "cpp_guidelines_sources.CERT C++ Secure Coding Standard (www_securecoding_cert_org)": 688,
# "cpp_guidelines_sources.JSF++, Joint Strike Fighter Air Vehicle": 689,
# "cpp_guidelines_sources.Coding Standards (Lockheed Martin)": 690,
# "cpp_guidelines_sources.High Integrity C++ Coding Standard (Programming Research)": 691,
# "cpp_guidelines_sources.C++ Core Guidelines \u2013 a company-specific fork/branch augmented with internal rules": 692,
# "cpp_guidelines_sources.MISRA C++ (MIRA Ltd_)": 693,
# "cpp_guidelines_sources.Other": 694
# },
# "python_ds_libs": {
# "python_ds_libs.None": 723,
# "python_ds_libs.NumPy": 724,
# "python_ds_libs.SciPy": 725,
# "python_ds_libs.Pandas": 726,
# "python_ds_libs.MXNet": 727,
# "python_ds_libs.Matplotlib": 728,
# "python_ds_libs.Seaborn": 729,
# "python_ds_libs.SciKit-Learn": 730,
# "python_ds_libs.Keras": 731,
# "python_ds_libs.TensorFlow": 732,
# "python_ds_libs.PyTorch": 733,
# "python_ds_libs.Theano": 734,
# "python_ds_libs.NLTK": 735,
# "python_ds_libs.Gensim": 736,
# "python_ds_libs.Other": 737
# },
# "python_other_libs": {
# "python_other_libs.None": 738,
# "python_other_libs.Requests": 739,
# "python_other_libs.aiohttp": 740,
# "python_other_libs.PyQT": 741,
# "python_other_libs.PyGTK": 742,
# "python_other_libs.wxPython": 743,
# "python_other_libs.Pillow": 744,
# "python_other_libs.Tkinter": 745,
# "python_other_libs.Pygame": 746,
# "python_other_libs.Twisted": 747,
# "python_other_libs.Asyncio": 748,
# "python_other_libs.Kivy": 749,
# "python_other_libs.Six": 750,
# "python_other_libs.Scrapy": 751,
# "python_other_libs.Other": 752
# },
# "python_web_libs": {
# "python_web_libs.None": 712,
# "python_web_libs.Django": 713,
# "python_web_libs.web2py": 714,
# "python_web_libs.Bottle": 715,
# "python_web_libs.CherryPy\u00a0": 716,
# "python_web_libs.Flask\u00a0": 717,
# "python_web_libs.Hug": 718,
# "python_web_libs.Pyramid\u00a0": 719,
# "python_web_libs.Tornado": 720,
# "python_web_libs.Falcon": 721,
# "python_web_libs.Other": 722
# },
# "js_sslang": {
# "js_sslang.CSS": 1229,
# "js_sslang.Sass": 1230,
# "js_sslang.SCSS": 1231,
# "js_sslang.Less": 1232,
# "js_sslang.PostCSS": 1233,
# "js_sslang.CSS-in-JS": 1234,
# "js_sslang.CSS Modules": 1235,
# "js_sslang.Stylus": 1236,
# "js_sslang.Other": 1237
# },
# "js_graphql": {
# "js_graphql": 1238
# },
# "js_monorepo": {
# "js_monorepo": 1239
# },
# "learn_time": {
# "learn_time": 2029
# },
# "learn_kind_of_content": {
# "learn_kind_of_content": 2028
# },
# "php_qualitytools": {
# "php_qualitytools.None": 879,
# "php_qualitytools.Php Inspections \u200b(EA Extended)": 880,
# "php_qualitytools.PHP_CodeSniffer": 881,
# "php_qualitytools.PHP CS Fixer": 882,
# "php_qualitytools.PHPMD": 883,
# "php_qualitytools.PHPStan": 884,
# "php_qualitytools.Psalm": 885,
# "php_qualitytools.Phan": 886,
# "php_qualitytools.PHP Insights": 887,
# "php_qualitytools.Other": 888
# },
# "php_templateengines": {
# "php_templateengines.None, I use pure PHP": 889,
# "php_templateengines.None, I don\u2019t render HTML": 890,
# "php_templateengines.Twig": 891,
# "php_templateengines.Blade": 892,
# "php_templateengines.Smarty": 893,
# "php_templateengines.Mustache": 894,
# "php_templateengines.Latte": 895,
# "php_templateengines.Other": 896
# },
# "php_profiler": {
# "php_profiler.None": 897,
# "php_profiler.In code timers (Manual timestamps, PHPBench, Toolbars, etc_)": 898,
# "php_profiler.Xdebug Profiler": 899,
# "php_profiler.XHProf": 900,
# "php_profiler.Blackfire_io": 901,
# "php_profiler.Application performance monitoring (New Relic, Tideways, etc_)": 902,
# "php_profiler.HTTP load testing (ab, siege, etc_)": 903,
# "php_profiler.Other": 904
# },
# "devops_use_docker": {
# "devops_use_docker.Run dockerized utilities": 1686,
# "devops_use_docker.Run your application in one container, and backing services (e_g_ database)": 1687,
# "devops_use_docker.Run multiple application containers (e_g_ microservices)": 1688,
# "devops_use_docker.Other": 1689
# },
# "go_modules_outside": {
# "go_modules_outside": 1386
# },
# "go_migrate": {
# "go_migrate": 1387
# },
# "csharp_vsplugins": {
# "csharp_vsplugins.None": 783,
# "csharp_vsplugins.ReSharper": 784,
# "csharp_vsplugins.ReSharper C++": 785,
# "csharp_vsplugins.CodeRush": 786,
# "csharp_vsplugins.Visual Assist": 787,
# "csharp_vsplugins.Roslynator": 788,
# "csharp_vsplugins.Other": 789
# },
# "csharp_vsedition": {
# "csharp_vsedition": 807
# },
# "csharp_msdn_type": {
# "csharp_msdn_type": 827
# },
# "swiftoc_mock": {
# "swiftoc_mock.None": 1086,
# "swiftoc_mock.OCMock": 1087,
# "swiftoc_mock.OCMockito": 1088,
# "swiftoc_mock.Expecta": 1089,
# "swiftoc_mock.Cuckoo": 1090,
# "swiftoc_mock.SwiftHamcrest": 1091,
# "swiftoc_mock.Other": 1092
# },
# "kotlin_target": {
# "kotlin_target.JVM": 924,
# "kotlin_target.Android": 925,
# "kotlin_target.Kotlin for JavaScript": 926,
# "kotlin_target.Native": 927
# },
# "kotlin_jdk": {
# "kotlin_jdk.JDK 6": 928,
# "kotlin_jdk.JDK 7": 929,
# "kotlin_jdk.JDK 8": 930,
# "kotlin_jdk.JDK 9": 931,
# "kotlin_jdk.JDK 10": 932,
# "kotlin_jdk.JDK 11": 933,
# "kotlin_jdk.I don't know": 934
# },
# "kotlin_android": {
# "kotlin_android.4_1 \u2013 4_3_1 \u00a0Jelly Bean": 935,
# "kotlin_android.4_4 \u2013 4_4_4 \u00a0KitKat \u00a0": 936,
# "kotlin_android.5_0 \u2013 5_1_1 \u00a0Lollipop": 937,
# "kotlin_android.6_0 \u2013 6_0_1 \u00a0Marshmallow": 938,
# "kotlin_android.7_0 \u2013 7_1_2 \u00a0Nougat": 939,
# "kotlin_android.8_0 \u2013 8_1 \u00a0Oreo": 940,
# "kotlin_android.9_0 Pie": 941,
# "kotlin_android.Other": 942
# },
# "kotlin_platforms": {
# "kotlin_platforms.iOS (arm32, arm64, emulator x86_64)": 945,
# "kotlin_platforms.macOS (x86_64)": 946,
# "kotlin_platforms.Android (arm32, arm64)": 947,
# "kotlin_platforms.Windows (mingw x86_64)": 948,
# "kotlin_platforms.Linux (x86_64, arm32, MIPS, MIPS little endian)": 949
# },
# "kotlin_purposes": {
# "kotlin_purposes.For work": 1001,
# "kotlin_purposes.For personal/side projects\u00a0": 1002,
# "kotlin_purposes.I occasionally play around with Kotlin (Hobby)": 1003,
# "kotlin_purposes.Other": 1004
# },
# "kotlin_projecttypes": {
# "kotlin_projecttypes.New projects": 1005,
# "kotlin_projecttypes.Old projects (migration)": 1006,
# "kotlin_projecttypes.Other": 1007
# },
# "communication_tools": {
# "communication_tools.Email (Microsoft Mail Server, Gmail, etc_)": 298,
# "communication_tools.Instant messaging/video calling (Slack, Skype, Hipchat, etc_)": 299,
# "communication_tools.Video conferencing (Google Meet, Zoom, etc_)": 300,
# "communication_tools.Calendars (Google Calendar, etc_)": 301,
# "communication_tools.Corporate portal (MS Sharepoint, Pingboard, etc_)": 302,
# "communication_tools.Service desk/Help desk (Zendesk, Jira Service Desk, etc_)": 303,
# "communication_tools.None": 304
# },
# "space_tools_mobile": {
# "space_tools_mobile.None": 1756,
# "space_tools_mobile.Email (Microsoft Mail Server, Gmail, etc_)": 1757,
# "space_tools_mobile.Instant messaging/video calling (Slack, Skype, Hipchat, etc_)": 1758,
# "space_tools_mobile.Video conferencing (Google Meet, Zoom, etc_)": 1759,
# "space_tools_mobile.Calendars (Google Calendar, etc_)": 1760,
# "space_tools_mobile.Corporate portal (MS Sharepoint, Pingboard, etc_)": 1761,
# "space_tools_mobile.Service desk/Help desk (Zendesk, Jira Service Desk, etc_)": 1762
# },
# "space_mail_server": {
# "space_mail_server": 1860
# },
# "space_suite": {
# "space_suite.None": 1861,
# "space_suite.G Suite (Gmail, Google Drive, Meet, etc_)": 1862,
# "space_suite.Office 365 (Outlook, Microsoft Teams, SharePoint, etc)": 1863,
# "space_suite.Zoho": 1864,
# "space_suite.Yahoo": 1865,
# "space_suite.ProtonMail": 1866,
# "space_suite.Other": 1867
# },
# "space_email_server": {
# "space_email_server": 1876
# },
# "space_chat": {
# "space_chat.Mattermost": 1894,
# "space_chat.Telegram": 1895,
# "space_chat.WhatsApp": 1896,
# "space_chat.Hipchat/Stride": 1897,
# "space_chat.Viber": 1898,
# "space_chat.Slack": 1899,
# "space_chat.Rocket_Chat": 1900,
# "space_chat.Zulip": 1901,
# "space_chat.Skype": 1902,
# "space_chat.Google Hangouts": 1903,
# "space_chat.IRC": 1904,
# "space_chat.Workplace by Facebook": 1905,
# "space_chat.Microsoft Teams": 1906,
# "space_chat.Quip": 1907,
# "space_chat.Zoho Cliq": 1908,
# "space_chat.Flock": 1909,
# "space_chat.Custom tool": 1910,
# "space_chat.Other": 1911
# },
# "space_video_calls": {
# "space_video_calls.Slack": 1912,
# "space_video_calls.Skype": 1913,
# "space_video_calls.Skype for Business, Lync": 1914,
# "space_video_calls.Microsoft Teams": 1915,
# "space_video_calls.Google Meet": 1916,
# "space_video_calls.Polycom": 1917,
# "space_video_calls.Zoom": 1918,
# "space_video_calls.Workplace by Facebook": 1919,
# "space_video_calls.Other": 1920
# },
# "space_knowledge_base": {
# "space_knowledge_base.None": 1921,
# "space_knowledge_base.Confluence": 1922,
# "space_knowledge_base.MediaWiki": 1923,
# "space_knowledge_base.Crowdbase": 1924,
# "space_knowledge_base.GitHub Wiki": 1925,
# "space_knowledge_base.Stack Overflow for Teams": 1926,
# "space_knowledge_base.Custom": 1927,
# "space_knowledge_base.GitLab": 1928,
# "space_knowledge_base.Azure DevOps": 1929,
# "space_knowledge_base.Notion": 1930,
# "space_knowledge_base.Wrike": 1931,
# "space_knowledge_base.Microsoft SharePoint": 1932,
# "space_knowledge_base.Jive": 1933,
# "space_knowledge_base.Guru": 1934,
# "space_knowledge_base.Nuclino": 1935,
# "space_knowledge_base.Slite": 1936,
# "space_knowledge_base.Other": 1937
# },
# "space_document_collaboration": {
# "space_document_collaboration.None": 1956,
# "space_document_collaboration.Office 365": 1957,
# "space_document_collaboration.Zoho Office Suite": 1958,
# "space_document_collaboration. Confluence": 1959,
# "space_document_collaboration.Google Docs\u00a0": 1960,
# "space_document_collaboration.Dropbox Paper": 1961,
# "space_document_collaboration.Quip": 1962,
# "space_document_collaboration.Other": 1963
# },
# "space_file_sharing": {
# "space_file_sharing.None": 1948,
# "space_file_sharing.Google Drive": 1949,
# "space_file_sharing.Dropbox": 1950,
# "space_file_sharing.OneCloud": 1951,
# "space_file_sharing.Microsoft OneDrive": 1952,
# "space_file_sharing.Sharepoint": 1953,
# "space_file_sharing.On-premise FTP server": 1954,
# "space_file_sharing.Other": 1955
# },
# "swiftoc_serverside": {
# "swiftoc_serverside": 1119,
# "swiftoc_serverside_frameworks.Kitura": 1120,
# "swiftoc_serverside_frameworks.Vapor": 1121,
# "swiftoc_serverside_frameworks.Perfect": 1122,
# "swiftoc_serverside_frameworks.Other": 1123
# },
# "swiftoc_serverside_frameworks": {
# "swiftoc_serverside_frameworks.Kitura": 1120,
# "swiftoc_serverside_frameworks.Vapor": 1121,
# "swiftoc_serverside_frameworks.Perfect": 1122,
# "swiftoc_serverside_frameworks.Other": 1123
# },
# "rust_how": {
# "rust_how.Work": 1240,
# "rust_how.Personal / side projects": 1241,
# "rust_how.Hobby": 1242,
# "rust_how.Other": 1243,
# "rust_how_long": 1244,
# "rust_how_debug": 1288
# },
# "rust_how_long": {
# "rust_how_long": 1244
# },
# "rust_other_langs": {
# "rust_other_langs.None": 1245,
# "rust_other_langs.C": 1246,
# "rust_other_langs.C++": 1247,
# "rust_other_langs.Python": 1248,
# "rust_other_langs.Java": 1249,
# "rust_other_langs.Go": 1250,
# "rust_other_langs.JavaScript": 1251,
# "rust_other_langs.PHP": 1252,
# "rust_other_langs.Other": 1253
# },
# "rust_code_interact": {
# "rust_code_interact.Language interop (foreign functions)": 1254,
# "rust_code_interact.RPC": 1255,
# "rust_code_interact.REST API": 1256,
# "rust_code_interact.Other": 1257
# },
# "rust_ide": {
# "rust_ide.Atom": 1266,
# "rust_ide.Emacs": 1267,
# "rust_ide.IntelliJ IDEA": 1268,
# "rust_ide.CLion": 1269,
# "rust_ide.Sublime Text": 1270,
# "rust_ide.Vim": 1271,
# "rust_ide.Visual Studio": 1272,
# "rust_ide.Visual Studio Code": 1273,
# "rust_ide.Other": 1274
# },
# "rust_profiler": {
# "rust_profiler.I don\u2019t use profiling tools": 1289,
# "rust_profiler.perf": 1290,
# "rust_profiler.gprof": 1291,
# "rust_profiler.callgrind/cachegrind": 1292,
# "rust_profiler.DTrace": 1293,
# "rust_profiler.Other": 1294
# },
# "ai_replace_court_trust": {
# "ai_replace_court_trust": 1627
# },
# "rust_install_windows": {
# "rust_install_windows.rustup_rs": 1258,
# "rust_install_windows.Build from sources": 1259,
# "rust_install_windows.Official Windows _msi installer": 1260,
# "rust_install_windows.Linux distribution package": 1261,
# "rust_install_windows.Official tarball": 1262,
# "rust_install_windows.Homebrew": 1263,
# "rust_install_windows.Official MacOS _pkg installer": 1264,
# "rust_install_windows.Other": 1265
# },
# "rust_platforms": {
# "rust_platforms.Linux": 1311,
# "rust_platforms.Windows": 1312,
# "rust_platforms.macOS": 1313,
# "rust_platforms.Android": 1314,
# "rust_platforms.iOS": 1315,
# "rust_platforms.WebAssembly": 1316,
# "rust_platforms.Embedded": 1317,
# "rust_platforms.Other": 1318
# },
# "rust_devs_count": {
# "rust_devs_count": 1319
# },
# "cats_dogs": {
# "cats_dogs": 2030
# },
# "rust_primary_ide": {
# "rust_primary_ide": 1277
# },
# "space_calendar": {
# "space_calendar.Google Calendar": 1877,
# "space_calendar.Outlook": 1878,
# "space_calendar.iCal (Calendar App in Mac)": 1879,
# "space_calendar.Microsoft Exchange": 1880,
# "space_calendar.IBM Domino": 1881,
# "space_calendar.Fantastical": 1882,
# "space_calendar.Other": 1883
# },
# "space_mail_service": {
# "space_mail_service.I don\u2019t know": 1868,
# "space_mail_service.Gmail (as part of G Suite)": 1869,
# "space_mail_service.Microsoft Outlook (as part of Office 365)": 1870,
# "space_mail_service.Microsoft Exchange Server": 1871,
# "space_mail_service.MDaemon": 1872,
# "space_mail_service.OpenSMTPD": 1873,
# "space_mail_service.We don\u2019t have a company email service, everyone can use different email services (Gmail, Yahoo, etc_)_": 1874,
# "space_mail_service.Other": 1875
# },
# "where_host": {
# "where_host.Locally (on your workstation, developer environment, or device)": 1722,
# "where_host.Private Servers (hosted on your company\u2019s cluster or server on-premises)": 1723,
# "where_host.Cloud Service (AWS, MS Azure, GCP, etc_)": 1724,
# "where_host.Other": 1725,
# "where_host_primarly": 1730,
# "where_host_plan.Locally (on your workstation, developer environment or device)": 1740,
# "where_host_plan.Private Servers (hosted on your company\u2019s cluster or server on-premises)": 1741,
# "where_host_plan.Amazon Web Services": 1742,
# "where_host_plan.Microsoft Azure": 1743,
# "where_host_plan.Google Cloud Platform": 1744,
# "where_host_plan.Rackspace": 1745,
# "where_host_plan.RedHat OpenShift": 1746,
# "where_host_plan.IBM SoftLayer": 1747,
# "where_host_plan.Cloud Foundry": 1748,
# "where_host_plan.Heroku": 1749,
# "where_host_plan.Other": 1750
# },
# "where_host_primarly": {
# "where_host_primarly": 1730
# },
# "where_host_plan": {
# "where_host_plan.Locally (on your workstation, developer environment or device)": 1740,
# "where_host_plan.Private Servers (hosted on your company\u2019s cluster or server on-premises)": 1741,
# "where_host_plan.Amazon Web Services": 1742,
# "where_host_plan.Microsoft Azure": 1743,
# "where_host_plan.Google Cloud Platform": 1744,
# "where_host_plan.Rackspace": 1745,
# "where_host_plan.RedHat OpenShift": 1746,
# "where_host_plan.IBM SoftLayer": 1747,
# "where_host_plan.Cloud Foundry": 1748,
# "where_host_plan.Heroku": 1749,
# "where_host_plan.Other": 1750
# },
# "rust_projecttypes": {
# "rust_projecttypes.Web Development": 1295,
# "rust_projecttypes.Systems Programming": 1296,
# "rust_projecttypes.DevOps": 1297,
# "rust_projecttypes.Network Programming": 1298,
# "rust_projecttypes.Databases": 1299,
# "rust_projecttypes.Security": 1300,
# "rust_projecttypes.Finance / Commerce": 1301,
# "rust_projecttypes.Data Science": 1302,
# "rust_projecttypes.Mobile": 1303,
# "rust_projecttypes.Desktop / GUI Applications": 1304,
# "rust_projecttypes.Embedded devices / Internet of Things": 1305,
# "rust_projecttypes.Academic / Scientific / Numeric": 1306,
# "rust_projecttypes.Machine Learning / Artificial Intelligence": 1307,
# "rust_projecttypes.Blockchain": 1308,
# "rust_projecttypes.Games": 1309,
# "rust_projecttypes.Other": 1310
# },
# "go_how": {
# "go_how": 1320
# },
# "kotlin_server_client": {
# "kotlin_server_client.Server-side (like Node_js)": 943,
# "kotlin_server_client.Browser": 944
# },
# "go_templateengines": {
# "go_templateengines.None": 1324,
# "go_templateengines.text/template": 1325,
# "go_templateengines.html/template": 1326,
# "go_templateengines.Plush": 1327,
# "go_templateengines.Pongo2": 1328,
# "go_templateengines.Other": 1329
# },
# "go_ide": {
# "go_ide": 1339
# },
# "position_level": {
# "position_level": 18
# },
# "do_crossplatform": {
# "do_crossplatform": 1582
# },
# "crossplatform_platform": {
# "crossplatform_platform.Windows": 1583,
# "crossplatform_platform.Unix/Linux": 1584,
# "crossplatform_platform.macOS": 1585,
# "crossplatform_platform.iOS": 1586,
# "crossplatform_platform.Android": 1587,
# "crossplatform_platform.Web": 1588,
# "crossplatform_platform.Embedded": 1589,
# "crossplatform_platform.Other": 1590
# },
# "crossplatform_how_os": {
# "crossplatform_how_os.Using containers (e_g_ Docker, Vagrant)": 1591,
# "crossplatform_how_os.Using VMs (e_g_ VirtualBox, vSphere)": 1592,
# "crossplatform_how_os.Using physical machines/devices": 1593,
# "crossplatform_how_os.I don\u2019t normally work with different OS / platforms": 1594,
# "crossplatform_how_os.Other": 1595
# },
# "vcs_how": {
# "vcs_how.From the terminal": 445,
# "vcs_how.Using specialized tools (e_g_ GitKraken, Sourcetree, GitHub desktop, etc_)": 446,
# "vcs_how.From the IDE": 447,
# "vcs_how.From a web browser": 448,
# "vcs_how.Other": 449
# },
# "is_testing_integral": {
# "is_testing_integral": 1510
# },
# "do_case_design": {
# "do_case_design": 1511
# },
# "test_design_how": {
# "test_design_how": 1512
# },
# "testing_types": {
# "testing_types.None": 1513,
# "testing_types.Regression testing": 1514,
# "testing_types.Functional testing": 1515,
# "testing_types.Security testing": 1516,
# "testing_types.Usability testing": 1517,
# "testing_types.Performance testing": 1518,
# "testing_types.Stress testing": 1519,
# "testing_types.Stability testing": 1520,
# "testing_types.Smoke testing": 1521,
# "testing_types.I\u2019m not sure": 1522,
# "testing_types.Other": 1523
# },
# "testers_qa_ratio": {
# "testers_qa_ratio": 1524
# },
# "store_testcases": {
# "store_testcases.I don\u2019t use any specific tools": 1527,
# "store_testcases.Microsoft Office documents (such as Excel spreadsheets)": 1528,
# "store_testcases.Special test case management tools": 1529,
# "store_testcases.Other": 1530
# },
# "test_management_tools": {
# "test_management_tools.None": 1531,
# "test_management_tools.Zephyr": 1532,
# "test_management_tools.TestFLO for JIRA": 1533,
# "test_management_tools.TestRail": 1534,
# "test_management_tools.Other": 1535
# },
# "automated_tests": {
# "automated_tests": 1536
# },
# "auto_tests_pl": {
# "auto_tests_pl.None": 1572,
# "auto_tests_pl.Python": 1573,
# "auto_tests_pl.JavaScript": 1574,
# "auto_tests_pl.Java": 1575,
# "auto_tests_pl.Kotlin": 1576,
# "auto_tests_pl.C#": 1577,
# "auto_tests_pl.Perl": 1578,
# "auto_tests_pl.Ruby": 1579,
# "auto_tests_pl.Tcl": 1580,
# "auto_tests_pl.Other": 1581
# },
# "testers_qa_pskills": {
# "testers_qa_pskills": 1525
# },
# "testers_qa_manual": {
# "testers_qa_manual": 1526
# },
# "auto_tests_frameworks": {
# "auto_tests_frameworks.None": 1537,
# "auto_tests_frameworks.TestNG": 1538,
# "auto_tests_frameworks.JUnit": 1539,
# "auto_tests_frameworks.NUnit / xUnit_Net": 1540,
# "auto_tests_frameworks.MSTest / VSTest": 1541,
# "auto_tests_frameworks.Robot Framework": 1542,
# "auto_tests_frameworks.Serenity (Thucydides)": 1543,
# "auto_tests_frameworks.Cucumber": 1544,
# "auto_tests_frameworks.SpecFlow": 1545,
# "auto_tests_frameworks.RSpec": 1546,
# "auto_tests_frameworks.ExtentReports": 1547,
# "auto_tests_frameworks.Selenium WebDriver": 1548,
# "auto_tests_frameworks.Allure": 1549,
# "auto_tests_frameworks.Other": 1550
# },
# "auto_tests_tools": {
# "auto_tests_tools.None": 1551,
# "auto_tests_tools.SoapUI": 1552,
# "auto_tests_tools.TestComplete": 1553,
# "auto_tests_tools.Apache JMeter": 1554,
# "auto_tests_tools.Gauge": 1555,
# "auto_tests_tools.HP UFT / QTP": 1556,
# "auto_tests_tools.Katalon Studio": 1557,
# "auto_tests_tools.FitNesse": 1558,
# "auto_tests_tools.Rational Functional Tester": 1559,
# "auto_tests_tools.Ranorex": 1560,
# "auto_tests_tools.Postman": 1561,
# "auto_tests_tools.Squish": 1562,
# "auto_tests_tools.Other": 1563
# },
# "testing_platforms": {
# "testing_platforms.None": 1564,
# "testing_platforms.SauceLabs": 1565,
# "testing_platforms.BrowserStack": 1566,
# "testing_platforms.CrossBrowserTesting": 1567,
# "testing_platforms.Kobiton": 1568,
# "testing_platforms.Perfecto": 1569,
# "testing_platforms.TestingBot": 1570,
# "testing_platforms.Other": 1571
# },
# "go_buildsystem": {
# "go_buildsystem.Go build": 1388,
# "go_buildsystem.Bazel": 1389,
# "go_buildsystem.Other": 1390
# },
# "devops_run_cont_apps": {
# "devops_run_cont_apps.None": 1699,
# "devops_run_cont_apps.Docker Compose": 1700,
# "devops_run_cont_apps.Minikube": 1701,
# "devops_run_cont_apps.Other": 1702
# },
# "kotlin_app_types": {
# "kotlin_app_types.Web Back-end": 950,
# "kotlin_app_types.Web Front-end": 951,
# "kotlin_app_types.Mobile": 952,
# "kotlin_app_types.Desktop": 953,
# "kotlin_app_types.Data Analysis / Business Intelligence": 954,
# "kotlin_app_types.Machine Learning": 955,
# "kotlin_app_types.Game Development": 956,
# "kotlin_app_types.IoT": 957,
# "kotlin_app_types.Embedded": 958,
# "kotlin_app_types.Library or Framework": 959,
# "kotlin_app_types.Tooling": 960,
# "kotlin_app_types.Other": 961
# },
# "lifestyle_infosource": {
# "lifestyle_infosource.TV": 2154,
# "lifestyle_infosource.News websites / Aggregated news sites": 2155,
# "lifestyle_infosource.Social media": 2156,
# "lifestyle_infosource.Community forums (Reddit, Stack Overflow)": 2157,
# "lifestyle_infosource.Podcasts": 2158,
# "lifestyle_infosource.Print media (magazines / newspapers)": 2159,
# "lifestyle_infosource.Wikis": 2160,
# "lifestyle_infosource.Radio": 2161,
# "lifestyle_infosource.Books": 2162,
# "lifestyle_infosource.Other websites": 2163,
# "lifestyle_infosource.Other": 2164
# },
# "lifestyle_adblock": {
# "lifestyle_adblock": 2165
# },
# "lifestyle_smartphone": {
# "lifestyle_smartphone": 2166
# },
# "lifestyle_personal_data": {
# "lifestyle_personal_data": 2167
# },
# "lifestyle_confs": {
# "lifestyle_confs_reasons.Gain new knowledge": 2173,
# "lifestyle_confs_reasons.Network": 2174,
# "lifestyle_confs_reasons.Travel": 2175,
# "lifestyle_confs_reasons.Enjoy the atmosphere": 2176,
# "lifestyle_confs_reasons.Promote employer or yourself (for speakers)": 2177,
# "lifestyle_confs_reasons.Other": 2178
# },
# "lifestyle_confs_reasons": {
# "lifestyle_confs_reasons.Gain new knowledge": 2173,
# "lifestyle_confs_reasons.Network": 2174,
# "lifestyle_confs_reasons.Travel": 2175,
# "lifestyle_confs_reasons.Enjoy the atmosphere": 2176,
# "lifestyle_confs_reasons.Promote employer or yourself (for speakers)": 2177,
# "lifestyle_confs_reasons.Other": 2178
# },
# "lifestyle_charity": {
# "lifestyle_charity": 2179,
# "lifestyle_charity_reasons.Provide for the basic needs of the very poor": 2180,
# "lifestyle_charity_reasons.Give the disadvantaged a way to help themselves": 2181,
# "lifestyle_charity_reasons.Give others some of the opportunities you\u2019ve enjoyed": 2182,
# "lifestyle_charity_reasons.Address the fundamental problems of our world": 2183,
# "lifestyle_charity_reasons.Provide services that governments can\u2019t or won\u2019t": 2184,
# "lifestyle_charity_reasons.Make my community a better place to live in": 2185,
# "lifestyle_charity_reasons.Support the positive efforts of my friends, colleagues, or family": 2186,
# "lifestyle_charity_reasons.Make the world a better place to live in": 2187,
# "lifestyle_charity_reasons.Make decisions on where my money goes instead of letting the government decide": 2188,
# "lifestyle_charity_reasons.Ensure a place for diversity of cultures and beliefs": 2189,
# "lifestyle_charity_reasons.Build ties across communities": 2190,
# "lifestyle_charity_reasons.Other": 2191,
# "lifestyle_charity_practices.I work for a charity organization": 2192,
# "lifestyle_charity_practices.I volunteer what help I can (without relying on any training or professional skills)": 2193,
# "lifestyle_charity_practices.I volunteer professional help (relying on my training or professional skills)": 2194,
# "lifestyle_charity_practices.I donate to charity from time to time": 2195,
# "lifestyle_charity_practices.I have recurring donations set up": 2196,
# "lifestyle_charity_practices.Other": 2197,
# "lifestyle_charity_imp.Fighting diseases": 2198,
# "lifestyle_charity_imp.Fighting social inequality": 2199,
# "lifestyle_charity_imp.Helping animals": 2200,
# "lifestyle_charity_imp.Caring for the environment": 2201,
# "lifestyle_charity_imp.Preventing abuse (such as family abuse)": 2202,
# "lifestyle_charity_imp.Helping children": 2203,
# "lifestyle_charity_imp.Helping the disadvantaged (such as the homeless)": 2204,
# "lifestyle_charity_imp.Other": 2205
# },
# "lifestyle_charity_reasons": {
# "lifestyle_charity_reasons.Provide for the basic needs of the very poor": 2180,
# "lifestyle_charity_reasons.Give the disadvantaged a way to help themselves": 2181,
# "lifestyle_charity_reasons.Give others some of the opportunities you\u2019ve enjoyed": 2182,
# "lifestyle_charity_reasons.Address the fundamental problems of our world": 2183,
# "lifestyle_charity_reasons.Provide services that governments can\u2019t or won\u2019t": 2184,
# "lifestyle_charity_reasons.Make my community a better place to live in": 2185,
# "lifestyle_charity_reasons.Support the positive efforts of my friends, colleagues, or family": 2186,
# "lifestyle_charity_reasons.Make the world a better place to live in": 2187,
# "lifestyle_charity_reasons.Make decisions on where my money goes instead of letting the government decide": 2188,
# "lifestyle_charity_reasons.Ensure a place for diversity of cultures and beliefs": 2189,
# "lifestyle_charity_reasons.Build ties across communities": 2190,
# "lifestyle_charity_reasons.Other": 2191
# },
# "lifestyle_charity_practices": {
# "lifestyle_charity_practices.I work for a charity organization": 2192,
# "lifestyle_charity_practices.I volunteer what help I can (without relying on any training or professional skills)": 2193,
# "lifestyle_charity_practices.I volunteer professional help (relying on my training or professional skills)": 2194,
# "lifestyle_charity_practices.I donate to charity from time to time": 2195,
# "lifestyle_charity_practices.I have recurring donations set up": 2196,
# "lifestyle_charity_practices.Other": 2197
# },
# "lifestyle_charity_imp": {
# "lifestyle_charity_imp.Fighting diseases": 2198,
# "lifestyle_charity_imp.Fighting social inequality": 2199,
# "lifestyle_charity_imp.Helping animals": 2200,
# "lifestyle_charity_imp.Caring for the environment": 2201,
# "lifestyle_charity_imp.Preventing abuse (such as family abuse)": 2202,
# "lifestyle_charity_imp.Helping children": 2203,
# "lifestyle_charity_imp.Helping the disadvantaged (such as the homeless)": 2204,
# "lifestyle_charity_imp.Other": 2205
# },
# "lifestyle_hobbies": {
# "lifestyle_hobbies.I have no spare time for hobbies": 388,
# "lifestyle_hobbies.Programming": 389,
# "lifestyle_hobbies.Sports (doing)": 390,
# "lifestyle_hobbies.Sports (watching)": 391,
# "lifestyle_hobbies.Video games": 392,
# "lifestyle_hobbies.Reading": 393,
# "lifestyle_hobbies.Watching TV": 394,
# "lifestyle_hobbies.Spending time with family": 395,
# "lifestyle_hobbies.Fishing / Hunting": 396,
# "lifestyle_hobbies.Gardening": 397,
# "lifestyle_hobbies.Listening to music": 398,
# "lifestyle_hobbies.Traveling": 399,
# "lifestyle_hobbies.Sleeping": 400,
# "lifestyle_hobbies.Socializing": 401,
# "lifestyle_hobbies.Music (playing)": 402,
# "lifestyle_hobbies.Board games": 403,
# "lifestyle_hobbies.Art": 404,
# "lifestyle_hobbies.Writing": 405,
# "lifestyle_hobbies.Other": 406
# },
# "lifestyle_pet": {
# "lifestyle_pet.None": 2223,
# "lifestyle_pet.Cat": 2224,
# "lifestyle_pet.Dog": 2225,
# "lifestyle_pet.Fish": 2226,
# "lifestyle_pet.Bird": 2227,
# "lifestyle_pet.Snake": 2228,
# "lifestyle_pet.Insect / Spider": 2229,
# "lifestyle_pet.Guinea pig / Hamster": 2230,
# "lifestyle_pet.Rat / Mouse": 2231,
# "lifestyle_pet.Other": 2232
# },
# "lifestyle_egift": {
# "lifestyle_egift": 2233
# },
# "pull_requests": {
# "pull_requests": 450
# },
# "code_yrs": {
# "code_yrs": 75
# },
# "ruby_front_js": {
# "ruby_front_js.I don\u2019t use such frameworks with Ruby on Rails_": 1066,
# "ruby_front_js.React": 1067,
# "ruby_front_js.Angular": 1068,
# "ruby_front_js.AngularJS": 1069,
# "ruby_front_js.Vue_js": 1070,
# "ruby_front_js.Other": 1071
# },
# "cpp_err_report_mthds": {
# "cpp_err_report_mthds.Exceptions (throw, try, catch)": 628,
# "cpp_err_report_mthds.Numeric error codes (e_g_, errc, error_code, HRESULT)": 629,
# "cpp_err_report_mthds.Success/failure result class types (e_g_, Boost_Expected, Boost_Outcome)": 630
# },
# "php_how_debug": {
# "php_how_debug": 844
# },
# "php_async_libs": {
# "php_async_libs.ReactPHP": 858,
# "php_async_libs.Amp": 859,
# "php_async_libs.Swoole": 860,
# "php_async_libs.I don\u2019t use any": 861,
# "php_async_libs.Other": 862
# },
# "php_run_apps_prod": {
# "php_run_apps_prod.php-fpm": 863,
# "php_run_apps_prod.mod_php": 864,
# "php_run_apps_prod.Other process manager (RoadRunner, php-pm, etc_)": 865,
# "php_run_apps_prod.Serverless (AWS Lambda, Azure Functions, GCP Functions, etc_)": 866,
# "php_run_apps_prod.I have no idea": 867,
# "php_run_apps_prod.Other": 868
# },
# "php_how_evolve": {
# "php_how_evolve": 905
# },
# "php_miss_ftrs": {
# "php_miss_ftrs": 906
# },
# "mooc_platform": {
# "mooc_platform.edX": 2010,
# "mooc_platform.Coursera": 2011,
# "mooc_platform.Khan Academy": 2012,
# "mooc_platform.Udemy": 2013,
# "mooc_platform.Stepik": 2014,
# "mooc_platform.Canvas": 2015,
# "mooc_platform.FutureLearn": 2016,
# "mooc_platform.Udacity": 2017,
# "mooc_platform.CodeAcademy": 2018,
# "mooc_platform.DataCamp": 2019,
# "mooc_platform.Egghead": 2020,
# "mooc_platform.Pluralsight": 2021,
# "mooc_platform.JavaRush": 2022,
# "mooc_platform.The Open University": 2023,
# "mooc_platform.SWAYAM": 2024,
# "mooc_platform.Stanford Lagunita": 2025,
# "mooc_platform.Mir\u00edadaX": 2026,
# "mooc_platform.Other": 2027
# },
# "team_tools_exp": {
# "team_tools_exp.Jira": 427,
# "team_tools_exp.YouTrack": 428,
# "team_tools_exp.Other": 429,
# "team_tools_exp.Redmine": 430,
# "team_tools_exp.GitLab Issue Board": 431,
# "team_tools_exp.Asana": 432,
# "team_tools_exp.Wrike": 433,
# "team_tools_exp.Microsoft TFS / Visual Studio Team Services": 434,
# "team_tools_exp.Trello": 435,
# "team_tools_exp.GitHub Issues": 436
# },
# "team_tools_use": {
# "team_tools_use.Confluence": 335,
# "team_tools_use.Monday_com": 336,
# "team_tools_use.YouTrack": 337,
# "team_tools_use.Redmine": 338,
# "team_tools_use.GitLab Issue Board": 339,
# "team_tools_use.Asana": 340,
# "team_tools_use.Wrike": 341,
# "team_tools_use.Microsoft TFS / Visual Studio Team Services": 342,
# "team_tools_use.Trello": 343,
# "team_tools_use.GitHub Issues": 344,
# "team_tools_use.GitLab": 345,
# "team_tools_use.Azure DevOps": 346,
# "team_tools_use.Assembla": 347,
# "team_tools_use.Phabricator": 348,
# "team_tools_use.Basecamp": 349,
# "team_tools_use.Bitrix24": 350,
# "team_tools_use.FogBugz": 351,
# "team_tools_use.Notion": 352,
# "team_tools_use.Jira Software": 353,
# "team_tools_use.Jira Core": 354,
# "team_tools_use.Jira Align": 355,
# "team_tools_use.Targetprocess": 356,
# "team_tools_use.Zoho Sprints": 357,
# "team_tools_use.Rally Software (CA Agile Central)": 358,
# "team_tools_use.Microsoft Project": 359,
# "team_tools_use.Custom tool": 360,
# "team_tools_use.Other": 361
# },
# "bigdata_platform_use": {
# "bigdata_platform_use.No specific platform": 1636,
# "bigdata_platform_use.Databricks": 1637,
# "bigdata_platform_use.Cloudera Data Platform": 1638,
# "bigdata_platform_use.Qubole": 1639,
# "bigdata_platform_use.Zeppelin": 1640,
# "bigdata_platform_use.Google Collab": 1641,
# "bigdata_platform_use.Microsoft Azure HDInsight": 1642,
# "bigdata_platform_use.Google AI Platform": 1643,
# "bigdata_platform_use.Other": 1644
# },
# "bigdata_spark_version_use": {
# "bigdata_spark_version_use.None": 1658,
# "bigdata_spark_version_use.2_4": 1659,
# "bigdata_spark_version_use.2_3\u00a0": 1660,
# "bigdata_spark_version_use.2_0 - 2_2": 1661,
# "bigdata_spark_version_use.Custom distribution of spark": 1662,
# "bigdata_spark_version_use.Other": 1663
# },
# "bigdata_tools_use": {
# "bigdata_tools_use.None": 1645,
# "bigdata_tools_use.Apache Spark": 1646,
# "bigdata_tools_use.Apache Kafka": 1647,
# "bigdata_tools_use.Apache Samza": 1648,
# "bigdata_tools_use.Apache Flink": 1649,
# "bigdata_tools_use.Apache Hadoop/MapReduce": 1650,
# "bigdata_tools_use.Apache Hive": 1651,
# "bigdata_tools_use.Apache Pig": 1652,
# "bigdata_tools_use.Apache Tez": 1653,
# "bigdata_tools_use.Apache Beam": 1654,
# "bigdata_tools_use.Dask": 1655,
# "bigdata_tools_use.Jupyter": 1656,
# "bigdata_tools_use.Other": 1657
# },
# "bigdata_where_hosted": {
# "bigdata_where_hosted": 1664
# },
# "db_how_long": {
# "db_how_long": 1468
# },
# "db_debug_stored_proc": {
# "db_debug_stored_proc": 1469
# },
# "db_have_tests": {
# "db_have_tests": 1470
# },
# "db_keep_scripts_vcs": {
# "db_keep_scripts_vcs": 1471
# },
# "db_connections": {
# "db_connections": 1472
# },
# "db_do_comm": {
# "db_do_comm": 1473
# },
# "db_n_rows": {
# "db_n_rows": 1474
# },
# "mcrsrvc_design_approaches": {
# "mcrsrvc_design_approaches.None": 1597,
# "mcrsrvc_design_approaches.Actor systems": 1598,
# "mcrsrvc_design_approaches.CQRS": 1599,
# "mcrsrvc_design_approaches.Monolith with web front-end": 1600,
# "mcrsrvc_design_approaches.Microservices": 1601,
# "mcrsrvc_design_approaches.Service-Oriented Architecture": 1602,
# "mcrsrvc_design_approaches.Reactive streams": 1603,
# "mcrsrvc_design_approaches.Other": 1604
# },
# "mcrsrvc_parts_communicate": {
# "mcrsrvc_parts_communicate.None": 1605,
# "mcrsrvc_parts_communicate.Cross-platform RPC (gRPC, Apache Thrift)": 1606,
# "mcrsrvc_parts_communicate.Custom TCP/UDP communication": 1607,
# "mcrsrvc_parts_communicate.GraphQL": 1608,
# "mcrsrvc_parts_communicate.Message Queue": 1609,
# "mcrsrvc_parts_communicate.Remoting (RMI, JMX)": 1610,
# "mcrsrvc_parts_communicate.REST": 1611,
# "mcrsrvc_parts_communicate.RPC over HTTP": 1612,
# "mcrsrvc_parts_communicate.SOAP": 1613,
# "mcrsrvc_parts_communicate.Stream Processing": 1614,
# "mcrsrvc_parts_communicate.WebSocket": 1615,
# "mcrsrvc_parts_communicate.Other": 1616
# },
# "mcrsrvc_code_or_spec": {
# "mcrsrvc_code_or_spec": 1617
# },
# "mcrsrvc_how_declare": {
# "mcrsrvc_how_declare.I don't document APIs": 1618,
# "mcrsrvc_how_declare.GraphQL": 1619,
# "mcrsrvc_how_declare.Open API (Swagger)": 1620,
# "mcrsrvc_how_declare.RAML": 1621,
# "mcrsrvc_how_declare.Wiki system": 1622,
# "mcrsrvc_how_declare.WSDL": 1623,
# "mcrsrvc_how_declare.Other": 1624
# },
# "mcrsrvc_store_api_spec": {
# "mcrsrvc_store_api_spec": 1625
# },
# "mcrsrvc_how_vcs": {
# "mcrsrvc_how_vcs": 1626
# },
# "pairprog_tools_remote": {
# "pairprog_tools_remote.None": 2133,
# "pairprog_tools_remote.Remote desktop": 2134,
# "pairprog_tools_remote.Video call with screen sharing": 2135,
# "pairprog_tools_remote.Editor / IDE with collaboration feature": 2136,
# "pairprog_tools_remote.Other": 2137
# },
# "pairprog_ide": {
# "pairprog_ide.Visual Studio Code LiveShare": 2138,
# "pairprog_ide.Visual Studio LiveShare": 2139,
# "pairprog_ide.Atom Teletype": 2140,
# "pairprog_ide.SublimeText RemoteCollab": 2141,
# "pairprog_ide.Other": 2142
# },
# "pairprog_how_cloud": {
# "pairprog_how_cloud.I don't use cloud services during development": 2143,
# "pairprog_how_cloud.I don't build applications locally, I am using remote machine in the cloud": 2144,
# "pairprog_how_cloud.I develop applications with source code stored in the cloud": 2145,
# "pairprog_how_cloud.I am debugging applications running in the cloud": 2146,
# "pairprog_how_cloud.Other": 2147
# },
# "pairprog_why_cloud": {
# "pairprog_why_cloud.My local machine is not powerful enough for builds / development": 2148,
# "pairprog_why_cloud.It is hard to reproduce the application environment for local development": 2149,
# "pairprog_why_cloud.Specific hardware is installed on the remote machine": 2150,
# "pairprog_why_cloud.Information security reasons": 2151,
# "pairprog_why_cloud.Data that I'm working with is stored in the cloud": 2152,
# "pairprog_why_cloud.Other": 2153
# },
# "r_version": {
# "r_version": 1391
# },
# "r_distrib": {
# "r_distrib.R base (CRAN)": 1392,
# "r_distrib.Microsoft Open R (MRAN)": 1393,
# "r_distrib.MS SQL Server R Services": 1394,
# "r_distrib.Oracle R Enterprise": 1395,
# "r_distrib.I don't know": 1396,
# "r_distrib.Other": 1397
# },
# "r_ide": {
# "r_ide.RStudio": 1398,
# "r_ide.PyCharm": 1399,
# "r_ide.Visual Studio": 1400,
# "r_ide.Visual Studio Code": 1401,
# "r_ide.Jupyter Notebook": 1402,
# "r_ide.Other": 1403
# },
# "r_what_for": {
# "r_what_for.Educational purposes": 1404,
# "r_what_for.Data analysis": 1405,
# "r_what_for.Programming of parsers/scrapers/ETL scripts etc_": 1406,
# "r_what_for.Machine learning": 1407,
# "r_what_for.Other": 1408
# },
# "r_do_libs": {
# "r_do_libs": 1409
# },
# "r_use_libs": {
# "r_use_libs.None": 1410,
# "r_use_libs.arules": 1411,
# "r_use_libs.caret": 1412,
# "r_use_libs.data_table": 1413,
# "r_use_libs.devtools": 1414,
# "r_use_libs.dplyr": 1415,
# "r_use_libs.dtplyr": 1416,
# "r_use_libs.e1071": 1417,
# "r_use_libs.gbm": 1418,
# "r_use_libs.ggplot2": 1419,
# "r_use_libs.glmnet": 1420,
# "r_use_libs.htmlwidgets": 1421,
# "r_use_libs.igraph": 1422,
# "r_use_libs.kernlab": 1423,
# "r_use_libs.nnet": 1424,
# "r_use_libs.packrat": 1425,
# "r_use_libs.plotly": 1426,
# "r_use_libs.randomForest": 1427,
# "r_use_libs.RCPP": 1428,
# "r_use_libs.rpart": 1429,
# "r_use_libs.shiny": 1430,
# "r_use_libs.SparkR": 1431,
# "r_use_libs.stringi": 1432,
# "r_use_libs.stringr": 1433,
# "r_use_libs.XGBoost": 1434,
# "r_use_libs.Other": 1435
# },
# "r_most_used_libs": {
# "r_most_used_libs": 1436
# },
# "r_code_form": {
# "r_code_form.pure _R files": 1437,
# "r_code_form.Rmarkdown (_Rmd files)": 1438,
# "r_code_form.shiny applications": 1439,
# "r_code_form.R code inside some specific environment (other programming languages, databases, tools)": 1440,
# "r_code_form.Other": 1441
# },
# "r_execute": {
# "r_execute.Local machine": 1442,
# "r_execute.Server (virtual machine)": 1443,
# "r_execute.Cluster": 1444,
# "r_execute.Cloud service": 1445,
# "r_execute.Other": 1446
# },
# "swiftoc_plan_catalyst": {
# "swiftoc_plan_catalyst": 1077
# },
# "swiftoc_plan_spm": {
# "swiftoc_plan_spm": 1105
# },
# "swiftoc_ide": {
# "swiftoc_ide": 1124
# },
# "edu_type_of_inst": {
# "edu_type_of_inst": 2031
# },
# "edu_degree": {
# "edu_degree": 2032
# },
# "edu_major_sub": {
# "edu_major_sub": 2033
# },
# "devops_cryptocur": {
# "devops_cryptocur": 1721
# },
# "laptop_or_desktop": {
# "laptop_or_desktop": 1755
# },
# "bigdata_stat_libs": {
# "bigdata_stat_libs.None": 1628,
# "bigdata_stat_libs.Statistica": 1629,
# "bigdata_stat_libs.SPSS": 1630,
# "bigdata_stat_libs.Spreadsheet editor (Microsoft Excel, OpenOffice Calc, Google Sheets, etc_)": 1631,
# "bigdata_stat_libs.Stata": 1632,
# "bigdata_stat_libs.SAS": 1633,
# "bigdata_stat_libs.Tableau": 1634,
# "bigdata_stat_libs.Other": 1635
# },
# "space_use_dashboards": {
# "space_use_dashboards": 1763
# },
# "rust_missed_ftrs": {
# "rust_missed_ftrs.None": 1278,
# "rust_missed_ftrs.Docker Support": 1279,
# "rust_missed_ftrs.WebAssembly Debugging": 1280,
# "rust_missed_ftrs.REPL": 1281,
# "rust_missed_ftrs.Database Frameworks Support": 1282,
# "rust_missed_ftrs.AWS Lambda Support": 1283,
# "rust_missed_ftrs.Cross-language Navigation and Refactorings": 1284,
# "rust_missed_ftrs.Embedded Development Support": 1285,
# "rust_missed_ftrs.Remote Development Support": 1286,
# "rust_missed_ftrs.Other": 1287
# },
# "work_day_start": {
# "work_day_start": 2132
# },
# "rust_how_debug": {
# "rust_how_debug": 1288
# },
# "edu_pl": {
# "edu_pl.None": 2034,
# "edu_pl.Haskell": 2035,
# "edu_pl.Python": 2036,
# "edu_pl.Java": 2037,
# "edu_pl.C++": 2038,
# "edu_pl.C#": 2039,
# "edu_pl.C": 2040,
# "edu_pl.PHP": 2041,
# "edu_pl.Pascal": 2042,
# "edu_pl.Kotlin": 2043,
# "edu_pl.JavaScript": 2044,
# "edu_pl.R": 2045,
# "edu_pl.Other": 2046
# },
# "edu_tools_adviced": {
# "edu_tools_adviced.RStudio": 2047,
# "edu_tools_adviced.IntelliJ IDEA": 2048,
# "edu_tools_adviced.Android Studio": 2049,
# "edu_tools_adviced.Visual Studio": 2050,
# "edu_tools_adviced.Xcode": 2051,
# "edu_tools_adviced.PhpStorm": 2052,
# "edu_tools_adviced.WebStorm": 2053,
# "edu_tools_adviced.PyCharm": 2054,
# "edu_tools_adviced.Vi / Vim": 2055,
# "edu_tools_adviced.Sublime Text": 2056,
# "edu_tools_adviced.Atom": 2057,
# "edu_tools_adviced.Visual Studio Code": 2058,
# "edu_tools_adviced.Notepad++": 2059,
# "edu_tools_adviced.CLion": 2060,
# "edu_tools_adviced.Eclipse": 2061,
# "edu_tools_adviced.NetBeans": 2062,
# "edu_tools_adviced.QtCreator": 2063,
# "edu_tools_adviced.Emacs": 2064,
# "edu_tools_adviced.JetBrains Rider": 2065,
# "edu_tools_adviced.Gedit": 2066,
# "edu_tools_adviced.IPython/Jupyter Notebook": 2067,
# "edu_tools_adviced.Other": 2068
# },
# "activities_kinds": {
# "activities_kinds.None": 23,
# "activities_kinds.Academic Research": 24,
# "activities_kinds.Coding / Programming": 25,
# "activities_kinds.Code Reviewing": 26,
# "activities_kinds.Testing": 27,
# "activities_kinds.System Design": 28,
# "activities_kinds.Graphics Design / Art": 29,
# "activities_kinds.Infrastructure Development / DevOps": 30,
# "activities_kinds.System Administration": 31,
# "activities_kinds.Deployment": 32,
# "activities_kinds.Business Intelligence": 33,
# "activities_kinds.Data Analysis": 34,
# "activities_kinds.Data Engineering": 35,
# "activities_kinds.Machine Learning": 36,
# "activities_kinds.Teaching Programming": 37,
# "activities_kinds.People Management": 38,
# "activities_kinds.Product Management": 39,
# "activities_kinds.Technical Writing": 40,
# "activities_kinds.UX/UI Design/Research": 41,
# "activities_kinds.Other": 42
# },
# "target_platforms": {
# "target_platforms.I don't develop anything": 43,
# "target_platforms.Desktop": 44,
# "target_platforms.Mobile": 45,
# "target_platforms.Web (Back-end)": 46,
# "target_platforms.Web (Front-end)": 47,
# "target_platforms.Consoles (Xbox / PlayStation / Nintendo etc_)": 48,
# "target_platforms.Server / Infrastructure": 49,
# "target_platforms.IoT / Embedded": 50,
# "target_platforms.WebAssembly": 51,
# "target_platforms.Other - Write In (Required)": 52
# },
# "sw_types_developed": {
# "sw_types_developed.I don\u2019t develop anything": 53,
# "sw_types_developed.Augmented Reality / Virtual Reality": 54,
# "sw_types_developed.Business Intelligence / Data Science / Machine Learning": 55,
# "sw_types_developed.Blockchain": 56,
# "sw_types_developed.Database / Data Storage": 57,
# "sw_types_developed.Entertainment": 58,
# "sw_types_developed.Fintech": 59,
# "sw_types_developed.Games": 60,
# "sw_types_developed.Hardware": 61,
# "sw_types_developed.Home Automation": 62,
# "sw_types_developed.IT Infrastructure": 63,
# "sw_types_developed.Libraries / Frameworks": 64,
# "sw_types_developed.Programming Tools": 65,
# "sw_types_developed.Security": 66,
# "sw_types_developed.System Software": 67,
# "sw_types_developed.Utilities (small apps for small tasks)": 68,
# "sw_types_developed.Websites": 69,
# "sw_types_developed.Other": 70
# },
# "java_sw_developed": {
# "java_sw_developed.None": 530,
# "java_sw_developed.Other": 531,
# "java_sw_developed.Augmented Reality / Virtual Reality": 532,
# "java_sw_developed.Business Intelligence / Data Science / Machine Learning": 533,
# "java_sw_developed.Blockchain": 534,
# "java_sw_developed.Database / Data Storage": 535,
# "java_sw_developed.Entertainment": 536,
# "java_sw_developed.Fintech": 537,
# "java_sw_developed.Games": 538,
# "java_sw_developed.Hardware": 539,
# "java_sw_developed.Home Automation": 540,
# "java_sw_developed.IT Infrastructure": 541,
# "java_sw_developed.Libraries / Frameworks": 542,
# "java_sw_developed.Programming Tools": 543,
# "java_sw_developed.Security": 544,
# "java_sw_developed.System Software": 545,
# "java_sw_developed.Utilities (small apps for small tasks)": 546,
# "java_sw_developed.Websites": 547,
# "java_sw_developed.Other_882": 548
# },
# "c_sw_developed": {
# "c_sw_developed.None": 585,
# "c_sw_developed.Other": 586,
# "c_sw_developed.Business Intelligence / Data Science / Machine Learning": 587,
# "c_sw_developed.Database / Data Storage": 588,
# "c_sw_developed.Entertainment": 589,
# "c_sw_developed.Fintech": 590,
# "c_sw_developed.Games": 591,
# "c_sw_developed.Hardware": 592,
# "c_sw_developed.Home Automation": 593,
# "c_sw_developed.IT Infrastructure": 594,
# "c_sw_developed.Libraries / Frameworks": 595,
# "c_sw_developed.Programming Tools": 596,
# "c_sw_developed.Security": 597,
# "c_sw_developed.System Software": 598,
# "c_sw_developed.Utilities (small apps for small tasks)": 599,
# "c_sw_developed.Other_884": 600
# },
# "cpp_sw_developed": {
# "cpp_sw_developed.None": 649,
# "cpp_sw_developed.Other": 650,
# "cpp_sw_developed.Augmented Reality / Virtual Reality": 651,
# "cpp_sw_developed.Business Intelligence / Data Science / Machine Learning": 652,
# "cpp_sw_developed.Blockchain": 653,
# "cpp_sw_developed.Database / Data Storage": 654,
# "cpp_sw_developed.Entertainment": 655,
# "cpp_sw_developed.Fintech": 656,
# "cpp_sw_developed.Games": 657,
# "cpp_sw_developed.Hardware": 658,
# "cpp_sw_developed.Home Automation": 659,
# "cpp_sw_developed.IT Infrastructure": 660,
# "cpp_sw_developed.Libraries / Frameworks": 661,
# "cpp_sw_developed.Programming Tools": 662,
# "cpp_sw_developed.Security": 663,
# "cpp_sw_developed.System Software": 664,
# "cpp_sw_developed.Utilities (small apps for small tasks)": 665,
# "cpp_sw_developed.Websites": 666,
# "cpp_sw_developed.Other_886": 667
# },
# "mobile_ndevs_ios_andoid": {
# "mobile_ndevs_ios_andoid": 1494
# },
# "mobile_apps_functionality": {
# "mobile_apps_functionality": 1495
# },
# "mobile_apps_components": {
# "mobile_apps_components.Networking": 1496,
# "mobile_apps_components.State and Navigation Management": 1497,
# "mobile_apps_components.Data Storage": 1498,
# "mobile_apps_components.Security": 1499,
# "mobile_apps_components.Computations": 1500,
# "mobile_apps_components.Media (Image, Video, Audio)": 1501,
# "mobile_apps_components.Payments": 1502,
# "mobile_apps_components.ML": 1503,
# "mobile_apps_components.File I/O": 1504,
# "mobile_apps_components.Data Synchronization": 1505,
# "mobile_apps_components.Other": 1506,
# "mobile_apps_components.None of the Above": 1507
# },
# "mobile_UI_native_imp": {
# "mobile_UI_native_imp": 1508
# },
# "mobbile_UI_perfom_imp": {
# "mobbile_UI_perfom_imp": 1509
# },
# "scala_use_dotty": {
# "scala_use_dotty": 1196
# },
# "scala_tools": {
# "scala_tools.None": 1188,
# "scala_tools.Scoverage": 1189,
# "scala_tools.Scalafmt": 1190,
# "scala_tools.Scalafix": 1191,
# "scala_tools.Scapegoat": 1192,
# "scala_tools.Scalastyle": 1193,
# "scala_tools.Wart Remover": 1194,
# "scala_tools.Other": 1195
# },
# "go_sw_peveloped": {
# "go_sw_peveloped.None": 1340,
# "go_sw_peveloped.Other": 1341,
# "go_sw_peveloped.Business Intelligence / Data Science / Machine Learning": 1342,
# "go_sw_peveloped.Blockchain": 1343,
# "go_sw_peveloped.Database / Data Storage": 1344,
# "go_sw_peveloped.Entertainment": 1345,
# "go_sw_peveloped.Fintech": 1346,
# "go_sw_peveloped.Games": 1347,
# "go_sw_peveloped.Home Automation": 1348,
# "go_sw_peveloped.IT Infrastructure": 1349,
# "go_sw_peveloped.Libraries / Frameworks": 1350,
# "go_sw_peveloped.Programming Tools": 1351,
# "go_sw_peveloped.Security": 1352,
# "go_sw_peveloped.System Software": 1353,
# "go_sw_peveloped.Utilities (small apps for small tasks)": 1354,
# "go_sw_peveloped.Websites": 1355,
# "go_sw_peveloped.Other_895": 1356
# },
# "kotlin_jb_libs": {
# "kotlin_jb_libs.None": 962,
# "kotlin_jb_libs.I don\u2019t know": 963,
# "kotlin_jb_libs.Kodein DI": 964,
# "kotlin_jb_libs.kotlin-wrappers/kotlin-react": 965,
# "kotlin_jb_libs.kotlin-wrappers/kotlin-css": 966,
# "kotlin_jb_libs.kotlin-wrappers/*": 967,
# "kotlin_jb_libs.kotlinx_coroutines": 968,
# "kotlin_jb_libs.kotlinx_html": 969,
# "kotlin_jb_libs.kotlinx_dom": 970,
# "kotlin_jb_libs.kotlinx_serialization": 971,
# "kotlin_jb_libs.kotlin_test": 972,
# "kotlin_jb_libs.Ktor": 973,
# "kotlin_jb_libs.Exposed": 974,
# "kotlin_jb_libs.Other": 975
# },
# "kotlin_libs": {
# "kotlin_libs.None": 976,
# "kotlin_libs.I don't know": 977,
# "kotlin_libs.KotlinTest": 978,
# "kotlin_libs.RxKotlin": 979,
# "kotlin_libs.TornadoFX": 980,
# "kotlin_libs.mockito-kotlin": 981,
# "kotlin_libs.Jackson": 982,
# "kotlin_libs.ktlint": 983,
# "kotlin_libs.detekt": 984,
# "kotlin_libs.Spek": 985,
# "kotlin_libs.Spring/Spring Boot": 986,
# "kotlin_libs.RxBinding": 987,
# "kotlin_libs.RxJava": 988,
# "kotlin_libs.Okio": 989,
# "kotlin_libs.MockK": 990,
# "kotlin_libs.Kodein DI": 991,
# "kotlin_libs.Arrow": 992,
# "kotlin_libs.Gson": 993,
# "kotlin_libs.KotlinPoet": 994,
# "kotlin_libs.Koin": 995,
# "kotlin_libs.MvRx": 996,
# "kotlin_libs.Timber": 997,
# "kotlin_libs.SQLDelight": 998,
# "kotlin_libs.Other": 999
# },
# "mcrsrvc_do": {
# "mcrsrvc_do": 1596
# },
# "cpp_move_11": {
# "cpp_move_11": 607
# },
# "cpp_move_14": {
# "cpp_move_14": 608
# },
# "cpp_move_17": {
# "cpp_move_17": 609
# },
# "cpp_move_98": {
# "cpp_move_98": 606
# },
# "php_sw_developed": {
# "php_sw_developed.None": 907,
# "php_sw_developed.Other": 908,
# "php_sw_developed.Business Intelligence / Data Science / Machine Learning": 909,
# "php_sw_developed.Blockchain": 910,
# "php_sw_developed.Database / Data Storage": 911,
# "php_sw_developed.Entertainment": 912,
# "php_sw_developed.Fintech": 913,
# "php_sw_developed.Games": 914,
# "php_sw_developed.Home Automation": 915,
# "php_sw_developed.IT Infrastructure": 916,
# "php_sw_developed.Libraries / Frameworks": 917,
# "php_sw_developed.Programming Tools": 918,
# "php_sw_developed.Security": 919,
# "php_sw_developed.System Software": 920,
# "php_sw_developed.Utilities (small apps for small tasks)": 921,
# "php_sw_developed.Websites": 922,
# "php_sw_developed.Other_908": 923
# },
# "kotlin_ide": {
# "kotlin_ide": 1020
# },
# "space_tools_blogging": {
# "space_tools_blogging.None": 1938,
# "space_tools_blogging.Basecamp": 1939,
# "space_tools_blogging.Notion": 1940,
# "space_tools_blogging.Microsoft SharePoint": 1941,
# "space_tools_blogging.Jive": 1942,
# "space_tools_blogging.Facebook Workplace": 1943,
# "space_tools_blogging.Confluence": 1944,
# "space_tools_blogging.BlogIn": 1945,
# "space_tools_blogging.Custom tool": 1946,
# "space_tools_blogging.Other": 1947
# },
# "space_tools_calendar": {
# "space_tools_calendar.None": 1884,
# "space_tools_calendar.Basecamp": 1885,
# "space_tools_calendar.Igloo": 1886,
# "space_tools_calendar.Notion": 1887,
# "space_tools_calendar.Microsoft SharePoint": 1888,
# "space_tools_calendar.Jive": 1889,
# "space_tools_calendar.Facebook Workplace": 1890,
# "space_tools_calendar.Confluence": 1891,
# "space_tools_calendar.Custom tool": 1892,
# "space_tools_calendar.Other": 1893
# },
# "space_tools_employee": {
# "space_tools_employee.None": 1964,
# "space_tools_employee.Bitrix24": 1965,
# "space_tools_employee.Microsoft SharePoint": 1966,
# "space_tools_employee.OneDirectory": 1967,
# "space_tools_employee.Pingboard": 1968,
# "space_tools_employee.Custom tool": 1969,
# "space_tools_employee.Other": 1970
# },
# "space_tooling_stack": {
# "space_tooling_stack.We do not have any specific stack": 1971,
# "space_tooling_stack.Microsoft (Azure DevOps / Microsoft TFS / VSTS, Office 365)": 1972,
# "space_tooling_stack.Atlassian (Jira, Bitbucket, Crucible, Confluence, Trello)": 1973,
# "space_tooling_stack.Google (Cloud, G Suite, Hangouts)": 1974,
# "space_tooling_stack.Zoho (Calendar, Sprints, Cliq)": 1975,
# "space_tooling_stack.Amazon (AWS, CodeStar / CodeCommit / CodePipeline)": 1976,
# "space_tooling_stack.GitHub": 1977,
# "space_tooling_stack.GitLab": 1978,
# "space_tooling_stack.Assembla": 1979,
# "space_tooling_stack.Other": 1980
# },
# "space_tools_code_review": {
# "space_tools_code_review.None": 1818,
# "space_tools_code_review.GitHub": 1819,
# "space_tools_code_review.GitLab": 1820,
# "space_tools_code_review.Azure DevOps": 1821,
# "space_tools_code_review.Phabricator": 1822,
# "space_tools_code_review.Bitbucket": 1823,
# "space_tools_code_review.Upsource": 1824,
# "space_tools_code_review.Gerrit": 1825,
# "space_tools_code_review.AWS CodeCommit / AWS CodeStar": 1826,
# "space_tools_code_review.Crucible": 1827,
# "space_tools_code_review.Collaborator": 1828,
# "space_tools_code_review.Helix Swarm": 1829,
# "space_tools_code_review.Review Board": 1830,
# "space_tools_code_review.Custom tool": 1831,
# "space_tools_code_review.Other": 1832
# },
# "space_tools_repo": {
# "space_tools_repo.None": 1797,
# "space_tools_repo.GitHub": 1798,
# "space_tools_repo.GitLab": 1799,
# "space_tools_repo.Azure DevOps": 1800,
# "space_tools_repo.Assembla": 1801,
# "space_tools_repo.Helix Core Version Control": 1802,
# "space_tools_repo.Codefresh": 1803,
# "space_tools_repo.JFrog Artifactory": 1804,
# "space_tools_repo.Sonatype Nexus Repository": 1805,
# "space_tools_repo.Docker Hub": 1806,
# "space_tools_repo.Docker Trusted Registry": 1807,
# "space_tools_repo.AWS Elastic Container Registry": 1808,
# "space_tools_repo.Azure Container Registry": 1809,
# "space_tools_repo.Google Container Registry": 1810,
# "space_tools_repo.RedHat Quay": 1811,
# "space_tools_repo.ProGet": 1812,
# "space_tools_repo.Archiva": 1813,
# "space_tools_repo.NuGet": 1814,
# "space_tools_repo.npm": 1815,
# "space_tools_repo.Custom tool": 1816,
# "space_tools_repo.Other": 1817
# },
# "country": {
# "country": 77
# },
# "space_tools_ci_2": {
# "space_tools_ci_2.None": 1776,
# "space_tools_ci_2.Jenkins / Hudson": 1777,
# "space_tools_ci_2.Other": 1778,
# "space_tools_ci_2.TeamCity": 1779,
# "space_tools_ci_2.Bamboo": 1780,
# "space_tools_ci_2.Microsoft Team Foundation Build": 1781,
# "space_tools_ci_2.Travis CI": 1782,
# "space_tools_ci_2.Codeship": 1783,
# "space_tools_ci_2.CircleCI": 1784,
# "space_tools_ci_2.GoCD": 1785,
# "space_tools_ci_2.Gitlab CI": 1786,
# "space_tools_ci_2.AppVeyor": 1787,
# "space_tools_ci_2.Drone": 1788,
# "space_tools_ci_2.Semaphore CI": 1789,
# "space_tools_ci_2.GitHub Actions": 1790,
# "space_tools_ci_2.Azure DevOps (former Microsoft TFS / Visual Studio Team Services)": 1791,
# "space_tools_ci_2.AWS CodePipeline / AWS CodeStar": 1792,
# "space_tools_ci_2.Google Cloud Build": 1793,
# "space_tools_ci_2.Bitbucket Pipelines": 1794,
# "space_tools_ci_2.Custom tool": 1795,
# "space_tools_ci_2.Other_931": 1796
# },
# "space_tools_projmanag_2": {
# "space_tools_projmanag_2.None": 1833,
# "space_tools_projmanag_2.Confluence": 1834,
# "space_tools_projmanag_2.Other": 1835,
# "space_tools_projmanag_2.Monday_com": 1836,
# "space_tools_projmanag_2.YouTrack": 1837,
# "space_tools_projmanag_2.Redmine": 1838,
# "space_tools_projmanag_2.GitLab Issue Board": 1839,
# "space_tools_projmanag_2.Asana": 1840,
# "space_tools_projmanag_2.Wrike": 1841,
# "space_tools_projmanag_2.Microsoft TFS / Visual Studio Team Services": 1842,
# "space_tools_projmanag_2.Trello": 1843,
# "space_tools_projmanag_2.GitHub Issues": 1844,
# "space_tools_projmanag_2.GitLab": 1845,
# "space_tools_projmanag_2.Azure DevOps": 1846,
# "space_tools_projmanag_2.Phabricator": 1847,
# "space_tools_projmanag_2.Basecamp": 1848,
# "space_tools_projmanag_2.Bitrix24": 1849,
# "space_tools_projmanag_2.Notion": 1850,
# "space_tools_projmanag_2.Jira Software": 1851,
# "space_tools_projmanag_2.Jira Core": 1852,
# "space_tools_projmanag_2.Jira Align": 1853,
# "space_tools_projmanag_2.Targetprocess": 1854,
# "space_tools_projmanag_2.Zoho Sprints": 1855,
# "space_tools_projmanag_2.Rally Software (CA Agile Central)": 1856,
# "space_tools_projmanag_2.Microsoft Project": 1857,
# "space_tools_projmanag_2.Custom tool": 1858,
# "space_tools_projmanag_2.Other_932": 1859
# },
# "space_tools_vc_2": {
# "space_tools_vc_2.None": 1764,
# "space_tools_vc_2.Other - Write In (Required)": 1765,
# "space_tools_vc_2.GitHub": 1766,
# "space_tools_vc_2.GitLab": 1767,
# "space_tools_vc_2.Bitbucket": 1768,
# "space_tools_vc_2.Perforce": 1769,
# "space_tools_vc_2.Amazon CodeCommit": 1770,
# "space_tools_vc_2.SourceForge": 1771,
# "space_tools_vc_2.Azure DevOps (former Microsoft TFS / Visual Studio Team Services)": 1772,
# "space_tools_vc_2.Phabricator": 1773,
# "space_tools_vc_2.Custom tool": 1774,
# "space_tools_vc_2.Other": 1775
# },
# "is_employed": {
# "is_employed": 2313
# },
# "primary": {
# "primary_proglang.Java": 112,
# "primary_proglang.C": 113,
# "primary_proglang.C++": 114,
# "primary_proglang.Python": 115,
# "primary_proglang.C#": 116,
# "primary_proglang.PHP": 117,
# "primary_proglang.JavaScript": 118,
# "primary_proglang.Ruby": 119,
# "primary_proglang.Kotlin": 120,
# "primary_proglang.Swift": 121,
# "primary_proglang.Objective-C": 122,
# "primary_proglang.Scala": 123,
# "primary_proglang.Go": 124,
# "primary_proglang.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 125,
# "primary_proglang.Rust": 126,
# "primary_proglang.Haskell": 127,
# "primary_proglang.HTML / CSS": 128,
# "primary_proglang.Elixir": 129,
# "primary_proglang.Visual Basic": 130,
# "primary_proglang.R": 131,
# "primary_proglang.TypeScript": 132,
# "primary_proglang.Dart": 133,
# "primary_proglang.Clojure / ClojureScript": 134,
# "primary_proglang.Delphi": 135,
# "primary_proglang.Groovy": 136,
# "primary_proglang.Perl": 137,
# "primary_proglang.Assembly": 138,
# "primary_proglang.Matlab": 139,
# "primary_proglang.Lua": 140,
# "primary_proglang.Shell scripting languages(bash/shell/powershell)": 141,
# "primary_proglang.Julia": 142,
# "primary_proglang.F#": 143,
# "primary_proglang.Other": 144,
# "primary.Java": 2244,
# "primary.JavaScript": 2246,
# "primary.Kotlin": 2248,
# "primary.Python": 2250,
# "primary.C#": 2252,
# "primary.HTML / CSS": 2254,
# "primary.TypeScript": 2256,
# "primary.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 2258,
# "primary.Rust": 2260,
# "primary.C++": 2262,
# "primary.C": 2264,
# "primary.Go": 2266,
# "primary.Dart": 2268,
# "primary.Haskell": 2270,
# "primary.PHP": 2272,
# "primary.Shell scripting languages(bash/shell/powershell)": 2274,
# "primary.Swift": 2276,
# "primary.Scala": 2278,
# "primary.Matlab": 2280,
# "primary.R": 2282,
# "primary.Ruby": 2284,
# "primary.Elixir": 2286,
# "primary.Other": 2288,
# "primary.Lua": 2290,
# "primary.Visual Basic": 2292,
# "primary.Julia": 2294,
# "primary.Groovy": 2297,
# "primary.Clojure / ClojureScript": 2299,
# "primary.Objective-C": 2301,
# "primary.Delphi": 2303,
# "primary.Assembly": 2305,
# "primary.F#": 2307,
# "primary.Perl": 2310,
# "primary.I don't use programming languages": 2312
# },
# "rank": {
# "rank.Java": 2245,
# "rank.C#": 2247,
# "rank.PHP": 2249,
# "rank.Python": 2251,
# "rank.JavaScript": 2253,
# "rank.Kotlin": 2255,
# "rank.Scala": 2257,
# "rank.C++": 2259,
# "rank.TypeScript": 2261,
# "rank.Swift": 2263,
# "rank.Go": 2265,
# "rank.C": 2267,
# "rank.HTML / CSS": 2269,
# "rank.Matlab": 2271,
# "rank.Ruby": 2273,
# "rank.Shell scripting languages(bash/shell/powershell)": 2275,
# "rank.Objective-C": 2277,
# "rank.Rust": 2279,
# "rank.Clojure / ClojureScript": 2281,
# "rank.Other": 2283,
# "rank.SQL(PL/SQL, T-SQL and otherprogramming extensions of SQL)": 2285,
# "rank.Elixir": 2287,
# "rank.Haskell": 2289,
# "rank.Delphi": 2291,
# "rank.Perl": 2293,
# "rank.Lua": 2295,
# "rank.COBOL": 2296,
# "rank.Groovy": 2298,
# "rank.Dart": 2300,
# "rank.Visual Basic": 2302,
# "rank.R": 2304,
# "rank.F#": 2306,
# "rank.Assembly": 2308,
# "rank.Julia": 2309,
# "rank.I don't use programming languages": 2311
# },
# "main": {
# "main": 2314
# },
# "source": {
# "source": 2315
# }
# }
|
# This is an autogenerated file
#
# Generated with ThrusterControl
from __future__ import annotations
from typing import Dict, Sequence, List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.thrustercontrol import ThrusterControlBlueprint
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
from sima.simo.dpthrustertype import DPThrusterType
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from sima.simo.ithruster import IThruster
class ThrusterControl(MOAO):
"""
Keyword arguments
-----------------
name : str
(default "")
description : str
(default "")
_id : str
(default "")
scriptableValues : List[ScriptableValue]
thruster : IThruster
Thruster to be controlled
thrusterControlType : DPThrusterType
Thruster control type
direction : float
Direction of thruster. Start value for azimuthing thrusters.(default 0.0)
minForce : float
Minimum force(default 0.0)
maxForce : float
Maximum force(default 0.0)
maxForceRate : float
Maximum rate of force(default 0.0)
maxAngleRate : float
Maximum rate of azimuth angle(default 0.0)
x : float
X-coordinate of thruster in body system(default 0.0)
y : float
Y-coordinate of thruster in body system(default 0.0)
z : float
Z-coordinate of thruster in body system(default 0.0)
"""
def __init__(self , name="", description="", _id="", thrusterControlType=DPThrusterType.FIXED, direction=0.0, minForce=0.0, maxForce=0.0, maxForceRate=0.0, maxAngleRate=0.0, x=0.0, y=0.0, z=0.0, **kwargs):
super().__init__(**kwargs)
self.name = name
self.description = description
self._id = _id
self.scriptableValues = list()
self.thruster = None
self.thrusterControlType = thrusterControlType
self.direction = direction
self.minForce = minForce
self.maxForce = maxForce
self.maxForceRate = maxForceRate
self.maxAngleRate = maxAngleRate
self.x = x
self.y = y
self.z = z
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return ThrusterControlBlueprint()
@property
def name(self) -> str:
""""""
return self.__name
@name.setter
def name(self, value: str):
"""Set name"""
self.__name = str(value)
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = str(value)
@property
def _id(self) -> str:
""""""
return self.___id
@_id.setter
def _id(self, value: str):
"""Set _id"""
self.___id = str(value)
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
        if not isinstance(value, Sequence):
            raise Exception("Expected sequence, but was %s" % type(value))
self.__scriptableValues = value
@property
def thruster(self) -> IThruster:
"""Thruster to be controlled"""
return self.__thruster
@thruster.setter
def thruster(self, value: IThruster):
"""Set thruster"""
self.__thruster = value
@property
def thrusterControlType(self) -> DPThrusterType:
"""Thruster control type"""
return self.__thrusterControlType
@thrusterControlType.setter
def thrusterControlType(self, value: DPThrusterType):
"""Set thrusterControlType"""
self.__thrusterControlType = value
@property
def direction(self) -> float:
"""Direction of thruster. Start value for azimuthing thrusters."""
return self.__direction
@direction.setter
def direction(self, value: float):
"""Set direction"""
self.__direction = float(value)
@property
def minForce(self) -> float:
"""Minimum force"""
return self.__minForce
@minForce.setter
def minForce(self, value: float):
"""Set minForce"""
self.__minForce = float(value)
@property
def maxForce(self) -> float:
"""Maximum force"""
return self.__maxForce
@maxForce.setter
def maxForce(self, value: float):
"""Set maxForce"""
self.__maxForce = float(value)
@property
def maxForceRate(self) -> float:
"""Maximum rate of force"""
return self.__maxForceRate
@maxForceRate.setter
def maxForceRate(self, value: float):
"""Set maxForceRate"""
self.__maxForceRate = float(value)
@property
def maxAngleRate(self) -> float:
"""Maximum rate of azimuth angle"""
return self.__maxAngleRate
@maxAngleRate.setter
def maxAngleRate(self, value: float):
"""Set maxAngleRate"""
self.__maxAngleRate = float(value)
@property
def x(self) -> float:
"""X-coordinate of thruster in body system"""
return self.__x
@x.setter
def x(self, value: float):
"""Set x"""
self.__x = float(value)
@property
def y(self) -> float:
"""Y-coordinate of thruster in body system"""
return self.__y
@y.setter
def y(self, value: float):
"""Set y"""
self.__y = float(value)
@property
def z(self) -> float:
"""Z-coordinate of thruster in body system"""
return self.__z
@z.setter
def z(self, value: float):
"""Set z"""
self.__z = float(value)
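# Minimal usage sketch (illustrative, not part of the generated file; assumes a
# concrete IThruster instance `thr` is available):
#   tc = ThrusterControl(name="bow", maxForce=1.0e5, x=10.0)
#   tc.thruster = thr
#   tc.thrusterControlType = DPThrusterType.FIXED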
|
#
# Some utility functions for the pipelines
# Author: cdeck3r
#
import os
import collections.abc
# src: https://stackoverflow.com/a/2158532
def flatten(l):
for el in l:
        # collections.Iterable moved to collections.abc (and was removed from collections in Python 3.10)
        if isinstance(el, collections.abc.Iterable) and not isinstance(el, (str, bytes)):
yield from flatten(el)
else:
yield el
def flatten_list(l):
return list(flatten(l))
# Creates the os path
# from concatenated YAML sequences
def os_path(p):
return os.path.join(*flatten_list(p))
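# Illustrative usage (hypothetical values):
#   flatten_list([1, [2, [3, 4]], 'ab'])  -> [1, 2, 3, 4, 'ab']
#   os_path([['etc', 'conf'], 'app.yml']) -> 'etc/conf/app.yml' (on POSIX)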
|
# coding=utf-8
from __future__ import unicode_literals
from collections import OrderedDict
from .. import Provider as ColorProvider
localized = True
# Reference
# https://th.wikipedia.org/wiki/รายชื่อสี
# on 2019-10-20
class Provider(ColorProvider):
all_colors = OrderedDict((
('สีดำ', '#000000'),
('สีน้ำเงินเขียว', '#0095B6'),
('สีน้ำเงินม่วง', '#8A2BE2'),
('สีทองแดง', '#CD7F32'),
('สีน้ำตาล', '#964B00'),
('สีกาแฟ', '#6F4E37'),
('สีทอง', '#FFD700'),
('สีเทา', '#808080'),
('สีเขียว', '#00FF00'),
('สีหยก', '#00A86B'),
('สีส้ม', '#FFA500'),
('สีส้มแดง', '#FF4500'),
('สีออร์คิด', '#DA70D6'),
('สีชมพู', '#FFC0CB'),
('สีม่วง', '#800080'),
('สีแดง', '#FF0000'),
('สีเงิน', '#C0C0C0'),
('สีขาว', '#FFFFFF'),
('สีเหลือง', '#FFFF00'),
))
safe_colors = (
'สีดำ', 'สีน้ำตาล', 'สีทอง', 'สีเขียว',
'สีส้ม', 'สีชมพู', 'สีม่วง', 'สีเงิน', 'สีแดง',
        'สีขาว', 'สีเหลือง',
)
|
import yaml
import json
import Core
import markdown
import re
__yaml_frontmatter__ = r'(---)(.*?)\1'
class YamlParser(Core.Parser):
accepts = ["yaml", "yml"]
def interpret(self, file_contents):
        return yaml.safe_load(file_contents)  # safe_load avoids executing arbitrary YAML tags
class JsonParser(Core.Parser):
accepts = ["json", "js"]
def interpret(self, file_contents):
return json.loads(file_contents)
class MarkdownParser(Core.Parser):
accepts = ["md", "markdown"]
delimiter = "---"
def interpret(self, file_contents):
data = {
"meta": {},
"data": {}
}
        matter = re.search(__yaml_frontmatter__, file_contents, re.MULTILINE | re.DOTALL)
        if matter:
            data["meta"] = yaml.safe_load(matter.group().replace(self.delimiter, ''))
            data["data"]["content"] = file_contents.replace(matter.group(), "")
        else:
            print("Couldn't load markdown.")
return data
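# Illustrative frontmatter split (assumes Core.Parser needs no constructor arguments):
#   doc = MarkdownParser().interpret("---\ntitle: Demo\n---\n# Body")
#   doc["meta"]            -> {'title': 'Demo'}
#   doc["data"]["content"] -> '\n# Body'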
|
"""
*Integer*
An integer of music.
Implemented with int.
"""
from abc import ABCMeta
from music.__model._number import Number
__all__ = ["Integer"]
class Integer(
    int,
    Number,
    metaclass=ABCMeta,
):
    # int is immutable, so the value must be bound in __new__ rather than __init__;
    # the Python 2 style "__metaclass__" attribute is ignored in Python 3, hence the
    # metaclass keyword above (assuming Number's metaclass is compatible with ABCMeta).
    def __new__(
        cls,
        n: int,
    ):
        return super(Integer, cls).__new__(cls, n)
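# Illustrative check (assumes Number defines no unimplemented abstract methods,
# so Integer is instantiable):
#   Integer(3) + 4 == 7   # behaves like a plain int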
|
"""
@file
@brief Actions definition.
"""
from .api_extension import AutoAction
from .gactions import MLAction, MLActionFunction
class MLModel(MLActionFunction):
"""
Base class for every machine learned model
"""
def __init__(self, input, output_names=None, name=None):
"""
@param name a name which identifies the action
@param input an action which produces the output result
@param output_names names for the outputs
"""
MLActionFunction.__init__(self, input, name=name)
self.output_names = output_names
@property
def InputNames(self):
"""
Returns the input names
"""
        variables = self.enumerate_variables()  # avoid shadowing the vars() builtin
        res = list(sorted(set(v.name_var for v in variables)))
if len(res) == 0:
raise ValueError( # pragma: no cover
"At least one variable must be defined.")
return res
@property
def OutputNames(self):
"""
Returns the output names
"""
return self.output_names
@AutoAction.cache
def _export_json(self, hook=None, result_name=None):
js = MLAction._export_json(self, hook=hook)
js.update({"input_names": self.InputNames,
"output_names": self.OutputNames})
return js
@AutoAction.cache
def _export_c(self, hook=None, result_name=None):
if result_name is None:
result_name = "pred"
return MLActionFunction._export_c(self, hook=hook, result_name=result_name)
|
# 2021Jan23 Brockman Neopixel Demo
from adafruit_circuitplayground import cp
import time
Rdefault = 0
Gdefault = 0
Bdefault = 0
R = Rdefault
G = Gdefault
B = Bdefault
cp.pixels.brightness = 0.03
pressed_count = 0
pressed_prior = False
pressed = False
pressed_time = 0
while True:
    # either button pressed?
if cp.button_a or cp.button_b:
pressed = True
else:
pressed = False
# which button pressed, for colorization
if cp.button_a:
R = 255
else:
R = Rdefault
if cp.button_b:
G = 255
else:
G = Gdefault
# Detect state change
if not pressed_prior and pressed:
pressed_count += 1
pressed_time = time.monotonic()
# reset state to latest value
pressed_prior = pressed
elapsed = time.monotonic() - pressed_time
pixel_count = pressed_count % 10
# pixel_count = int(elapsed*5.0 % 10) + 1
if not pressed:
pixel_count = 0
print(f"pressed_count: {pressed_count} pixel_count = {pixel_count}")
if pressed:
cp.pixels[pixel_count-1] = (R,G,B)
else:
# cp.pixels.fill((R, G, B))
        pass  # no-op: leave the pixels unchanged when nothing is pressed
time.sleep(0.05)
|
# layerindex-web - Branch-based URL definitions
#
# Copyright (C) 2013-2016 Intel Corporation
#
# Licensed under the MIT license, see COPYING.MIT for details
#
# SPDX-License-Identifier: MIT
from django.conf.urls import *
from django.views.defaults import page_not_found
from django.urls import reverse_lazy
from layerindex.views import LayerListView, RecipeSearchView, MachineSearchView, DistroSearchView, ClassSearchView, LayerDetailView, edit_layer_view, delete_layer_view, edit_layernote_view, delete_layernote_view, RedirectParamsView, DuplicatesView, LayerUpdateDetailView, layer_export_recipes_csv_view, comparison_update_view
urlpatterns = [
url(r'^$',
RedirectParamsView.as_view(permanent=False), {'redirect_name': 'layer_list'}),
url(r'^layers/$',
LayerListView.as_view(
template_name='layerindex/layers.html'),
name='layer_list'),
url(r'^layer/(?P<slug>[-\w]+)/$',
LayerDetailView.as_view(
template_name='layerindex/detail.html'),
name='layer_item'),
url(r'^layer/(?P<slug>[-\w]+)/recipes/csv/$',
layer_export_recipes_csv_view,
name='layer_export_recipes_csv'),
url(r'^recipes/$',
RecipeSearchView.as_view(
template_name='layerindex/recipes.html'),
name='recipe_search'),
url(r'^machines/$',
MachineSearchView.as_view(
template_name='layerindex/machines.html'),
name='machine_search'),
url(r'^distros/$',
DistroSearchView.as_view(
template_name='layerindex/distros.html'),
name='distro_search'),
url(r'^classes/$',
ClassSearchView.as_view(
template_name='layerindex/classes.html'),
name='class_search'),
url(r'^edit/(?P<slug>[-\w]+)/$', edit_layer_view, {'template_name': 'layerindex/editlayer.html'}, name="edit_layer"),
url(r'^duplicates/$',
DuplicatesView.as_view(
template_name='layerindex/duplicates.html'),
name='duplicates'),
url(r'^comparison_update/$',
comparison_update_view,
name='comparison_update'),
]
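# Illustrative reverse lookup (hypothetical slug; path shown relative to wherever
# this urlconf is mounted):
#   from django.urls import reverse
#   reverse('layer_item', args=['meta-example'])  # -> '/layer/meta-example/'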
|
#!/usr/bin/env python3
"""
A unival tree (which stands for "universal value") is a tree where all
nodes under it have the same value.
Given the root to a binary tree, count the number of unival subtrees.
For example, the following tree has 5 unival subtrees:
0
/ \
1 0
/ \
1 0
/ \
1 1
"""
class TreeNode:
def __init__(self, val) -> None:
self.val = val
self.left = self.right = None
def count_unival_subtree(root: TreeNode) -> int:
    count, _ = count_unival_subtree_helper(root)  # the unival flag of the root is not needed here
return count
# Helper functions
def count_unival_subtree_helper(root: TreeNode):
    if root is None:
return 0, True
left_count, is_left_unival = count_unival_subtree_helper(root.left)
right_count, is_right_unival = count_unival_subtree_helper(root.right)
total_count = left_count + right_count
if is_left_unival and is_right_unival:
if root.left and root.left.val != root.val:
return total_count, False
if root.right and root.right.val != root.val:
return total_count, False
return total_count + 1, True
return total_count, False
def is_unival(root: TreeNode) -> bool:
return is_unival_helper(root, root.val)
def is_unival_helper(root: TreeNode, value: int) -> bool:
    if root is None:
        return True
    if root.val != value:
        return False
    return is_unival_helper(root.left, value) and is_unival_helper(root.right, value)
if __name__ == "__main__":
root = TreeNode(0)
root.left = TreeNode(1)
root.right = TreeNode(0)
root.right.left = TreeNode(1)
root.right.right = TreeNode(0)
root.right.left.left = TreeNode(1)
root.right.left.right = TreeNode(1)
print(count_unival_subtree(root))
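    # Expected output: 5, matching the example tree in the module docstring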
|
from jikanpy import Jikan
from jikanpy.exceptions import APIException
import pickle
from collections import Counter, defaultdict
from time import sleep
from assoc import AssocMap
# VERBOSITY GLOBALS: changing these has no influence on performance, only the human-readable
# output of the program
NRESULTS_SEARCH = 10
NRESULTS_COMMON = 10
# PARAMETRIC GLOBALS: changing these can dramatically alter performance
API_RESET_TIME = 60 # seconds
RETRY_DELAY = 10 # seconds
MAX_ITERATIONS = API_RESET_TIME // RETRY_DELAY
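# With the defaults above (60 s reset window, 10 s retry delay) this allows at most 6 retries per window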
def intercept(st1, sep, st2):
if len(st1) == 0:
return st2
if len(st2) == 0:
return st1
return st1+sep+st2
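# e.g. intercept("a", "->", "b") == "a->b"; if either side is empty, the other is returned unchanged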
class MemoCache():
def __init__(self, j):
self.api = j
self.q_anime = dict()
self.q_person = dict()
self.q_related = defaultdict(set)
self.q_assoc = AssocMap()
def query_anime_chars(self, malid):
return self.__query_anime(malid)[1]
def query_anime(self, malid):
return self.__query_anime(malid)[0]
# spincycle: performs an action, attempting a fixed
# number of retries after fixed wait intervals,
# giving up if the maximum allowed retries have been
# exhausted
def __spincycle(self, f, max_iter=MAX_ITERATIONS):
cur_iter = 0
while True:
try:
x = f()
if cur_iter > 0:
print("MemoCache.__spincycle: succeeded after %d retries..." % (cur_iter))
return x
except APIException as api_err:
print("MemoCache.__spincycle: APIException caught ({0})".format(api_err))
if cur_iter >= max_iter:
print("MemoCache.__spincycle: no retries remaining (limit = %d)" % (max_iter))
raise api_err
else:
cur_iter += 1
print("MemoCache.__spincycle: Will try again in 10 seconds (retry %d/%d)" % (cur_iter, max_iter))
sleep(RETRY_DELAY)
def query_person(self, malid):
if malid not in self.q_person:
try:
x = self.__spincycle(lambda: self.api.person(int(malid)))
self.q_person[malid] = x
                # __scan_assocs is a generator; consume it so the id/title records are actually stored
                list(self.__scan_assocs([role['anime'] for role in x['voice_acting_roles']]))
except APIException as api_err:
print("MemoCache.query_person: failed query of person <%d>" % (int(malid)))
return None
else:
x = self.q_person[malid]
return x
def search_anime(self, keyword, nresults=NRESULTS_SEARCH, cli_mode=False):
try:
response = self.__spincycle(lambda: self.api.search('anime', str(keyword)))
results = response['results']
            list(self.__scan_assocs(results))  # consume the generator for its caching side effects
ret = [(x['mal_id'], x['title']) for x in results][:nresults]
for i in range(len(ret)):
iden, title = ret[i]
if cli_mode:
print("%%%d:" % i, end=" ")
print ("`%s`: %d\n" % (title, iden))
yield iden
except APIException as api_err:
print('MemoCache.search_anime: API lookup failed for keyword <"%s">' % (keyword))
def get_title(self, malid):
try:
return self.q_assoc.lookup_by_id(malid)
except KeyError:
return self.query_anime(malid)['title']
def __query_anime(self, malid):
x = None
y = None
if malid in self.q_anime:
x, y = self.q_anime[malid]
if x is None or y is None:
print("MemoCache.__query_anime: Warn: cached result for <%d> encountered at least one query failure" % (malid))
raise ValueError
try:
x = self.__spincycle(lambda: self.api.anime(int(malid)))
y = self.__spincycle(lambda: self.api.anime(int(malid), extension='characters_staff'))
self.q_anime[malid] = (x, y)
self.__record(x)
except APIException as api_err:
print("MemoCache.__query_anime: API lookup failed for anime <%d>" % (int(malid)))
self.q_anime[malid] = (x, y)
raise api_err
return x, y
def __record(self, x):
if x is None:
return None
if 'mal_id' not in x:
return None
if 'type' in x and x['type'] == 'manga':
return None
if 'title' in x:
self.q_assoc.add_assoc(x['mal_id'], x['title'])
return 'title'
elif 'name' in x:
self.q_assoc.add_assoc(x['mal_id'], x['name'])
return 'name'
def __scan_assocs(self, xs):
for x in xs:
if x is not None:
lab = self.__record(x)
if lab is not None:
yield x['mal_id'], x[lab]
def related_deep(self, malid, init, msg=""):
init.add(malid)
msg = intercept(msg, "->", ("[%d]" % (malid)))
try:
anime = self.query_anime(malid)
except APIException as api_err:
print("MemoCache.related_deep: hierarchical query failed")
print("MemoCache.related_deep: (trace: %s)" % (msg))
# returning 0 as a marker of failure
return set([0])
except ValueError:
print("MemoCache.related_deep: Warn: cached query of anime <%d> indicates failure" % (malid))
else:
query = [x[0] for x in list(anime['related'].values())]
rel = set([i for i, t in self.__scan_assocs(query)])
blob = init.copy()
for i in rel:
if i not in blob:
                    blob |= self.related_deep(i, blob, msg)  # set.union returns a new set, so merge in place
return blob
def query_related(self, malid):
if malid in self.q_related:
x = self.q_related[malid]
if 0 in x:
print("MemoCache.query_related: Warn: cached result for <%d> encountered at least one query failure" % (malid))
else:
x = self.related_deep(malid, set())
for i in x:
self.q_related[i] = x
return x
def save(self):
with open("anime.dat", "w+b") as ani:
pickle.dump(self.q_anime, ani)
with open("person.dat", "w+b") as per:
pickle.dump(self.q_person, per)
with open("related.dat", "w+b") as rel:
pickle.dump(self.q_related, rel)
def restore(self, load_assocs=False):
try:
with open("anime.dat", "r+b") as ani:
self.q_anime = pickle.load(ani)
with open("person.dat", "r+b") as per:
self.q_person = pickle.load(per)
with open("related.dat", "r+b") as rel:
self.q_related = pickle.load(rel)
if load_assocs:
                list(self.__scan_assocs([x[0] for x in self.q_anime.values()]))  # consume the generator
except OSError as err:
print("OS error: {0}".format(err))
|
import sys
from random import randint
# import commands  # Python 2 only module; referenced solely by a commented-out debug call below
def printSpaces(i) :
for ii in range(i) :
sys.stdout.write(' ')
class naryNode3 :
def __init__(self, data):
self.data = data
self.parent = None
self.firstChild = None #this node has pointers to only the first/last child node, not direct child node any more
self.lastChild = None
self.childCount = 0
self.childIndices = []
self.next = None #this next/prev are sibling pointers
self.prev = None
#def __del__(self): #if __del__ is defined, this node will be 'freed' twice, by removeAll() and by gc
# print('del')
# self.removeAll()
def newNode(self, data):
return naryNode3(data)
def buildChildIndices(self):
if self.firstChild :
self.childIndices.clear()
node = self.firstChild
while node :
self.childIndices.append(node)
node.buildChildIndices()
node = node.next
def dump(self, level):
printSpaces(level * 4)
print(self.data, "childCnt=", len(self.childIndices))
if self.firstChild == None :
return
node = self.firstChild
while node:
node.dump(level+1)
node = node.next
def deleteChildNode(self, node):
if self.firstChild == None:
return
self.childCount = self.childCount - 1
#delete only node
if node == self.firstChild and node == self.lastChild:
self.firstChild = None
self.lastChild = None
return
#delete firstChild
if node == self.firstChild :
self.firstChild = self.firstChild.next
self.firstChild.prev = None
return
#delete lastChild
if node == self.lastChild :
self.lastChild = self.lastChild.prev
self.lastChild.next = None
return
#delete middleNode
        node.prev.next = node.next
        node.next.prev = node.prev
return
def addChildFirst(self, data):
node = self.newNode(data)
node.parent = self
self.childCount = self.childCount + 1
node.childCount = 0
node.next = None #this next/prev are sibling pointers
node.prev = None
if self.firstChild == None:
self.firstChild = node
self.lastChild = node
else:
node.next = self.firstChild
self.firstChild.prev = node
self.firstChild = node
return node
def addChildLast(self, data) :
node = self.newNode(data)
node.parent = self
self.childCount = self.childCount + 1
node.next = None #this next/prev are sibling pointers
node.prev = None
if self.lastChild == None:
self.firstChild = node
self.lastChild = node
else:
node.prev = self.lastChild
self.lastChild.next = node
self.lastChild = node
return node
def getRandomChildNode(self):
return self.getChildNode(randint(0, self.childCount-1))
def getNode(self, idx):
node = self
i = 0
while node:
if i == idx:
return node
i = i + 1
node = node.next
return None
def getChildNode(self, idx):
#commands.ODS(self.tp, self.status)
return self.firstChild.getNode(idx)
def removeAll(self): #this removes all inter-referencing of nodes
node = self.firstChild
while node:
next = node.next
node.removeAll()
node = next
#print('remove', self.data)
self.data = None
self.parent = None
self.firstChild = None
self.lastChild = None
self.childCount = 0
self.childIndices.clear()
self.next = None
self.prev = None
def findNodeInChildrenByData(self, data):
node = self.firstChild
if node == None:
return None
while node:
if data == node.data:
return node
node = node.next
return None
def createPath(self, data, *path):
currentNode = self
for pathName in path :
c = currentNode.findNodeInChildrenByData(pathName)
if c == None:
currentNode = currentNode.addChildFirst(pathName)
else :
currentNode = c
if currentNode.data == data :
return
c = currentNode.findNodeInChildrenByData(data)
if c == None :
currentNode.addChildFirst(data)
def findNodeByPathName(self, *path):
currentNode = self
for pathName in path :
currentNode = currentNode.findNodeInChildrenByData(pathName)
if currentNode == None:
return None
return currentNode
def dumpAsXml(self) :
print('<data>')
print(self.data)
print('</data>')
node = self.firstChild
while node:
print('<child>')
node.dumpAsXml()
print('</child>')
node = node.next
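# A minimal usage sketch of naryNode3 (illustrative only; the node data below
# is hypothetical). Build a small tree with createPath, look a node up by its
# path, dump the tree, then break the reference cycles explicitly.
if __name__ == '__main__':
    root = naryNode3('root')
    root.createPath('leaf_a', 'dir1', 'dir2')  # creates root/dir1/dir2/leaf_a
    root.createPath('leaf_b', 'dir1')          # creates root/dir1/leaf_b
    found = root.findNodeByPathName('dir1', 'dir2', 'leaf_a')
    print(found.data if found else 'not found')
    root.buildChildIndices()  # refresh the cached child index lists
    root.dump(0)
    root.removeAll()          # release inter-node references for gc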
|
"""
common functions that almost all route needs
"""
from functools import wraps
from flask import jsonify
from .. import db
def jsonify_wrapper(func):
"""
a decorator, jsonify every api return dict value.
example:
.. code-block:: python
@main.route('/api/search')
@jsonify_wrapper
def search():
# search db, get python object data
# data = dict(name="david")
return data
"""
@wraps(func)
def wrapper(*args, **kwargs):
return jsonify(func(*args, **kwargs))
return wrapper
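# A minimal usage sketch (hypothetical blueprint and route names): the
# decorator must sit below the route decorator so that Flask registers the
# wrapped, JSON-producing view function.
#   @main.route('/api/ping')
#   @jsonify_wrapper
#   def ping():
#       return {'status': 'ok'}  # the client receives {"status": "ok"} as JSON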
|
import os
import yaml
import time
import shutil
import torch
import random
import argparse
import numpy as np
import datetime
from torch.utils import data
import pickle
from loader.airsim_fsl_dataset import airsim_fsl_dataset
from loader.samplers import CategoriesSampler
from tools.utils import load_model
from trainer.trainer import *
from models.agent import *
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="config")
# about path
    parser.add_argument('--ph', type=int, default=1, choices=[0, 1], help='0: pre-train, 1: meta train/test')
parser.add_argument('--pretrain_dir', type=str, default='', help='path of models')
parser.add_argument('--is_seg', type=int, default=0, choices=[0, 1], help='classification|segmentation')
parser.add_argument('--model_dir', type=str, default='', help='path of models')
# about training
parser.add_argument("--config", type=str, default="configs/mrms_fsl.yml", help="Configuration file to use", )
parser.add_argument("--gpu", type=str, default="0", help="Used GPUs")
parser.add_argument('--bsize', type=int, default=2, help='batch size of tasks')
# about task
parser.add_argument('--way', type=int, default=3)
parser.add_argument('--shot', type=int, default=1)
parser.add_argument('--query', type=int, default=9, help='number of query image per class')
parser.add_argument('--test_episode', type=int, default=2000, help='number of testing episodes after training')
# solver
parser.add_argument('--solver', type=str, default='sinkhorn', choices=['opencv', 'qpth', 'sinkhorn'])
# recurrent
parser.add_argument('--loop', type=int, default=0)
parser.add_argument('--miter', type=int, default=10)
# SFC
parser.add_argument('--sfc_lr', type=float, default=0.05, help='learning rate of SFC')
parser.add_argument('--sfc_wd', type=float, default=0, help='weight decay for SFC weight')
parser.add_argument('--sfc_update_step', type=int, default=10, help='number of updating step of SFC')
parser.add_argument('--sfc_bs', type=int, default=4, help='batch size for finetuning sfc')
# Attention
parser.add_argument('--head', type=int, default=1)
# Use weight max to obtain the source and dst node weight
parser.add_argument('--max_weight', type=int, default=0)
args = parser.parse_args()
# Set the gpu
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
with open(args.config) as fp:
cfg = yaml.load(fp, Loader=yaml.FullLoader)
data_splits = ['val_split', 'test_split']
if args.ph == 0:
mode = 'pre_train'
        args.way = 6 - 1  # 5 foreground classes (6 classes minus background)
args.n_agent = args.way
elif args.ph == 1:
mode = 'meta'
args.way = 3
args.n_agent = args.way
# we assume n_agent==n_way
args.pre_train = (args.ph == 0)
args.mode = mode
# Setup seeds
torch.manual_seed(cfg.get("seed", 1337))
torch.cuda.manual_seed(cfg.get("seed", 1337))
np.random.seed(cfg.get("seed", 1337))
random.seed(cfg.get("seed", 1337))
# Setup device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Setup Dataloader
data_path = cfg["data"]["path"]
with open('configs/split_save_files.pkl', 'rb') as f:
split_data_files = pickle.load(f)
# print(split_data_files)
b_size = cfg["training"]["batch_size"]
n_worker = 4 if torch.cuda.is_available() else 1
assert args.head == 1
n_classes = 11
in_channels = 3
if cfg["model"]["arch"] == 'MIMOcom':
if args.shot == 1:
if args.solver == 'sinkhorn':
model = MIMOcom(n_classes=n_classes, n_way=args.way, n_shot=args.shot, n_query=args.query,
in_channels=in_channels,
solver=args.solver, image_size=cfg["data"]["img_rows"],
query_size=cfg["model"]["query_size"], key_size=cfg["model"]["key_size"],
is_seg=args.is_seg, miter=args.miter, n_head=args.head, max_weight=args.max_weight)
else:
print('Use MIMOcomEMD')
model = MIMOcomEMD(n_classes=n_classes, n_way=args.way, n_shot=args.shot, n_query=args.query,
in_channels=in_channels, mode=mode,
solver=args.solver, image_size=cfg["data"]["img_rows"],
query_size=cfg["model"]["query_size"], key_size=cfg["model"]["key_size"],
is_seg=args.is_seg, miter=args.miter, n_head=args.head)
else:
if args.solver == 'sinkhorn':
model = MIMOcom(n_classes=n_classes, n_way=args.way, n_shot=args.shot, n_query=args.query,
in_channels=in_channels, mode=mode,
solver=args.solver, image_size=cfg["data"]["img_rows"],
query_size=cfg["model"]["query_size"], key_size=cfg["model"]["key_size"],
is_seg=args.is_seg, miter=args.miter,
sfc_lr=args.sfc_lr, sfc_wd=args.sfc_wd, sfc_update_step=args.sfc_update_step,
sfc_bs=args.sfc_bs, n_head=args.head, max_weight=args.max_weight)
else:
print('Use MIMOcomEMD')
model = MIMOcomEMD(n_classes=n_classes, n_way=args.way, n_shot=args.shot, n_query=args.query,
in_channels=in_channels, mode=mode,
solver=args.solver, image_size=cfg["data"]["img_rows"],
query_size=cfg["model"]["query_size"], key_size=cfg["model"]["key_size"],
is_seg=args.is_seg, miter=args.miter, n_head=args.head)
else:
raise ValueError('Incorrect arch')
model = model.to(device)
# print(torch.isnan(model.Wg).any(),'---Wg11111')
##################
# resume training
##################
# assert len(args.model_dir) > 0
model_dir = args.model_dir
assert model_dir
if len(model_dir) > 0:
if model_dir.endswith('.pth'):
model_path = model_dir
else:
model_path = os.path.join(args.model_dir, 'best_model.pth')
    # load pre-trained weights (ph == 0) or resume from a checkpoint (ph == 1)
    load_model(model, model_path, strict=False)
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=255)
test_set = airsim_fsl_dataset(
split_data_files['test'],
is_transform=True,
split=cfg["data"]["test_split"],
img_size=(cfg["data"]["img_rows"], cfg["data"]["img_cols"]),
)
test_sampler = CategoriesSampler(test_set.all_labels, test_set.stat,
args.test_episode, args.way, args.shot + args.query)
testloader = data.DataLoader(test_set, batch_sampler=test_sampler, num_workers=n_worker, pin_memory=True)
args.split_label = test_set.split_label
trainer = Trainer_MIMOcom(cfg, args, args.model_dir, None, model, loss_fn, None, None, None, None, device)
trainer.evaluate(testloader)
print('Remember to copy best model to model/best_folder')
|
# -*- coding: utf-8 -*-
# python manage.py makemigrations your_app_label
# python manage.py migrate --fake-initial your_app_label
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from datetime import date, datetime
from django.conf import settings
class OweModel(models.Model):
"""List of splitted amount to each user under same topic
Attributes:
created_at (TYPE): Description
created_by (TYPE): Description
splitted_amount (TYPE): Description
splitted_user (TYPE): Description
updated_at (TYPE): Description
updated_by (TYPE): Description
"""
owed_amount = models.IntegerField()
owed_on = models.CharField(max_length=80, blank=True)
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
        related_name='sa_ledger_created',
on_delete=models.CASCADE
)
updated_by = models.ForeignKey(
settings.AUTH_USER_MODEL,
        related_name='sa_ledger_updated',
on_delete=models.CASCADE
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
|
"""Plot the surface pressure coefficient at final time step."""
from matplotlib import pyplot
import numpy
import pathlib
import petibmpy
import rodney
def get_pressure(simudir, timestep):
name = 'p' # name of the field variable to load
datadir = simudir / 'output'
# Load the gridlines from file.
filepath = datadir / 'grid.h5'
x, y = petibmpy.read_grid_hdf5(filepath, name)
# Load the field from file.
filepath = datadir / f'{timestep:0>7}.h5'
p = petibmpy.read_field_hdf5(filepath, name)
return (x, y), p
def compute_surface_pressure_coefficient(p, x, y):
# Define circle outside support region of delta function.
N = 500
dx = 1.5 / 90 # grid-spacing size in the uniform region
R = 0.5 + 3 * dx # radius 3 cells away from real boundary
theta = numpy.linspace(0.0, 2 * numpy.pi, num=N + 1)[:-1]
xc, yc = 0.0, 0.0
xb_ext, yb_ext = xc + R * numpy.cos(theta), yc + R * numpy.sin(theta)
# Interpolate the field on extended boundary.
pb = numpy.empty_like(xb_ext)
for i, (xbi, ybi) in enumerate(zip(xb_ext, yb_ext)):
pi = petibmpy.linear_interpolation(p, y, ybi)
pb[i] = petibmpy.linear_interpolation(pi, x, xbi)
# Compute the pressure coefficient.
rho = 1.0 # fluid density
U_inf = 1.0 # freestream speed
p_inf = 0.0 # far-away pressure
cp = (pb - p_inf) / (0.5 * rho * U_inf**2)
return theta, cp
def split_lower_upper(theta, cp):
mask = numpy.where((theta >= numpy.pi) & (theta < 2 * numpy.pi))[0]
theta_lower = theta[mask] % numpy.pi
cp_lower = cp[mask]
mask = numpy.where((theta >= 0.0) & (theta < numpy.pi))[0]
theta_upper = numpy.flip(numpy.pi - theta[mask])
cp_upper = numpy.flip(cp[mask])
return (dict(theta=theta_lower, cp=cp_lower),
dict(theta=theta_upper, cp=cp_upper))
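# A small worked example of the angle remapping above (hypothetical angles):
# a lower-surface point at theta = 3*pi/2 maps to 3*pi/2 % pi = pi/2, and an
# upper-surface point at theta = pi/4 maps to pi - pi/4 = 3*pi/4, so both
# curves share a common 0..pi axis (0..180 degrees after conversion).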
args = rodney.parse_command_line()
maindir = pathlib.Path(__file__).absolute().parents[1]
timestep = 5000 # final time-step index
label1 = r'500 markers ($\Delta s \approx 0.38 \Delta x$)'
simudir1 = maindir / '500_markers'
grid, p = get_pressure(simudir1, timestep)
theta, cp = compute_surface_pressure_coefficient(p, *grid)
lower1, upper1 = split_lower_upper(theta, cp)
label2 = r'189 markers ($\Delta s \approx \Delta x$)'
simudir2 = maindir / '189_markers'
grid, p = get_pressure(simudir2, timestep)
theta, cp = compute_surface_pressure_coefficient(p, *grid)
lower2, upper2 = split_lower_upper(theta, cp)
# Plot the distribution of the surface pressure coefficient.
pyplot.rc('font', family='serif', size=14)
fig, (ax1, ax2) = pyplot.subplots(ncols=2, figsize=(10.0, 4.0))
ax1.set_title(label1, fontsize=14)
ax1.set_xlabel(r'$\theta$')
ax1.set_ylabel('$C_p$')
ax1.plot(numpy.degrees(lower1['theta']), lower1['cp'],
label='lower surface')
ax1.plot(numpy.degrees(upper1['theta']), upper1['cp'],
label='upper surface', linestyle='--')
ax2.set_title(label2, fontsize=14)
ax2.set_xlabel(r'$\theta$')
ax2.set_ylabel('$C_p$')
ax2.plot(numpy.degrees(lower2['theta']), lower2['cp'],
label='lower surface')
ax2.plot(numpy.degrees(upper2['theta']), upper2['cp'],
label='upper surface', linestyle='--')
if args.extra_data:
# Load digitized values from Li et al. (2016).
theta_li, cp_li = rodney.lietal2016_load_cp(40)
ax1.scatter(theta_li, cp_li, label='Li et al. (2016)',
c='black', marker='s', s=10)
ax2.scatter(theta_li, cp_li, label='Li et al. (2016)',
c='black', marker='s', s=10)
for ax in (ax1, ax2):
ax.set_xlim(0.0, 180.0)
ax.set_ylim(-1.5, 1.5)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax2.legend(frameon=False)
fig.tight_layout()
if args.save_figures:
# Save the figure.
figdir = maindir / 'figures'
figdir.mkdir(parents=True, exist_ok=True)
filepath = figdir / f'cp_{timestep:0>7}.png'
fig.savefig(filepath, dpi=300, bbox_inches='tight')
if args.show_figures:
pyplot.show()
|
import os
import random
import numpy as np
from pysc2.env import sc2_env
from pysc2.lib import actions
from gym.spaces.discrete import Discrete
from sc2env.pysc2_util import register_map
from sc2env.representation import expand_pysc2_to_neural_input
MAP_NAME = 'SimpleTacticalEnvironment'
MAP_SIZE = 64
RGB_SCREEN_SIZE = 256
UNIT_ID_LIST = [
48, # marine
73, # zealot
105, # zergling
107, # hydra
109, # ultra
]
# A simple environment similar to SCAII-RTS Towers
# Follows the interface of OpenAI Gym environments
class SimpleTowersEnvironment():
def __init__(self):
self.sc2env = make_sc2env()
self.action_space = Discrete(self.actions())
def reset(self):
# Move the camera in any direction
# This runs the ResetEpisode trigger built into the map
action = actions.FUNCTIONS.move_camera([0, 0])
self.last_timestep = self.sc2env.step([action])[0]
state, reward, done, info = unpack_timestep(self.last_timestep)
return state
# Step: Choose which enemy to attack
def step(self, action):
if self.can_attack():
target = action_to_target(action)
sc2_action = actions.FUNCTIONS.Attack_minimap("now", target)
self.last_timestep = self.sc2env.step([sc2_action])[0]
else:
print('Cannot attack, taking no-op')
# Wait for a while
self.noop()
return unpack_timestep(self.last_timestep)
def noop(self):
sc2_action = actions.FUNCTIONS.no_op()
self.last_timestep = self.sc2env.step([sc2_action])[0]
def can_attack(self):
available_actions = self.last_timestep.observation.available_actions
return actions.FUNCTIONS.Attack_minimap.id in available_actions
def render(self):
import imutil
state, reward, done, info = unpack_timestep(self.last_timestep)
feature_map, feature_screen, rgb_map, rgb_screen = state
visual = np.concatenate([rgb_map, rgb_screen], axis=1)
imutil.show(visual, save=False)
def actions(self):
# Attack top/bottom left/right corners
return 4
def layers(self):
# One-hot unit ids plus metadata
return len(UNIT_ID_LIST) + 6
# The four actions tell the army to move to
# one of the four corners of the map
def action_to_target(action_id):
x = random.random()
map_size = MAP_SIZE
padding = MAP_SIZE / 4
if action_id == 0:
return [padding + x, padding + x]
elif action_id == 1:
return [map_size - padding - x, padding + x]
elif action_id == 2:
return [map_size - padding - x, map_size - padding - x]
elif action_id == 3:
return [padding + x, map_size - padding - x]
# Create the low-level SC2Env object, which we wrap with
# a high level Gym-style environment
def make_sc2env():
env_args = {
'agent_interface_format': sc2_env.AgentInterfaceFormat(
feature_dimensions=sc2_env.Dimensions(
screen=(MAP_SIZE, MAP_SIZE),
minimap=(MAP_SIZE, MAP_SIZE)
),
rgb_dimensions=sc2_env.Dimensions(
screen=(RGB_SCREEN_SIZE, RGB_SCREEN_SIZE),
minimap=(RGB_SCREEN_SIZE, RGB_SCREEN_SIZE),
),
action_space=actions.ActionSpace.FEATURES,
),
'map_name': MAP_NAME,
        'step_mul': 170,  # 17 is ~1 action per second, so 170 is ~1 action per 10 seconds
}
maps_dir = os.path.join(os.path.dirname(__file__), '..', 'maps')
register_map(maps_dir, env_args['map_name'])
return sc2_env.SC2Env(**env_args)
# Convert the timestep into a Gym-style tuple
def unpack_timestep(timestep):
# The pysc2 representations include unit types and positions
feature_map = np.array(timestep.observation.feature_minimap)
feature_screen = np.array(timestep.observation.feature_screen)
# The neural representation is appropriate for input to a neural network
feature_screen_onehot = expand_pysc2_to_neural_input(feature_screen, UNIT_ID_LIST)
    # The RGB maps will be None if rendering is disabled (e.g. for faster training)
rgb_map = np.array(timestep.observation.get('rgb_minimap'))
rgb_screen = np.array(timestep.observation.get('rgb_screen'))
state = (feature_map, feature_screen_onehot, rgb_map, rgb_screen)
# For this game we use a simple reward: number of surviving friendly units
reward = int(timestep.observation.player['army_count'])
done = timestep.last()
# The info dict can include reward decompositions when available
info = {}
return state, reward, done, info
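# A minimal interaction sketch (illustrative only; assumes StarCraft II and the
# custom map are installed). The episode count and the random action choice
# below are arbitrary.
if __name__ == '__main__':
    env = SimpleTowersEnvironment()
    for episode in range(2):
        state = env.reset()
        done = False
        while not done:
            action = env.action_space.sample()  # pick a random corner to attack
            state, reward, done, info = env.step(action)
        print('episode', episode, 'final reward', reward)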
|
"""The SyncWorker is a single thread of execution that is responsible for
executing sync-OLD! commands.
The SyncWorker performs these steps in a loop:
1. Using DTServer, pop the next sync-OLD! command off of the queue.
2. Determine whether the OLD already exists and create it if it does not.
3. Fetch the last modified values for each resource in the local OLD.
4. Fetch the last modified values for each resource in the remote OLD.
5. Compute a diff in order to determine required updates, deletes and adds.
6. Fetch the remote resources that have been updated or added.
7. Mutate the local OLD's SQLite db so that it matches the remote leader.
8. Sleep for a time and then return to (1).
"""
import datetime
import json
import logging
import os
import shlex
import shutil
import subprocess
import threading
import time
from oldclient import OLDClient
import requests
import sqlalchemy as sqla
import dativetop.constants as c
logger = logging.getLogger(__name__)
DEFAULT_LOCAL_OLD_USERNAME = 'admin'
DEFAULT_LOCAL_OLD_PASSWORD = 'adminA_1'
def parse_datetime_string(datetime_string):
return datetime.datetime.strptime(datetime_string, '%Y-%m-%dT%H:%M:%S.%f')
def parse_date_string(date_string):
return datetime.datetime.strptime(date_string, '%Y-%m-%d').date()
def prepare_value_for_upsert(table_name, k, v):
if 'datetime_' in k and v:
try:
return parse_datetime_string(v)
except Exception as e:
logger.warning(
'Failed to parse value "%s" as datetime in table %s, column %s.',
v, table_name, k)
raise e
if 'date_' in k and v:
try:
return parse_date_string(v)
except Exception as e:
logger.warning(
'Failed to parse value "%s" as date in table %s, column %s.',
v, table_name, k)
raise e
return v
def prepare_row_for_upsert(table_name, row):
row = {k: prepare_value_for_upsert(table_name, k, v)
for k, v in row.items()}
if table_name == 'user':
del row['password']
del row['salt']
return row
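# A worked example of prepare_row_for_upsert (hypothetical row): columns whose
# names contain 'datetime_' or 'date_' are parsed into Python objects, and
# password/salt are stripped from rows destined for the 'user' table.
#   prepare_row_for_upsert(
#       'form', {'id': 1, 'datetime_modified': '2020-01-01T00:00:00.000000'})
#   => {'id': 1, 'datetime_modified': datetime.datetime(2020, 1, 1, 0, 0)}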
def pop_sync_old_command(dtserver):
try:
response = requests.put(f'{dtserver.url}sync_old_commands')
if response.status_code == 404:
logger.debug('No sync-OLD! messages currently on the queue')
return None
if response.status_code == 200:
return response.json()
logger.error(
'Received an unexpected response code %s when attempting to pop'
' the next sync-OLD! command.', response.status_code)
return None
except Exception:
msg = 'Failed to pop the next sync-OLD! command from DTServer'
logger.exception(msg)
return None
def complete_sync_old_command(dtserver, command):
try:
response = requests.delete(
f'{dtserver.url}sync_old_commands/{command["id"]}')
if response.status_code == 404:
logger.warning(
f'Failed to complete command {command["id"]}: it does not'
f' exist.')
return None
if response.status_code == 200:
return response.json()
logger.error(
'Received an unexpected response code %s when attempting to'
' complete sync-OLD! command %s.',
response.status_code,
command['id'])
return None
except Exception:
msg = f'Failed to complete sync-OLD! command {command["id"]}.'
logger.exception(msg)
return None
def handle_command(command, old_service):
"""
1. identify the OLD from the command
{'acked': True,
'id': 'cbcfb97d-6f72-4851-997d-173560c6173d',
'old_id': 'd9d5563d-a29a-46b3-85f2-6a3a1104f7fa'}
"""
return None
def fetch_old(dtserver, old_id):
try:
return requests.get(f'{dtserver.url}olds/{old_id}').json()
except Exception:
logger.exception('Failed to fetch OLD %s', old_id)
return None
def old_store_dir_exists(old):
old_store_dir = os.path.join(c.OLD_DIR, 'store', old['slug'])
return os.path.isdir(old_store_dir)
def can_authenticate_to_old(old):
old_client = OLDClient(old['url'])
try:
return old_client.login(
DEFAULT_LOCAL_OLD_USERNAME,
DEFAULT_LOCAL_OLD_PASSWORD)
except Exception as e:
logger.warning(
f'Exception of type {type(e)} when attempting to'
f' authenticate to the OLD')
return False
def authenticate_to_leader(old):
logger.debug(
f'checking if we can login to the leader OLD at {old["leader"]}'
f' using username {old["username"]} and password'
f' {old["password"]}')
old_client = OLDClient(old['leader'])
try:
logged_in = old_client.login(old['username'], old['password'])
if logged_in:
return old_client
return False
except Exception:
logger.exception(f'Failed to login to the leader OLD {old["leader"]}')
return False
def does_old_exist(old):
dir_exists = old_store_dir_exists(old)
if dir_exists:
can_auth = can_authenticate_to_old(old)
if can_auth:
return True
return False
def get_dative_servers(path):
with open(path) as filei:
return json.load(filei)
def write_dative_servers(servers, path):
with open(path, 'w') as fileo:
json.dump(servers, fileo)
def generate_old_dict(old):
return {'name': old['name'],
'type': 'OLD',
'url': old['url'],
'serverCode': None,
'corpusServerURL': None,
'website': 'http://www.onlinelinguisticdatabase.org'}
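# For reference, Dative's servers.json is a JSON list of dicts with the shape
# produced by generate_old_dict (sketch only; the values are hypothetical):
#   [{"name": "demo", "type": "OLD", "url": "http://127.0.0.1:5679/demo",
#     "serverCode": null, "corpusServerURL": null,
#     "website": "http://www.onlinelinguisticdatabase.org"}]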
def register_old_with_dative(old):
try:
servers_path = os.path.join(c.DATIVE_ROOT, 'servers.json')
servers = get_dative_servers(servers_path)
old_dict = generate_old_dict(old)
if old_dict in servers:
return True
servers.append(old_dict)
write_dative_servers(servers, servers_path)
return True
except Exception:
logger.warning(f'Failed to register OLD {old["slug"]} with Dative')
return False
def unregister_old_with_dative(old):
servers_path = os.path.join(c.DATIVE_ROOT, 'servers.json')
servers = get_dative_servers(servers_path)
old_dict = generate_old_dict(old)
if old_dict in servers:
servers = [s for s in servers if s != old_dict]
write_dative_servers(servers, servers_path)
def create_local_old(old):
os.chdir(c.OLD_DIR)
initialize_old_path = 'initialize_old'
if not shutil.which(initialize_old_path):
initialize_old_path = os.path.join(
os.path.dirname(c.HERE),
'app_packages',
'bin',
'initialize_old')
cmd = initialize_old_path + f' configlocal.ini {old["slug"]}'
logger.info(f'Running command `{cmd}` to create the {old["slug"]} OLD')
cmd = shlex.split(cmd)
child = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout_data, stderr_data = child.communicate()
if child.returncode != 0:
logger.warning(f'Failed to create new local OLD {old["slug"]}')
try:
logger.warning(stdout_data)
logger.warning(stderr_data)
except Exception:
logger.warning('Failed to log stdout and stderr')
return False
logger.info(f'Successfully issued the command to create the new local OLD'
f' {old["slug"]}')
old_exists = does_old_exist(old)
if not old_exists:
logger.warning(f'Failed to create new local OLD {old["slug"]}')
return False
logger.info(f'Confirmed that the new local OLD {old["slug"]} exists')
is_registered = register_old_with_dative(old)
if not is_registered:
logger.warning(f'Failed to register local OLD {old["slug"]} with Dative')
return False
logger.info(f'Registered local OLD {old["slug"]} with Dative')
logger.info(f'Created new local OLD {old["slug"]}')
return True
class SyncOLDError(Exception):
pass
def get_diff(prev, curr):
diff = {'delete': {}, 'add': {}, 'update': {}}
for table, ids in prev.items():
for id_, modified in ids.items():
if id_ not in curr[table]:
diff['delete'].setdefault(table, []).append(int(id_))
elif modified != curr[table][id_]:
diff['update'].setdefault(table, []).append(int(id_))
for table, ids in curr.items():
for id_, modified in ids.items():
if id_ not in prev[table]:
diff['add'].setdefault(table, []).append(int(id_))
return diff
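# A small worked example of get_diff (hypothetical data): resource 2 was
# modified remotely, resource 3 was deleted and resource 4 was added, so the
# diff instructs us to update [2], delete [3] and add [4] in table 'form'.
#   prev = {'form': {'1': 'a', '2': 'b', '3': 'c'}}
#   curr = {'form': {'1': 'a', '2': 'B', '4': 'd'}}
#   get_diff(prev, curr)
#   => {'delete': {'form': [3]}, 'add': {'form': [4]}, 'update': {'form': [2]}}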
def batch_tables(tables, batch_size=200):
"""Given a ``tables`` map from table names (strings) to lists of table row
IDs (integers), return a list of maps of the same form such that every list
of row IDs contains ``batch_size`` or fewer elements."""
batches = []
while tables:
new_remainder = {}
batch = {}
for table_name, ids in tables.items():
if not ids:
continue
batch[table_name] = ids[:batch_size]
remainder_ids = ids[batch_size:]
if remainder_ids:
new_remainder[table_name] = remainder_ids
batches.append(batch)
tables = new_remainder
return batches
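# A small worked example of batch_tables (hypothetical IDs): with batch_size=2,
# a table of 5 IDs and a table of 1 ID yield three batches.
#   batch_tables({'form': [1, 2, 3, 4, 5], 'tag': [7]}, batch_size=2)
#   => [{'form': [1, 2], 'tag': [7]}, {'form': [3, 4]}, {'form': [5]}]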
def process_command(dtserver, old_service, command):
"""Process a sync-OLD! command."""
# Get the OLD metadata from DTServer
    old = fetch_old(dtserver, command['old_id'])
    if not old:
        msg = f'Failed to fetch OLD {command["old_id"]} from DTServer'
        logger.warning(msg)
        raise SyncOLDError(msg)
    old['url'] = f'{old_service.url}/{old["slug"]}'
# Determine whether the OLD already exists and create it if necessary
old_exists = does_old_exist(old)
if not old_exists:
old_exists = create_local_old(old)
if not old_exists:
msg = f'Failed to create the OLD {old["slug"]} locally'
logger.warning(msg)
raise SyncOLDError(msg)
# Abort if we are not set to sync or if there is nothing to sync with
if not old['is_auto_syncing']:
logger.debug(f'OLD {old["slug"]} is not set to auto-sync')
return
if not old['leader']:
logger.debug(f'OLD {old["slug"]} has no remote leader OLD')
return
leader_client = authenticate_to_leader(old)
if not leader_client:
logger.warning(f'Unable to login to leader OLD {old["leader"]}')
return
# Fetch the last modified values for each resource in the local OLD and in
# the leader OLD and construct a diff.
local_client = OLDClient(old['url'])
local_client.login(
DEFAULT_LOCAL_OLD_USERNAME,
DEFAULT_LOCAL_OLD_PASSWORD)
local_last_mod = local_client.get('sync/last_modified')
leader_last_mod = leader_client.get('sync/last_modified')
diff = get_diff(local_last_mod, leader_last_mod)
# Perform the local updates by modifying the SQLite db of the OLD directly.
meta = sqla.MetaData()
db_path = os.path.join(c.OLD_DIR, f"{old['slug']}.sqlite")
engine = sqla.create_engine(f'sqlite:///{db_path}')
with engine.connect() as conn:
# Perform any deletions
delete_state = diff['delete']
if delete_state:
for table_name, rows in delete_state.items():
if not rows:
continue
table = sqla.Table(table_name, meta, autoload_with=engine)
conn.execute(
table.delete().where(
table.c.id.in_(rows)))
# Perform any additions
add_params = diff['add']
if add_params:
for batch in batch_tables(add_params):
add_state = leader_client.post(
'sync/tables', {'tables': batch})
for table_name, rows in add_state.items():
if not rows:
continue
table = sqla.Table(table_name, meta, autoload_with=engine)
conn.execute(
table.insert(),
[prepare_row_for_upsert(table_name, row)
for row in rows.values()])
# Perform any updates
update_params = diff['update']
if update_params:
for batch in batch_tables(update_params):
update_state = leader_client.post(
'sync/tables', {'tables': batch})
for table_name, rows in update_state.items():
if not rows:
continue
table = sqla.Table(table_name, meta, autoload_with=engine)
for row in rows.values():
row_id = row['id']
updated_row = prepare_row_for_upsert(table_name, row)
conn.execute(
table.update().where(
table.c.id == row_id).values(**updated_row))
def sync_worker(dtserver, old_service, comm):
    while True:
        command = None  # ensure the name is bound for the finally block below
        try:
            # Pop and then process the next sync-OLD! command
            command = pop_sync_old_command(dtserver)
            if not command:
                continue
process_command(dtserver, old_service, command)
except SyncOLDError as e:
logger.exception(str(e))
except Exception as e:
logger.exception(
'Unexpected exception during SyncWorker\'s attempt to process'
' the next sync-OLD! command')
finally:
# Tell DTServer that we have finished processing the command.
if command:
complete_sync_old_command(dtserver, command)
if comm.get('exit?'):
break
time.sleep(5)
def start_sync_worker(dtserver, old_service):
comm = {}
thread = threading.Thread(
target=sync_worker,
kwargs={'dtserver': dtserver,
'old_service': old_service,
'comm': comm,},
daemon=True)
thread.start()
return thread, comm
|
"""remove feels table
Revision ID: c2d2f403fb8c
Revises: 4923fb2ba581
Create Date: 2020-02-14 20:23:25.494070
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "c2d2f403fb8c"
down_revision = "4923fb2ba581"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("feels")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"feels",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("url", sa.TEXT(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint("id", name="pk_feels"),
sa.UniqueConstraint("url", name="uq_feels_url"),
)
# ### end Alembic commands ###
|
from django.conf.urls import patterns, url
from package import apiv2 as package_api
from grid import views as grid_views
from searchv2 import views as search_views
urlpatterns = patterns("",
# {% url "apiv2:category" %}
url(
regex=r"categories/$",
view=package_api.CategoryListAPIView.as_view(),
name="categories"
),
# {% url "apiv2:packages" %}
url(
regex=r"packages/$",
view=package_api.PackageListAPIView.as_view(),
name="packages"
),
# {% url "apiv2:packages" slug %}
url(
regex=r"packages/(?P<slug>[-\w]+)/$",
view=package_api.PackageDetailAPIView.as_view(),
name="packages"
),
# {% url "apiv2:grids" %}
url(
regex=r"grids/$",
view=grid_views.GridListAPIView.as_view(),
name="grids"
),
# {% url "apiv2:grids" slug %}
url(
regex=r"grids/(?P<slug>[-\w]+)/$",
view=grid_views.GridDetailAPIView.as_view(),
name="grids"
),
# {% url "apiv2:search" %}
url(
regex=r"search/$",
view=search_views.SearchListAPIView.as_view(),
name="search"
),
# {% url "apiv2:search" slug %}
url(
regex=r"search/(?P<slug>[-\w]+)/$",
view=search_views.SearchDetailAPIView.as_view(),
name="search"
),
# {% url "apiv2:python3" slug %}
url(
regex=r"python3/$",
view=package_api.Python3ListAPIView.as_view(),
name="python3"
),
)
|
from datetime import date, timedelta
from sqlalchemy.sql import and_, func
from controllers.c_panel import CPanel
from exceptions.abort_exception import AbortException
from models import try_commit, try_flush, Cells, Certificate, Mark, Place, Incoterm, PanelType, User
from models.company import Company
from models.panel_quotation import PanelQuotation
from my_qt.dialogs.message_boxes import inform_save_successful, warn
from resources import get_last_user, set_last_user
from utilities.various import to_none
class CPanelQuotation(CPanel):
def __init__(self, c_master, gui, panel_quotation_id, current_item_data):
super().__init__(c_master, gui, current_item_data)
self.item = self.session.query(PanelQuotation).filter_by(id=panel_quotation_id).one_or_none()
def changed_date(self):
self.update_all_average()
self.update_current_type_average()
self.update_current_power_average()
def clicked_save(self, overwrite: bool):
super().clicked_save(overwrite)
if not overwrite:
self.update_all_average()
self.update_current_type_average()
self.update_current_power_average()
def load_initial_data(self, default=True):
company_names = [company.name for company in self.session.query(Company).order_by(Company.name).all()]
mark_names = [mark.name for mark in self.session.query(Mark).order_by(Mark.name).all()]
date_quotation = date.today()
date_validity = date_quotation + timedelta(days=30)
user_names = [user.name for user in self.session.query(User).order_by(User.name).all()]
last_user = self.session.query(User).filter_by(name=get_last_user()).one_or_none()
panel_type_names = [panel_type.name for panel_type in
self.session.query(PanelType).order_by(PanelType.name).all()]
cells_number = [cells.number for cells in self.session.query(Cells).order_by(Cells.number).all()]
certificate_names = [certificate.name for certificate in
self.session.query(Certificate).order_by(Certificate.name).all()]
incoterm_names = [incoterm.name for incoterm in self.session.query(Incoterm).order_by(Incoterm.name).all()]
place_names = [place.name for place in self.session.query(Place).order_by(Place.name).all()]
self.gui.load_initial_data(company_names,
mark_names,
date_quotation,
date_validity,
user_names,
last_user,
panel_type_names,
cells_number,
certificate_names,
incoterm_names,
place_names,
default)
def save(self, overwrite: bool):
company_name = self.gui.company
total_power = self.gui.total_power
price = self.gui.price
user_name = self.gui.user
panel_power = self.gui.panel_power
if not company_name or not total_power or not price or not user_name or not panel_power:
warn(self.my_strings.title_error, self.my_strings.message_save_panel_quotation_error_required,
self.my_strings.button_accept)
raise AbortException('Not saved: empty required fields')
date_quotation = self.gui.date_quotation
date_validity = self.gui.date_validity
observations = self.gui.observations
n_contacts = self.gui.n_contacts
efficiency = self.gui.efficiency
tolerance = self.gui.tolerance
warranty_product = self.gui.warranty_product
warranty_performance = self.gui.warranty_performance
company = self.session.query(Company).filter_by(name=company_name).one()
mark = self.session.query(Mark).filter_by(name=self.gui.mark).one_or_none()
if not mark and self.gui.mark:
mark = Mark(None, self.gui.mark)
self.session.add(mark)
try_flush(self.session)
user = self.session.query(User).filter_by(name=user_name).one_or_none()
if not user and self.gui.user:
user = User(None, self.gui.user)
self.session.add(user)
try_flush(self.session)
panel_type = self.session.query(PanelType).filter_by(name=self.gui.panel_type).one_or_none()
if not panel_type and self.gui.panel_type:
panel_type = PanelType(None, self.gui.panel_type)
self.session.add(panel_type)
try_flush(self.session)
cells = self.session.query(Cells).filter_by(number=self.gui.cells if self.gui.cells else None).one_or_none()
if not cells and self.gui.cells:
cells = Cells(None, self.gui.cells)
self.session.add(cells)
try_flush(self.session)
incoterm = self.session.query(Incoterm).filter_by(name=self.gui.incoterm).one_or_none()
if not incoterm and self.gui.incoterm:
incoterm = Incoterm(None, self.gui.incoterm)
self.session.add(incoterm)
try_flush(self.session)
made_in = self.session.query(Place).filter_by(name=self.gui.made_in).one_or_none()
if not made_in and self.gui.made_in:
made_in = Place(None, self.gui.made_in)
self.session.add(made_in)
try_flush(self.session)
origin = self.session.query(Place).filter_by(name=self.gui.origin).one_or_none()
if not origin and self.gui.origin:
origin = Place(None, self.gui.origin)
self.session.add(origin)
try_flush(self.session)
destination = self.session.query(Place).filter_by(name=self.gui.destination).one_or_none()
if not destination and self.gui.destination:
destination = Place(None, self.gui.destination)
self.session.add(destination)
try_flush(self.session)
certificates = self.session.query(Certificate).filter(Certificate.name.in_(self.gui.checked_certificates)).all()
data = [total_power, price, date_quotation, date_validity, observations, n_contacts, panel_power, efficiency,
tolerance, warranty_product, warranty_performance, company, mark, user, panel_type, cells, incoterm,
made_in, origin, destination, certificates]
to_none(data)
if not self.save_unique_check(data, overwrite):
raise AbortException('Not saved: not unique')
if overwrite:
self.item.set_data(data)
else:
new_panel_quotation = PanelQuotation(None, *data)
self.session.add(new_panel_quotation)
self.original_data = self.gui.data
set_last_user(user.name)
try_commit(self.session)
inform_save_successful()
if not overwrite:
self.load_initial_data(default=False)
self.gui.set_focus_nothing()
def save_unique_check(self, data, overwrite):
if overwrite:
panel_quotations = self.session.query(PanelQuotation).filter_by(
total_power=data[0], price=data[1], date_quotation=data[2], date_validity=data[3], observations=data[4],
n_contacts=data[5], panel_power=data[6], efficiency=data[7], tolerance=data[8],
warranty_product=data[9],
warranty_performance=data[10], company=data[11], mark=data[12], user=data[13], panel_type=data[14],
cells=data[15], incoterm=data[16], made_in=data[17], origin=data[18], destination=data[19]
).filter(PanelQuotation.id != self.item.id).all()
else:
panel_quotations = self.session.query(PanelQuotation).filter_by(
total_power=data[0], price=data[1], date_quotation=data[2], date_validity=data[3], observations=data[4],
n_contacts=data[5], panel_power=data[6], efficiency=data[7], tolerance=data[8],
warranty_product=data[9],
warranty_performance=data[10], company=data[11], mark=data[12], user=data[13], panel_type=data[14],
cells=data[15], incoterm=data[16], made_in=data[17], origin=data[18], destination=data[19]
).all()
for panel_quotation in panel_quotations:
if set(panel_quotation.certificates) == set(data[20]):
warn(self.my_strings.title_error,
self.my_strings.message_save_error_quotation_not_unique,
self.my_strings.button_accept)
return False
return True
def update_all_average(self):
all_average = self.session.query(func.avg(PanelQuotation.price)).filter(
and_(PanelQuotation.date_quotation >= self.gui.date_quotation,
PanelQuotation.date_quotation <= self.gui.date_validity
)).scalar()
if all_average:
self.gui.price_bar_chart_all_average = all_average
else:
self.gui.price_bar_chart_all_average = 0
def update_current_type_average(self):
current_type_average = self.session.query(func.avg(PanelQuotation.price)).join(
PanelQuotation.panel_type).filter(
and_(PanelType.name == self.gui.panel_type,
PanelQuotation.date_quotation >= self.gui.date_quotation,
PanelQuotation.date_quotation <= self.gui.date_validity
)).scalar()
if current_type_average:
self.gui.price_bar_chart_current_type_average = current_type_average
else:
self.gui.price_bar_chart_current_type_average = 0
def update_current_power_average(self):
current_power_average = self.session.query(func.avg(PanelQuotation.price)).filter(
and_(PanelQuotation.panel_power == self.gui.panel_power,
PanelQuotation.date_quotation >= self.gui.date_quotation,
PanelQuotation.date_quotation <= self.gui.date_validity
)).scalar()
if current_power_average:
self.gui.price_bar_chart_current_power_average = current_power_average
else:
self.gui.price_bar_chart_current_power_average = 0
|
### This is an example
i = 20
# @@DNA
# Description: prints 'hello world' to stdout
# Input: None
# Output: None
# Status: Done
# @@END
def main():
    print('Hello world')
    return
|
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import json
import time
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
from numpy import newaxis
from keras.layers import Dense, Activation, Dropout, LSTM
from keras.models import Sequential, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
from math import pi,sqrt,exp,pow,log
from numpy.linalg import det, inv
from abc import ABCMeta, abstractmethod
from sklearn import cluster
import statsmodels.api as sm
import scipy.stats as scs
import scipy.optimize as sco
import scipy.interpolate as sci
from scipy import stats
# In[22]:
from os import listdir
file_list=listdir('D://NeurIPS Workshop/stockdata/Data')
print(len(file_list))
file_list[0]
# In[51]:
df_news = pd.read_csv("D://NeurIPS Workshop/data/source_price.csv")
# In[66]:
for i in range(0,len(file_list)):
temp='D://NeurIPS Workshop/stockdata/Data/'+str(file_list[i])
df=pd.read_csv(temp)
df_new=pd.DataFrame()
df_new['date']=df['Date']
df_new['price']=df['Adj Close_y']
df_cut=pd.DataFrame(df_new.iloc[2537:2658,:])
# states = ['date']
df_cut.index = range(len(df_cut))
df_cut['wsj_mean_compound']=df_news['wsj_mean_compound']
df_cut['cnbc_mean_compound']=df_news['cnbc_mean_compound']
df_cut['fortune_mean_compound']=df_news['fortune_mean_compound']
df_cut['reuters_mean_compound']=df_news['reuters_mean_compound']
df_cut.to_csv('D://NeurIPS Workshop/stockdata/cutdata/'+str(file_list[i]))
if i%100==0:
print(i)
# In[71]:
result_Mid_LSTM_df=pd.DataFrame(columns=('index','stock','MSE','accuracy','mean_error_percent','TRUE','predict'))
m=len(file_list)
# In[76]:
df.head()
# In[94]:
def stock_loop (filename):
df = pd.read_csv('D://NeurIPS Workshop/stockdata/cutdata/'+str(filename))
df = df.drop(columns=['Unnamed: 0'])
    dataframe = df.copy()  # keep an un-normalised copy for building the test data
    split = 0.85
    sequence_length = 10
    normalise = True
    batch_size = 100
    input_dim = 5
    input_timesteps = 9
    neurons = 50
    epochs = 5
    prediction_len = 1
    dense_output = 1
    drop_out = 0.2
mu=0
noise=0.1
wsj_var=np.var(df.wsj_mean_compound)
cnbc_var=np.var(df.cnbc_mean_compound)
fortune_var=np.var(df.fortune_mean_compound)
reuters_var=np.var(df.reuters_mean_compound)
sigma_wsj=noise*wsj_var
sigma_cnbc=noise*cnbc_var
sigma_fortune=noise*fortune_var
sigma_reuters=noise*reuters_var
n=df.shape[0]
df_noise=pd.DataFrame()
df_noise['wsj_noise']=df['wsj_mean_compound']
df_noise['cnbc_noise']=df['cnbc_mean_compound']
df_noise['fortune_noise']=df['fortune_mean_compound']
df_noise['reuters_noise']=df['reuters_mean_compound']
    for i in range(0, n):
        # use .loc to avoid pandas chained-assignment (SettingWithCopy) issues
        df_noise.loc[i, 'wsj_noise'] += np.random.normal(mu, sigma_wsj)
        df_noise.loc[i, 'cnbc_noise'] += np.random.normal(mu, sigma_cnbc)
        df_noise.loc[i, 'fortune_noise'] += np.random.normal(mu, sigma_fortune)
        df_noise.loc[i, 'reuters_noise'] += np.random.normal(mu, sigma_reuters)
# ***********
dfn=df_noise
df_1n=pd.DataFrame()
df_1n['wsj']=dfn['wsj_noise']
df_1n['cnbc']=df['cnbc_mean_compound']
df_1n['fortune']=df['fortune_mean_compound']
df_1n['reuters']=df['reuters_mean_compound']
df_1n['price']=df['price']##########
df_2n=pd.DataFrame()
df_2n['wsj']=df['wsj_mean_compound']
df_2n['cnbc']=dfn['cnbc_noise']
df_2n['fortune']=df['fortune_mean_compound']
df_2n['reuters']=df['reuters_mean_compound']
df_2n['price']=df['price']#############
df_3n=pd.DataFrame()
df_3n['wsj']=df['wsj_mean_compound']
df_3n['cnbc']=df['cnbc_mean_compound']
df_3n['fortune']=dfn['fortune_noise']
df_3n['reuters']=df['reuters_mean_compound']
df_3n['price']=df['price']############
df_4n=pd.DataFrame()
df_4n['wsj']=df['wsj_mean_compound']
df_4n['cnbc']=df['cnbc_mean_compound']
df_4n['fortune']=df['fortune_mean_compound']
df_4n['reuters']=dfn['reuters_noise']
df_4n['price']=df['price']##################
df1=df_1n
df2=df_2n
df3=df_3n
df4=df_4n
i_split = int(len(df1) * split)
cols = ['price','wsj','cnbc','fortune','reuters']
data_train_1 = df1.get(cols).values[:i_split]
data_train_2 = df2.get(cols).values[:i_split]
data_train_3 = df3.get(cols).values[:i_split]
data_train_4 = df4.get(cols).values[:i_split]
len_train = len(data_train_1)
len_train_windows = None
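    # The four blocks below apply the same per-window min-max normalisation to
    # each noise-augmented training copy. A compact equivalent for a single
    # window w (sketch only, not wired in so the original control flow stays
    # intact): only column 0 (price) is normalised, and the per-window min and
    # max are recorded so predictions can be de-normalised later:
    #   w0 = w[:, 0]
    #   w[:, 0] = (w0 - w0.min()) / (w0 - w0.min()).max()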
##########data_train_1#############################################################
data_windows = []
for i in range(len_train - sequence_length):
data_windows.append(data_train_1[i:i+sequence_length])
data_windows = np.array(data_windows).astype(float)
window_data=data_windows
win_num=window_data.shape[0]
col_num=window_data.shape[2]
normalised_data = []
record_min=[]
record_max=[]
for win_i in range(0,win_num):
normalised_window = []
for col_i in range(0,1):#col_num):
temp_col=window_data[win_i,:,col_i]
temp_min=min(temp_col)
if col_i==0:
record_min.append(temp_min)#record min
temp_col=temp_col-temp_min
temp_max=max(temp_col)
if col_i==0:
record_max.append(temp_max)#record max
temp_col=temp_col/temp_max
normalised_window.append(temp_col)
for col_i in range(1,col_num):
temp_col=window_data[win_i,:,col_i]
normalised_window.append(temp_col)
normalised_window = np.array(normalised_window).T
normalised_data.append(normalised_window)
normalised_data=np.array(normalised_data)
# normalised_data=window_data#************
data_windows=normalised_data
x_train1 = data_windows[:, :-1]
y_train1 = data_windows[:, -1,[0]]
##########data_train_2#############################################################
data_windows = []
for i in range(len_train - sequence_length):
data_windows.append(data_train_2[i:i+sequence_length])
data_windows = np.array(data_windows).astype(float)
window_data=data_windows
win_num=window_data.shape[0]
col_num=window_data.shape[2]
normalised_data = []
record_min=[]
record_max=[]
for win_i in range(0,win_num):
normalised_window = []
for col_i in range(0,1):#col_num):
temp_col=window_data[win_i,:,col_i]
temp_min=min(temp_col)
if col_i==0:
record_min.append(temp_min)#record min
temp_col=temp_col-temp_min
temp_max=max(temp_col)
if col_i==0:
record_max.append(temp_max)#record max
temp_col=temp_col/temp_max
normalised_window.append(temp_col)
for col_i in range(1,col_num):
temp_col=window_data[win_i,:,col_i]
normalised_window.append(temp_col)
normalised_window = np.array(normalised_window).T
normalised_data.append(normalised_window)
normalised_data=np.array(normalised_data)
# normalised_data=window_data#************
data_windows=normalised_data
x_train2 = data_windows[:, :-1]
y_train2 = data_windows[:, -1,[0]]
##########data_train_3#############################################################
data_windows = []
for i in range(len_train - sequence_length):
data_windows.append(data_train_3[i:i+sequence_length])
data_windows = np.array(data_windows).astype(float)
window_data=data_windows
win_num=window_data.shape[0]
col_num=window_data.shape[2]
normalised_data = []
record_min=[]
record_max=[]
for win_i in range(0,win_num):
normalised_window = []
for col_i in range(0,1):#col_num):
temp_col=window_data[win_i,:,col_i]
temp_min=min(temp_col)
if col_i==0:
record_min.append(temp_min)#record min
temp_col=temp_col-temp_min
temp_max=max(temp_col)
if col_i==0:
record_max.append(temp_max)#record max
temp_col=temp_col/temp_max
normalised_window.append(temp_col)
for col_i in range(1,col_num):
temp_col=window_data[win_i,:,col_i]
normalised_window.append(temp_col)
normalised_window = np.array(normalised_window).T
normalised_data.append(normalised_window)
normalised_data=np.array(normalised_data)
# normalised_data=window_data#************
data_windows=normalised_data
x_train3 = data_windows[:, :-1]
y_train3 = data_windows[:, -1,[0]]
##########data_train_4#############################################################
data_windows = []
for i in range(len_train - sequence_length):
data_windows.append(data_train_4[i:i+sequence_length])
data_windows = np.array(data_windows).astype(float)
window_data=data_windows
win_num=window_data.shape[0]
col_num=window_data.shape[2]
normalised_data = []
record_min=[]
record_max=[]
for win_i in range(0,win_num):
normalised_window = []
for col_i in range(0,1):#col_num):
temp_col=window_data[win_i,:,col_i]
temp_min=min(temp_col)
if col_i==0:
record_min.append(temp_min)#record min
temp_col=temp_col-temp_min
temp_max=max(temp_col)
if col_i==0:
record_max.append(temp_max)#record max
temp_col=temp_col/temp_max
normalised_window.append(temp_col)
for col_i in range(1,col_num):
temp_col=window_data[win_i,:,col_i]
normalised_window.append(temp_col)
normalised_window = np.array(normalised_window).T
normalised_data.append(normalised_window)
normalised_data=np.array(normalised_data)
# normalised_data=window_data#************
data_windows=normalised_data
x_train4 = data_windows[:, :-1]
y_train4 = data_windows[:, -1,[0]]
############concat###########################################
x_train_t=np.concatenate((x_train1,x_train2,x_train3,x_train4),axis=0)
x_train=x_train_t
y_train_t=np.concatenate((y_train1,y_train2,y_train3,y_train4),axis=0)
y_train=y_train_t
############get test##################################################
dataframe.columns=['date','price','wsj','cnbc','fortune','reuters']
data_test = dataframe.get(cols).values[i_split:]
data_test_df=pd.DataFrame(data_test)
data_test_df.columns=['price','wsj','cnbc','fortune','reuters']
data_test=data_test_df
len_test = len(data_test)
data_windows = []
for i in range(len_test - sequence_length):
        data_windows.append(data_test[i:i+sequence_length])
    data_windows = np.array(data_windows).astype(float)
    # get original (un-normalised) y_test
y_test_ori = data_windows[:, -1, [0]]
# print('y_test_ori.shape',y_test_ori.shape)
window_data=data_windows
win_num=window_data.shape[0]
col_num=window_data.shape[2]
normalised_data = []
record_min=[]
record_max=[]
#normalize
for win_i in range(0,win_num):
normalised_window = []
for col_i in range(0,1):#col_num):
temp_col=window_data[win_i,:,col_i]
temp_min=min(temp_col)
if col_i==0:
record_min.append(temp_min)#record min
temp_col=temp_col-temp_min
temp_max=max(temp_col)
if col_i==0:
record_max.append(temp_max)#record max
temp_col=temp_col/temp_max
normalised_window.append(temp_col)
for col_i in range(1,col_num):
temp_col=window_data[win_i,:,col_i]
normalised_window.append(temp_col)
normalised_window = np.array(normalised_window).T
normalised_data.append(normalised_window)
normalised_data=np.array(normalised_data)
# normalised_data=window_data#************
data_windows=normalised_data#get_test_data
x_test = data_windows[:, :-1]
y_test = data_windows[:, -1, [0]]
# LSTM MODEL
# create model
# input_timesteps=9
# input_dim=5
# dense_output=1
model = Sequential()
model.add(LSTM(neurons, input_shape=(input_timesteps, input_dim), return_sequences = True))
model.add(Dropout(drop_out))
model.add(LSTM(neurons,return_sequences = True))
model.add(LSTM(neurons,return_sequences =False))
model.add(Dropout(drop_out))
model.add(Dense(dense_output, activation='linear'))
# Compile model
model.compile(loss='mean_squared_error',
optimizer='adam')
# Fit the model
model.fit(x_train,y_train,epochs=epochs,batch_size=batch_size)
#multi sequence predict
data=x_test
prediction_seqs = []
window_size=sequence_length
pre_win_num=int(len(data)/prediction_len)
for i in range(0,pre_win_num):
curr_frame = data[i*prediction_len]
predicted = []
for j in range(0,prediction_len):
temp=model.predict(curr_frame[newaxis,:,:])[0]
predicted.append(temp)
curr_frame = curr_frame[1:]
curr_frame = np.insert(curr_frame, [window_size-2], predicted[-1], axis=0)
prediction_seqs.append(predicted)
#de_predicted
de_predicted=[]
len_pre_win=int(len(data)/prediction_len)
len_pre=prediction_len
m=0
for i in range(0,len_pre_win):
for j in range(0,len_pre):
de_predicted.append(prediction_seqs[i][j][0]*record_max[m]+record_min[m])
m=m+1
# print(de_predicted)
error = []
diff=y_test.shape[0]-prediction_len*pre_win_num
for i in range(y_test_ori.shape[0]-diff):
error.append(y_test_ori[i,] - de_predicted[i])
squaredError = []
absError = []
for val in error:
squaredError.append(val * val)
absError.append(abs(val))
error_percent=[]
for i in range(len(error)):
val=absError[i]/y_test_ori[i,]
val=abs(val)
error_percent.append(val)
mean_error_percent=sum(error_percent) / len(error_percent)
accuracy=1-mean_error_percent
MSE=sum(squaredError) / len(squaredError)
return MSE, accuracy,mean_error_percent,y_test_ori,de_predicted
# In[95]:
result_LSTM_df=pd.DataFrame(columns=('index','stock','MSE','accuracy','TRUE','predict'))
m=len(file_list)
filename=file_list
for i in range(0,m):
    index=i
    stock=filename[i]
    result=stock_loop(filename[i])
    MSE=result[0]
    accuracy=result[1]
    TRUE=result[3]  # y_test_ori: the true (de-normalised) prices
    predict=result[4]  # de_predicted: the de-normalised predictions
    result_LSTM_df=result_LSTM_df.append(pd.DataFrame({'index':[index],
                                                       'stock':[stock],
                                                       'MSE':[MSE],
                                                       'accuracy':[accuracy],
                                                       'TRUE':[TRUE],
                                                       'predict':[predict]}),ignore_index=True)
if i%100==0:
print(i)
np.save('L1_451.npy',result_LSTM_df)
# In[39]:
# In[69]:
df = pd.read_csv('D://NeurIPS Workshop/stockdata/cutdata/d_spy_A.csv')
df.head()
# In[79]:
dataframe= pd.read_csv("D://NeurIPS Workshop/data/source_price.csv")
dataframe.head()
# In[80]:
dataframe.columns
# In[81]:
dataframe.columns=['date','price','wsj','cnbc','fortune','reuters']
|
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from datetime import time
from itertools import chain
from pandas import Timestamp
from pandas.tseries.holiday import AbstractHolidayCalendar, GoodFriday, USLaborDay, USPresidentsDay, USThanksgivingDay
from pytz import timezone
from .holidays_us import (Christmas, ChristmasEveBefore1993, ChristmasEveInOrAfter1993, USBlackFridayInOrAfter1993,
USIndependenceDay, USMartinLutherKingJrAfter1998, USMemorialDay, USJuneteenthAfter2022,
USNationalDaysofMourning, USNewYearsDay)
from .market_calendar import MarketCalendar
class CMEBaseExchangeCalendar(MarketCalendar, ABC):
"""
Base Exchange Calendar for CME.
CME Markets: https://www.cmegroup.com/markets/agriculture.html#overview
- Agriculture
- Energy
- Equity Index
- FX
- Interest Rates
- Metals
- Options
    Holidays for which the entire GLOBEX is closed:
- New Years Day
- Good Friday
- Christmas
Product Specific Closures:
- MLK Day
- Presidents Day
- Memorial Day
- Juneteenth
- US Independence Day
- US Labor Day
- US Thanksgiving Day
"""
@property
@abstractmethod
def name(self):
"""
Name of the market
:return: string name
"""
raise NotImplementedError()
@property
def tz(self):
return timezone('America/Chicago')
@property
def regular_holidays(self):
return AbstractHolidayCalendar(rules=[
USNewYearsDay,
GoodFriday,
Christmas,
])
    # I can't find any reference to these special closings other than NYSE
# @property
# def adhoc_holidays(self):
# return USNationalDaysofMourning
@property
def special_closes(self):
return [(
self.special_close_time,
AbstractHolidayCalendar(rules=[
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USJuneteenthAfter2022,
USLaborDay,
USIndependenceDay,
USThanksgivingDay,
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
])
)]
class CMEAgricultureExchangeCalendar(CMEBaseExchangeCalendar):
"""
Exchange calendar for CME for Agriculture products
Products:
- Grains and Oilseeds (same trading hours and holidays)
- Livestock
- Dairy
- Fertilizer
- Lumber and Softs
"""
# aliases = ['CME_Agriculture', 'CBOT_Agriculture', 'COMEX_Agriculture', 'NYMEX_Agriculture']
@property
    # @abstractmethod  # Would have preferred to keep this class abstract, but it fails test_market_calendar.py
def name(self):
"""
Name of the market
:return: string name
"""
raise NotImplementedError()
class CMELivestockExchangeCalendar(CMEAgricultureExchangeCalendar):
"""
Exchange calendar for CME for Livestock products
https://www.cmegroup.com/trading/agricultural/livestock.html
GLOBEX Trading Times
https://www.cmegroup.com/markets/agriculture/livestock/live-cattle.contractSpecs.html
Monday - Friday: 8:30 a.m. - 1:05 p.m. CT
"""
aliases = ['CME_Livestock', 'CME_Live_Cattle', 'CME_Feeder_Cattle', 'CME_Lean_Hog', 'CME_Port_Cutout']
regular_market_times = {
"market_open": ((None, time(8, 30)),),
"market_close": ((None, time(13, 5)),)
}
@property
def name(self):
return "CME_Livestock"
@property
def regular_holidays(self):
return AbstractHolidayCalendar(rules=[
USNewYearsDay,
USMartinLutherKingJrAfter1998,
USPresidentsDay,
GoodFriday,
USMemorialDay,
USIndependenceDay,
USLaborDay,
USThanksgivingDay,
Christmas,
])
# @property
# def adhoc_holidays(self):
# return USNationalDaysofMourning
@property
def special_closes(self):
return [(
time(12, 5),
AbstractHolidayCalendar(rules=[
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
])
)]
class CMEEquityExchangeCalendar(CMEBaseExchangeCalendar):
aliases = ['CME_Equity', 'CBOT_Equity', '/ES', 'S&P500']
# Using CME Globex trading times
# https://www.cmegroup.com/markets/equities/sp/e-mini-sandp500.contractSpecs.html
regular_market_times = {
"market_open": ((None, time(17), -1),), # offset by -1 day
"market_close": ((None, time(16, 00)),)
#"break_start": ((None, time(17,45)),),
#"break_end": ((None, time(17,30)),)
}
@property
def name(self):
return "CME_Equity"
@property
def special_close_time(self):
return time(12, 30)
# For the bond market, Good Friday is an open day when it coincides with the release of NFP on the first Friday of the month
goodFridayClosed = ['1970-03-27', '1971-04-09', '1972-03-31', '1973-04-20', '1974-04-12', '1975-03-28', '1976-04-16',
'1977-04-08', '1978-03-24', '1979-04-13', '1981-04-17', '1982-04-09', '1984-04-20', '1986-03-28',
'1987-04-17', '1989-03-24', '1990-04-13', '1991-03-29', '1992-04-17', '1993-04-09', '1995-04-14',
'1997-03-28', '1998-04-10', '2000-04-21', '2001-04-13', '2002-03-29', '2003-04-18', '2004-04-09',
'2005-03-25', '2006-04-14', '2008-03-21', '2009-04-10', '2011-04-22', '2013-03-29', '2014-04-18',
'2016-03-25', '2017-04-14', '2018-03-30', '2019-04-19', '2020-04-10', '2022-04-15', '2024-03-29',
'2025-04-18', '2027-03-26', '2028-04-14', '2029-03-30', '2030-04-19', '2031-04-11', '2032-03-26',
'2033-04-15', '2035-03-23', '2036-04-11', '2038-04-23', '2039-04-08', '2040-03-30', '2041-04-19',
'2043-03-27', '2044-04-15', '2046-03-23', '2047-04-12', '2049-04-16', '2050-04-08', '2051-03-31',
'2052-04-19', '2054-03-27', '2055-04-16', '2056-03-31', '2057-04-20', '2058-04-12', '2059-03-28',
'2060-04-16', '2061-04-08', '2062-03-24', '2063-04-13', '2065-03-27', '2066-04-09', '2068-04-20',
'2069-04-12', '2070-03-28', '2071-04-17', '2072-04-08', '2073-03-24', '2074-04-13', '2076-04-17',
'2077-04-09', '2079-04-21', '2081-03-28', '2082-04-17', '2084-03-24', '2085-04-13', '2086-03-29',
'2087-04-18', '2088-04-09', '2090-04-14', '2092-03-28', '2093-04-10', '2095-04-22', '2096-04-13',
'2097-03-29', '2098-04-18', '2099-04-10']
BondsGoodFridayClosed = [Timestamp(x, tz='UTC') for x in goodFridayClosed]
goodFridayOpen = ['1980-04-04', '1983-04-01', '1985-04-05', '1988-04-01', '1994-04-01', '1996-04-05', '1999-04-02',
'2007-04-06', '2010-04-02', '2012-04-06', '2015-04-03', '2021-04-02', '2023-04-07', '2026-04-03',
'2034-04-07', '2037-04-03', '2042-04-04', '2045-04-07', '2048-04-03', '2053-04-04', '2064-04-04',
'2067-04-01', '2075-04-05', '2078-04-01', '2080-04-05', '2083-04-02', '2089-04-01', '2091-04-06',
'2094-04-02']
BondsGoodFridayOpen = [Timestamp(x, tz='UTC') for x in goodFridayOpen]
class CMEBondExchangeCalendar(MarketCalendar):
"""
Exchange calendar for CME for Interest Rate and Bond products
    The holiday calendar differs between the open-outcry trading floor hours and the GLOBEX electronic trading hours.
    This calendar attempts to be accurate for the GLOBEX holidays and hours from approximately 2010 onward.
"""
aliases = ['CME_Rate', 'CBOT_Rate', 'CME_InterestRate', 'CBOT_InterestRate', 'CME_Bond', 'CBOT_Bond']
regular_market_times = {
"market_open": ((None, time(17), -1),), # offset by -1 day
"market_close": ((None, time(16)),)
}
@property
def name(self):
return "CME_Bond"
@property
def tz(self):
return timezone('America/Chicago')
@property
def regular_holidays(self):
return AbstractHolidayCalendar(rules=[
USNewYearsDay,
Christmas,
])
@property
def adhoc_holidays(self):
return list(chain(USNationalDaysofMourning, BondsGoodFridayClosed))
@property
def special_closes(self):
return [
(time(12),
AbstractHolidayCalendar(rules=[
USMartinLutherKingJrAfter1998,
USPresidentsDay,
USMemorialDay,
USIndependenceDay,
USLaborDay,
USThanksgivingDay,
])),
(time(12, 15),
AbstractHolidayCalendar(rules=[
USBlackFridayInOrAfter1993,
ChristmasEveBefore1993,
ChristmasEveInOrAfter1993,
]))
]
@property
def special_closes_adhoc(self):
return [
(time(10, tzinfo=self.tz), BondsGoodFridayOpen)
]
|
""" flask hello test app """
import sys
import os
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run()
"""
print(sys.version)
print("============================")
print(sys.version_info)
print("============================")
print(os.getcwd())
"""
|
#!/usr/bin/env python
# coding: utf-8
# ## Problem statement
# Load the data and prepare everything for analysis: normalize the features and transform the categorical columns. Optimize memory consumption.
#
# Split the sample into training/validation sets in an 80/20 ratio.
#
# Apply naive Bayes to classify the scoring. Use all available columns.
#
# Check the prediction quality with the kappa metric and a confusion matrix.
#
# Data:
# * https://video.ittensive.com/machine-learning/prudential/train.csv.gz
#
# Competition: https://www.kaggle.com/c/prudential-life-insurance-assessment/
#
# © ITtensive, 2020
# In[1]:
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn import preprocessing
# In[2]:
data = pd.read_csv("https://video.ittensive.com/machine-learning/prudential/train.csv.gz")
# In[4]:
data['Product_Info_2_1'] = data['Product_Info_2'].str.slice(0,1)
data['Product_Info_2_2'] = pd.to_numeric(data['Product_Info_2'].str.slice(1,2))
data.drop('Product_Info_2',axis=1,inplace=True)
print(data.info())
# In[12]:
def reduce_mem_usage(df):
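    # A brief note on the strategy (grounded in the code below): each numeric
    # column is downcast to the smallest float/int dtype whose range covers its
    # min/max; non-numeric columns become pandas 'category'. The print at the
    # end reports the MB saved.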
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in df.columns:
col_type = df[col].dtypes
if str(col_type)[:5] == 'float':
c_min = df[col].min()
c_max = df[col].max()
if c_min > np.finfo('f2').min and c_max < np.finfo('f2').max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo('f4').min and c_max < np.finfo('f4').max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
elif str(col_type)[:3] == 'int':
c_min = df[col].min()
c_max = df[col].max()
if c_min > np.iinfo('i1').min and c_max < np.iinfo('i1').max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo('i2').min and c_max < np.iinfo('i2').max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo('i4').min and c_max < np.iinfo('i4').max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo('i8').min and c_max < np.iinfo('i8').max:
df[col] = df[col].astype(np.int64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024 ** 2
print(round(start_mem - end_mem,2))
return df
# In[13]:
data = reduce_mem_usage(data)
# In[14]:
data.info()
# ### Preprocessing: categorization, one-hot vectors
# | Product |
# | - |
# | A |
# | B |
# | C |
# | A |
#
# Becomes
#
# | ProductA | ProductB | ProductC |
# | -- | -- | -- |
# | 1 | 0 | 0 |
# | 0 | 1 | 0 |
# | 0 | 0 | 1 |
# | 1 | 0 | 0 |
#
# sklearn.preprocessing.OneHotEncoder could be used here, but that would require an additional transformation of the data frame (a set of one-hot vectors for each data tuple); a pandas-based cross-check is sketched after the next cell.
#
# We also avoid ordinal encoding of the categories (A->1, B->2, C->3, D->4, E->5), because it turns a nominal random variable into an ordinal/numeric one, which is a strong assumption about the source data.
# In[15]:
for l in data['Product_Info_2_1'].unique():
data['Product_Info_2_1' + l] = data['Product_Info_2_1'].isin([l]).astype('int8')
data.drop('Product_Info_2_1',axis=1,inplace=True)
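# A minimal cross-check of the same one-hot expansion using pandas' built-in helper. The frame below is a hypothetical toy example, not part of the original pipeline:
# In[ ]:
demo = pd.DataFrame({"Product": ["A", "B", "C", "A"]})
print(pd.get_dummies(demo, columns=["Product"], prefix="Product", prefix_sep=""))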
# ### Fill in missing values
# -1 increases the "distance" when computing nearest neighbors
# In[16]:
data.fillna(-1,inplace=True)
# In[17]:
columns_groups = ['Insurance_History','Insured_Info','Medical_Keyword',
'Family_Hist','Medical_History','Product_Info']
columns = ['Wt','Ht','BMI','Ins_Age']
for cg in columns_groups:
columns.extend(data.columns[data.columns.str.startswith(cg)])
print(columns)
# ### Data preprocessing
# Additionally, apply z-normalization via the preprocessing module. The scaler is fit on the entire source data set.
# In[18]:
scaler = preprocessing.StandardScaler()
scaler.fit(pd.DataFrame(data,columns=columns))
# In[19]:
data_train,data_test = train_test_split(data,test_size=0.2)
# ### Fitting the naive Bayes model
# \begin{equation}
# P(A\mid B) = \frac{P(B\mid A)\ P(A)}{P(B)}
# \end{equation}
# For each feature, the probability of it taking a particular value is computed - P(B). For each class, its probability (in fact, its share of the data) is computed - P(A). Then the probability of each feature taking a particular value given a particular class is computed - P(B|A).
#
# From all of these values, the probability of each class given the known features is obtained.
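# Under the hood, GaussianNB models each (scaled) feature as normally distributed within each class; the per-feature likelihood is the standard Gaussian density, stated here for reference (not part of the original notebook):
# \begin{equation}
# P(x_i \mid y) = \frac{1}{\sqrt{2\pi\sigma_y^2}} \exp\left(-\frac{(x_i - \mu_y)^2}{2\sigma_y^2}\right)
# \end{equation}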
# In[22]:
y = data_train['Response']
x = scaler.transform(pd.DataFrame(data_train,columns=columns))
# In[23]:
bayes = GaussianNB()
bayes.fit(x,y)
# In[24]:
data_test = pd.DataFrame(data_test)
# In[25]:
x_test = scaler.transform(pd.DataFrame(data_test,columns=columns))
data_test['target']=bayes.predict(x_test)
# In[28]:
cohen_kappa_score(data_test['Response'],data_test['target'],weights='quadratic')
# In[27]:
print(confusion_matrix(data_test['Response'],data_test['target']))
# In[ ]:
|
"""Pipeline for image augmentation.
This module contains the image augmentation pipeline.
Todo:
- Add license boilerplate.
- Cleanup
"""
from multiprocessing import Pipe
from ImageBot.Config import *
from collections.abc import Iterable
import uuid
from functools import partial
from queue import Queue
from multiprocessing import Manager
from ImageBot.image_processing.greenscreen import w_enlarge_mask
import numpy as np
import cv2
#from numba import jit
from ImageBot.infrastructure.ImageMessage import ImageMessage
from ImageBot.infrastructure.Pipeline import Pipeline
from ImageBot.infrastructure.filter import *
from ImageBot.data_augmentation.poisson_merge.poisson_image_editing import poisson_edit
from ImageBot.data_augmentation.augmentations import *
from ImageBot.image_processing.general import expand_canvas
Loader : 'Queue[Path]' = Queue()
Backgrounds : List[Path] = []
manager : Manager = None
def load_images(source_folder : Path, bgs_folder : Path, mask_suffix='_mask', extension='png'):
"""Load images paths into loader queue.
Args:
source_folder (Path): Path to image source directory.
bgs_folder (Path): Path to backgrounds folder.
mask_suffix (str, optional): Suffix for mask image files. Defaults to '_mask'.
extension (str, optional): File extension and therefore codec to load. Defaults to 'png'.
"""
for file in source_folder.iterdir():
        if mask_suffix not in file.stem:
Loader.put(file)
for file in bgs_folder.iterdir():
Backgrounds.append(file)
# TODO: Fix threadpool issue
AugmentationPipeline : Pipeline = None
def init(dest_folder : Path = None):
"""Initialize image augmentation pipeline.
    Initialize the image augmentation pipeline by creating a pipeline object and adding the necessary filters to it.
Args:
        dest_folder (Path, optional): Path to the folder to store images in. If the folder doesn't exist, it will be created.
            If not provided (None), no images will be saved. Defaults to None.
"""
global AugmentationPipeline, manager, Backgrounds
AugmentationPipeline = Pipeline(with_multiprocessing=True)
manager = Manager()
bgs = manager.list(Backgrounds)
# Load image
#AugmentationPipeline.add(partial(load_image, source=Loader, load_mask=True))
#AugmentationPipeline.add(show)
# Convert to grayscale
AugmentationPipeline.add(to_grayscale_image)
#AugmentationPipeline.add(show)
# Only enlarge the mask if the variable to prevent blurring is set
if PREVENT_BLURRED_OBJECTS:
AugmentationPipeline.add(w_enlarge_mask)
# Apply merging filter (poisson_merge)
AugmentationPipeline.add(partial(merge_with_bg_at_random_pos, bg_img_pool=bgs))
#AugmentationPipeline.add(show)
# Last augmentation step
AugmentationPipeline.add(augment_training_images, True)
# Convert to grayscale again
AugmentationPipeline.add(to_grayscale_image)
# If given, save image to folder
if dest_folder is not None:
dest_folder.mkdir(parents=True, exist_ok=True)
AugmentationPipeline.add(partial(save_message, dest_folder=dest_folder, save_mask=True))
|
from torchvision import transforms
from PIL import Image
def base_transform(img_size, mode):
assert mode in ['train', 'test']
if mode == 'train':
transform = transforms.Compose([
transforms.Resize((img_size,img_size), interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
transform = transforms.Compose([
transforms.Resize((img_size,img_size), interpolation=Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
return transform
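# Minimal usage sketch (hypothetical image path; the 224px size is a common
# ImageNet-style choice, assumed here for illustration):
# transform = base_transform(224, 'train')
# tensor = transform(Image.open('example.jpg').convert('RGB'))  # -> [3, 224, 224]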
|
# -*- coding: utf-8 -*-
#
# BitcoinLib - Python Cryptocurrency Library
# Chain.so client
# © 2017-2019 July - 1200 Web Development <http://1200wd.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
from datetime import datetime
from bitcoinlib.main import MAX_TRANSACTIONS
from bitcoinlib.services.baseclient import BaseClient, ClientError
from bitcoinlib.transactions import Transaction
_logger = logging.getLogger(__name__)
PROVIDERNAME = 'chainso'
class ChainSo(BaseClient):
def __init__(self, network, base_url, denominator, *args):
super(self.__class__, self).__init__(network, PROVIDERNAME, base_url, denominator, *args)
def compose_request(self, function, data='', parameter='', variables=None, method='get'):
url_path = function
url_path += '/' + self.provider_coin_id
if data:
url_path += '/' + data
if parameter:
url_path += '/' + parameter
if variables is None:
variables = {}
if self.api_key:
variables.update({'api_key': self.api_key})
return self.request(url_path, variables, method)
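        # Sketch of the resulting request (hypothetical values): with
        # provider_coin_id 'BTC', compose_request('get_tx', '<txid>') issues a
        # GET for '<base_url>get_tx/BTC/<txid>'.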
def sendrawtransaction(self, rawtx):
res = self.compose_request('send_tx', variables={'tx_hex': rawtx}, method='post')
return {
'txid': '' if 'data' not in res else res['data']['txid'],
'response_dict': res
}
def getbalance(self, addresslist):
balance = 0.0
for address in addresslist:
res = self.compose_request('get_address_balance', address)
balance += float(res['data']['confirmed_balance']) + float(res['data']['unconfirmed_balance'])
return int(balance * self.units)
def getutxos(self, address, after_txid='', limit=MAX_TRANSACTIONS):
txs = []
lasttx = after_txid
res = self.compose_request('get_tx_unspent', address, lasttx)
        if res['status'] != 'success':
            raise ClientError("ChainSo get_tx_unspent request unsuccessful, status: %s" % res['status'])
for tx in res['data']['txs'][:limit]:
txs.append({
'address': address,
'txid': tx['txid'],
'confirmations': tx['confirmations'],
'output_n': -1 if 'output_no' not in tx else tx['output_no'],
'input_n': -1 if 'input_no' not in tx else tx['input_no'],
'block_height': None,
'fee': None,
'size': 0,
'value': int(round(float(tx['value']) * self.units, 0)),
'script': tx['script_hex'],
'date': datetime.utcfromtimestamp(tx['time']),
})
if len(txs) >= 1000:
_logger.warning("ChainSo: transaction list has been truncated, and thus is incomplete")
return txs
def getrawtransaction(self, txid):
res = self.compose_request('get_tx', txid)
return res['data']['tx_hex']
def gettransaction(self, txid, block_height=None):
res = self.compose_request('get_tx', txid)
tx = res['data']
rawtx = tx['tx_hex']
t = Transaction.import_raw(rawtx, network=self.network)
input_total = 0
output_total = 0
if not t.coinbase:
for n, i in enumerate(t.inputs):
i.value = int(round(float(tx['inputs'][n]['value']) * self.units, 0))
input_total += i.value
for o in t.outputs:
o.spent = None
output_total += o.value
if not t.block_height and tx['confirmations']:
t.block_height = self.getblock(tx['blockhash'], False, 1, 1)['height']
t.block_hash = tx['blockhash']
t.rawtx = bytes.fromhex(rawtx)
t.size = tx['size']
t.network = self.network
t.locktime = tx['locktime']
t.input_total = input_total
t.output_total = output_total
t.fee = 0
if t.input_total:
t.fee = t.input_total - t.output_total
t.confirmations = tx['confirmations']
if tx['confirmations']:
t.status = 'confirmed'
t.date = datetime.utcfromtimestamp(tx['time'])
else:
t.status = 'unconfirmed'
t.date = None
return t
def gettransactions(self, address, after_txid='', limit=MAX_TRANSACTIONS):
txs = []
res1 = self.compose_request('get_tx_received', address, after_txid)
if res1['status'] != 'success':
raise ClientError("Chainso get_tx_received request unsuccessful, status: %s" % res1['status'])
res2 = self.compose_request('get_tx_spent', address, after_txid)
if res2['status'] != 'success':
raise ClientError("Chainso get_tx_spent request unsuccessful, status: %s" % res2['status'])
res = res1['data']['txs'] + res2['data']['txs']
res = sorted(res, key=lambda x: x['time'])
tx_conf = []
for t in res:
tt = (t['confirmations'], t['txid'])
if tt not in tx_conf:
tx_conf.append(tt)
for tx in tx_conf[:limit]:
t = self.gettransaction(tx[1])
txs.append(t)
return txs
def blockcount(self):
return self.compose_request('get_info')['data']['blocks']
def mempool(self, txid):
res = self.compose_request('is_tx_confirmed', txid)
if res['status'] == 'success' and res['data']['confirmations'] == 0:
return [txid]
return False
def getblock(self, blockid, parse_transactions, page, limit):
if limit > 5:
limit = 5
bd = self.compose_request('get_block', str(blockid))['data']
if parse_transactions:
txs = []
for txid in bd['txs'][(page-1)*limit:page*limit]:
# try:
txs.append(self.gettransaction(txid, block_height=bd['block_no']))
# except Exception as e:
# raise ClientError("Could not parse tx %s with error %s" % (txid, e))
else:
txs = bd['txs']
n_txs = len(bd['txs'])
block = {
'bits': None,
'depth': bd['confirmations'],
'block_hash': bd['blockhash'],
'height': bd['block_no'],
'merkle_root': bd['merkleroot'],
'nonce': None,
'prev_block': bd['previous_blockhash'],
'time': bd['time'],
'tx_count': n_txs,
'txs': txs,
'version': b'',
'page': page,
'pages': int(n_txs // limit) + (n_txs % limit > 0),
'limit': limit
}
return block
# def getrawblock(self, blockid):
# def isspent(self, txid, output_n):
def getinfo(self):
info = self.compose_request('get_info')['data']
return {
'blockcount': info['blocks'],
'chain': info['name'],
'difficulty': int(float(info['mining_difficulty'])),
'hashrate': int(float(info['hashrate'])),
'mempool_size': int(info['unconfirmed_txs']),
}
|
"""p2 core constants"""
ATTR_BLOB_STAT_MTIME = 'blob.p2.io/stat/mtime'
ATTR_BLOB_STAT_CTIME = 'blob.p2.io/stat/ctime'
ATTR_BLOB_HASH_MD5 = 'blob.p2.io/hash/md5'
ATTR_BLOB_HASH_SHA1 = 'blob.p2.io/hash/sha1'
ATTR_BLOB_HASH_SHA256 = 'blob.p2.io/hash/sha256'
ATTR_BLOB_HASH_SHA384 = 'blob.p2.io/hash/sha384'
ATTR_BLOB_HASH_SHA512 = 'blob.p2.io/hash/sha512'
ATTR_BLOB_SIZE_BYTES = 'blob.p2.io/size/bytes'
ATTR_BLOB_MIME = 'blob.p2.io/mime'
ATTR_BLOB_IS_TEXT = 'blob.p2.io/is_text'
ATTR_BLOB_IS_FOLDER = 'blob.p2.io/is_folder'
TAG_BLOB_HEADERS = 'blob.p2.io/headers'
|
# -*- coding: utf-8 -*-
"""
Kitconc examples
@author: jlopes@usp.br
"""
from kitconc.kit_corpus import Corpus
corpus = Corpus('kitconc-examples/workspace','ads','english')
collocates = corpus.collocates('experience',left_span=2,right_span=2,coll_pos='IN NN JJ VBN VBD',show_progress=True)
print(collocates.df.head(10))
collocates.save_excel(corpus.output_path + 'collocates.xlsx')
|
#!/usr/bin/env python3
try:
import sys, os, time, threading as t;
import re, datetime as dt, pickle;
import tkinter as tk;
from tkinter import ttk
from PIL import Image; from PIL import ImageTk;
from tkinter import messagebox;
from tkinter import filedialog;
except:
pass;
try:
import sys;
import re, datetime as dt, pickle;
import Tkinter as tk;
from Tkinter import ttk;
from PIL import Image; from PIL import ImageTk;
#from Tkinter import tkMessageBox as messagebox;
except:
pass;
class TradingPlan(tk.Tk):
def __init__(self, *arg, **kwargs):
tk.Tk.__init__(self, *arg, **kwargs);
#tk.Tk.iconbitmap(self, default = 'icon.ico')
global overwrite_var;
overwrite_var = True;
self.resizable(1, 1);
self.update();
self.geometry(self.geometry());
#self.event_generate('Ctrl + S', func = self.saveFile)
self.bind('<Control-W>', self.close);
self.bind('<Control-w>', self.close);
# undo_photo = ImageTk.PhotoImage(Image.open('Undo-50.png'));
# redo_photo = ImageTk.PhotoImage(Image.open('Redo-50.png'));
# copy_photo = ImageTk.PhotoImage(Image.open('Copy-50.png'));
# cut_photo = ImageTk.PhotoImage(Image.open('cut.ico'));
# paste_photo = ImageTk.PhotoImage(Image.open('Cut-50.png'));
self.menuBar = tk.Menu(self);
self.theme = tk.Menu(self, tearoff = False);
self.menu = tk.Menu(self.menuBar, tearoff = 0);
self.menu.add_cascade(label = 'Themes', menu = self.theme);
self.theme.add_radiobutton(label = 'Default', command = self.default_theme);
self.theme.add_radiobutton(label = 'Forex', command = self.forex_theme);
self.theme.add_radiobutton(label = 'Stock', command = self.stock_theme);
self.menu.add_command(label = 'New', command = self.new)
#self.theme.add_separator();
self.menu.add_command(label = 'Save', image = None, command = self.saveFile);
#self.theme.add_command(label = 'Save as', image = None, command = self.saveFile_as);
self.menu.add_command(label = 'Clear All', image = None, command = self.clear_all); #, accelerator = 'Ctrl + D'
self.menu.add_command(label = 'Exit', accelerator = 'Ctrl+W', image = None, command = self.close);
self.menu.add_separator();
self.menuBar.add_cascade(label = 'App', menu = self.menu);
self.editMenu = tk.Menu(self.menuBar, tearoff = 0);
self.editMenu.add_command(label = 'Undo', accelerator = 'Ctrl+Z', compound = tk.LEFT, image = None, command = self.undo);
self.editMenu.add_command(label = 'Redo', accelerator = 'Ctrl+Shift+Z', compound = tk.LEFT, image = None, command = self.redo);
self.editMenu.add_command(label = 'Copy', accelerator = 'Ctrl+C', compound = tk.LEFT, image = None, command = self.copy);
self.editMenu.add_command(label = 'Cut', accelerator = 'Ctrl+X', compound = tk.LEFT, image = None, command = self.cut);
self.editMenu.add_command(label = 'Paste', accelerator = 'Ctrl+V', compound = tk.LEFT, image = None, command = self.paste);
self.editMenu.add_command(label = 'Select All', accelerator = 'Ctrl+A', compound = tk.LEFT, image = None, command = self.select_all);
self.editMenu.add_command(label = 'Update', command = self.file_update);
# self.editMenu.undo_photo = undo_photo;
# self.editMenu.redo_photo = redo_photo;
# self.editMenu.copy_photo = copy_photo;
# self.editMenu.cut_photo = cut_photo; image = undo_photo, image = redo_photo,image = copy_photo,
self.editMenu.add_separator();
self.menuBar.add_cascade(label = 'Edit', menu = self.editMenu);
self.config(menu = self.menuBar);
self.help = tk.Menu(self.menuBar, tearoff = 0);
self.help.add_command(label = 'About', command = self.aboutApp);
self.menuBar.add_cascade(label = 'Help', menu = self.help);
self.container = ttk.Frame(self);
self.container.pack(side = 'top', fill = 'both', expand = 1);
self.container.grid_rowconfigure(0, weight = 1);
self.container.grid_columnconfigure(0, weight =1);
#self.container.bind('Button-3', self.pop);
self.frames = {};
for P in (Home_Page, App_Page):
self.frame = P(self.container, self);
self.frames[P] = self.frame;
self.frame.grid(row=0, column=0, sticky ='NSEW');
self.next_page(Home_Page);
self.after(2000, lambda:self.next_page(App_Page));
def close(self, event = None):
if messagebox.askokcancel("Exit", 'Are you sure?'):
self.quit();
def next_page(self, cont):
self.frame = self.frames[cont];
self.frame.lift();
def default_theme(self) :
page_label.config(bg = '#00DD88');
date_label.config(bg = 'Orange', font = ('courier', 14, 'italic'));
pair_label.config(bg = 'Orange', font = ('courier', 14, 'italic'));
strategy_label.config(bg = 'Orange', font = ('courier', 14, 'italic'));
notes_label.config(bg = 'Orange', font = ('courier', 14, 'italic'));
text.config( font = ('courier', 12), bg = 'white', fg = 'black');
pair_entry.config( font = ('courier', 12), bg = 'white', fg = 'black');
strategy_entry.config( font = ('courier', 12), bg = 'white', fg = 'black');
date_entry.config( font = ('courier', 12), bg = 'white', fg = 'black');
cont.config(bg = 'Orange')
        #The page title gets squashed when the theme is changed;
        #troubleshoot and fix.
def forex_theme(self):
page_label.config( bg = '#915C83', fg = '#000000', font = ('courier', 20, 'italic'));
date_label.config(bg = 'purple', font = ('courier', 14, 'bold', 'italic'));
pair_label.config(bg = 'purple', font = ('courier', 14, 'bold', 'italic'));
strategy_label.config(bg = 'purple', font = ('courier', 14, 'bold', 'italic'));
notes_label.config(bg = 'purple', font = ('courier', 14, 'bold', 'italic'));
text.config(font = ('helvatica',12, 'italic'), fg = '#339944', bg = 'black');
pair_entry.config(font = ('helvatica',12, 'italic'), fg = '#339944', bg = 'black');
strategy_entry.config(font = ('helvatica',12, 'italic'), fg = '#339944', bg = 'black');
date_entry.config(font = ('helvatica',12, 'italic'), fg = '#339944', bg = 'black');
cont.config(bg = 'purple');
def stock_theme(self):
page_label.config(bg = 'aqua');
date_label.config(bg = 'blue', font = ('courier', 14, 'bold', 'italic'));
pair_label.config(bg = 'blue', font = ('courier', 14, 'bold', 'italic'));
strategy_label.config(bg = 'blue', font = ('courier', 14, 'bold', 'italic'));
notes_label.config(bg = 'blue', font = ('courier', 14, 'bold', 'italic'));
text.config(font = ('times new roman',12, 'italic'), fg = '#33AAFF', bg = '#662200');
pair_entry.config(font = ('times new roman',12, 'italic'), fg = '#33AAFF', bg = '#662200');
strategy_entry.config(font = ('times new roman',12, 'italic'), fg = '#33AAFF', bg = '#662200');
date_entry.config(font = ('times new roman',12, 'italic'), fg = '#33AAFF', bg = '#662200');
cont.config(bg = 'blue');
global overwrite_var;
def saveFile(self):
'''Handles data Creation and Overwrite'''
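        # Storage scheme (as implemented below): entries are pickled to
        # Tjournal_data/pickle_data<n>.pickle, and num.pickle holds a
        # one-element list with the next entry index.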
global date_file;
global note_file;
global strategy_file;
global numStor;
global overwrite_var;
global num;
num = [1];
data_dict = {'Date':date_entryVar.get(), 'Pair':pair_entryVar.get(), 'Strategy':strategy_entryVar.get(), 'Notes':text.get(1.0, tk.END)};
#print(os.getcwd())
if os.path.exists('Tjournal_data') == False:
overwrite_var = False
os.makedirs('Tjournal_data');
os.chdir('Tjournal_data');
n = num[0]
n = str(n);
pickle_data = open('pickle_data'+n+'.pickle', 'wb');
pickle.dump(data_dict, pickle_data);
pickle_data.close(); n = int(n); n+=1;
num[0] = n
num_pickle = open('num.pickle', 'wb');
pickle.dump(num, num_pickle);
num_pickle.close();
os.chdir('..');
else:
#print(os.getcwd())
if overwrite_var == True:
overwrite_var = False;
os.chdir('Tjournal_data');
num_out = open('num.pickle', 'rb');
num_data = pickle.load(num_out);
n = num_data;
n = n[0];
n = str(n);
if os.path.exists('pickle_data'+n+'.pickle') == False:
pickle_data = open('pickle_data'+n+'.pickle', 'wb');
pickle.dump(data_dict, pickle_data);
pickle_data.close(); n = int(n); n+=1;
num[0] = n
num_pickle = open('num.pickle', 'wb');
pickle.dump(num, num_pickle);
num_pickle.close();
os.chdir('..');
else:
#print(os.getcwd())
n = num[0];
n = str(n);
os.chdir('Tjournal_data');
pickle_data = open('pickle_data'+n+'.pickle', 'wb');
pickle.dump(data_dict, pickle_data);
pickle_data.close();
os.chdir('..');
def file_update(self):
global List;
n = List[0];
n = str(n-1);
data_dict = {'Date':date_entryVar.get(), 'Pair':pair_entryVar.get(), 'Strategy':strategy_entryVar.get(), 'Notes':text.get(1.0, tk.END)};
if os.path.exists('Tjournal_data') == True:
os.chdir('Tjournal_data');
try:
if os.path.exists('pickle_data'+n+'.pickle') == True:
#print(n);
pickle_data = open('pickle_data'+n+'.pickle', 'wb');
pickle.dump(data_dict, pickle_data);
pickle_data.close();
os.chdir('..')
else:
os.chdir('..');
except:
pass;
def saveFile_as(self):
date_file = ('Date:'+date_entryVar.get()+'\n\n');
strategy_file = ('Strategy:'+strategy_entryVar.get()+'\n\n');
note_file = ('Notes:'+text.get(1.0, tk.END));
filename = filedialog.asksaveasfilename(defaultextension=".txt", parent = self, title = 'Save as');
if filename:
filename = open(filename, 'w');
filename.write(date_file);
filename.write(strategy_file);
filename.write(note_file);
filename.close()
def clear_all(self):
text.delete(1.0, tk.END);
date_entry.delete(0, tk.END);
pair_entry.delete(0, tk.END);
strategy_entry.delete(0, tk.END);
pair_entry.focus_set();
self.reset();
def new(self):
global overwrite_var;
overwrite_var = True;
text.delete(1.0, tk.END);
date_entry.delete(0, tk.END);
pair_entry.delete(0, tk.END);
strategy_entry.delete(0, tk.END);
pair_entry.focus_set();
self.reset();
def reset(self):
date = dt.datetime.now().strftime("%Y-%m-%d %H:%M");
date_entryVar.set(date);
def undo(self):
text.edit_undo();
def redo(self):
try:
text.edit_redo();
except:
pass;
def copy(self):
text.event_generate('<<Copy>>');
def cut(self):
text.event_generate('<<Cut>>');
def paste(self):
text.event_generate('<<Paste>>');
def select_all(self):
text.tag_add('sel', '1.0', 'end');
def aboutApp(self):
        messagebox.showinfo(title = 'About', message = '''This app is designed for traders/investors who want to improve by tracking their decision making on trades and investments. It is a digital journal that makes journaling a breeze, with features such as viewing previous journal entries, updating those entries and many more!''');
#text.bind('<Control-A>', select_all)
#text.bind('<Control-a>', select_all)
class Home_Page(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent);
self.config(bg = '#33AA66')
self.grid_columnconfigure(0, weight = 1);
self.grid_rowconfigure(0, weight = 1);
self.update();
# self.button = ttk.Button(self, text = 'Next', command = lambda: controller.next_page(App_Page));
# self.button.grid(row = 1, column = 0, pady = 2, padx = 2);
#self.showImg();
#self.after(10, self.animate);
# self.after(500, self.animate_1);
# self.after(1420, self.animate_2);
# self.after(3000, self.animate_3);
# self.after(4000, self.animate_4);
# self.after(6000, self.animate_5);
# self.after(8000, self.animate_6);
#self.after(2000, self.looper);
def showImg(self):
self.image = Image.open('fx.jpg');
self.photo = ImageTk.PhotoImage(self.image);
self.labelHome = tk.Label(self, image = self.photo, bg = '#33AA66');
self.labelHome.image = self.photo;
self.labelHome.grid(row = 0, column = 0, sticky = 'EW', pady = 2, padx = 2);
self.label = tk.Label(self, text = 'Trading Journal', fg = 'white', bg = '#33AA66', font = ('helvatica',30));
self.label.grid(row = 0, column = 0, sticky = 'NEW');
def animate(self):
self.image = Image.open('fx.jpg').resize((300,300)).rotate(0);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,300), anchor = 'sw', image = self.photo);
self.canvas.grid(row = 1, column = 0, sticky = "S");
self.label = tk.Label(self, text = 'Trading Journal', fg = 'white', bg = '#33AA66', font = ('helvatica',30));
self.label.grid(row = 0, column = 0, sticky = 'NEW');
#self.after(100, self.canvas.destroy);
def animate_1(self):
self.image = Image.open('fx.jpg').resize((150,150)).rotate(60);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,150), anchor = 'sw', image = self.photo);
self.canvas.grid();
self.after(300, self.canvas.destroy);
def animate_2(self):
self.image = Image.open('fx.jpg').resize((200,200)).rotate(90);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,200), anchor = 'sw', image = self.photo);
self.canvas.grid();
self.after(400, self.canvas.destroy);
def animate_3(self):
self.image = Image.open('fx.jpg').resize((250,250)).rotate(120);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,250), anchor = 'sw', image = self.photo);
self.canvas.grid();
self.after(500, self.canvas.destroy);
def animate_4(self):
self.image = Image.open('fx.jpg').resize((300,300)).rotate(150);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,300), anchor = 'sw', image = self.photo);
self.canvas.grid();
self.after(600, self.canvas.destroy);
def animate_5(self):
self.image = Image.open('fx.jpg').resize((330,330)).rotate(180);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,330), anchor = 'sw', image = self.photo);
self.canvas.grid();
self.after(700, self.canvas.destroy);
def animate_6(self):
self.image = Image.open('fx.jpg').resize((360,360)).rotate(210);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,360), anchor = 'sw', image = self.photo);
#self.after(800,self.canvas.destroy);
self.canvas.grid();
def looper(self):
for i in range(0,361):
self.image = Image.open('fx.jpg').resize((i,i)).rotate(i);
self.photo = ImageTk.PhotoImage(self.image);
self.canvas = tk.Canvas(self, bg = '#33AA66', width = self.image.size[0], height = self.image.size[1]);
self.canvas.create_image((0,i), anchor = 'sw', image = self.photo);
self.canvas.grid();
#self.after(10, self.canvas.destroy);
#self.after(200, self.canvas.destroy);
class App_Page(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent, bg = '#00DD88');
self.grid_rowconfigure(0, weight = 1);
self.grid_columnconfigure(1, weight = 1);
self.after(60000, self.update);
global cont; global page_label;
page_label = tk.Label(self, text = 'Dash Board', bg = '#00DD88', fg = '#000000', font = ('courier', 20, 'italic'));
page_label.pack(side = tk.TOP, fill ='x', expand = 0);
cont = tk.Frame(self, bg = 'Orange');
cont.pack(fill = tk.BOTH, expand = 0);
cont.grid_rowconfigure(0, weight = 1);
cont.grid_columnconfigure(1, weight = 1);
#self.after_idle(self.update_file);
#self.bind('<Control-D>', self.new);
#self.bind('<Control-d>', self.new);
#TEXT VARS & global VARS
global date_entryVar; global pair_entryVar; global strategy_entryVar;
global date_entry; global pair_entry; global strategy_entry;
global date_label; global pair_label; global strategy_label; global notes_label;
global date;
global text;
global overwrite_var;
date_entryVar = tk.StringVar();
pair_entryVar = tk.StringVar();
strategy_entryVar = tk.StringVar();
# #LABELS
date_label = tk.Label(cont, text = 'Date-Time', bg = 'Orange', fg = 'black', font = ('courier', 14, 'italic'));
date_label.grid(row = 0, column = 0,sticky = 'NW', padx = 2, pady = 4);
pair_label = tk.Label(cont, text = 'Pair/Ticker', bg = 'Orange', fg = 'black', font = ('courier', 14, 'italic'));
pair_label.grid(row = 1, column = 0, sticky = 'NW', padx = 2, pady = 4);
strategy_label = tk.Label(cont, text = 'Strategy', bg = 'Orange', fg = 'black', font = ('courier', 14, 'italic'));
strategy_label.grid(row = 2, column = 0, sticky = 'NW', padx = 2, pady = 4);
notes_label = tk.Label(cont, text = 'Notes', bg = 'Orange', fg = 'black', font = ('courier', 14, 'italic'));
notes_label.grid(row = 3, column = 0, sticky = 'NW', padx = 2, pady = 4);
date = dt.datetime.now().strftime("%Y-%m-%d %H:%M");
date_entry = tk.Entry(cont, textvariable = date_entryVar, relief = 'groove', font = ('courier', 12), justify = tk.LEFT);
date_entryVar.set(date);
date_entry.grid(row = 0, column = 1, sticky = 'nwe', pady = 4, padx = 2);
pair_entry = tk.Entry(cont, textvariable = pair_entryVar, relief = 'groove', font = ('courier', 12), justify = tk.LEFT);
pair_entry.grid(row = 1, column = 1, sticky = 'nwe', pady = 4, padx = 2);
pair_entry.focus_set();
strategy_entry = tk.Entry(cont, textvariable = strategy_entryVar, relief = 'groove', font = ('courier', 12), justify = tk.LEFT);
strategy_entry.grid(row = 2, column = 1, sticky = 'nwe', pady = 4, padx = 2);
#TEXTPAD
text = tk.Text(cont, wrap = tk.WORD, autoseparators = True, undo = True, bg = 'white', relief = 'groove', font = ('courier', 12));
text.grid(row = 3, column = 1, sticky = 'NWSE', padx = 2, pady = 4);
self.vscroll = ttk.Scrollbar(cont);
text.configure(yscrollcommand = self.vscroll.set);
self.vscroll.config(command = text.yview);
self.vscroll.grid(row = 3, column = 2, sticky = 'NS');
self.button_Two = ttk.Button(cont, text = 'Prev', command = self.prev);
self.button_Two.grid(row = 4, column = 1, sticky = 'sw', padx= 2, pady = 2);
self.button_Three = ttk.Button(cont, text = 'Next', command = self.next);
self.button_Three.grid(row = 4, column = 1, sticky = 'se', pady = 2, padx = 2);
## Risk Calculator Implementation
#Label widgets for Risk management
#Lot
self.lotVar = tk.StringVar();
self.fx_lot_label = tk.Label(cont, text = 'Lot Size', fg = 'Black', font = ('helvatica', 12, 'bold'));
self.fx_lot_label.grid(row = 5, sticky ='W', padx = 4, pady = 4);
self.fx_lot = tk.Entry(cont, fg = 'black', font = ('helvatica', 12, 'bold'));
self.fx_lot.grid(row = 5, column = 1, sticky = 'W', padx = 4, pady = 4);
#Entry
self.fx_entryVar = tk.StringVar();
self.fx_entry_label = tk.Label(cont, text = 'Entry', fg = 'aqua', font = ('helvatica', 12, 'bold'));
self.fx_entry_label.grid(row = 6, sticky ='W', padx = 4, pady = 4);
self.fx_entry = tk.Entry(cont, fg = 'black', font = ('helvatica', 12, 'bold'));
self.fx_entry.grid(row = 6, column = 1, sticky = 'W', padx = 4, pady = 4);
#Exit
self.fx_exitVar = tk.StringVar();
self.fx_exit_label = tk.Label(cont, text = 'Exit', fg = '#66FF00', font = ('helvatica', 12, 'bold'));
self.fx_exit_label.grid(row = 7, sticky ='W', padx = 4, pady = 4);
self.fx_exit = tk.Entry(cont, fg = 'black', font = ('helvatica', 12, 'bold'));
self.fx_exit.grid(row = 7, column = 1, sticky = 'W', padx = 4, pady = 4);
#Stop Loss
self.fx_SL_Var = tk.StringVar();
self.fx_SL_label = tk.Label(cont, text = 'Stop Loss', fg = '#E03C31', font = ('helvatica', 12, 'bold'));
self.fx_SL_label.grid(row = 8, sticky ='W', padx = 4, pady = 4);
self.fx_SL = tk.Entry(cont, fg = 'black', font = ('helvatica', 12, 'bold'));
self.fx_SL.grid(row = 8, column = 1, sticky = 'W', padx = 4, pady = 4);
#PIP Show
self.pipVar = tk.StringVar();
self.fx_pip_label = tk.Label(cont, text = 'Pips', textvariable = self.pipVar, fg = 'blue', bg = 'white', font = ('helvatica', 12, 'bold'));
self.fx_pip_label.grid(row = 9, column = 1, sticky = 'WE');
text.unbind_all('<Control-y>');
text.unbind_all('<Control-Y>');
#text.bind('<Ctrl + S>', self.saveFile);
text.unbind('Ctrl + Y');
text.unbind('Ctrl + y');
#text.bind('<Control-A>', select_all);
#text.bind('<Control-a>', select_all);
#self.text.bind('Button-3', self.pop);
# self.button_One = tk.Button(self, text = 'Back', command = lambda: controller.next_page(Risk_Page));
# self.button_One.grid(row = 8, column = 1, sticky = 's', padx = 2, pady = 2);
self.data_dict = {'Date':date_entryVar.get(), 'Pair':pair_entryVar.get(), 'Strategy':strategy_entryVar.get(), 'Notes':text.get(1.0, tk.END)};
#self.update();
#Not working
def update_file(self):
global data_dict;
if self.data_dict['Pair'] == pair_entryVar.get() or self.data_dict['Strategy'] == strategy_entryVar.get() or self.data_dict['Notes'] == text.get(1.0, tk.END):
self.saveFile();
#time.sleep(1)
#return self.update();
def saveFile(self):
'''Handles data Creation and Overwrite'''
global date_file;
global note_file;
global strategy_file;
global numStor;
global overwrite_var;
global num;
num = [1];
data_dict = {'Date':date_entryVar.get(), 'Pair':pair_entryVar.get(), 'Strategy':strategy_entryVar.get(), 'Notes':text.get(1.0, tk.END)};
#print(os.getcwd())
if os.path.exists('Tjournal_data') == False:
overwrite_var = False;
os.makedirs('Tjournal_data');
os.chdir('Tjournal_data');
n = num[0];
n = str(n);
pickle_data = open('pickle_data'+n+'.pickle', 'wb');
pickle.dump(data_dict, pickle_data);
pickle_data.close(); n = int(n); n+=1;
num[0] = n;
num_pickle = open('num.pickle', 'wb');
pickle.dump(num, num_pickle);
num_pickle.close();
os.chdir('..');
else:
#print(os.getcwd())
if overwrite_var == True:
overwrite_var = False;
os.chdir('Tjournal_data');
num_out = open('num.pickle', 'rb');
num_data = pickle.load(num_out);
n = num_data;
n = n[0];
n = str(n);
if os.path.exists('pickle_data'+n+'.pickle') == False:
pickle_data = open('pickle_data'+n+'.pickle', 'wb');
pickle.dump(data_dict, pickle_data);
pickle_data.close(); n = int(n); n+=1;
num[0] = n;
num_pickle = open('num.pickle', 'wb');
pickle.dump(num, num_pickle);
num_pickle.close();
os.chdir('..');
else:
#print(os.getcwd())
n = num[0];
n = str(n);
os.chdir('Tjournal_data');
pickle_data = open('pickle_data'+n+'.pickle', 'wb');
pickle.dump(data_dict, pickle_data);
pickle_data.close();
os.chdir('..');
global bool_off;
global n;
global x;
global List;
global pchang_bool;
pchang_bool = False
List = [1];
bool_off = True;
def prev(self):
global bool_off;
global x;
global n;
global List;
global pchang_bool;
pchang_bool = True;
try:
#print(os.getcwd())
if os.path.exists('Tjournal_data') == True:
os.chdir('Tjournal_data');
#print(os.getcwd())
if bool_off == True:
bool_off = False;
if os.path.exists('num.pickle') == True:
num = open('num.pickle', 'rb');
num = pickle.load(num);
n = num[0];
n = int(n);
if n > 1:
n -= 1;
n = str(n);
#print(n)
else:
n = str(n);
if os.path.exists('pickle_data'+n+'.pickle') == True:
data_dict = open('pickle_data'+n+'.pickle', 'rb');
data_dict = pickle.load(data_dict);
date_entryVar.set(data_dict['Date']);
pair_entryVar.set(data_dict['Pair']);
strategy_entryVar.set(data_dict['Strategy']);
text.delete(1.0, tk.END);
text.insert(1.0, data_dict['Notes']);
List[0] = int(n)+1;
#print('list num from prev:',List[0])
#print('num from prev', n)
os.chdir('..');
x = str(n);
#print(x)
else:
os.chdir('..');
else:
pass;
except:
pass;
def next(self):
global n;
global x;
global List;
global pchang_bool;
x = List[0];
#print(x);
if pchang_bool == True:
try:
#print(os.getcwd())
if os.path.exists('Tjournal_data') == True:
#print(os.getcwd())
os.chdir('Tjournal_data');
if os.path.exists('num.pickle') == True:
num = open('num.pickle', 'rb');
num = pickle.load(num);
num = num[0];
if int(x) == int(num):
self.clear_all();
elif int(x) < int(num):
#print(num)
x = str(x);
#print('Looping num:',x)
if os.path.exists('pickle_data'+x+'.pickle') == True:
#print('Pickle num:',num)
data_dict = open('pickle_data'+x+'.pickle', 'rb');
data_dict = pickle.load(data_dict);
date_entryVar.set(data_dict['Date']);
pair_entryVar.set(data_dict['Pair']);
strategy_entryVar.set(data_dict['Strategy']);
text.delete(1.0, tk.END);
text.insert(1.0, data_dict['Notes']);
x = int(x); n = x; x+=1;
if x > num:
pass;
else:
List[0] = x
#print('list num from next:',List[0])
#print('num from next', num)
os.chdir('..');
return n;
else:
os.chdir('..');
else:
os.chdir('..');
except:
pass;
else:
pass;
def select_all(self):
text.tag_add('sel', '1.0', 'end');
# self.popup = tk.Menu(text);
# for i in ('copy', 'cut', 'paste', 'redo', 'undo'):
# self.cmd = eval(i);
# self.popup.add_command(label = i, compound = tk.LEFT, command = self.cmd);
# self.popup.add_separator();
def clear_all(self):
text.delete(1.0, tk.END);
date_entry.delete(0, tk.END);
pair_entry.delete(0, tk.END);
strategy_entry.delete(0, tk.END);
self.reset();
os.chdir('..')
def reset(self):
date = dt.datetime.now().strftime("%Y-%m-%d %H:%M");
date_entryVar.set(date);
def pop(self, event):
        self.popup.tk_popup(event.x_root, event.y_root, 0);
def undo(self):
text.edit_undo();
def redo(self):
try:
text.edit_redo();
except:
pass;
def onEnterPressed(self, event):
self.entryValue = self.entryVar.get();
self.boxSet();
def onDoubleClicked(self, event):
self.entry.selection_range(0, tk.END);
def onRightClicked(self):
pass;
# class Risk_Page(tk.Toplevel):
# """Window for risk management """
# def __init__(self, parent, controller):
# tk.Toplevel.__init__(self, parent)
# self.label = tk.Label(self, text = 'Risk Page');
# self.label.pack();
if __name__ == '__main__':
App = TradingPlan();
#cut_photo = ImageTk.PhotoImage(Image.open('cut.ico'));
#App.wm_iconbitmap('cut_photo');
App.title('Trading Journal');
App.geometry('450x900');
App.mainloop();
def main():
pass;
|
import pygame
import os
import random
width = 1000
height = 800
centre_x = width/2
centre_y = height/2
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
# for writing text on the screen
font_name = pygame.font.match_font('arial')
def draw_text(surf, text, size, x, y, color = black):
font = pygame.font.Font(font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
surf.blit(text_surface, text_rect)
#Fonts
smallfont=pygame.font.SysFont("comicsansms",25)
medfont=pygame.font.SysFont("comicsansms",50)
largefont=pygame.font.SysFont("comicsansms",80)
#Font Formatting Function
def text_objects(text, color, size):
if size == "small":
textSurface = smallfont.render(text, True, color)
if size == "medium":
textSurface = medfont.render(text, True, color)
if size == "large":
textSurface = largefont.render(text, True, color)
return textSurface, textSurface.get_rect()
#Shot (Tir) function
def Tir(force, précision, direction):
    # Directions appear to be encoded 1-6: 1-3 aim at the top of the goal
    # (sy = -10) and 4-6 at the bottom (sy = -5); inferred from the branches below.
    sx = 0
    sy = 0
    Bonne_direction = 1 * précision
    Moyenne_direction = (3 - précision) * précision
    Mauvaise_direction = (3 - précision) * 0.5
    # random.choices returns a list, so unpack the single drawn element
    Tir_précision = random.choices([3, 2, 1], [Bonne_direction, Moyenne_direction, Mauvaise_direction])[0]
    if Tir_précision in (3, 2):  # both branches were identical in the original
        if 1 <= direction <= 3:
            sy = -10
        if 4 <= direction <= 6:
            sy = -5
        if direction in (1, 3, 4, 6):
            sx = -10
        if direction in (2, 5):
            sx = 0
    if Tir_précision == 1:
        # a missed shot flies off in a random direction
        sy = random.choice([-10, -5, 5, 10])
        sx = random.choice([-10, -5, 5, 10])
    ballon.speedy = sy * force
    ballon.speedx = sx * force
    return ballon.speedy, ballon.speedx
# print("bola")  # stray debug output
def sauter(direction, force, Tir_précision):
    saut = 0
    st = 0  # guard so the comparison below cannot hit an undefined name
if Tir_précision == [1]:
if force == 1 or force == 2:
st = "Immobile"
if force == 3:
st = (random.choices(["Mauvais", "Immobile"], [0.5, 0.5]))
if Tir_précision == [2]:
if force == 1:
st = "Bon"
if force == 2:
st = (random.choices(["Mauvais", "Bon"], [0.25, 0.75]))
if force == 3:
st = (random.choices(["Mauvais", "Bon"], [0.75, 0.25]))
if Tir_précision == [3]:
if force == 1:
st = (random.choices(["Mauvais", "Bon"], [0.75, 0.25]))
if force == 2:
st = (random.choices(["Mauvais", "Bon"], [0.8, 0.2]))
if force == 3:
st = "Mauvais"
if st != 0:
if st == "Immobile":
saut = 0
if st == "Mauvais":
if direction == 1:
saut = random.choice([2, 3, 4, 5, 6])
if direction == 2:
saut = random.choice([1, 3, 4, 5, 6])
if direction == 3:
saut = random.choice([1, 2, 4, 5, 6])
if direction == 4:
saut = random.choice([1, 2, 3, 5, 6])
if direction == 5:
saut = random.choice([1, 2, 3, 4, 6])
if direction == 6:
saut = random.choice([1, 2, 3, 4, 5])
if st == "Bon":
saut = direction
return saut
def Saut(Tir_précision, force, st, gardienstop, direction, playerrun, t3):
if Tir_précision == [1]:
if force == 1 or force == 2:
st = ["Immobile"]
if force == 3:
st = (random.choices(["PeuImporte", "Immobile"],
[0.5, 0.5])) # ( ON PEUT REMPLACER "BON" PAR "MAUVAIS" ÇA NE CHANGERA RIEN )
if Tir_précision == [2]:
if force == 1:
st = ["Bon"]
if force == 2:
st = (random.choices(["Mauvais", "Bon"], [0.25, 0.75]))
if force == 3:
st = (random.choices(["Mauvais", "Bon"], [0.65, 0.35]))
if Tir_précision == [3]:
if force == 1:
st = (random.choices(["Mauvais", "Bon"], [0.60, 0.40]))
if force == 2:
st = (random.choices(["Mauvais", "Bon"], [0.8, 0.2]))
if force == 3:
st = ["Mauvais"]
if st != 0:
if st == ['Immobile']:
saut = 0
gardienstop = True
elif st == ['Mauvais'] or st == ["PeuImporte"]:
if direction == 1:
saut = random.choice([2, 3, 4, 5, 6])
elif direction == 2:
saut = random.choice([1, 3, 4, 5, 6])
elif direction == 3:
saut = random.choice([1, 2, 4, 5, 6])
elif direction == 4:
saut = random.choice([1, 2, 3, 5, 6])
elif direction == 5:
saut = random.choice([1, 2, 3, 4, 6])
elif direction == 6:
saut = random.choice([1, 2, 3, 4, 5])
elif st == ['Bon']:
saut = direction
playerrun = True
t3 = False
|
import os
import re
import json
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models.signals import post_save
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy, ugettext as _
from odk_logger.xform_instance_parser import XLSFormError
from utils.stathat_api import stathat_count
from stats.tasks import stat_log
from hashlib import md5
def upload_to(instance, filename):
return os.path.join(
instance.user.username,
'xls',
os.path.split(filename)[1])
class DuplicateUUIDError(Exception):
pass
class XForm(models.Model):
CLONED_SUFFIX = '_cloned'
xls = models.FileField(upload_to=upload_to, null=True)
json = models.TextField(default=u'')
description = models.TextField(default=u'', null=True)
xml = models.TextField()
user = models.ForeignKey(User, related_name='xforms', null=True)
shared = models.BooleanField(default=False)
shared_data = models.BooleanField(default=False)
downloadable = models.BooleanField(default=True)
is_crowd_form = models.BooleanField(default=False)
allows_sms = models.BooleanField(default=False)
encrypted = models.BooleanField(default=False)
# the following fields are filled in automatically
sms_id_string = models.SlugField(
editable=False,
verbose_name=ugettext_lazy("SMS ID"),
default=''
)
id_string = models.SlugField(
editable=False, verbose_name=ugettext_lazy("ID")
)
title = models.CharField(editable=False, max_length=64)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
has_start_time = models.BooleanField(default=False)
uuid = models.CharField(max_length=32, default=u'')
uuid_regex = re.compile(r'(<instance>.*?id="[^"]+">)(.*</instance>)(.*)',
re.DOTALL)
instance_id_regex = re.compile(r'<instance>.*?id="([^"]+)".*</instance>',
re.DOTALL)
uuid_node_location = 2
uuid_bind_location = 4
bamboo_dataset = models.CharField(max_length=60, default=u'')
class Meta:
app_label = 'odk_logger'
unique_together = (("user", "id_string"), ("user", "sms_id_string"))
verbose_name = ugettext_lazy("XForm")
verbose_name_plural = ugettext_lazy("XForms")
ordering = ("id_string",)
permissions = (
("view_xform", _("Can view associated data")),
)
def file_name(self):
return self.id_string + ".xml"
def url(self):
return reverse(
"download_xform",
kwargs={
"username": self.user.username,
"id_string": self.id_string
}
)
def data_dictionary(self):
from odk_viewer.models import DataDictionary
return DataDictionary.objects.get(pk=self.pk)
@property
def has_surveys_with_geopoints(self):
from odk_viewer.models import ParsedInstance
return ParsedInstance.objects.filter(
instance__xform=self, lat__isnull=False).count() > 0
def _set_id_string(self):
matches = self.instance_id_regex.findall(self.xml)
if len(matches) != 1:
raise XLSFormError(_("There should be a single id string."))
self.id_string = matches[0]
def _set_title(self):
text = re.sub(r"\s+", " ", self.xml)
matches = re.findall(r"<h:title>([^<]+)</h:title>", text)
if len(matches) != 1:
raise XLSFormError(_("There should be a single title."), matches)
self.title = u"" if not matches else matches[0]
def _set_encrypted_field(self):
if self.json and self.json != '':
json_dict = json.loads(self.json)
if 'submission_url' in json_dict and 'public_key' in json_dict:
self.encrypted = True
else:
self.encrypted = False
def update(self, *args, **kwargs):
super(XForm, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
self._set_title()
old_id_string = self.id_string
self._set_id_string()
self._set_encrypted_field()
        # check if we have an existing id_string;
        # if so, the new one must match the old one, but only if the xform is NOT new
if self.pk and old_id_string and old_id_string != self.id_string:
raise XLSFormError(
_(u"Your updated form's id_string '%(new_id)s' must match "
"the existing forms' id_string '%(old_id)s'." %
{'new_id': self.id_string, 'old_id': old_id_string}))
if getattr(settings, 'STRICT', True) and \
not re.search(r"^[\w-]+$", self.id_string):
raise XLSFormError(_(u'In strict mode, the XForm ID must be a '
'valid slug and contain no spaces.'))
if not self.sms_id_string:
try:
                # try to guess the form's wanted sms_id_string
                # from its JSON representation (from the XLSForm);
                # otherwise, fall back to id_string to ensure uniqueness
self.sms_id_string = json.loads(self.json).get('sms_keyword',
self.id_string)
except:
self.sms_id_string = self.id_string
super(XForm, self).save(*args, **kwargs)
def __unicode__(self):
return getattr(self, "id_string", "")
def submission_count(self):
return self.surveys.filter(is_deleted=False).count()
submission_count.short_description = ugettext_lazy("Submission Count")
def geocoded_submission_count(self):
from odk_viewer.models import ParsedInstance
return ParsedInstance.objects.filter(instance__in=self.surveys.filter(is_deleted=False), lat__isnull=False).count()
geocoded_submission_count.short_description = ugettext_lazy("Geocoded Submission Count")
def time_of_last_submission(self):
try:
return self.surveys.\
filter(is_deleted=False).latest("date_created").date_created
except ObjectDoesNotExist:
pass
def time_of_last_submission_update(self):
try:
# we also consider deleted surveys in this case
return self.surveys.latest("date_modified").date_modified
except ObjectDoesNotExist:
pass
@property
def hash(self):
return u'%s' % md5(self.xml.encode('utf8')).hexdigest()
@property
def can_be_replaced(self):
if hasattr(self.submission_count, '__call__'):
num_submissions = self.submission_count()
else:
num_submissions = self.submission_count
return num_submissions == 0
def stats_forms_created(sender, instance, created, **kwargs):
if created:
stathat_count('formhub-forms-created')
stat_log.delay('formhub-forms-created', 1)
post_save.connect(stats_forms_created, sender=XForm)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 19:09:31 2018
@author: joshu
"""
def add_numbers(num1, num2):
return num1+num2
def assign_name():
name = 'Doug'
gbl_name = 'Sally'
def change_name():
global gbl_name
gbl_name = 'Sammy'
change_name()
print(gbl_name)
def get_sum(num1, num2):
    return num1 + num2
|
import os
from dotenv import load_dotenv
from fabric import Connection
from invoke.exceptions import UnexpectedExit
load_dotenv("infra.env")
HOST = os.environ.get('HOST')
REMOTE_USER = os.environ.get('REMOTE_USER')
KEYFILE = os.environ.get('KEYFILE')
VERSION = os.environ.get('UBUNTU_VER')
PACKAGES = os.environ.get('PACKAGES').split(' ')
# Connect to the remote server
c = Connection(HOST, user=REMOTE_USER, connect_kwargs={"key_filename": KEYFILE})
def upgrade_ubuntu():
"""
Upgrade the Ubuntu OS
"""
c.sudo("sed -i 's/Prompt=lts/Prompt=normal/g' /etc/update-manager/release-upgrades")
c.sudo('apt update')
c.sudo('apt upgrade -y')
c.sudo('apt dist-upgrade -y')
c.sudo('apt autoremove -y')
def install_package(package):
"""
Install the given package
"""
c.sudo(f'apt install -y {package}')
def ubuntu_version_is(version):
"""
    Check if the Ubuntu version matches the given version
"""
try:
c.sudo(f'lsb_release -sr | grep -c {version}')
except UnexpectedExit:
return False
return True
def package_installed(package):
"""
Check if the given package is installed
"""
try:
c.sudo(f'apt list --installed | grep -c {package}')
except UnexpectedExit:
return False
return True
if __name__ == "__main__":
if not ubuntu_version_is(VERSION):
upgrade_ubuntu()
# Install the packages
for package in PACKAGES:
install_package(package)
|
import re
import random
from src.utilities import *
from src import users, channels, debuglog, errlog, plog
from src.functions import get_players, get_all_players
from src.decorators import cmd, event_listener
from src.containers import UserList, UserSet, UserDict, DefaultUserDict
from src.messages import messages
from src.events import Event
# Generated message keys used in this file:
# mystic_villagers, mystic_wolves, mystic_neutrals, mystic_win_stealers,
# mystic_night_num, mystic_day_num, mystic_info,
# mystic_simple, mystic_notify, wolf_mystic_simple, wolf_mystic_notify
def setup_variables(rolename, *, send_role, types):
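    # Shared factory for the mystic-style roles: wires up the night-end count,
    # role exchange, reset and myrole handlers for the given role name, and
    # returns the LAST_COUNT mapping it maintains.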
LAST_COUNT = UserDict() # type: Dict[users.User, Tuple[str, bool]]
role = rolename.replace(" ", "_")
@event_listener("transition_night_end")
def on_transition_night_end(evt, var):
villagers = set(get_players(("doctor",)))
win_stealers = set(get_players(("fool", "monster", "demoniac")))
neutrals = set(get_players(("jester",)))
special_evt = Event("get_special", {"villagers": villagers, "wolves": set(), "win_stealers": win_stealers, "neutrals": neutrals})
special_evt.dispatch(var)
bold = "\u0002{0}\u0002".format
targets = set()
values = []
plural = True
for name in types:
targets.update(special_evt.data[name])
l = len(special_evt.data[name])
if l:
if not values and l == 1:
plural = False
values.append("{0} {1}{2}".format(bold(l), messages["mystic_{0}".format(name)], "" if l == 1 else "s"))
if len(values) > 2:
value = " and ".join((", ".join(values[:-1]), values[-1]))
else:
value = " and ".join(values)
msg = messages["mystic_info"].format("are" if plural else "is", value, " still", "")
for mystic in get_all_players((rolename,)):
LAST_COUNT[mystic] = (value, plural)
if send_role:
to_send = "{0}_{1}".format(role, ("simple" if mystic.prefers_simple() else "notify"))
mystic.send(messages[to_send])
mystic.send(msg)
@event_listener("exchange_roles")
def on_exchange_roles(evt, var, actor, target, actor_role, target_role):
if actor_role == rolename and target_role != rolename:
value, plural = LAST_COUNT.pop(actor)
LAST_COUNT[target] = (value, plural)
key = "were" if plural else "was"
msg = messages["mystic_info"].format(key, value, "", messages["mystic_{0}_num".format(var.PHASE)])
evt.data["target_messages"].append(msg)
if target_role == rolename and actor_role != rolename:
value, plural = LAST_COUNT.pop(target)
LAST_COUNT[actor] = (value, plural)
key = "were" if plural else "was"
msg = messages["mystic_info"].format(key, value, "", messages["mystic_{0}_num".format(var.PHASE)])
evt.data["actor_messages"].append(msg)
@event_listener("reset")
def on_reset(evt, var):
LAST_COUNT.clear()
@event_listener("myrole")
def on_myrole(evt, var, user):
if user in get_all_players((rolename,)):
value, plural = LAST_COUNT[user]
key = "were" if plural else "was"
msg = messages["mystic_info"].format(key, value, "", messages["mystic_{0}_num".format(var.PHASE)])
evt.data["messages"].append(mag)
return LAST_COUNT
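# Usage sketch (illustrative; the concrete role modules live elsewhere):
# a mystic-style role would be wired up with something like
#
#   LAST_COUNT = setup_variables("mystic", send_role=True, types=("wolves", "win_stealers"))
#
# where types selects which of the counted groups (villagers, wolves,
# win_stealers, neutrals) this role is told about each night.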
# vim: set sw=4 expandtab:
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import torch
import random
def parse_cfg(cfg_file):
    '''
    Read and parse a config file.
    Input:  path to the config file
    Output: the parsed result, a list in which every element is a dict
    '''
lines = []
with open(cfg_file, 'r') as f:
for line in f.readlines():
if len(line) > 1 and line[0] != '#':
lines.append(line.strip())
block = {}
blocks = []
for line in lines:
if line[0] == '[':
if len(block) != 0:
blocks.append(block)
block = {}
block['type'] = line[1:-1]
else:
key, value = line.split('=')
block[key.strip()] = value.strip()
blocks.append(block)
return blocks
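# Example of the cfg format parse_cfg understands (a made-up minimal snippet):
#
#   [convolutional]
#   filters=32
#   size=3
#
#   [shortcut]
#   from=-3
#
# would parse to:
#   [{'type': 'convolutional', 'filters': '32', 'size': '3'},
#    {'type': 'shortcut', 'from': '-3'}]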
def predict_transform(prediction, dim, anchors, num_classes):
    '''
    Input:  prediction - [batch_size, feature-map channels, feature-map height, feature-map width],
                e.g. [1, 255, 13, 13]; here 255 = 85x3 because every point on the
                feature map carries three detection boxes of different sizes, and
                a 13x13 map roughly amounts to splitting the image into 169 squares.
            dim - size of the training images; YOLO v3 usually fixes this at 416
            anchors - e.g. [(10, 13), (16, 30), (33, 23)]; these sizes are given
                relative to dim x dim
            num_classes - number of object classes
    Output: prediction - [batch_size, number of detection boxes, 85]
    '''
batch_size = prediction.size(0)
stride = dim // prediction.size(2)
    # grid_size is effectively the same as prediction.size(2) here; this extra
    # step mainly guards against sizes that do not divide evenly
grid_size = dim // stride
bbox_attrs = 5 + num_classes
num_anchors = len(anchors)
prediction = prediction.view(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
prediction = prediction.transpose(1,2).contiguous()
prediction = prediction.view(batch_size, grid_size*grid_size*num_anchors, bbox_attrs)
    # anchors are sized for the original image, so divide by stride to get their size on the feature map
anchors = [(a[0] / stride, a[1] / stride) for a in anchors]
prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])
prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])
prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])
    # add the offset to bx, by; the offset is the top-left coordinate of each grid cell
grid = np.arange(grid_size)
a,b = np.meshgrid(grid, grid)
x_offset = torch.FloatTensor(a).view(-1,1)
y_offset = torch.FloatTensor(b).view(-1,1)
x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1, num_anchors).view(-1, 2).unsqueeze(0)
prediction[:,:,:2] += x_y_offset
anchors = torch.FloatTensor(anchors)
anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4]) * anchors
prediction[:,:,5: 5+num_classes] = torch.sigmoid((prediction[:,:, 5 : 5+num_classes]))
    # finally scale everything back to the original image size
prediction[:,:,:4] *= stride
return prediction
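# Shape check (illustrative; the anchor sizes below are the usual YOLOv3
# large-scale anchors, given in original-image pixels as assumed above):
#
#   >>> pred = torch.randn(1, 255, 13, 13)
#   >>> out = predict_transform(pred, 416, [(116, 90), (156, 198), (373, 326)], 80)
#   >>> out.shape
#   torch.Size([1, 507, 85])   # 13 * 13 * 3 candidate boxes, 5 + 80 attributes each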
def unique(tensor):
tensor_np = tensor.cpu().numpy()
unique_np = np.unique(tensor_np)
unique_tensor = torch.from_numpy(unique_np)
return unique_tensor
def bbox_iou(box1, boxes2):
"""
返回 box1 检测框 和 boxes2 检测框组的交并比
"""
#Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:,0], box1[:,1], box1[:,2], box1[:,3]
b2_x1, b2_y1, b2_x2, b2_y2 = boxes2[:,0], boxes2[:,1], boxes2[:,2], boxes2[:,3]
#get the corrdinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1, b2_x1)
inter_rect_y1 = torch.max(b1_y1, b2_y1)
inter_rect_x2 = torch.min(b1_x2, b2_x2)
inter_rect_y2 = torch.min(b1_y2, b2_y2)
#Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)
#Union Area
b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
iou = inter_area / (b1_area + b2_area - inter_area)
return iou
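# Note: the "+ 1" terms treat coordinates as inclusive pixel indices, so a
# degenerate box still has area 1. A quick sanity check (illustrative):
#
#   >>> a = torch.tensor([[0., 0., 9., 9.]])
#   >>> b = torch.tensor([[0., 0., 9., 9.], [5., 5., 14., 14.]])
#   >>> bbox_iou(a, b)
#   tensor([1.0000, 0.1429])   # 25 / (100 + 100 - 25)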
def non_maximum_suppression(prediction, confidence, num_classes, nms_conf = 0.4):
    '''
    Non-maximum suppression.
    Input:  prediction - all predicted detection boxes, [batch_size, 10647, 85]
            confidence - objectness threshold for deciding whether a box
                         contains an object
            num_classes - total number of classes (80 here)
            nms_conf - IoU threshold above which two boxes count as duplicates
    Output: output - the confirmed detections (or 0 if there are none), one row
            of 8 values per box: the index of the image within the batch, then
            4 values for the box position, 1 for the objectness confidence,
            1 for the probability of the most likely class, and 1 for the index
            of that class.
    '''
conf_mask = (prediction[:,:,4] > confidence).float().unsqueeze(2)
prediction = prediction * conf_mask
box_corner = prediction.new(prediction.shape)
box_corner[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2)
box_corner[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2)
box_corner[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2)
box_corner[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2)
prediction[:,:,:4] = box_corner[:,:,:4]
batch_size = prediction.size(0)
write = False
output = 0
for ind in range(batch_size):
image_pred = prediction[ind]
max_conf, max_conf_score = torch.max(image_pred[:, 5:5+num_classes], 1)
max_conf = max_conf.float().unsqueeze(1)
max_conf_score = max_conf_score.float().unsqueeze(1)
seq = (image_pred[:, :5], max_conf, max_conf_score)
image_pred = torch.cat(seq, 1)
non_zero_ind = torch.nonzero(image_pred[:,4])
if len(non_zero_ind) == 0:
continue
else:
image_pred_ = image_pred[non_zero_ind.squeeze(), :].view(-1,7)
img_classes = unique(image_pred_[:,-1])
for cls in img_classes:
                # find all detections that belong to this class
class_mask_ind = (image_pred_[:,-1] == cls).nonzero().squeeze()
                image_pred_class = image_pred_[class_mask_ind].view(-1,7)
                # sort by objectness confidence in descending order
conf_sort_index = torch.sort(image_pred_class[:,4], descending = True)[1]
                # apply that ordering to the detections
image_pred_class = image_pred_class[conf_sort_index]
                # total number of detected boxes
idx = image_pred_class.size(0)
for i in range(idx):
                    # original implementation below; I prefer an explicit if over using try/except for control flow:
# try:
# ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])
# except ValueError:
# break
# except IndexError:
# break
                    # compute the IoU of box i with every box that comes after it
if i+1 > image_pred_class.size(0):
break
else:
ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i+1:])
                    # zero out boxes whose IoU with box i exceeds the threshold
iou_mask = (ious < nms_conf).float().unsqueeze(1)
image_pred_class[i+1:] *= iou_mask
non_zero_ind = torch.nonzero(image_pred_class[:,4]).squeeze()
image_pred_class = image_pred_class[non_zero_ind].view(-1,7)
                # build a tensor of the same length holding this image's index in the batch
batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)
                # pack into a tuple
seq = batch_ind, image_pred_class
if not write:
                    # concatenate: each detection now has 8 values, the batch index followed by the original 7
output = torch.cat(seq,1)
write = True
else:
out = torch.cat(seq,1)
output = torch.cat((output,out))
return output
def letterbox_image(img, out_dim):
    '''
    Resize the input image img to (out_dim, out_dim).
    Unlike a plain resize, this keeps the original aspect ratio and fills the
    leftover area with gray.
    '''
img_w, img_h = img.shape[1], img.shape[0]
w, h = out_dim
new_w = int(img_w * min(w/img_w, h/img_h))
new_h = int(img_h * min(w/img_w, h/img_h))
resized_image = cv2.resize(img, (new_w, new_h), interpolation = cv2.INTER_CUBIC)
canvas = np.full((out_dim[1], out_dim[0], 3), 128)
canvas[(h-new_h)//2:(h-new_h)//2 + new_h, (w-new_w)//2:(w-new_w)//2 + new_w, :] = resized_image
return canvas
def prep_image(img, input_dim):
    '''
    Convert an input image into the format the network expects: cv2 -> tensor.
    cv2 stores images in BGR order, so first convert to RGB, then squeeze the
    values into the 0-1 range, and finally turn the array into a tensor.
    '''
# img = cv2.resize(img, (input_dim, input_dim))
img = letterbox_image(img, (input_dim, input_dim))
img = img[:,:,::-1].transpose((2,0,1)).copy()
img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
return img
def load_classes(namesfile):
    with open(namesfile, "r") as fp:
        names = fp.read().split("\n")[:-1]
    return names
def draw_bounding_box(x, results, colors, classes):
'''
    Draw detection boxes on the given image x according to results, look up each
    box's class name via classes, and pick the box color at random from colors.
'''
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
img = results[int(x[0])]
cls = int(x[-1])
color = random.choice(colors)
label = "{0}".format(classes[cls])
cv2.rectangle(img, c1, c2,color, 1)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1 , 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2,color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 1, [225,255,255], 1)
return img
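# Putting the helpers together -- an illustrative inference pass (the Darknet
# model itself lives elsewhere, and the paths are hypothetical):
#
#   blocks  = parse_cfg("cfg/yolov3.cfg")
#   classes = load_classes("data/coco.names")
#   tensor  = prep_image(cv2.imread("dog.jpg"), 416)
#   ... run the network, collecting predict_transform() output per scale ...
#   dets    = non_maximum_suppression(prediction, 0.5, 80, nms_conf=0.4)
#   for det in dets:
#       draw_bounding_box(det, [img], colors, classes)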
|
# -*- coding: UTF-8 -*-
from collections import OrderedDict
import cv2
import numpy as np
import dlib
FACIAL_LANDMARKS_68_IDXS = OrderedDict([
("mouth", (48, 68)),
("inner_mouth", (60, 68)),
("right_eyebrow", (17, 22)),
("left_eyebrow", (22, 27)),
("right_eye", (36, 42)),
("left_eye", (42, 48)),
("nose", (27, 36)),
("jaw", (0, 17)),
("inner_mouth_right_upper", (60, 63)),
])
def circle_landmarks(img, landmarks, color=(0,0,255), thickness=2):
if landmarks is None:
return
if isinstance(landmarks, np.ndarray):
for (x, y) in landmarks:
cv2.circle(img, (x, y), 1, color, thickness=thickness)
class FaceDetector:
def __init__(self, predictor):
'''
        Initialize a FaceDetector.
Args:
predictor: a dlib shape predictor
'''
self.predictor = predictor
def _get_biggest_face(self, faces):
maxarea = 0
face = None
for f in faces:
if f.area() > maxarea:
maxarea = f.area()
face = f
return face
def get_landmarks(self, img):
'''
Get 68 facial landmarks from a gray scale image.
If there are multiple faces detected, return the landmarks
of the biggest face.
Args:
img: A gray scale image
Returns:
A (68,2) np array of all landmarks, each row contains the x, and y
coordinates of a landmark.
'''
detector = dlib.get_frontal_face_detector()
faces = detector(img)
face = self._get_biggest_face(faces)
if not face:
return None
shape = self.predictor(img, face)
landmarks = [(shape.part(i).x, shape.part(i).y) for i in range(68)]
return np.asarray(landmarks)
class FaceAligner:
def __init__(self, desiredLeftEye=(0.35, 0.35),
desiredWidth=256, desiredHeight=256):
'''
Initialize a FaceAligner.
Args:
desiredLeftEye: the desired location of the center of the left eye,
compared with the entire output image of the aligner.
                            The bigger the numbers are, the more zoomed-out the
face will be.
desiredWidth: width of the output image.
desiredHeight: height of the output image.
'''
self.desiredLeftEye = desiredLeftEye
self.desiredWidth = desiredWidth
self.desiredHeight = desiredHeight
def _getAffineMatrix(self, landmarks):
'''
Get affine matrix from landmark coordinates.
'''
# get left and right eye coordinates
(lStart, lEnd) = FACIAL_LANDMARKS_68_IDXS["left_eye"]
(rStart, rEnd) = FACIAL_LANDMARKS_68_IDXS["right_eye"]
leftEyePts = landmarks[lStart:lEnd]
rightEyePts = landmarks[rStart:rEnd]
# compute the center of mass for each eye
leftEyeCenter = leftEyePts.mean(axis=0).astype("int")
rightEyeCenter = rightEyePts.mean(axis=0).astype("int")
# compute the angle between the eye centroids
dY = rightEyeCenter[1] - leftEyeCenter[1]
dX = rightEyeCenter[0] - leftEyeCenter[0]
angle = np.degrees(np.arctan2(dY, dX)) - 180
# compute the desired right eye x-coordinate based on the
# desired x-coordinate of the left eye
dist = np.linalg.norm(leftEyeCenter - rightEyeCenter)
desiredDist = 1.0 - 2 * self.desiredLeftEye[0]
desiredDist *= self.desiredWidth
scale = desiredDist / dist
# center of two eyes
eyesCenter = ((leftEyeCenter[0] + rightEyeCenter[0]) // 2,
(leftEyeCenter[1] + rightEyeCenter[1]) // 2)
# rotation matrix
M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
# update the translation component of the matrix
# recenter the output image to be center of eyes
tX = self.desiredWidth * 0.5
tY = self.desiredHeight * self.desiredLeftEye[1]
M[0, 2] += (tX - eyesCenter[0])
M[1, 2] += (tY - eyesCenter[1])
return M
def align_image(self, img, landmarks):
'''
Do affine transformation of the original image. The output image is
scaled to certain size, and rotated and translated, so that the two
eyes of the face are horizontally aligned.
Returns:
The output image.
'''
M = self._getAffineMatrix(landmarks)
# apply the affine transformation
(w, h) = (self.desiredWidth, self.desiredHeight)
output = cv2.warpAffine(img, M, (w, h),flags=cv2.INTER_CUBIC)
return output
def align_landmarks(self, landmarks, round=False):
'''
Do affine transformation of the landmarks.
https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.html
'''
M = self._getAffineMatrix(landmarks)
A = M[:, :2]; B = M[:, 2]
landmarks_ltfm = np.dot(A, landmarks.T)
landmarks_af = np.add(landmarks_ltfm.T, B)
if round:
return np.rint(landmarks_af).astype(int)
else:
return landmarks_af
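# Usage sketch (illustrative; the predictor path is an assumption and must
# point at dlib's 68-landmark model file):
#
#   predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
#   gray = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2GRAY)
#   landmarks = FaceDetector(predictor).get_landmarks(gray)
#   if landmarks is not None:
#       aligner = FaceAligner()
#       aligned = aligner.align_image(gray, landmarks)
#       circle_landmarks(aligned, aligner.align_landmarks(landmarks, round=True))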
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_citadel
# Purpose: SpiderFoot plug-in to search Leak-Lookup using their API,
# for potential data breaches.
#
# Author: sn <citadel.pw@protonmail.com>
#
# Created: 15/08/2017
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import time
import urllib.error
import urllib.parse
import urllib.request
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_citadel(SpiderFootPlugin):
meta = {
'name': "Leak-Lookup",
'summary': "Searches Leak-Lookup.com's database of breaches.",
'flags': ["apikey"],
'useCases': ["Footprint", "Investigate", "Passive"],
'categories': ["Leaks, Dumps and Breaches"],
'dataSource': {
'website': "https://leak-lookup.com/",
'model': "FREE_AUTH_UNLIMITED",
'references': [
"https://leak-lookup.com/api",
"https://leak-lookup.com/databases"
],
'apiKeyInstructions': [
"Visit https://leak-lookup.com",
"Register an account",
"Login to your account",
"Click on 'Account'",
"Click on 'API'",
"The API key is listed under 'API Key'"
],
'favIcon': "https://leak-lookup.com/favicon.png",
'logo': "https://leak-lookup.com/favicon.png",
'description': "Leak-Lookup allows you to search across thousands of data breaches "
"to stay on top of credentials that may have been compromised in the wild.\n"
"The creators came together when they realized they had a vast trove of data "
"that could be of great value to pen-testers seeking weaknesses in client passwords "
"and those concerned about which of their credentials have been leaked into the wild.\n"
"Always looking forward, Leak-Lookup invests all of its profits back into securing the "
"latest data breaches and leaks / dumps as they become available, ensuring that "
"as well as historical data, Leak-Lookup becomes a field leader in credential monitoring.",
}
}
# Default options
opts = {
"api_key": "",
"timeout": 60
}
optdescs = {
"api_key": "Leak-Lookup API key. Without this you're limited to the public API.",
"timeout": "Custom timeout due to heavy traffic at times."
}
results = None
errorState = False
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.errorState = False
self.__dataSource__ = "Leak-Lookup.com"
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ['EMAILADDR']
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["EMAILADDR_COMPROMISED"]
# Query email address
# https://leak-lookup.com/api
def queryEmail(self, email):
apikey = self.opts['api_key']
if not apikey:
# Public API key
apikey = "3edfb5603418f101926c64ca5dd0e409"
params = {
'query': email.encode('raw_unicode_escape').decode("ascii", errors='replace'),
'type': 'email_address',
'key': apikey
}
res = self.sf.fetchUrl("https://leak-lookup.com/api/search",
postData=urllib.parse.urlencode(params),
timeout=self.opts['timeout'],
useragent=self.opts['_useragent'])
if res['code'] == "429":
time.sleep(10)
return self.queryEmail(email)
if res['content'] is None:
self.debug('No response from Leak-Lookup.com')
return None
try:
return json.loads(res['content'])
except Exception as e:
self.debug(f"Error processing JSON response: {e}")
return None
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
self.debug(f"Received event, {eventName}, from {srcModuleName}")
if self.errorState:
return
# Don't look up stuff twice
if eventData in self.results:
self.debug(f"Skipping {eventData}, already checked.")
return
self.results[eventData] = True
data = self.queryEmail(eventData)
if data is None:
return
error = data.get('error')
message = data.get('message')
if error == 'true':
self.error(f"Error encountered processing {eventData}: {message}")
if "MISSING API" in message:
self.errorState = True
return
return
if not message:
return
for site in message:
self.info(f"Found Leak-Lookup entry for {eventData}: {site}")
evt = SpiderFootEvent("EMAILADDR_COMPROMISED", f"{eventData} [{site}]", self.__name__, event)
self.notifyListeners(evt)
# End of sfp_citadel class
|
from flask import Flask, render_template, request, flash
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import redirect
from flask_login import LoginManager, UserMixin, login_user, logout_user, login_required, current_user
from werkzeug.security import check_password_hash, generate_password_hash
app = Flask(__name__)
app.config['SECRET_KEY'] = "nothingggg"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///posts.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class BlogPost(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(30), nullable=False)
author = db.Column(db.String(25), nullable=False, default="N.A")
content = db.Column(db.String(200), nullable=False)
def __repr__(self):
return 'BLOGPOST' + str(self.id)
class User(UserMixin, db.Model):
user_id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(15), unique=True, nullable=False)
email = db.Column(db.String, unique=True, nullable=False)
password = db.Column(db.String(80), nullable=False)
def get_id(self):
return (self.user_id)
def __repr__(self):
        return 'USER' + str(self.user_id)
@app.route('/')
def welcome():
return render_template('welcome_page.html')
@app.route('/signup', methods=['POST', 'GET'])
def signup():
if request.method == 'POST':
        uemail = request.form['mail']
uname = request.form['username']
upass = request.form['password']
hpass = generate_password_hash(upass, method='sha256')
        new_user = User(username=uname, email=uemail, password=hpass)
db.session.add(new_user)
db.session.commit()
flash("Account Created. Please Login.")
return redirect('/')
else:
return render_template('signup.html')
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
name = request.form['username']
pw = request.form['password']
user = User.query.filter_by(username=name).first()
if user:
if check_password_hash(user.password, pw):
login_user(user)
return redirect('/home')
else:
flash("INVALID PASSWORD")
return render_template('login.html')
else:
flash("INVALID USERNAME")
return render_template('login.html')
else:
return render_template('login.html')
@app.route('/home', methods=['POST', 'GET'])
@login_required
def home():
all_posts = BlogPost.query.all()
return render_template('home.html', posts=all_posts)
@app.route('/add', methods=['POST', 'GET'])
def add():
if request.method == 'POST':
post_title = request.form['title']
post_author = request.form['author']
post_content = request.form['content']
new_post = BlogPost(
title=post_title, author=post_author, content=post_content)
db.session.add(new_post)
db.session.commit()
return redirect('/home')
else:
return render_template('add.html')
@app.route('/home/delete/<int:id>')
def delete(id):
post = BlogPost.query.get_or_404(id)
db.session.delete(post)
db.session.commit()
return redirect('/home')
@app.route('/home/edit/<int:id>', methods=['POST', 'GET'])
def edit(id):
post = BlogPost.query.get_or_404(id)
if request.method == 'POST':
post.title = request.form['title']
post.author = request.form['author']
post.content = request.form['content']
db.session.commit()
return redirect('/home')
else:
return render_template('edit.html', post=post)
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/logout')
def logout():
logout_user()
return redirect('/')
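# Minimal first-run setup sketch: the models above need their tables created
# before the app can serve requests (assumes a fresh posts.db):
with app.app_context():
    db.create_all()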
if __name__ == '__main__':
app.run(debug=True)
|
from day15_1 import find_path
def extend(rows, times=5):
"""Extend rows. Return the result."""
extended = rows
length = len(rows)
for row in extended: # Lengthen rows.
for i in range((times - 1) * length):
row.append(row[i] % 9 + 1)
for i in range((times - 1) * length): # Add rows.
extended.append([j % 9 + 1 for j in extended[i]])
return extended
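# Worked example (illustrative): risk levels wrap from 9 back to 1, so
#
#   >>> extend([[8]])
#   [[8, 9, 1, 2, 3], [9, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]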
def parse_and_extend():
"""Parse input, and extend. Return the the risk levels and the goal."""
with open('../data/day15.txt') as f:
lines = [list(map(int, line.strip())) for line in f.readlines()]
lines = extend(lines)
goal = (len(lines[0]) - 1, len(lines) - 1) # (x, y)
risks = dict()
for y, line in enumerate(lines):
for x, value in enumerate(line):
risks[(x, y)] = value
return risks, goal
if __name__ == '__main__':
risks, goal = parse_and_extend()
print(find_path(risks, goal))
|
# Code from Chapter 10 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008, 2014
# The Population Based Incremental Learning algorithm
# Comment and uncomment fitness functions as appropriate (as an import and the fitnessFunction variable)
import pylab as pl
import numpy as np
#import fourpeaks as fF
import knapsack as fF
def PBIL():
pl.ion()
populationSize = 100
stringLength = 20
eta = 0.005
#fitnessFunction = 'fF.fourpeaks'
fitnessFunction = 'fF.knapsack'
p = 0.5*np.ones(stringLength)
best = np.zeros(501,dtype=float)
for count in range(501):
# Generate samples
population = np.random.rand(populationSize,stringLength)
for i in range(stringLength):
population[:,i] = np.where(population[:,i]<p[i],1,0)
# Evaluate fitness
fitness = eval(fitnessFunction)(population)
# Pick best
best[count] = np.max(fitness)
bestplace = np.argmax(fitness)
fitness[bestplace] = 0
secondplace = np.argmax(fitness)
# Update vector
p = p*(1-eta) + eta*((population[bestplace,:]+population[secondplace,:])/2)
if (np.mod(count,100)==0):
            print(count, best[count])
pl.plot(best,'kx-')
pl.xlabel('Epochs')
pl.ylabel('Fitness')
pl.show()
#print p
PBIL()
|
import string
from collections import defaultdict
'''
This function constructs folds that have a balanced category distribution.
Folds are stacked up together to give the order of docs in the main data.
idx_order defines the order of documents in the data. Each sequence of (docs_per_fold) documents in idx_order can be treated as a single fold, containing documents balanced across each category.
'''
def prepare_folds(args):
with open(args.cat_path) as fp:
categories = []
for line in fp:
_, docs = line.strip().split('\t')
docs = docs.strip().split(' ')
categories.append(docs)
# categories: list[category, docs_per_category]
categories.sort(key = lambda x: len(x))
n_docs = len(sum(categories, []))
assert n_docs == args.dataset_size, "invalid category list"
docs_per_fold = args.dataset_size // args.num_folds
    folds = [[] for f in range(args.num_folds)]
# folds: list[num_folds, docs_per_fold]
f = 0
for cat in categories:
for doc in cat:
folds[f].append(doc)
            f = (f + 1) % args.num_folds
# list[num_folds, docs_per_fold] --> list[num_folds * docs_per_fold]
idx_order = sum(folds, [])
return idx_order
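# Worked example (illustrative): with num_folds = 2 and categories
# A = [a1, a2], B = [b1, b2], the round-robin above gives
# folds = [[a1, b1], [a2, b2]], so idx_order = [a1, b1, a2, b2] -- every
# consecutive block of docs_per_fold documents mixes both categories.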
'''
This function prepares the numericalized data in the form of lists, to be used in training mode.
idx_order is the order of documents in the dataset.
x: list[num_docs, sentences_per_doc, words_per_sentence] if pretrained = False
list[num_docs, sentences_per_doc, sentence_embedding_dim] if pretrained = True
y: list[num_docs, sentences_per_doc]
'''
def prepare_data(idx_order, args):
x, y = [], []
word2idx = defaultdict(lambda: len(word2idx))
tag2idx = defaultdict(lambda: len(tag2idx))
# map the special symbols first
word2idx['<pad>'], word2idx['<unk>'] = 0, 1
tag2idx['<pad>'], tag2idx['<start>'], tag2idx['<end>'] = 0, 1, 2
# iterate over documents
for doc in idx_order:
doc_x, doc_y = [], []
with open(args.data_path + doc + '.txt') as fp:
# iterate over sentences
for sent in fp:
try:
sent_x, sent_y = sent.strip().split('\t')
except ValueError:
continue
# cleanse text, map words and tags
if not args.pretrained:
sent_x = sent_x.strip().lower().translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
sent_x = list(map(lambda x: word2idx[x], sent_x.split()))
else:
sent_x = list(map(float, sent_x.strip().split()[:args.emb_dim]))
sent_y = tag2idx[sent_y.strip()]
if sent_x != []:
doc_x.append(sent_x)
doc_y.append(sent_y)
x.append(doc_x)
y.append(doc_y)
return x, y, word2idx, tag2idx
'''
This function prepares the numericalized data in the form of lists, to be used in inference mode.
idx_order is the order of documents in the dataset.
x: list[num_docs, sentences_per_doc, words_per_sentence] if pretrained = False
list[num_docs, sentences_per_doc, sentence_embedding_dim] if pretrained = True
'''
def prepare_data_inference(idx_order, args, sent2vec_model):
x = []
# iterate over documents
for doc in idx_order:
doc_x = []
with open(args.data_path + doc + '.txt') as fp:
# iterate over sentences
for sent in fp:
sent_x = sent.strip()
# cleanse text, map words and tags
if not args.pretrained:
sent_x = sent_x.lower().translate(str.maketrans(string.punctuation, ' ' * len(string.punctuation)))
sent_x = list(map(lambda x: args.word2idx[x] if x in args.word2idx else args.word2idx['<unk>'], sent_x.split()))
else:
sent_x = sent2vec_model.embed_sentence(sent_x).flatten().tolist()[:args.emb_dim]
if sent_x != []:
doc_x.append(sent_x)
x.append(doc_x)
return x
|
"""Added the on delete and on update settings
Revision ID: 500de4365b5d
Revises: 1dbcb98d3ab8
Create Date: 2017-09-22 22:31:34.147336
"""
# revision identifiers, used by Alembic.
revision = '500de4365b5d'
down_revision = '1dbcb98d3ab8'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(u'fk_post_categories__category_id__categories', 'post_categories', type_='foreignkey')
op.drop_constraint(u'fk_post_categories__post_id__posts', 'post_categories', type_='foreignkey')
op.create_foreign_key('fk_post_categories__category_id__categories', 'post_categories', 'categories', ['category_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.create_foreign_key('fk_post_categories__post_id__posts', 'post_categories', 'posts', ['post_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'fk_tag_id__tags', 'post_tags', type_='foreignkey')
op.drop_constraint(u'fk_post_id__posts', 'post_tags', type_='foreignkey')
op.create_foreign_key('fk_tag_id__tags', 'post_tags', 'tags', ['tag_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.create_foreign_key('fk_post_id__posts', 'post_tags', 'posts', ['post_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
op.drop_constraint(u'fk_url_id__urls', 'posts', type_='foreignkey')
op.create_foreign_key('fk_url_id__urls', 'posts', 'urls', ['url_id'], ['id'], onupdate='CASCADE', ondelete='CASCADE')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('fk_url_id__urls', 'posts', type_='foreignkey')
op.create_foreign_key(u'fk_url_id__urls', 'posts', 'urls', ['url_id'], ['id'])
op.drop_constraint('fk_post_id__posts', 'post_tags', type_='foreignkey')
op.drop_constraint('fk_tag_id__tags', 'post_tags', type_='foreignkey')
op.create_foreign_key(u'fk_post_id__posts', 'post_tags', 'posts', ['post_id'], ['id'])
op.create_foreign_key(u'fk_tag_id__tags', 'post_tags', 'tags', ['tag_id'], ['id'])
op.drop_constraint('fk_post_categories__post_id__posts', 'post_categories', type_='foreignkey')
op.drop_constraint('fk_post_categories__category_id__categories', 'post_categories', type_='foreignkey')
op.create_foreign_key(u'fk_post_categories__post_id__posts', 'post_categories', 'posts', ['post_id'], ['id'])
op.create_foreign_key(u'fk_post_categories__category_id__categories', 'post_categories', 'categories', ['category_id'], ['id'])
### end Alembic commands ###
|
import datetime
from typing import Any, Dict, Tuple
from ee.clickhouse.client import sync_execute
from ee.clickhouse.models.action import format_action_filter
from ee.clickhouse.models.property import parse_prop_clauses
from ee.clickhouse.sql.retention.retention import REFERENCE_EVENT_SQL, REFERENCE_EVENT_UNIQUE_SQL, RETENTION_SQL
from posthog.constants import TREND_FILTER_TYPE_ACTIONS, TREND_FILTER_TYPE_EVENTS
from posthog.models.action import Action
from posthog.models.entity import Entity
from posthog.models.filter import Filter
from posthog.models.team import Team
from posthog.queries.retention import Retention
PERIOD_TRUNC_HOUR = "toStartOfHour"
PERIOD_TRUNC_DAY = "toStartOfDay"
PERIOD_TRUNC_WEEK = "toStartOfWeek"
PERIOD_TRUNC_MONTH = "toStartOfMonth"
class ClickhouseRetention(Retention):
def _execute_sql(
self,
filter: Filter,
date_from: datetime.datetime,
date_to: datetime.datetime,
target_entity: Entity,
returning_entity: Entity,
is_first_time_retention: bool,
team: Team,
) -> Dict[Tuple[int, int], Dict[str, Any]]:
period = filter.period
prop_filters, prop_filter_params = parse_prop_clauses(filter.properties, team.pk)
target_query = ""
target_params: Dict = {}
trunc_func = self._get_trunc_func_ch(period)
if target_entity.type == TREND_FILTER_TYPE_ACTIONS:
action = Action.objects.get(pk=target_entity.id)
action_query, target_params = format_action_filter(action, use_loop=True)
target_query = "AND e.uuid IN ({})".format(action_query)
elif target_entity.type == TREND_FILTER_TYPE_EVENTS:
target_query = "AND e.event = %(target_event)s"
target_params = {"target_event": target_entity.id}
target_query, target_params = self._get_condition(target_entity)
returning_query, returning_params = self._get_condition(returning_entity, "returning")
target_query_formatted = (
"AND {target_query}".format(target_query=target_query)
if is_first_time_retention
else "AND ({target_query} OR {returning_query})".format(
target_query=target_query, returning_query=returning_query
)
)
returning_query_formatted = (
"AND {returning_query}".format(returning_query=returning_query)
if is_first_time_retention
else "AND ({target_query} OR {returning_query})".format(
target_query=target_query, returning_query=returning_query
)
)
reference_event_sql = (REFERENCE_EVENT_UNIQUE_SQL if is_first_time_retention else REFERENCE_EVENT_SQL).format(
target_query=target_query_formatted, filters=prop_filters, trunc_func=trunc_func,
)
result = sync_execute(
RETENTION_SQL.format(
target_query=target_query_formatted,
returning_query=returning_query_formatted,
filters=prop_filters,
trunc_func=trunc_func,
extra_union="UNION ALL {}".format(reference_event_sql) if is_first_time_retention else "",
reference_event_sql=reference_event_sql,
),
{
"team_id": team.pk,
"start_date": date_from.strftime(
"%Y-%m-%d{}".format(" %H:%M:%S" if filter.period == "Hour" else " 00:00:00")
),
"end_date": date_to.strftime(
"%Y-%m-%d{}".format(" %H:%M:%S" if filter.period == "Hour" else " 00:00:00")
),
**prop_filter_params,
**target_params,
**returning_params,
"period": period,
},
)
result_dict = {}
for res in result:
result_dict.update({(res[0], res[1]): {"count": res[2], "people": []}})
return result_dict
def _get_condition(self, target_entity: Entity, prepend: str = "") -> Tuple[str, Dict]:
if target_entity.type == TREND_FILTER_TYPE_ACTIONS:
action = Action.objects.get(pk=target_entity.id)
action_query, params = format_action_filter(action, prepend=prepend, use_loop=True)
condition = "e.uuid IN ({})".format(action_query)
elif target_entity.type == TREND_FILTER_TYPE_EVENTS:
condition = "e.event = %({}_event)s".format(prepend)
params = {"{}_event".format(prepend): target_entity.id}
else:
condition = "e.event = %({}_event)s".format(prepend)
params = {"{}_event".format(prepend): "$pageview"}
return condition, params
def _get_trunc_func_ch(self, period: str) -> str:
if period == "Hour":
return PERIOD_TRUNC_HOUR
elif period == "Week":
return PERIOD_TRUNC_WEEK
elif period == "Day":
return PERIOD_TRUNC_DAY
elif period == "Month":
return PERIOD_TRUNC_MONTH
else:
raise ValueError(f"Period {period} is unsupported.")
|
from django.contrib import admin
from infs3202.events.models import Event, Ticket
admin.site.register(Event)
admin.site.register(Ticket)
|
import geopandas as gpd
import numpy as np
import shapely.geometry as sg
from pandamesh import triangle_geometry as tg
outer_coords = np.array([(0.0, 0.0), (10.0, 0.0), (10.0, 10.0), (0.0, 10.0)])
inner_coords = np.array([(3.0, 3.0), (7.0, 3.0), (7.0, 7.0), (3.0, 7.0)])
line_coords = np.array([(2.0, 8.0), (8.0, 2.0)])
inner = sg.LinearRing(inner_coords)
outer = sg.LinearRing(outer_coords)
line = sg.LineString(line_coords)
donut = sg.Polygon(outer, holes=[inner])
refined = sg.Polygon(inner_coords)
def area(vertices, triangles):
"""
Compute the area of every triangle in the mesh.
(Helper for these tests.)
"""
coords = vertices[triangles]
u = coords[:, 1] - coords[:, 0]
v = coords[:, 2] - coords[:, 0]
return 0.5 * np.abs(np.cross(u, v))
def test_add_linestrings():
series = gpd.GeoSeries(data=[line])
vertices, segments = tg.add_linestrings(series)
expected = np.unique(line_coords, axis=0)
expected_segments = np.array([[0, 1]])
assert np.allclose(vertices, expected)
assert np.array_equal(segments, expected_segments)
series = gpd.GeoSeries(data=[inner])
vertices, segments = tg.add_linestrings(series)
expected = np.unique(inner_coords, axis=0)
expected_segments = np.array(
[
[0, 2],
[2, 3],
[3, 1],
[1, 0],
]
)
assert np.allclose(vertices, expected)
assert np.array_equal(segments, expected_segments)
series = gpd.GeoSeries(data=[outer])
vertices, segments = tg.add_linestrings(series)
expected = np.unique(outer_coords, axis=0)
assert np.allclose(vertices, expected)
assert np.array_equal(segments, expected_segments)
# Empty should work too
series = gpd.GeoSeries(data=[])
_, _ = tg.add_linestrings(series)
def test_add_polygons():
gdf = gpd.GeoDataFrame(geometry=[donut])
cellsize = 0.5
gdf["cellsize"] = cellsize
vertices, segments, regions = tg.add_polygons(gdf)
expected = np.unique(
np.concatenate([outer_coords, inner_coords]),
axis=0,
)
expected_segments = np.array(
[
[0, 6],
[6, 7],
[7, 1],
[1, 0],
[2, 4],
[4, 5],
[5, 3],
[3, 2],
]
)
x, y = regions[0, :2]
assert np.allclose(vertices, expected)
assert np.array_equal(segments, expected_segments)
assert regions[0, 2] == 0
assert regions[0, 3] == 0.5 * cellsize ** 2
assert sg.Point(x, y).within(donut)
def test_add_points():
xy = np.array(
[
[0.0, 0.0],
[1.0, 1.0],
]
)
gdf = gpd.GeoDataFrame(geometry=gpd.points_from_xy(xy[:, 0], xy[:, 1]))
vertices = tg.add_points(gdf)
assert np.allclose(vertices, xy)
def test_polygon_holes():
polygon = sg.Polygon(outer)
gdf = gpd.GeoDataFrame(geometry=[polygon])
assert tg.polygon_holes(gdf) is None
gdf = gpd.GeoDataFrame(geometry=[donut])
assert len(tg.polygon_holes(gdf)) == 1
gdf = gpd.GeoDataFrame(geometry=[donut, refined])
assert tg.polygon_holes(gdf) is None
|
import click
from .commands import archive, archive_category
@click.group()
@click.pass_context
def channel(context):
"""Manage series operations"""
pass
channel.add_command(archive)
channel.add_command(archive_category)
__all__ = [
'channel'
]
|
# DomirScire
from collections import Counter
WORDS = ['this', 'is', 'an', 'elementary', 'test', 'example']
def most_repeating_letters_count(word):
return Counter(word).most_common(1)[0][1]
def most_repeating_word(words):
return max(words, key=most_repeating_letters_count)
if __name__ == "__main__":
print(most_repeating_word(WORDS))
|
from injector import inject
from logging import getLogger
from typing import List
from typing import Optional
from gumo.core import EntityKey
from gumo.pullqueue import PullTask
from gumo.pullqueue.worker.domain.configuration import PullQueueWorkerConfiguration
logger = getLogger(__name__)
class PullTaskRemoteRepository:
@inject
def __init__(
self,
configuration: PullQueueWorkerConfiguration,
):
self._configuration = configuration
def available_tasks(
self,
queue_name: str,
size: int = 10,
tag: Optional[str] = None,
) -> List[PullTask]:
raise NotImplementedError()
def lease_task(
self,
queue_name: str,
task: PullTask,
lease_time: int = 300,
) -> PullTask:
raise NotImplementedError()
def finalize_task(
self,
queue_name: str,
key: EntityKey,
) -> PullTask:
raise NotImplementedError()
def failure_task(
self,
queue_name: str,
key: EntityKey,
message: str,
) -> PullTask:
raise NotImplementedError()
def lease_extend_task(
self,
queue_name: str,
key: EntityKey,
lease_extend_time: int,
) -> PullTask:
raise NotImplementedError()
|
import docx
def docx_to_text(file_path):
doc = docx.Document(file_path)
result = []
for p in doc.paragraphs:
txt = p.text
result.append(txt)
return result
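# Usage sketch (hypothetical path): "\n".join(docx_to_text("report.docx"))
# yields the document body as one string.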
|
from ..utils.loader import Custom
from ..utils.helper import get_full_name, make_output
from .base_resource import BaseResource
from .role import Role
class ApiGatewayResource(BaseResource):
TEMPLATE = \
'''
Type: AWS::ApiGateway::Resource
Properties:
RestApiId: !Ref null
'''
def _dump_properties(self, properties):
properties['ParentId'] = self.get('parent')
properties['PathPart'] = self.get('path_part')
properties['RestApiId'].value = self.get('rest_api')
class ApiGatewayFunctionMethod(BaseResource):
TEMPLATE = \
'''
Type: AWS::ApiGateway::Method
Properties:
RequestParameters: {}
AuthorizationType: NONE
RestApiId: !Ref null
Integration:
IntegrationHttpMethod: POST
Type: AWS_PROXY
Uri: !Sub
- arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${func}/invocations
- func: !GetAtt null
MethodResponses: []
'''
def _dump_properties(self, properties):
properties['HttpMethod'] = self.get('http_method')
properties['ResourceId'] = self.get('resource')
properties['RestApiId'].value = self.get('rest_api')
properties['Integration']['Uri'].value[1]['func'].value = self.get('function') + '.Arn'
class ApiGatewayPermission(BaseResource):
TEMPLATE = \
'''
Type: AWS::Lambda::Permission
Properties:
FunctionName: !GetAtt ProcessApiRequest.Arn
Action: lambda:InvokeFunction
Principal: !Sub apigateway.${AWS::URLSuffix}
SourceArn: !Sub
- arn:aws:execute-api:${AWS::Region}:${AWS::AccountId}:${api}/*/*
- api: !Ref null
'''
def _dump_properties(self, properties):
properties['FunctionName'].value = self.get('function') + '.Arn'
properties['SourceArn'].value[1]['api'].value = self.get('rest_api')
class ApiGatewayS3ObjectRole(Role):
STATEMENT_TEMPLATE = \
'''
- Effect: Allow
Action: s3:GetObject
Resource: !Sub
- arn:aws:s3:::${bucket}
- bucket: null
'''
PRINCIPAL_SERVICE = 'apigateway.amazonaws.com'
def _dump_properties(self, properties):
super()._dump_properties(properties)
statement = properties['Policies'][0]['PolicyDocument']['Statement'][0]
statement['Resource'].value[1]['bucket'] = self.get('bucket')
class ApiGatewayBucketMethod(BaseResource):
# pylint: disable=anomalous-backslash-in-string
TEMPLATE = \
'''
Type: AWS::ApiGateway::Method
Properties:
RestApiId: !Ref null
RequestParameters:
method.request.header.Content-Disposition: false
method.request.header.Content-Type: false
AuthorizationType: NONE
HttpMethod: GET
MethodResponses:
- StatusCode: 200
ResponseParameters:
method.response.header.Timestamp: true
method.response.header.Content-Length: true
method.response.header.Content-Type: true
- StatusCode: 400
- StatusCode: 500
Integration:
IntegrationHttpMethod: GET
Type: AWS
Uri: !Sub
- arn:aws:apigateway:${AWS::Region}:s3:path/${bucket}
- bucket: null
Credentials: !GetAtt null
PassthroughBehavior: WHEN_NO_MATCH
RequestParameters:
integration.request.header.Content-Disposition: method.request.header.Content-Disposition
integration.request.header.Content-Type: method.request.header.Content-Type
IntegrationResponses:
- StatusCode: 200
ResponseParameters:
method.response.header.Timestamp: integration.response.header.Date
method.response.header.Content-Length: integration.response.header.Content-Length
method.response.header.Content-Type: integration.response.header.Content-Type
- StatusCode: 400
SelectionPattern: 4\d{2}
- StatusCode: 500
SelectionPattern: 5\d{2}
'''
def _dump_properties(self, properties):
properties['ResourceId'] = self.get('resource')
properties['RestApiId'].value = self.get('rest_api')
properties['Integration']['Uri'].value[1]['bucket'] = self.get('bucket')
properties['Integration']['Credentials'].value = self.get('role_resource') + '.Arn'
params = filter(None, map(to_param_part, filter(None, self.get('url').split('/'))))
request_params = properties['RequestParameters']
integration_request_params = properties['Integration']['RequestParameters']
for param in params:
name = 'method.request.path.' + param
request_params[name] = True
integration_request_params['integration.request.path.' + param] = name
class ApiGatewayDeployment(BaseResource):
TEMPLATE = \
'''
Type: AWS::ApiGateway::Deployment
Properties:
RestApiId: !Ref null
'''
def _dump(self, template, parent_template):
super()._dump(template, parent_template)
template['DependsOn'].extend(self.get('methods'))
def _dump_properties(self, properties):
properties['RestApiId'].value = self.get('rest_api')
properties['StageName'] = self.get('stage')
def to_param_part(part):
return part[1:-1] if part[0] == '{' and part[-1] == '}' else ''
def get_resource(url, resources):
if url in resources:
return resources[url]
index = url.rindex('/')
part = url[index + 1:]
parent_url = url[:index]
parent = get_resource(parent_url, resources)
name = to_param_part(part)
if name == 'proxy+':
name = 'ProxyVar'
elif name:
name = name.title() + 'Var'
else:
name = part.title()
resource = {'name': parent['name'] + name, 'part': part, 'parent': parent_url}
resources[url] = resource
return resource
def build_resource_name(name, api_name):
return api_name + 'Resource' + name
def build_resource_id(name, api_name):
key = '!Ref' if name else '!GetAtt'
val = build_resource_name(name, api_name) if name else api_name + '.RootResourceId'
return Custom(key, val)
class ApiGateway(BaseResource):
TEMPLATE = \
'''
Type: AWS::ApiGateway::RestApi
Properties:
EndpointConfiguration:
Types:
- EDGE
'''
TYPE = 'apigateway'
# pylint: disable=too-many-locals
def _dump(self, template, parent_template):
super()._dump(template, parent_template)
endpoints = self.get('endpoints', [])
if not endpoints:
return
name = self.name
root = self.root
methods = []
functions = set()
resources = {
'': {'name': ''}
}
urls = []
for endpoint in endpoints:
http_method, url = endpoint['path'].split()
url = url.rstrip('/')
resource = get_resource(url, resources)
urls.append('{:4} {}'.format(http_method, url or '/'))
method_name = name + 'Method' + resource['name'] + http_method.title()
methods.append(method_name)
resource_id = build_resource_id(resource['name'], name)
if 'function' in endpoint:
function = endpoint['function']
functions.add(function)
ApiGatewayFunctionMethod(method_name, {
'resource': resource_id,
'rest_api': name,
'http_method': http_method,
'function': function
}, root).dump(parent_template)
elif 'bucket' in endpoint:
# To simplify role actions list - s3:GetObject.
if http_method != 'GET':
raise ValueError('{} - only GET is allowed'.format(url))
role_name = method_name + 'Role'
ApiGatewayS3ObjectRole(role_name, {
'bucket': endpoint['role_resource']
}, root).dump(parent_template)
ApiGatewayBucketMethod(method_name, {
'resource': resource_id,
'rest_api': name,
'role_resource': role_name,
'bucket': endpoint['bucket'],
'url': url
}, root).dump(parent_template)
for obj in resources.values():
if not obj['name']:
continue
ApiGatewayResource(build_resource_name(obj['name'], name), {
'parent': build_resource_id(resources[obj['parent']]['name'], name),
'path_part': obj['part'],
'rest_api': name
}, root).dump(parent_template)
for function in functions:
ApiGatewayPermission(name + 'Permission' + function, {
'function': function,
'rest_api': name
}, root).dump(parent_template)
stage = self.get('stage')
ApiGatewayDeployment(name + 'Deployment', {
'rest_api': name,
'stage': stage,
'methods': methods
}, root).dump(parent_template)
outputs = parent_template['Outputs']
outputs[name + 'Endpoint'] = make_output(Custom('!Sub', [
'https://${gateway}.execute-api.${AWS::Region}.${AWS::URLSuffix}/' + stage,
{'gateway': Custom('!Ref', name)}
]))
for i, url in enumerate(urls):
outputs['{}Path{}'.format(name, i + 1)] = make_output(url)
def _dump_properties(self, properties):
properties['Name'] = get_full_name(self.name, self.root)
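# Illustrative shape of the settings this resource consumes (key names are
# taken from the code above; the values are made up):
#
#   stage: prod
#   endpoints:
#     - path: GET /items/{id}
#       function: ProcessApiRequest
#     - path: GET /static/{proxy+}
#       bucket: my-bucket/{proxy}
#       role_resource: my-bucket/*
#
# Each endpoint becomes an ApiGateway*Method plus the resources along its URL
# path, and a single deployment is emitted that depends on all methods.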
|
a=[1,2,3,4]
def add_ab(a=1,b=2):
return a+b
b=[1,2,3,4]
print(type(a),a+b)
|
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
from rope.base import exceptions
from rope.base import libutils
from rope.base.pycore import _TextChangeDetector
from rope.base.pyobjects import get_base_type, AbstractFunction
from ropetest import testutils
class PyCoreTest(unittest.TestCase):
def setUp(self):
super(PyCoreTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
def tearDown(self):
testutils.remove_project(self.project)
super(PyCoreTest, self).tearDown()
def test_simple_module(self):
testutils.create_module(self.project, 'mod')
result = self.project.get_module('mod')
self.assertEquals(get_base_type('Module'), result.type)
self.assertEquals(0, len(result.get_attributes()))
def test_nested_modules(self):
pkg = testutils.create_package(self.project, 'pkg')
mod = testutils.create_module(self.project, 'mod', pkg) # noqa
package = self.project.get_module('pkg')
self.assertEquals(get_base_type('Module'), package.get_type())
self.assertEquals(1, len(package.get_attributes()))
module = package['mod'].get_object()
self.assertEquals(get_base_type('Module'), module.get_type())
def test_package(self):
pkg = testutils.create_package(self.project, 'pkg')
mod = testutils.create_module(self.project, 'mod', pkg) # noqa
result = self.project.get_module('pkg')
self.assertEquals(get_base_type('Module'), result.type)
def test_simple_class(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('class SampleClass(object):\n pass\n')
mod_element = self.project.get_module('mod')
result = mod_element['SampleClass'].get_object()
self.assertEquals(get_base_type('Type'), result.get_type())
def test_simple_function(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('def sample_function():\n pass\n')
mod_element = self.project.get_module('mod')
result = mod_element['sample_function'].get_object()
self.assertEquals(get_base_type('Function'), result.get_type())
def test_class_methods(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class SampleClass(object):\n' \
' def sample_method(self):\n' \
' pass\n'
mod.write(code)
mod_element = self.project.get_module('mod')
sample_class = mod_element['SampleClass'].get_object()
self.assertTrue('sample_method' in sample_class)
method = sample_class['sample_method'].get_object()
self.assertEquals(get_base_type('Function'), method.get_type())
def test_global_variables(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('var = 10')
mod_element = self.project.get_module('mod')
result = mod_element['var'] # noqa
def test_class_variables(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('class SampleClass(object):\n var = 10\n')
mod_element = self.project.get_module('mod')
sample_class = mod_element['SampleClass'].get_object()
var = sample_class['var'] # noqa
def test_class_attributes_set_in_init(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('class C(object):\n'
' def __init__(self):\n self.var = 20\n')
mod_element = self.project.get_module('mod')
sample_class = mod_element['C'].get_object()
var = sample_class['var'] # noqa
def test_class_attributes_set_in_init_overwriting_a_defined(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def __init__(self):\n' \
' self.f = 20\n' \
' def f():\n' \
' pass\n'
mod.write(code)
mod_element = self.project.get_module('mod')
sample_class = mod_element['C'].get_object()
f = sample_class['f'].get_object()
self.assertTrue(isinstance(f, AbstractFunction))
def test_classes_inside_other_classes(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class SampleClass(object):\n' \
' class InnerClass(object):\n' \
' pass\n\n'
mod.write(code)
mod_element = self.project.get_module('mod')
sample_class = mod_element['SampleClass'].get_object()
var = sample_class['InnerClass'].get_object()
self.assertEquals(get_base_type('Type'), var.get_type())
def test_non_existent_module(self):
with self.assertRaises(exceptions.ModuleNotFoundError):
self.project.get_module('doesnotexistmodule')
def test_imported_names(self):
testutils.create_module(self.project, 'mod1')
mod = testutils.create_module(self.project, 'mod2')
mod.write('import mod1\n')
module = self.project.get_module('mod2')
imported_sys = module['mod1'].get_object()
self.assertEquals(get_base_type('Module'), imported_sys.get_type())
def test_imported_as_names(self):
testutils.create_module(self.project, 'mod1')
mod = testutils.create_module(self.project, 'mod2')
mod.write('import mod1 as my_import\n')
module = self.project.get_module('mod2')
imported_mod = module['my_import'].get_object()
self.assertEquals(get_base_type('Module'), imported_mod.get_type())
def test_get_string_module(self):
mod = libutils.get_string_module(
self.project, 'class Sample(object):\n pass\n')
sample_class = mod['Sample'].get_object()
self.assertEquals(get_base_type('Type'), sample_class.get_type())
def test_get_string_module_with_extra_spaces(self):
mod = libutils.get_string_module(
self.project, 'a = 10\n ') # noqa
def test_parameter_info_for_functions(self):
code = 'def func(param1, param2=10, *param3, **param4):\n pass'
mod = libutils.get_string_module(self.project, code)
sample_function = mod['func']
self.assertEquals(['param1', 'param2', 'param3', 'param4'],
sample_function.get_object().get_param_names())
# FIXME: Not found modules
def xxx_test_not_found_module_is_module(self):
mod = libutils.get_string_module(
self.project, 'import doesnotexist\n')
self.assertEquals(get_base_type('Module'),
mod['doesnotexist'].
get_object().get_type())
def test_mixing_scopes_and_objects_hierarchy(self):
mod = libutils.get_string_module(self.project, 'var = 200\n')
scope = mod.get_scope()
self.assertTrue('var' in scope.get_names())
def test_inheriting_base_class_attributes(self):
code = 'class Base(object):\n' \
' def method(self):\n' \
' pass\n' \
'class Derived(Base):\n' \
' pass\n'
mod = libutils.get_string_module(self.project, code)
derived = mod['Derived'].get_object()
self.assertTrue('method' in derived)
self.assertEquals(get_base_type('Function'),
derived['method'].get_object().get_type())
def test_inheriting_multiple_base_class_attributes(self):
code = 'class Base1(object):\n def method1(self):\n pass\n' \
'class Base2(object):\n def method2(self):\n pass\n' \
'class Derived(Base1, Base2):\n pass\n'
mod = libutils.get_string_module(self.project, code)
derived = mod['Derived'].get_object()
self.assertTrue('method1' in derived)
self.assertTrue('method2' in derived)
def test_inherit_multiple_base_class_attrs_with_the_same_name(self):
code = 'class Base1(object):\n def method(self):\n pass\n' \
'class Base2(object):\n def method(self):\n pass\n' \
'class Derived(Base1, Base2):\n pass\n'
mod = libutils.get_string_module(self.project, code)
base1 = mod['Base1'].get_object()
derived = mod['Derived'].get_object()
self.assertEquals(base1['method'].get_object(),
derived['method'].get_object())
def test_inheriting_unknown_base_class(self):
code = 'class Derived(NotFound):\n' \
' def f(self):\n' \
' pass\n'
mod = libutils.get_string_module(self.project, code)
derived = mod['Derived'].get_object()
self.assertTrue('f' in derived)
def test_module_creation(self):
new_module = testutils.create_module(self.project, 'module')
self.assertFalse(new_module.is_folder())
self.assertEquals(self.project.get_resource('module.py'), new_module)
def test_packaged_module_creation(self):
package = self.project.root.create_folder('package') # noqa
new_module = testutils.create_module(self.project, 'package.module')
self.assertEquals(self.project.get_resource('package/module.py'),
new_module)
def test_packaged_module_creation_with_nested_src(self):
src = self.project.root.create_folder('src')
src.create_folder('pkg')
new_module = testutils.create_module(self.project, 'pkg.mod', src)
self.assertEquals(self.project.get_resource('src/pkg/mod.py'),
new_module)
def test_package_creation(self):
new_package = testutils.create_package(self.project, 'pkg')
self.assertTrue(new_package.is_folder())
self.assertEquals(self.project.get_resource('pkg'), new_package)
self.assertEquals(self.project.get_resource('pkg/__init__.py'),
new_package.get_child('__init__.py'))
def test_nested_package_creation(self):
testutils.create_package(self.project, 'pkg1')
nested_package = testutils.create_package(self.project, 'pkg1.pkg2')
self.assertEquals(self.project.get_resource('pkg1/pkg2'),
nested_package)
def test_packaged_package_creation_with_nested_src(self):
src = self.project.root.create_folder('src')
testutils.create_package(self.project, 'pkg1', src)
nested_package = testutils.create_package(self.project, 'pkg1.pkg2',
src)
self.assertEquals(self.project.get_resource('src/pkg1/pkg2'),
nested_package)
def test_find_module(self):
src = self.project.root.create_folder('src')
samplemod = testutils.create_module(self.project, 'samplemod', src)
found_module = self.project.find_module('samplemod')
self.assertEquals(samplemod, found_module)
def test_find_nested_module(self):
src = self.project.root.create_folder('src')
samplepkg = testutils.create_package(self.project, 'samplepkg', src)
samplemod = testutils.create_module(self.project, 'samplemod',
samplepkg)
found_module = self.project.find_module('samplepkg.samplemod')
self.assertEquals(samplemod, found_module)
def test_find_multiple_module(self):
src = self.project.root.create_folder('src')
samplemod1 = testutils.create_module(self.project, 'samplemod', src)
samplemod2 = testutils.create_module(self.project, 'samplemod')
test = self.project.root.create_folder('test')
samplemod3 = testutils.create_module(self.project, 'samplemod', test)
found_module = self.project.find_module('samplemod')
self.assertTrue(samplemod1 == found_module or
samplemod2 == found_module or
samplemod3 == found_module)
def test_find_module_packages(self):
src = self.project.root
samplepkg = testutils.create_package(self.project, 'samplepkg', src)
found_module = self.project.find_module('samplepkg')
self.assertEquals(samplepkg, found_module)
def test_find_module_when_module_and_package_with_the_same_name(self):
src = self.project.root
testutils.create_module(self.project, 'sample', src)
samplepkg = testutils.create_package(self.project, 'sample', src)
found_module = self.project.find_module('sample')
self.assertEquals(samplepkg, found_module)
def test_source_folders_preference(self):
testutils.create_package(self.project, 'pkg1')
testutils.create_package(self.project, 'pkg1.src2')
lost = testutils.create_module(self.project, 'pkg1.src2.lost')
self.assertEqual(self.project.find_module('lost'), None)
self.project.close()
from rope.base.project import Project
self.project = Project(self.project.address,
source_folders=['pkg1/src2'])
self.assertEqual(self.project.find_module('lost'), lost)
def test_get_pyname_definition_location(self):
mod = libutils.get_string_module(self.project, 'a_var = 20\n')
a_var = mod['a_var']
self.assertEquals((mod, 1), a_var.get_definition_location())
def test_get_pyname_definition_location_functions(self):
mod = libutils.get_string_module(
self.project, 'def a_func():\n pass\n')
a_func = mod['a_func']
self.assertEquals((mod, 1), a_func.get_definition_location())
def test_get_pyname_definition_location_class(self):
code = 'class AClass(object):\n pass\n\n'
mod = libutils.get_string_module(self.project, code)
a_class = mod['AClass']
self.assertEquals((mod, 1), a_class.get_definition_location())
def test_get_pyname_definition_location_local_variables(self):
mod = libutils.get_string_module(
self.project, 'def a_func():\n a_var = 10\n')
a_func_scope = mod.get_scope().get_scopes()[0]
a_var = a_func_scope['a_var']
self.assertEquals((mod, 2), a_var.get_definition_location())
def test_get_pyname_definition_location_reassigning(self):
mod = libutils.get_string_module(
self.project, 'a_var = 20\na_var=30\n')
a_var = mod['a_var']
self.assertEquals((mod, 1), a_var.get_definition_location())
def test_get_pyname_definition_location_importes(self):
testutils.create_module(self.project, 'mod')
mod = libutils.get_string_module(self.project, 'import mod\n')
imported_module = self.project.get_module('mod')
module_pyname = mod['mod']
self.assertEquals((imported_module, 1),
module_pyname.get_definition_location())
def test_get_pyname_definition_location_imports(self):
module_resource = testutils.create_module(self.project, 'mod')
module_resource.write('\ndef a_func():\n pass\n')
imported_module = self.project.get_module('mod')
mod = libutils.get_string_module(
self.project, 'from mod import a_func\n')
a_func = mod['a_func']
self.assertEquals((imported_module, 2),
a_func.get_definition_location())
def test_get_pyname_definition_location_parameters(self):
code = 'def a_func(param1, param2):\n a_var = param\n'
mod = libutils.get_string_module(self.project, code)
a_func_scope = mod.get_scope().get_scopes()[0]
param1 = a_func_scope['param1']
self.assertEquals((mod, 1), param1.get_definition_location())
param2 = a_func_scope['param2']
self.assertEquals((mod, 1), param2.get_definition_location())
def test_module_get_resource(self):
module_resource = testutils.create_module(self.project, 'mod')
module = self.project.get_module('mod')
self.assertEquals(module_resource, module.get_resource())
string_module = libutils.get_string_module(
self.project, 'from mod import a_func\n')
self.assertEquals(None, string_module.get_resource())
def test_get_pyname_definition_location_class2(self):
code = 'class AClass(object):\n' \
' def __init__(self):\n' \
' self.an_attr = 10\n'
mod = libutils.get_string_module(self.project, code)
a_class = mod['AClass'].get_object()
an_attr = a_class['an_attr']
self.assertEquals((mod, 3), an_attr.get_definition_location())
def test_import_not_found_module_get_definition_location(self):
mod = libutils.get_string_module(
self.project, 'import doesnotexist\n')
does_not_exist = mod['doesnotexist']
self.assertEquals((None, None),
does_not_exist.get_definition_location())
def test_from_not_found_module_get_definition_location(self):
mod = libutils.get_string_module(
self.project, 'from doesnotexist import Sample\n')
sample = mod['Sample']
self.assertEquals((None, None), sample.get_definition_location())
def test_from_package_import_module_get_definition_location(self):
pkg = testutils.create_package(self.project, 'pkg')
testutils.create_module(self.project, 'mod', pkg)
pkg_mod = self.project.get_module('pkg.mod')
mod = libutils.get_string_module(
self.project, 'from pkg import mod\n')
imported_mod = mod['mod']
self.assertEquals((pkg_mod, 1),
imported_mod.get_definition_location())
def test_get_module_for_defined_pyobjects(self):
mod = libutils.get_string_module(
self.project, 'class AClass(object):\n pass\n')
a_class = mod['AClass'].get_object()
self.assertEquals(mod, a_class.get_module())
def test_get_definition_location_for_packages(self):
testutils.create_package(self.project, 'pkg')
init_module = self.project.get_module('pkg.__init__')
mod = libutils.get_string_module(self.project, 'import pkg\n')
pkg_pyname = mod['pkg']
self.assertEquals((init_module, 1),
pkg_pyname.get_definition_location())
def test_get_definition_location_for_filtered_packages(self):
pkg = testutils.create_package(self.project, 'pkg')
testutils.create_module(self.project, 'mod', pkg)
init_module = self.project.get_module('pkg.__init__')
mod = libutils.get_string_module(self.project, 'import pkg.mod')
pkg_pyname = mod['pkg']
self.assertEquals((init_module, 1),
pkg_pyname.get_definition_location())
def test_out_of_project_modules(self):
scope = libutils.get_string_scope(
self.project, 'import rope.base.project as project\n')
imported_module = scope['project'].get_object()
self.assertTrue('Project' in imported_module)
def test_file_encoding_reading(self):
contents = u'# -*- coding: utf-8 -*-\n' + \
u'#\N{LATIN SMALL LETTER I WITH DIAERESIS}\n'
mod = testutils.create_module(self.project, 'mod')
mod.write(contents)
self.project.get_module('mod')
def test_global_keyword(self):
contents = 'a_var = 1\ndef a_func():\n global a_var\n'
mod = libutils.get_string_module(self.project, contents)
global_var = mod['a_var']
func_scope = mod['a_func'].get_object().get_scope()
local_var = func_scope['a_var']
self.assertEquals(global_var, local_var)
def test_not_leaking_for_vars_inside_parent_scope(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def f(self):\n' \
' for my_var1, my_var2 in []:\n' \
' pass\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
c_class = pymod['C'].get_object()
self.assertFalse('my_var1' in c_class)
self.assertFalse('my_var2' in c_class)
def test_not_leaking_for_vars_inside_parent_scope2(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def f(self):\n' \
' for my_var in []:\n' \
' pass\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
c_class = pymod['C'].get_object()
self.assertFalse('my_var' in c_class)
def test_variables_defined_in_excepts(self):
mod = testutils.create_module(self.project, 'mod')
code = 'try:\n' \
' myvar1 = 1\n' \
'except:\n' \
' myvar2 = 1\n' \
'finally:\n' \
' myvar3 = 1\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
self.assertTrue('myvar1' in pymod)
self.assertTrue('myvar2' in pymod)
self.assertTrue('myvar3' in pymod)
def test_not_leaking_tuple_assigned_names_inside_parent_scope(self):
mod = testutils.create_module(self.project, 'mod')
code = 'class C(object):\n' \
' def f(self):\n' \
' var1, var2 = range(2)\n'
mod.write(code)
pymod = self.pycore.resource_to_pyobject(mod)
c_class = pymod['C'].get_object()
self.assertFalse('var1' in c_class)
@testutils.run_only_for_25
def test_with_statement_variables(self):
code = 'import threading\nwith threading.lock() as var: pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = libutils.get_string_module(self.project, code)
self.assertTrue('var' in pymod)
@testutils.run_only_for_25
def test_with_statement_variables_and_tuple_assignment(self):
code = 'class A(object):\n' \
' def __enter__(self):' \
' return (1, 2)\n' \
' def __exit__(self, type, value, tb):\n' \
' pass\n'\
'with A() as (a, b):\n' \
' pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = libutils.get_string_module(self.project, code)
self.assertTrue('a' in pymod)
self.assertTrue('b' in pymod)
@testutils.run_only_for_25
def test_with_statement_variable_type(self):
code = 'class A(object):\n' \
' def __enter__(self):\n' \
' return self\n'\
' def __exit__(self, type, value, tb):\n' \
' pass\n' \
'with A() as var:\n' \
' pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = libutils.get_string_module(self.project, code)
a_class = pymod['A'].get_object()
var = pymod['var'].get_object()
self.assertEquals(a_class, var.get_type())
@testutils.run_only_for_25
def test_with_statement_with_no_vars(self):
code = 'with open("file"): pass\n'
if sys.version_info < (2, 6, 0):
code = 'from __future__ import with_statement\n' + code
pymod = libutils.get_string_module(self.project, code)
pymod.get_attributes()
def test_check_for_else_block(self):
code = 'for i in range(10):\n' \
' pass\n' \
'else:\n' \
' myvar = 1\n'
mod = libutils.get_string_module(self.project, code)
a_var = mod['myvar']
self.assertEquals((mod, 4), a_var.get_definition_location())
def test_check_names_defined_in_whiles(self):
mod = libutils.get_string_module(
self.project, 'while False:\n myvar = 1\n')
a_var = mod['myvar']
self.assertEquals((mod, 2), a_var.get_definition_location())
def test_get_definition_location_in_tuple_assnames(self):
mod = libutils.get_string_module(
self.project, 'def f(x):\n x.z, a = range(2)\n')
x = mod['f'].get_object().get_scope()['x']
a = mod['f'].get_object().get_scope()['a']
self.assertEquals((mod, 1), x.get_definition_location())
self.assertEquals((mod, 2), a.get_definition_location())
def test_syntax_errors_in_code(self):
with self.assertRaises(exceptions.ModuleSyntaxError):
libutils.get_string_module(self.project, 'xyx print\n')
def test_holding_error_location_information(self):
try:
libutils.get_string_module(self.project, 'xyx print\n')
        except exceptions.ModuleSyntaxError as e:
self.assertEquals(1, e.lineno)
def test_no_exceptions_on_module_encoding_problems(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\nsdsdsd\n\xa9\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
mod.read()
def test_syntax_errors_when_cannot_decode_file2(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\n\xa9\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
with self.assertRaises(exceptions.ModuleSyntaxError):
self.pycore.resource_to_pyobject(mod)
def test_syntax_errors_when_null_bytes(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\n\x00\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
with self.assertRaises(exceptions.ModuleSyntaxError):
self.pycore.resource_to_pyobject(mod)
def test_syntax_errors_when_bad_strs(self):
mod = testutils.create_module(self.project, 'mod')
contents = '\n"\\x0"\n'
file = open(mod.real_path, 'wb')
file.write(contents)
file.close()
with self.assertRaises(exceptions.ModuleSyntaxError):
self.pycore.resource_to_pyobject(mod)
def test_not_reaching_maximum_recursions_with_from_star_imports(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('from mod2 import *\n')
mod2.write('from mod1 import *\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
pymod1.get_attributes()
def test_not_reaching_maximum_recursions_when_importing_variables(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('from mod2 import myvar\n')
mod2.write('from mod1 import myvar\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
pymod1['myvar'].get_object()
def test_not_reaching_maximum_recursions_when_importing_variables2(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write('from mod1 import myvar\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
pymod1['myvar'].get_object()
def test_pyobject_equality_should_compare_types(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write('var1 = ""\nvar2 = ""\n')
pymod1 = self.pycore.resource_to_pyobject(mod1)
self.assertEquals(pymod1['var1'].get_object(),
pymod1['var2'].get_object())
class PyCoreInProjectsTest(unittest.TestCase):
def setUp(self):
        super(PyCoreInProjectsTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
samplemod = testutils.create_module(self.project, 'samplemod')
code = 'class SampleClass(object):\n' \
' def sample_method():\n' \
' pass\n\n' \
'def sample_func():\n' \
' pass\n' \
'sample_var = 10\n\n' \
'def _underlined_func():\n' \
' pass\n\n'
samplemod.write(code)
package = testutils.create_package(self.project, 'package')
testutils.create_module(self.project, 'nestedmod', package)
def tearDown(self):
testutils.remove_project(self.project)
        super(PyCoreInProjectsTest, self).tearDown()
def test_simple_import(self):
mod = libutils.get_string_module(
self.project, 'import samplemod\n')
samplemod = mod['samplemod'].get_object()
self.assertEquals(get_base_type('Module'), samplemod.get_type())
def test_from_import_class(self):
mod = libutils.get_string_module(
self.project, 'from samplemod import SampleClass\n')
result = mod['SampleClass'].get_object()
self.assertEquals(get_base_type('Type'), result.get_type())
self.assertTrue('sample_func' not in mod.get_attributes())
def test_from_import_star(self):
mod = libutils.get_string_module(
self.project, 'from samplemod import *\n')
self.assertEquals(get_base_type('Type'),
mod['SampleClass'].get_object().get_type())
self.assertEquals(get_base_type('Function'),
mod['sample_func'].get_object().get_type())
self.assertTrue(mod['sample_var'] is not None)
def test_from_import_star_overwriting(self):
code = 'from samplemod import *\n' \
'class SampleClass(object):\n pass\n'
mod = libutils.get_string_module(self.project, code)
samplemod = self.project.get_module('samplemod')
sample_class = samplemod['SampleClass'].get_object()
self.assertNotEquals(sample_class,
mod.get_attributes()['SampleClass'].get_object())
def test_from_import_star_not_imporing_underlined(self):
mod = libutils.get_string_module(
self.project, 'from samplemod import *')
self.assertTrue('_underlined_func' not in mod.get_attributes())
def test_from_import_star_imports_in_functions(self):
mod = libutils.get_string_module(
self.project, 'def f():\n from os import *\n')
mod['f'].get_object().get_scope().get_names()
def test_from_package_import_mod(self):
mod = libutils.get_string_module(
self.project, 'from package import nestedmod\n')
self.assertEquals(get_base_type('Module'),
mod['nestedmod'].get_object().get_type())
    # XXX: Deciding to import everything on import star from packages
def xxx_test_from_package_import_star(self):
mod = libutils.get_string_module(
self.project, 'from package import *\n')
self.assertTrue('nestedmod' not in mod.get_attributes())
def test_unknown_when_module_cannot_be_found(self):
mod = libutils.get_string_module(
self.project, 'from doesnotexist import nestedmod\n')
self.assertTrue('nestedmod' in mod)
def test_from_import_function(self):
code = 'def f():\n from samplemod import SampleClass\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEquals(get_base_type('Type'),
scope.get_scopes()[0]['SampleClass'].
get_object().get_type())
def test_circular_imports(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('import mod2\n')
mod2.write('import mod1\n')
self.project.get_module('mod1')
def test_circular_imports2(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write(
'from mod2 import Sample2\nclass Sample1(object):\n pass\n')
mod2.write(
'from mod1 import Sample1\nclass Sample2(object):\n pass\n')
self.project.get_module('mod1').get_attributes()
def test_multi_dot_imports(self):
pkg = testutils.create_package(self.project, 'pkg')
pkg_mod = testutils.create_module(self.project, 'mod', pkg)
pkg_mod.write('def sample_func():\n pass\n')
mod = libutils.get_string_module(self.project, 'import pkg.mod\n')
self.assertTrue('pkg' in mod)
self.assertTrue('sample_func' in mod['pkg'].get_object()['mod'].
get_object())
def test_multi_dot_imports2(self):
pkg = testutils.create_package(self.project, 'pkg')
testutils.create_module(self.project, 'mod1', pkg)
testutils.create_module(self.project, 'mod2', pkg)
mod = libutils.get_string_module(
self.project, 'import pkg.mod1\nimport pkg.mod2\n')
package = mod['pkg'].get_object()
self.assertEquals(2, len(package.get_attributes()))
self.assertTrue('mod1' in package and
'mod2' in package)
def test_multi_dot_imports3(self):
pkg1 = testutils.create_package(self.project, 'pkg1')
pkg2 = testutils.create_package(self.project, 'pkg2', pkg1)
testutils.create_module(self.project, 'mod1', pkg2)
testutils.create_module(self.project, 'mod2', pkg2)
code = 'import pkg1.pkg2.mod1\nimport pkg1.pkg2.mod2\n'
mod = libutils.get_string_module(self.project, code)
package1 = mod['pkg1'].get_object()
package2 = package1['pkg2'].get_object()
self.assertEquals(2, len(package2.get_attributes()))
self.assertTrue('mod1' in package2 and 'mod2' in package2)
def test_multi_dot_imports_as(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod1.write('def f():\n pass\n')
mod = libutils.get_string_module(
self.project, 'import pkg.mod1 as mod1\n')
module = mod['mod1'].get_object()
self.assertTrue('f' in module)
# TODO: not showing unimported names as attributes of packages
def xxx_test_from_package_import_package(self):
pkg1 = testutils.create_package(self.project, 'pkg1')
pkg2 = testutils.create_package(self.project, 'pkg2', pkg1)
testutils.create_module(self.project, 'mod', pkg2)
mod = libutils.get_string_module(
self.project, 'from pkg1 import pkg2\n')
package = mod['pkg2']
self.assertEquals(0, len(package.get_attributes()))
def test_invalidating_cache_after_resource_change(self):
module = testutils.create_module(self.project, 'mod')
module.write('import sys\n')
mod1 = self.project.get_module('mod')
self.assertTrue('var' not in mod1.get_attributes())
module.write('var = 10\n')
mod2 = self.project.get_module('mod')
self.assertTrue('var' in mod2)
def test_invalidating_cache_after_resource_change_for_init_dot_pys(self):
pkg = testutils.create_package(self.project, 'pkg')
mod = testutils.create_module(self.project, 'mod')
init_dot_py = pkg.get_child('__init__.py')
init_dot_py.write('a_var = 10\n')
mod.write('import pkg\n')
pymod = self.project.get_module('mod')
self.assertTrue('a_var' in pymod['pkg'].get_object())
init_dot_py.write('new_var = 10\n')
self.assertTrue('a_var' not in
pymod['pkg'].get_object().get_attributes())
def test_invalidating_cache_after_rsrc_chng_for_nested_init_dot_pys(self):
pkg1 = testutils.create_package(self.project, 'pkg1')
pkg2 = testutils.create_package(self.project, 'pkg2', pkg1)
mod = testutils.create_module(self.project, 'mod')
init_dot_py = pkg2.get_child('__init__.py')
init_dot_py.write('a_var = 10\n')
mod.write('import pkg1\n')
pymod = self.project.get_module('mod')
self.assertTrue('a_var' in
pymod['pkg1'].get_object()['pkg2'].get_object())
init_dot_py.write('new_var = 10\n')
self.assertTrue('a_var' not in
pymod['pkg1'].get_object()['pkg2'].get_object())
def test_from_import_nonexistent_module(self):
code = 'from doesnotexistmod import DoesNotExistClass\n'
mod = libutils.get_string_module(self.project, code)
self.assertTrue('DoesNotExistClass' in mod)
self.assertEquals(get_base_type('Unknown'),
mod['DoesNotExistClass'].
get_object().get_type())
def test_from_import_nonexistent_name(self):
code = 'from samplemod import DoesNotExistClass\n'
mod = libutils.get_string_module(self.project, code)
self.assertTrue('DoesNotExistClass' in mod)
self.assertEquals(get_base_type('Unknown'),
mod['DoesNotExistClass'].
get_object().get_type())
def test_not_considering_imported_names_as_sub_scopes(self):
code = 'from samplemod import SampleClass\n'
scope = libutils.get_string_scope(self.project, code)
self.assertEquals(0, len(scope.get_scopes()))
def test_not_considering_imported_modules_as_sub_scopes(self):
scope = libutils.get_string_scope(
self.project, 'import samplemod\n')
self.assertEquals(0, len(scope.get_scopes()))
def test_inheriting_dotted_base_class(self):
code = 'import samplemod\n' \
'class Derived(samplemod.SampleClass):\n' \
' pass\n'
mod = libutils.get_string_module(self.project, code)
derived = mod['Derived'].get_object()
self.assertTrue('sample_method' in derived)
def test_self_in_methods(self):
code = 'class Sample(object):\n' \
' def func(self):\n' \
' pass\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
func_scope = scope.get_scopes()[0].get_scopes()[0]
self.assertEquals(sample_class,
func_scope['self'].get_object().get_type())
self.assertTrue('func' in func_scope['self'].get_object())
def test_none_assignments_in_classes(self):
code = 'class C(object):\n' \
' var = ""\n' \
' def f(self):\n' \
' self.var += "".join([])\n'
scope = libutils.get_string_scope(self.project, code)
c_class = scope['C'].get_object()
self.assertTrue('var' in c_class)
def test_self_in_methods_with_decorators(self):
code = 'class Sample(object):\n' \
' @staticmethod\n' \
' def func(self):\n' \
' pass\n'
scope = libutils.get_string_scope(self.project, code)
sample_class = scope['Sample'].get_object()
func_scope = scope.get_scopes()[0].get_scopes()[0]
self.assertNotEquals(sample_class,
func_scope['self'].get_object().get_type())
def test_location_of_imports_when_importing(self):
mod = testutils.create_module(self.project, 'mod')
mod.write('from samplemod import SampleClass\n')
scope = libutils.get_string_scope(
self.project, 'from mod import SampleClass\n')
sample_class = scope['SampleClass']
samplemod = self.project.get_module('samplemod')
self.assertEquals((samplemod, 1),
sample_class.get_definition_location())
def test_nested_modules(self):
pkg = testutils.create_package(self.project, 'pkg')
testutils.create_module(self.project, 'mod', pkg)
imported_module = self.project.get_module('pkg.mod')
scope = libutils.get_string_scope(self.project, 'import pkg.mod\n')
mod_pyobject = scope['pkg'].get_object()['mod']
self.assertEquals((imported_module, 1),
mod_pyobject.get_definition_location())
def test_reading_init_dot_py(self):
pkg = testutils.create_package(self.project, 'pkg')
init_dot_py = pkg.get_child('__init__.py')
init_dot_py.write('a_var = 1\n')
pkg_object = self.project.get_module('pkg')
self.assertTrue('a_var' in pkg_object)
def test_relative_imports(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object,
mod2_object.get_attributes()['mod1'].get_object())
def test_relative_froms(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod1.write('def a_func():\n pass\n')
mod2.write('from mod1 import a_func\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object['a_func'].get_object(),
mod2_object['a_func'].get_object())
def test_relative_imports_for_string_modules(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = libutils.get_string_module(
self.project, mod2.read(), mod2)
self.assertEquals(mod1_object, mod2_object['mod1'].get_object())
def test_relative_imports_for_string_scopes(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_scope = libutils.get_string_scope(self.project, mod2.read(),
mod2)
self.assertEquals(mod1_object, mod2_scope['mod1'].get_object())
@testutils.run_only_for_25
def test_new_style_relative_imports(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1', pkg)
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod2.write('from . import mod1\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object, mod2_object['mod1'].get_object())
@testutils.run_only_for_25
def test_new_style_relative_imports2(self):
pkg = testutils.create_package(self.project, 'pkg')
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2', pkg)
mod1.write('def a_func():\n pass\n')
mod2.write('from ..mod1 import a_func\n')
mod1_object = self.pycore.resource_to_pyobject(mod1)
mod2_object = self.pycore.resource_to_pyobject(mod2)
self.assertEquals(mod1_object['a_func'].get_object(),
mod2_object['a_func'].get_object())
def test_invalidating_cache_for_from_imports_after_resource_change(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod2.write('def a_func():\n print(1)\n')
mod1.write('from mod2 import a_func\na_func()\n')
pymod1 = self.project.get_module('mod1')
pymod2 = self.project.get_module('mod2')
self.assertEquals(pymod1['a_func'].get_object(),
pymod2['a_func'].get_object())
mod2.write(mod2.read() + '\n')
pymod2 = self.project.get_module('mod2')
self.assertEquals(pymod1['a_func'].get_object(),
pymod2['a_func'].get_object())
def test_invalidating_superclasses_after_change(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod2 = testutils.create_module(self.project, 'mod2')
mod1.write('class A(object):\n def func1(self):\n pass\n')
mod2.write('import mod1\nclass B(mod1.A):\n pass\n')
b_class = self.project.get_module('mod2')['B'].get_object()
self.assertTrue('func1' in b_class)
mod1.write('class A(object):\n def func2(self):\n pass\n')
self.assertTrue('func2' in b_class)
def test_caching_pymodule_with_syntax_errors(self):
self.project.prefs['ignore_syntax_errors'] = True
self.project.prefs['automatic_soa'] = True
self.project.pycore._init_automatic_soa()
source = 'import sys\nab cd'
mod = testutils.create_module(self.project, 'mod')
mod.write(source)
from rope.contrib import fixsyntax
fixer = fixsyntax.FixSyntax(self.project, source, mod, 10)
pymodule = fixer.get_pymodule()
self.assertTrue(pymodule.source_code.startswith('import sys\npass\n'))
class TextChangeDetectorTest(unittest.TestCase):
def test_trivial_case(self):
detector = _TextChangeDetector('\n', '\n')
self.assertFalse(detector.is_changed(1, 1))
def test_one_line_change(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n')
self.assertFalse(detector.is_changed(1, 1))
self.assertTrue(detector.is_changed(2, 2))
def test_line_expansion(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n4\n2\n')
self.assertFalse(detector.is_changed(1, 1))
self.assertFalse(detector.is_changed(2, 2))
def test_line_removals(self):
detector = _TextChangeDetector('1\n3\n4\n2\n', '1\n2\n')
self.assertFalse(detector.is_changed(1, 1))
self.assertTrue(detector.is_changed(2, 3))
self.assertFalse(detector.is_changed(4, 4))
def test_multi_line_checks(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n')
self.assertTrue(detector.is_changed(1, 2))
def test_consume_change(self):
detector = _TextChangeDetector('1\n2\n', '1\n3\n')
self.assertTrue(detector.is_changed(1, 2))
self.assertTrue(detector.consume_changes(1, 2))
self.assertFalse(detector.is_changed(1, 2))
class PyCoreProjectConfigsTest(unittest.TestCase):
def setUp(self):
super(PyCoreProjectConfigsTest, self).setUp()
self.project = None
def tearDown(self):
if self.project:
testutils.remove_project(self.project)
super(PyCoreProjectConfigsTest, self).tearDown()
def test_python_files_config(self):
self.project = testutils.sample_project(python_files=['myscript'])
myscript = self.project.root.create_file('myscript')
self.assertTrue(self.project.pycore.is_python_file(myscript))
def test_ignore_bad_imports(self):
self.project = testutils.sample_project(ignore_bad_imports=True)
pymod = libutils.get_string_module(
self.project, 'import some_nonexistent_module\n')
self.assertFalse('some_nonexistent_module' in pymod)
def test_ignore_bad_imports_for_froms(self):
self.project = testutils.sample_project(ignore_bad_imports=True)
pymod = libutils.get_string_module(
self.project, 'from some_nonexistent_module import var\n')
self.assertFalse('var' in pymod)
def test_reporting_syntax_errors_with_force_errors(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
mod = testutils.create_module(self.project, 'mod')
mod.write('syntax error ...\n')
with self.assertRaises(exceptions.ModuleSyntaxError):
self.project.pycore.resource_to_pyobject(mod, force_errors=True)
def test_reporting_syntax_errors_in_strings_with_force_errors(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
with self.assertRaises(exceptions.ModuleSyntaxError):
libutils.get_string_module(
self.project, 'syntax error ...', force_errors=True)
def test_not_raising_errors_for_strings_with_ignore_errors(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
libutils.get_string_module(self.project, 'syntax error ...')
def test_reporting_syntax_errors_with_force_errors_for_packages(self):
self.project = testutils.sample_project(ignore_syntax_errors=True)
pkg = testutils.create_package(self.project, 'pkg')
pkg.get_child('__init__.py').write('syntax error ...\n')
with self.assertRaises(exceptions.ModuleSyntaxError):
self.project.pycore.resource_to_pyobject(pkg, force_errors=True)
def suite():
result = unittest.TestSuite()
result.addTests(unittest.makeSuite(PyCoreTest))
result.addTests(unittest.makeSuite(PyCoreInProjectsTest))
result.addTests(unittest.makeSuite(TextChangeDetectorTest))
result.addTests(unittest.makeSuite(PyCoreProjectConfigsTest))
return result
if __name__ == '__main__':
unittest.main()
|
from unittest import TestCase
from kdrl.agents.static import *
from kdrl.trainer import *
import numpy as np
class TestStaticAgent(TestCase):
def test_random_agent(self):
agent = RandomAgent(action_space=2)
def random_test(f):
first_action = f()
for i in range(100):
action = f()
if first_action != action:
return True
return False
self.assertTrue(random_test(agent.select_best_action))
self.assertTrue(random_test(agent.start_episode))
self.assertTrue(random_test(agent.step))
def test_constant_agent(self):
agent = ConstantAgent(action_space=100, constant_action=0)
def const_test(value):
agent.constant_action = value
self.assertEqual(value, agent.start_episode())
self.assertEqual(value, agent.step())
self.assertEqual(value, agent.select_best_action())
const_test(0)
const_test(2)
const_test([1, 2])
|
from collections import defaultdict
from typing import List


class Solution:
def prisonAfterNDays(self, cells: List[int], N: int) -> List[int]:
seen = defaultdict(int)
is_fast_forwarded = False
while N > 0:
if not is_fast_forwarded:
state_key = tuple(cells)
last_seen_index = seen[state_key]
if last_seen_index != 0:
N %= seen[state_key] - N
is_fast_forwarded = True
else:
seen[state_key] = N
if N > 0:
N -= 1
next_day_cells = self.nextDay(cells)
cells = next_day_cells
return cells
def nextDay(self, cells: List[int]):
ret = []
for i in range(len(cells)):
if i > 0 and i < 7 and cells[i-1] == cells[i+1]:
ret.append(1)
else:
ret.append(0)
return ret
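# Illustrative usage (a hypothetical harness, not part of the original snippet):
# the input is LeetCode 957's first example. `seen` maps each state to the N at
# which it was first recorded, so once a state repeats, `seen[state_key] - N`
# is the cycle length and N is reduced modulo that cycle.
if __name__ == '__main__':
    print(Solution().prisonAfterNDays([0, 1, 0, 1, 1, 0, 0, 1], 7))
    # expected output: [0, 0, 1, 1, 0, 0, 0, 0]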
|
from win32com.client import (
GetObject,
Dispatch,
)
from pythoncom import (
com_error,
)
import datetime
import re
from textwrap import fill
# --------- #
__test__ = {}
# --------- #
# 12/30/1899, the zero-Date for ADO = 693594
_ADO_zeroHour = datetime.date(1899, 12, 30).toordinal()
_time_zero = datetime.time(0, 0, 0)
def ADO_PyTime_To_Datetime(v):
v_date, v_time = divmod(float(v), 1)
datetime_date = datetime.date.fromordinal(
int(round(v_date + _ADO_zeroHour)))
v_time = int(round(86400 * v_time))
v_hour, v_min = divmod(v_time, 3600)
v_min, v_sec = divmod(v_min, 60)
datetime_time = datetime.time(
int(v_hour), int(v_min), int(v_sec))
return (datetime_date, datetime_time)
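# Illustrative example (not in the original module): 1.5 in ADO time is one and
# a half days past the 1899-12-30 zero date, i.e. noon on 1899-12-31:
# >>> ADO_PyTime_To_Datetime(1.5)
# (datetime.date(1899, 12, 31), datetime.time(12, 0))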
# --------- #
class SentinalSingleton(object):
def __str__(self):
return self.__class__.__name__
__repr__ = __str__
class UntranslatableCOM(SentinalSingleton):
pass
UntranslatableCOM = UntranslatableCOM()
class UnrecognizedCOM(SentinalSingleton):
pass
UnrecognizedCOM = UnrecognizedCOM()
re_read_write_buffer = re.compile(
    r'^\<read-write buffer '
    r'ptr 0x([0-9A-F]+)\, '
    r'size ([0-9A-F]+) '
    r'at 0x([0-9A-F]+)\>$')
__test__['re_read_write_buffer'] = r'''
>>> bool(re_read_write_buffer.match(
... '<read-write buffer ptr 0x00A4DF40, size 28 at 0x00A4DF20>'))
True
'''
def _process_COM_value(V):
"""
>>> _process_COM_value(3)
3
>>> _process_COM_value((3, 3, 3))
[3, 3, 3]
>>> _process_COM_value((UntranslatableCOM, UntranslatableCOM))
UntranslatableCOM
>>> _process_COM_value((UntranslatableCOM, 3))
[UntranslatableCOM, 3]
>>> _process_COM_value((UntranslatableCOM, UnrecognizedCOM))
[UntranslatableCOM, UnrecognizedCOM]
"""
if V in [UntranslatableCOM, UnrecognizedCOM, None]:
return V
elif isinstance(
V,
(
str,
float,
int,
datetime.date,
datetime.time,
datetime.datetime
)):
return V
    # NOTE: unreachable in Python 3 -- str is already handled above; this branch
    # looks like residue of a Python 2 ``unicode`` check.
    elif isinstance(V, str):
try:
return V.encode('latin-1')
except UnicodeEncodeError:
return V
elif isinstance(V, (tuple, list)):
L = list(map(_process_COM_value, V))
if L == ([UntranslatableCOM] * len(L)):
return UntranslatableCOM
else:
return L
elif type(V).__name__ == 'time':
d, t = ADO_PyTime_To_Datetime(V)
if t == _time_zero:
return d
else:
return datetime.datetime.combine(d, t)
else:
R = repr(V)
if R == '<COMObject <unknown>>':
return UntranslatableCOM
elif re_read_write_buffer.match(R):
return UntranslatableCOM
else:
return UnrecognizedCOM
#for S in ['V', 'type(V)', 'str(V)', 'repr(V)', 'type(V).__name__']:
# print '%s: %r' % (S, eval(S))
#
#raise ValueError, V
# --------- #
class LDAP_COM_Wrapper(object):
def __init__(self, LDAP_COM_Object):
self.__dict__[None] = LDAP_COM_Object
def __getattr__(self, name):
LDAP_COM_Object = self.__dict__[None]
try:
V = LDAP_COM_Object.Get(name)
except com_error:
pass
else:
return _process_COM_value(V)
try:
V = getattr(LDAP_COM_Object, name)
except (AttributeError, com_error):
pass
else:
return _process_COM_value(V)
raise AttributeError
def __getitem__(self, name):
_getattr = self.__getattr__
try:
return _getattr(name)
except AttributeError:
raise KeyError
def LDAP_COM_to_dict(X):
d = {}
for i in range(X.PropertyCount):
P = X.Item(i)
Name = P.Name
d[Name] = _process_COM_value(X.Get(Name))
return d
def LDAP_select_all_iterator(Connection, LDAP_query_string):
R = Connection.Execute(LDAP_query_string)[0]
while not R.EOF:
d = {}
for f in R.Fields:
d[f.Name] = _process_COM_value(f.Value)
yield d
R.MoveNext()
def LDAP_select_then_ADsPath_iterator(Connection, LDAP_query_string):
for r in LDAP_select_all_iterator(Connection, LDAP_query_string):
X = GetObject(r['ADsPath'])
X.GetInfo()
yield LDAP_COM_to_dict(X)
# --------- #
def _sort_helper(d):
s = d.get('name', '<<<MISSING>>>')
try:
s = str(s)
except UnicodeEncodeError:
s = repr(s)
return s.lower()
def _get_all_of_objectClass(
Connection, defaultNamingContext, objectClass):
LDAP_query_string = (
"Select * "
"from 'LDAP://%s' "
"where objectClass = '%s'" % (
defaultNamingContext,
objectClass,
))
print('LDAP_query_string: %r' % (LDAP_query_string,))
L = list(LDAP_select_then_ADsPath_iterator(
Connection, LDAP_query_string))
L.sort(key=_sort_helper)
for d in L:
print('\n')
for k in ['name', 'description']:
v = d.get(k, '<<<MISSING>>>')
print(fill(
'%s: %s' % (k, v),
width=70,
initial_indent='',
subsequent_indent=' ',
))
for k in sorted(d.keys()):
try:
k = str(k)
except UnicodeEncodeError:
continue
v = d[k]
if v is UntranslatableCOM:
continue
try:
v = str(v)
except UnicodeEncodeError:
v = repr(v)
print(fill(
'%s: %s' % (k, v),
width=70,
initial_indent=' ',
subsequent_indent=' ',
))
def main():
Connection = Dispatch("ADODB.Connection")
Connection.Open("Provider=ADSDSOObject")
defaultNamingContext = LDAP_COM_Wrapper(
GetObject('LDAP://rootDSE'))['defaultNamingContext']
print('defaultNamingContext: %r' % (defaultNamingContext,))
for objectClass in ['computer', 'user', 'group']:
print()
try:
_get_all_of_objectClass(
Connection, defaultNamingContext, objectClass)
except com_error:
print((
'<<<REPORT FAILED FOR: objectClass %s>>>' % (
objectClass,)))
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
Quantum Fourier Transform examples.
Note: if you have only cloned the Qiskit repository but not
used `pip install`, the examples only work from the root directory.
"""
import math
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import register, execute
import Qconfig
###############################################################
# Set the backend name and coupling map.
###############################################################
coupling_map = [[0, 1], [0, 2], [1, 2], [3, 2], [3, 4], [4, 2]]
###############################################################
# Make a quantum program for the GHZ state.
###############################################################
def input_state(circ, q, n):
"""n-qubit input state for QFT that produces output 1."""
for j in range(n):
circ.h(q[j])
circ.u1(math.pi/float(2**(j)), q[j]).inverse()
def qft(circ, q, n):
"""n-qubit QFT on q in circ."""
for j in range(n):
for k in range(j):
circ.cu1(math.pi/float(2**(j-k)), q[j], q[k])
circ.h(q[j])
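# For example (illustrative), qft(circ, q, 2) emits exactly:
#   circ.h(q[0]); circ.cu1(pi/2, q[1], q[0]); circ.h(q[1])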
q = QuantumRegister(5, "q")
c = ClassicalRegister(5, "c")
qft3 = QuantumCircuit(q, c, name="qft3")
qft4 = QuantumCircuit(q, c, name="qft4")
qft5 = QuantumCircuit(q, c, name="qft5")
input_state(qft3, q, 3)
qft3.barrier()
qft(qft3, q, 3)
qft3.barrier()
for j in range(3):
qft3.measure(q[j], c[j])
input_state(qft4, q, 4)
qft4.barrier()
qft(qft4, q, 4)
qft4.barrier()
for j in range(4):
qft4.measure(q[j], c[j])
input_state(qft5, q, 5)
qft5.barrier()
qft(qft5, q, 5)
qft5.barrier()
for j in range(5):
qft5.measure(q[j], c[j])
print(qft3.qasm())
print(qft4.qasm())
print(qft5.qasm())
###############################################################
# Set up the API and execute the program.
###############################################################
register(Qconfig.APItoken, Qconfig.config["url"])
result = execute([qft3, qft4, qft5], backend='ibmq_qasm_simulator',
coupling_map=coupling_map, shots=1024).result()
print(result)
print(result.get_ran_qasm("qft3"))
print(result.get_counts("qft3"))
print(result.get_counts("qft4"))
print(result.get_counts("qft5"))
result = execute([qft3], backend='ibmq_5_tenerife', shots=1024).result()
print(result)
print(result.get_ran_qasm("qft3"))
print(result.get_counts("qft3"))
|
from icemac.addressbook.browser.search.result.handler.export.base import (
BaseExport)
import icemac.addressbook.export.xls.simple
class DefaultsExport(BaseExport):
"""Exporter for default data and addresses."""
exporter_class = icemac.addressbook.export.xls.simple.DefaultsExport
class CompleteExport(BaseExport):
"""Exporter for all data and addresses."""
exporter_class = icemac.addressbook.export.xls.simple.CompleteExport
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Error bars in plots:
# %% [markdown]
# ### Sources of uncertainty:
# In these calculations we are considering the following uncertainties
# 1. Model uncertainty
# 2. IRF uncertainty/climate sensitivity uncertainty
#
# Model uncertainty is represented by the spread in the ERF produced by the considered RCMIP models. IRF uncertainty is the uncertainty in how a given ERF translates into a temperature change.
#
#
# %% [markdown]
# ## IRF:
# In these calculations we use the impulse response function:
# \begin{align*}
# \text{IRF}(t)=& 0.885\cdot (\frac{0.587}{4.1}\cdot exp(\frac{-t}{4.1}) + \frac{0.413}{249} \cdot exp(\frac{-t}{249}))\\
# \text{IRF}(t)= & \sum_{i=1}^2\frac{\alpha \cdot c_i}{\tau_i}\cdot exp\big(\frac{-t}{\tau_i}\big)
# \end{align*}
# with $\alpha = 0.885$, $c_1=0.587$, $\tau_1=4.1$, $c_2=0.413$ and $\tau_2 = 249$.
# %% [markdown]
# ### Calculate $\Delta T$ from ERF:
# Thus we can estimate the mean surface temperature change from some reference year (here 0) by convolving the ERF with the IRF. We then use the estimated ERF$_x$ for some forcing agent(s) $x$ as follows:
# %% [markdown]
# \begin{align*}
# \Delta T_x (t) &= \int_0^t ERF_x(t') IRF(t-t') dt' \\
# \end{align*}
# %% [markdown]
# Now, define $\Delta_x$ as follows:
# \begin{align}
# \Delta_x = & \frac{1}{\alpha} \int_0^t ERF_x(t') IRF(t-t') dt'\\
# =& \frac{1}{\alpha} \int_0^t ERF_x(t') \sum_{i=1}^2\frac{\alpha \cdot c_i}{\tau_i}\cdot exp\big(\frac{-(t-t')}{\tau_i}\big)dt' \\
# =& \int_0^t ERF_x(t') \sum_{i=1}^2\frac{c_i}{\tau_i}\cdot exp\big(\frac{-(t-t')}{\tau_i}\big)dt' \\
# \end{align}
# %% [markdown]
# So, then:
# \begin{align}
# \Delta T_x (t) = \alpha \cdot \Delta_x(t)
# \end{align}
# %% [markdown]
# This means that the uncertainty in $\Delta T$ can be obtained by propagating the uncertainty of the product of the parameter $\alpha$ and the ERF$_x$-derived quantity $\Delta_x$.
# %% [markdown]
# ### Distribution of a product of two independent variables:
# Assuming these two are independent we get:
# \begin{align}
# Var(\Delta T_x) = &Var(\alpha\cdot \Delta_{x})\\
# = & (Var(\alpha) +E(\alpha)^2)(Var(\Delta_{x}) + E( \Delta_{x})^2) - E(\alpha)^2E(\Delta_{x})^2
# \end{align}
# %% [markdown]
# Let $\sigma_x= \sqrt{Var(\Delta_{x})}$, $\mu_x= E(\Delta_{x})$, $\sigma_\alpha = \sqrt{Var(\alpha)}$ and $\mu_\alpha = E(\alpha)$
# %% [markdown]
# \begin{align}
# Var(\Delta T_x) = (\sigma_x^2 + \mu_x^2)(\sigma_\alpha^2+\mu_\alpha^2) - \mu_x^2 \mu_\alpha^2
# \end{align}
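# %% [markdown]
# A quick Monte Carlo sanity check of the product-variance identity above (a minimal sketch with made-up $\mu$ and $\sigma$ values, independent of the real ERF data):
# %%
import numpy as np
rng = np.random.default_rng(0)
mu_a, sig_a = 0.885, 0.1  # alpha; sig_a is an assumed value for illustration
mu_x, sig_x = 0.5, 0.2    # Delta_x; made-up numbers
a = rng.normal(mu_a, sig_a, 1_000_000)
x = rng.normal(mu_x, sig_x, 1_000_000)
analytic = (sig_x**2 + mu_x**2) * (sig_a**2 + mu_a**2) - mu_x**2 * mu_a**2
print(np.var(a * x), analytic)  # the two should agree closely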
# %% [markdown]
# ## Method:
# %% [markdown]
# The following method is used:
# 1. Intra model variability from $ERF$ from different models
# 2. Assume this is independent of the $IRF$
# 3. Combine these two uncertainties with $Var(\Delta T_x) = (\sigma_x^2 + \mu_x^2)(\sigma_\alpha^2+\mu_\alpha^2) - \mu_x^2 \mu_\alpha^2$
# %% [markdown]
# ## Sums and differences:
# For any additive combination of several components (either a sum of two SLCFs or a difference etc.), e.g. the difference between the methane contribution $X_i$ and the total anthropogenic contribution $Y$, we would have some covariance between $X$ and $Y$, because a model with a large $X_i$ will normally have a large $Y$ as well.
# So either we can take this into account explicitly:
# $$ Var(X+Y) = Var(X)+Var(Y) +2Cov(X,Y)$$
# Alternatively, we can treat the sum or difference of the ERF as one stochastic variable and $\alpha$ as another, and assume they are independent. The independence of the errors on the ECS and the ERF is a good assumption here. Secondly, we then do not need to consider the covariance of the ERF between different components, because it is implicitly covered.
#
#
# ### Summary:
# Let $\sigma_{\alpha}$ and $\mu_{\alpha}$ be the standard deviation and mean for a normal distribution of the $\alpha$ parameter in ECS. Secondly, let $X_i$ be a sample of
#
# %% [markdown]
# \begin{align}
# X_i = & \frac{1}{\alpha} \int_0^t ERF_i(t') IRF(t-t') dt'\\
# =& \int_0^t ERF_i(t') \sum_{j=1}^2\frac{c_j}{\tau_j}\cdot exp\big(\frac{-(t-t')}{\tau_j}\big)dt' \\
# \end{align}
# where $ERF_i$ is some difference or sum of different ERF components.
# %% [markdown]
# Then
# \begin{align}
# \sigma_{X_i} = \sqrt{\frac{\sum_k(X_{i,k}-\mu_{X_i})^2}{N}}
# \end{align}
# %% [markdown]
# and we can get
# \begin{align}
# \sigma_T^2 = (\sigma_{X_i}^2+\mu_{X_i}^2)(\sigma_{\alpha}^2 + \mu_{\alpha}^2) - \mu_{X_i}^2\mu_{\alpha}^2
# \end{align}
# %% [markdown]
# ### Technical calculation:
# From any calculation of
# \begin{align}
# \Delta T_{\alpha=\mu_\alpha} = \sum_i T_i - \sum_k T_k
# \end{align}
# for all models, calculated with IRF such that $\alpha = \mu_{\alpha}$, we can find
# \begin{align}
# X_{i,k} = \frac{1}{\mu_{\alpha}} \Delta T_{\alpha=\mu_\alpha,k}
# \end{align}
# where the index $k$ signifies the different models.
#
# And thus we can easily calculate
# \begin{align}
# \sigma_{X_i} = \sqrt{\frac{\sum_k(X_{i,k}-\mu_{X_i})^2}{N}}
# \end{align}
# %% [markdown]
# since
# \begin{align}
# \mu_{X_i} = \frac{1}{\mu_\alpha}\mu_{\Delta T_{\alpha=\mu_\alpha}}
# \end{align}
# we have
# \begin{align}
# \sigma_{X_i} = \frac{1}{\mu_\alpha} \sigma_{\Delta T_{\alpha=\mu_\alpha}}.
# \end{align}
# %% [markdown]
# ## Finally:
# Let $\Delta T = X_{i}\cdot \alpha $ and assume $X_i$ and $\alpha$ independent.
# Then
# \begin{align}
# \sigma_{\Delta T}^2 =& (\sigma_{X_i}^2+\mu_{X_i}^2)(\sigma_{\alpha}^2 + \mu_{\alpha}^2) - \mu_{X_i}^2\mu_{\alpha}^2\\
# \sigma_{\Delta T}^2 =& \frac{1}{\mu_\alpha^2}\big[(\sigma_{\Delta T_{\alpha=\mu_\alpha} }^2 +\mu_{\Delta T_{\alpha=\mu_\alpha}}^2)(\sigma_{\alpha}^2 + \mu_{\alpha}^2) - \mu_{\Delta T_{\alpha=\mu_\alpha}}^2\mu_{\alpha}^2 \big]\\
# \sigma_{\Delta T} =& \frac{1}{\mu_\alpha}\big[(\sigma_{\Delta T_{\alpha=\mu_\alpha} }^2 +\mu_{\Delta T_{\alpha=\mu_\alpha}}^2)(\sigma_{\alpha}^2 + \mu_{\alpha}^2) - \mu_{\Delta T_{\alpha=\mu_\alpha}}^2\mu_{\alpha}^2 \big]^{\frac{1}{2}}
# \end{align}
#
# %%
def sigma_DT(dT, sig_alpha, mu_alpha, dim='climatemodel'):
    """Propagate model spread in dT and alpha uncertainty into sigma(Delta T)."""
    sig_DT = dT.std(dim)
    mu_DT = dT.mean(dim)
    var = (sig_DT**2 + mu_DT**2) * (sig_alpha**2 + mu_alpha**2) - mu_DT**2 * mu_alpha**2
    return var**0.5 / mu_alpha
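# %% [markdown]
# Minimal usage sketch of `sigma_DT`, assuming `dT` is an xarray `DataArray` with a `climatemodel` dimension (synthetic numbers, not real model output; $\mu_\alpha=0.885$ comes from the IRF above, while $\sigma_\alpha=0.17$ is an assumed value for illustration):
# %%
import numpy as np
import xarray as xr
dT_demo = xr.DataArray(
    np.random.default_rng(1).normal(1.0, 0.1, size=(6, 10)),
    dims=('climatemodel', 'year'))
sigma_DT(dT_demo, sig_alpha=0.17, mu_alpha=0.885)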
# %% [markdown]
# In other words, it suffices to know
#
# a) $\sigma_\alpha$ and $\mu_\alpha$ and
#
# b) $\Delta T_x$ calculated for a fixed $\mu_\alpha$
#
# to compute the uncertainty bars.
|
"""Tests for ``iter_dataframes`` generator property."""
import numpy as np
from waves import Sound
def test_iter_dataframes_mono_from_file(mono_sound):
data = mono_sound.dataframes
for i, frame in enumerate(mono_sound.iter_dataframes):
assert frame == data[i]
def test_iter_dataframes_stereo_from_file(stereo_sound):
data = stereo_sound.dataframes
for i, frame in enumerate(stereo_sound.iter_dataframes):
assert frame[0] == data[i][0]
assert frame[1] == data[i][1]
def test_iter_dataframes_mono_from_function(mono_ttf_gen):
fps, frequency, volume = (44100, 110, 0.5)
time_to_frame = mono_ttf_gen(fps=fps, frequency=frequency, volume=volume)
t_fps = 1 / fps
sound = Sound.from_datatimes(time_to_frame, fps=fps).with_duration(0.5)
for i, frame in enumerate(sound.iter_dataframes):
assert frame == time_to_frame(i * t_fps)
def test_iter_dataframes_stereo_from_function():
fps, frequencies, volume = (44100, (110, 440), 0.5)
amplitude, t_fps = (np.iinfo(np.int16).max * volume, 1 / fps)
time_to_frame_left = lambda t: (
np.sin(frequencies[0] * 2 * np.pi * t) * amplitude
).astype(np.int16)
time_to_frame_right = lambda t: (
np.sin(frequencies[1] * 2 * np.pi * t) * amplitude
).astype(np.int16)
sound = Sound.from_datatimes(
lambda t: [time_to_frame_left(t), time_to_frame_right(t)], fps=fps
).with_duration(0.5)
for i, frame in enumerate(sound.iter_dataframes):
assert frame[0] == time_to_frame_left(i * t_fps)
assert frame[1] == time_to_frame_right(i * t_fps)
|
import re
import sys
VERSION = '0.7.1'
VERSION_STRING = "WebIOPi/%s/Python%d.%d" % (VERSION, sys.version_info.major, sys.version_info.minor)
PYTHON_MAJOR = sys.version_info.major
BOARD_REVISION = 0
_MAPPING = [[], [], []]
_MAPPING[1] = ["V33", "V50", 0, "V50", 1, "GND", 4, 14, "GND", 15, 17, 18, 21, "GND", 22, 23, "V33", 24, 10, "GND", 9, 25, 11, 8, "GND", 7]
_MAPPING[2] = ["V33", "V50", 2, "V50", 3, "GND", 4, 14, "GND", 15, 17, 18, 27, "GND", 22, 23, "V33", 24, 10, "GND", 9, 25, 11, 8, "GND", 7]
try:
with open("/proc/cpuinfo") as f:
        rc = re.compile(r"Revision\s*:\s(.*)\n")
        info = f.read()
        result = rc.search(info)
        if result is not None:
hex_cpurev = result.group(1)
if hex_cpurev.startswith("1000"):
hex_cpurev = hex_cpurev[-4:]
cpurev = int(hex_cpurev, 16)
BOARD_REVISION = 1 if (cpurev < 4) else 2
except Exception:
    # Not on a Raspberry Pi (or /proc/cpuinfo unreadable): leave BOARD_REVISION at 0.
    pass
MAPPING = _MAPPING[BOARD_REVISION]
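# Example (illustrative): on a revision-2 board, physical header pin 12 carries
# GPIO 18, i.e. MAPPING[11] == 18 (list index = header pin number - 1).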
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-04-28 14:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('website', '0044_remove_promotion_referred'),
]
operations = [
migrations.CreateModel(
name='ClaimedPromotion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('active', models.BooleanField(default=True)),
('promotion', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='promotion', to='website.Promotion')),
('referred', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='referred_pocket', to='website.Pocket')),
('referrer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='referrer_pocket', to='website.Pocket')),
],
),
]
|
"""Test all utils."""
from django.test import TestCase
from oauth2client.client import OAuth2WebServerFlow
from geokey.contributions.utils import (
my_flow_from_clientsecrets,
get_args,
get_authenticated_service,
initialize_upload
)
class GetArgsTest(TestCase):
"""Test for method 'get_args'."""
def setUp(self):
"""Set up tests."""
self.path = 'path/sample/test/test_file.mp4'
self.name = 'test_file'
def test_method(self):
"""Test method."""
args = get_args(self.name, self.path)
args_var = vars(args)
self.assertEqual(args_var['file'], self.path)
self.assertEqual(args_var['title'], self.name)
class MyFlowFromClientSecretTest(TestCase):
"""Test for method 'my_flow_from_clientsecrets'."""
def setUp(self):
"""Set up tests."""
self.youtube_uploader = {
'scope': "https://www.googleapis.com/auth/youtube.upload",
'auth_host_name': 'localhost',
'auth_host_port': [8080, 8000],
'client_info': {
"client_id": "109430273076-t3e30ie5aseb3laj2da0gkpikir6b0e9.apps.googleusercontent.com",
"client_secret": "o3U69gnO4FRipA1Q3K6gi0_N",
"redirect_uris": ["http://localhost"],
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token"
},
}
def test_method(self):
"""Test method."""
flow = my_flow_from_clientsecrets(
self.youtube_uploader['client_info'],
self.youtube_uploader['scope'])
client_info = self.youtube_uploader['client_info']
constructor_kwargs = {
'redirect_uri': None,
'auth_uri': client_info['auth_uri'],
'token_uri': client_info['token_uri'],
'login_hint': None,
}
flow_new = OAuth2WebServerFlow(
client_info['client_id'], client_info['client_secret'],
self.youtube_uploader['scope'], **constructor_kwargs)
self.assertEqual(flow.client_id, flow_new.client_id)
self.assertEqual(flow.scope, flow_new.scope)
self.assertEqual(flow.client_secret, flow_new.client_secret)
|
class BaseError(Exception):
def __init__(self, status, title, body=''):
self._status = status
self._title = title
self._body = body
@property
def title(self):
return self._title
@property
def body(self):
return self._body
@property
def status(self):
return self._status
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, str(self))
def __str__(self):
return '%s: %s' % (self.status, self.title)
class BaseHTTPError(BaseError):
_status = 999
def __init__(self, title, body=''):
super(BaseHTTPError, self).__init__(self._status, title, body)
class BadRequest(BaseHTTPError):
_status = 400
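# Illustrative usage (hypothetical, not part of the original module): subclasses
# only override _status; title and body are supplied per instance.
if __name__ == '__main__':
    try:
        raise BadRequest('missing field', body='"name" is required')
    except BaseError as e:
        print(repr(e))  # -> BadRequest(400: missing field)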
|
import time
from pyrunner import Worker
class SayHello(Worker):
def run(self):
self.logger.info('Hello World!')
return
class FailMe(Worker):
def run(self):
return 1
|
#!/usr/bin/env python3
""" Compute greatest common factor of two integers provided by the user """
from get_integer_from_user import get_integer
def gcd_recursive(num1, num2):
""" (int, int) -> int
Uses Euclid's method to compute the greatest common factor
(greatest common divisor) of two integers, <num1> and <num2>
Returns greatest common factor (gcd) of the two integers
"""
if isinstance(num1, int) and isinstance(num2, int):
if num1 == 0:
return num2
elif num2 == 0:
return num1
else:
if num1 > num2: # Handle RuntimeError: maximum recursion depth exceeded in comparison
return gcd_recursive(num2, num1 % num2)
else:
return gcd_recursive(num1, num2 % num1)
else:
return 'Expected Two Integers'
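# Example trace (illustrative): gcd_recursive(462, 1071)
# -> gcd_recursive(462, 147) -> gcd_recursive(147, 21) -> gcd_recursive(21, 0) -> 21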
def gcd_iterative(num1, num2):
""" (int, int) -> int
Uses a naive iterative algorithm to compute gcd of two integers,
<num1> and <num2>.
Returns greatest common factor (gcd) of the two integers
"""
    if isinstance(num1, int) and isinstance(num2, int):
        # Handle the zero cases explicitly: gcd(n, 0) == n. This fixes the old
        # bug where gcd_iterative(900, 0) returned the wrong answer.
        if num1 == 0:
            return num2
        if num2 == 0:
            return num1
        min_num = num1 if num1 < num2 else num2  # Determine the smaller value
        largest_factor = 1  # Universal factor
        for potential_gcd in range(1, min_num + 1):  # Consider every int up to the smaller value
            if num1 % potential_gcd == 0 and num2 % potential_gcd == 0:
                largest_factor = potential_gcd  # Re-assign
        return largest_factor
else:
return 'Expected Two Integers'
if __name__ == '__main__':
print(__doc__)
# Init
print('First Integer ->', end= ' ')
arg1 = get_integer()
print()
print('Second Integer ->', end=' ')
arg2 = get_integer()
print()
print('gcd_recursive({0} and {1}) = {2}'.format(arg1, arg2, gcd_recursive(arg1, arg2)))
print(type(gcd_recursive(arg1, arg2)))
print('gcd_iterative({0} and {1}) = {2}'.format(arg1, arg2, gcd_iterative(arg1, arg2)))
print(type(gcd_iterative(arg1, arg2)))
|
from arcgis.gis import GIS
from arcgis.features.feature import Feature
import os
from .. import util
from ..util import ThreadPool, lprofile
'''
from dotenv import load_dotenv
load_dotenv()
'''
from requests.exceptions import ConnectionError
'''
ARCGIS_USER = os.getenv("ARCGIS_USER")
ARCGIS_PASS = os.getenv("ARCGIS_PASS")
ARCGIS_PORTAL = os.getenv("ARCGIS_PORTAL")
'''
from memory_profiler import profile as mprofile
import gc
GEOMETRY_CACHE = {}
class MapUpdater:
required_capabilities = ["Create", "Delete", "Query", "Update", "Editing"]
def __init__(self, portal, user, pw, chunk_size=100):
self.credentials = (portal, user, pw)
self.chunk_size = chunk_size
self.login()
def login(self):
self.gis = GIS(*self.credentials)
def get_layer(self, content_id):
ret = self.gis.content.get(content_id).layers[0]
self.check_layer_capabilities(ret)
return ret
def check_layer_capabilities(self, layer):
caps = layer.properties.capabilities
incapable = [cap for cap in MapUpdater.required_capabilities if cap not in caps]
if len(incapable) > 0:
raise Exception("You need to have %s capabilities" % (str(incapable),))
#return False
#return True
def cache_kabko_geometry(self, layer, first_tanggal='2020-03-20'):
global GEOMETRY_CACHE
if not GEOMETRY_CACHE:
features = layer.query(
where='tanggal = DATE \'%s\'' % (first_tanggal,),
out_fields='kabko',
return_geometry=True
).features
#cache = {f.attributes["kabko"]:(f.geometry, f.attributes["SHAPE"]) for f in features}
cache = {f.attributes["kabko"]:f.geometry for f in features}
GEOMETRY_CACHE = cache
return GEOMETRY_CACHE
def get_kabko_geometry(self, layer, kabko):
global GEOMETRY_CACHE
if not GEOMETRY_CACHE:
self.cache_kabko_geometry(layer)
if GEOMETRY_CACHE and kabko in GEOMETRY_CACHE:
return GEOMETRY_CACHE[kabko]
feature = layer.query(
where='kabko=\'%s\'' % (kabko,),
out_fields='',
return_geometry=True,
result_record_count=1
).features[0]
geometry = feature.geometry#, feature.attributes["SHAPE"]
GEOMETRY_CACHE[kabko] = geometry
return geometry
def fetch_kabko_feature_tanggal(self, layer, kabko, geometry=None):#, shape=None):
fset = layer.query(
where='kabko=\'%s\'' % (kabko,),
order_by_fields="tanggal ASC",
out_fields='tanggal',
return_geometry=False
)
if geometry:
features = [Feature(geometry, f.attributes) for f in fset.features]
else:
features = list(fset.features)
del fset
#gc.collect()
'''
if geometry:# and shape:
for f in features:
f.geometry = geometry
#f.attributes["SHAPE"] = shape
'''
return features
def filter_tanggal_scalar(self, features):
return {f.attributes["tanggal"] for f in features}
def fetch_kabko_feature_tanggal_scalar(self, layer, kabko):
features = self.fetch_kabko_feature_tanggal(layer, kabko)
tanggal = self.filter_tanggal_scalar(features)
del features
#gc.collect()
return tanggal
def make_features(self, attributes, geometry):
'''
for a in attributes:
a["SHAPE"] = shape
'''
return [Feature(geometry, a) for a in attributes]
'''
def fetch_kabko_features(self, layer, kabko, geometry, shape):
fset = layer.query(
where='kabko=\'%s\'' % (kabko,),
order_by_fields="tanggal ASC",
return_geometry=False
)
features = list(fset.features)
for f in features:
f.geometry = geometry
f.attributes["SHAPE"] = shape
del fset
#gc.collect()
return features
'''
def to_update(self, features, updates):
updates_dict = {u.tanggal_ms():u for u in updates}
feature_dict = {f.attributes["tanggal"]:f for f in features}
to_update = {k:updates_dict[k].apply(v) for k, v in feature_dict.items() if k in updates_dict}
return list(to_update.values()), set(to_update.keys())
def to_append(self, appends, geometry, update_keys=None, features=None, free_features=True):
if update_keys is None:
if features is None:
raise Exception("Please provide either update_keys or features")
update_keys = self.filter_tanggal_scalar(features)
if free_features:
del features
#gc.collect()
appends = [u for u in appends if u.tanggal_ms() not in update_keys]
to_append = self.make_features([a.to_dict() for a in appends], geometry)
return to_append
def to_save(self, layer, kabko, to_save, update=True):
geometry = self.get_kabko_geometry(layer, kabko)
if update:
features = self.fetch_kabko_feature_tanggal(layer, kabko, geometry)
to_update, update_keys = self.to_update(features, to_save)
del features
else:
to_update = []
update_keys = self.fetch_kabko_feature_tanggal_scalar(layer, kabko)
to_append = self.to_append(to_save, geometry, update_keys=update_keys)
return to_update, to_append
def __save(self, f, arg, val):
ret = f(**{arg:val})
del ret
#gc.collect()
return len(val)
def _save(self, layer, to_save, update, chunk_size=100, max_process_count=None, max_tasks_per_child=100):
chunk_size = chunk_size or self.chunk_size
done = 0
pool = None
while True:
chunks = util.chunks(to_save[done:], chunk_size)
arg = "updates" if update else "adds"
args = [(layer.edit_features, arg, c) for c in chunks]
del chunks
#gc.collect()
pool = None
try:
if max_process_count == 1 or len(args) == 1:
done += sum(self.__save(*a) for a in args)
else:
#done += self.__save(*args[0])
#pool = Pool(processes=max_process_count, maxtasksperchild=max_tasks_per_child)
#pool = ThreadPool(processes=util.min_none(len(args)-1, max_process_count))
#output = pool.starmap(self.__save, args[1:])
pool = ThreadPool(processes=util.min_none(len(args), max_process_count))
output = pool.starmap(self.__save, args)
pool.close()
pool.join()
done += sum(output)
del pool
return done, chunk_size
except ConnectionError:
if pool:
pool.terminate()
if chunk_size > 10:
chunk_size -= 10
else:
raise
finally:
#gc.collect()
pass
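# High-level entry point: split to_save into updates and appends for this kabko,
# push each batch, and keep the smallest chunk size that succeeded.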
def save(self, layer, kabko, to_save, update=True, chunk_size=100, max_process_count=None, max_tasks_per_child=100):
geometry = self.get_kabko_geometry(layer, kabko)
chunk_size = chunk_size or self.chunk_size
done = 0
if update:
features = self.fetch_kabko_feature_tanggal(layer, kabko, geometry)
to_update, update_keys = self.to_update(features, to_save)
del features
#gc.collect()
if len(to_update) > 0:
done2, chunk_size2 = self._save(layer, to_update, True, chunk_size=chunk_size, max_process_count=max_process_count, max_tasks_per_child=max_tasks_per_child)
done += done2
chunk_size = min(chunk_size, chunk_size2)
del to_update
#gc.collect()
else:
update_keys = self.fetch_kabko_feature_tanggal_scalar(layer, kabko)
to_append = self.to_append(to_save, geometry, update_keys=update_keys)
if len(to_append) > 0:
done2, chunk_size2 = self._save(layer, to_append, False, chunk_size=chunk_size, max_process_count=max_process_count, max_tasks_per_child=max_tasks_per_child)
done += done2
chunk_size = min(chunk_size, chunk_size2)
del to_append
#gc.collect()
return done, chunk_size
|
# Import packages
import numpy as np
import cv2
import imutils
print "All packages imported properly!"
# Displaying & resizing images
image = cv2.imread("testudo.jpg")
cv2.imshow("Old School Testudo Logo", image)
cv2.waitKey(0)
image = imutils.resize(image, width=400)
cv2.imshow("Old School Testudo Logo: Resized", image)
cv2.waitKey(0)
# Write image to disk (save image)
cv2.imwrite("testimage.jpg", image)
# Image shape (dimensions)
print(image.shape)
print("height: %d" % (image.shape[0]))
print("width: %d" % (image.shape[1]))
print("channels: %d" % (image.shape[2]))
# Pixel operations & image slicing
(b, g, r) = image[0, 0]
print "Pixel at (0, 0) - Red: %d, Green: %d, Blue: %d" % (r, g, b)
#
image[0, 0] = (0, 0, 255)
(b, g, r) = image[0, 0]
print "Pixel at (0, 0) - Red: %d, Green: %d, Blue: %d" % (r, g, b)
corner = image[0:100, 0:100]
cv2.imshow("Corner", corner)
image[0:100, 0:100] = (0, 255, 0)
cv2.imshow("Updated", image)
cv2.waitKey(0)
# Image blurring
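# Larger kernels smooth more aggressively; np.hstack places the 3x3, 5x5 and 7x7 results side by side for comparison.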
blurred = np.hstack([
cv2.blur(image, (3, 3)),
cv2.blur(image, (5, 5)),
cv2.blur(image, (7, 7))])
cv2.imshow("Average Blurring", blurred)
cv2.waitKey(0)
blurred = np.hstack([
cv2.GaussianBlur(image, (3,3), 0),
cv2.GaussianBlur(image, (5,5), 0),
cv2.GaussianBlur(image, (7,7), 0)])
cv2.imshow("Gaussian Blurring", blurred)
cv2.waitKey(0)
blurred = np.hstack([
cv2.medianBlur(image, 3),
cv2.medianBlur(image, 5),
cv2.medianBlur(image, 7)])
cv2.imshow("Median Blurring", blurred)
cv2.waitKey(0)
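# bilateralFilter(src, d, sigmaColor, sigmaSpace): d is the neighborhood diameter; the
# sigmas control how much color difference and distance reduce a neighbor's weight,
# smoothing the image while preserving edges.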
blurred = np.hstack([
cv2.bilateralFilter(image, 5, 21, 21),
cv2.bilateralFilter(image, 7, 31, 31),
cv2.bilateralFilter(image, 9, 41, 41)])
cv2.imshow("Bilater Filtering", blurred)
cv2.waitKey(0)
# Drawing lines & rectangles
canvas = np.zeros((500, 500, 3), dtype="uint8")
green = (0, 255, 0)
cv2.line(canvas, (0,0), (400, 500), green)
red = (0, 0, 255)
cv2.line(canvas, (500, 0), (0, 500), red, 3)
cv2.rectangle(canvas, (40, 50), (100, 100), green)
cv2.rectangle(canvas, (50, 400), (400, 225), red, 5)
cv2.rectangle(canvas, (350, 150), (400, 425), (255, 0, 0), -1)
cv2.imshow("Canvas", canvas)
cv2.waitKey(0)
# Drawing circles
canvas = np.zeros((500, 500, 3), dtype="uint8")
(centerX, centerY) = (canvas.shape[1] // 2, canvas.shape[0] // 2)
white = (255, 255, 255)
for r in range(0, 275, 25):
cv2.circle(canvas, (centerX, centerY), r, white)
cv2.imshow("Concentric Circles", canvas)
cv2.waitKey(0)
# Overlay text on top of an image
canvas = np.zeros((500, 500, 3), dtype="uint8")
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
red = (0, 0, 255)
cv2.putText(canvas, 'Hello World', (100, 200), font, 1, red, 1)
cv2.imshow("Canvas", canvas)
cv2.waitKey(0)
# Transforming images / flipping
image = cv2.imread("testudo.jpg")
image = imutils.resize(image, width=400)
cv2.imshow("Original", image)
flipped = cv2.flip(image, 1)
cv2.imshow("Flipped Horizontally", flipped)
flipped = cv2.flip(image, 0)
cv2.imshow("Flipped Vertically", flipped)
flipped = cv2.flip(image, -1)
cv2.imshow("Flipped Horizontally & Vertically", flipped)
cv2.waitKey(0)
# Rectangular mask
image = cv2.imread("testudo.jpg")
image = imutils.resize(image, width=400)
cv2.imshow("Original", image)
mask = np.zeros(image.shape[:2], dtype = "uint8")
(cX, cY) = (image.shape[1]/2, image.shape[0]/2)
cv2.rectangle(mask, (cX - 75, cY - 75), (cX + 75, cY + 75), 255, -1)
cv2.imshow("Mask", mask)
masked = cv2.bitwise_and(image, image, mask=mask)
cv2.imshow("Mask Applied to Image", masked)
cv2.waitKey(0)
# Circular mask
image = cv2.imread("testudo.jpg")
image = imutils.resize(image, width=400)
cv2.imshow("Original", image)
mask = np.zeros(image.shape[:2], dtype = "uint8")
(cX, cY) = (image.shape[1]/2, image.shape[0]/2)
cv2.circle(mask, (cX, cY), 100, 255, -1)
cv2.imshow("Mask", mask)
masked = cv2.bitwise_and(image, image, mask=mask)
cv2.imshow("Mask Applied to Image", masked)
cv2.waitKey(0)
|
"""
Example DAG where rekcurd_airflow plugins are used
"""
import airflow
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from rekcurd_airflow.operators import ModelSwitchOperator
from datetime import timedelta
default_args = {
'owner': 'rekcurd-airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': [],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(seconds=5),
}
dag = DAG('example_model_switch', default_args=default_args, schedule_interval="@once")
def push_by_return(**kwargs):
return 5
# In a production environment, ModelUploadOperator would return the new model's model_id.
push_by_return_task = PythonOperator(task_id='push_by_return', dag=dag, python_callable=push_by_return)
# The Rekcurd service with ID 2 (application 'sample_app' in project 1) will switch to the model whose ID is 3.
switch = ModelSwitchOperator(task_id='switch_op',
project_id=1,
app_id='sample_app',
service_id=2,
model_id=3,
dag=dag)
# ModelSwitchOperator will receive the model ID returned by the 'push_by_return' task via XCom.
# In this case, the model ID switched to will be 5.
switch2 = ModelSwitchOperator(task_id='switch_op_xcom_return',
project_id=1,
app_id='sample_app',
service_id=2,
model_provide_task_id='push_by_return',
dag=dag)
# Switch model_id to 3 -> 5
switch.set_upstream(push_by_return_task)
switch2.set_upstream(switch)
|
class Array(object):
def sum(self, size, array_string):
# Completed stub (assumed behavior; the original simply returned 0): parse `size`
# whitespace-separated integers from `array_string` and return their sum.
return sum(int(x) for x in array_string.split()[:int(size)])
|
import webapp2
from views import MainPage, SubscriberPage
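# Map URL routes to their request handlers.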
application = webapp2.WSGIApplication([
('/', MainPage),
('/subscriber', SubscriberPage),
], debug=True)
|
#!/usr/bin/env python3
from llvmlite import ir
i1 = ir.IntType(1)
i8 = ir.IntType(8)
i16 = ir.IntType(16)
i32 = ir.IntType(32)
i64 = ir.IntType(64)
void = ir.VoidType()
m = ir.Module()
fty = ir.FunctionType(void, [i32, i32, i32])
f = ir.Function(m, fty, "cmov_test")
entry = f.append_basic_block("entry")
bld = ir.IRBuilder(entry)
cond_v = f.args[0]
cond_v.name = "cond"
true_v = f.args[1]
true_v.name = "true_val"
false_v = f.args[2]
false_v.name = "false_val"
bool_v = bld.icmp_unsigned("==", cond_v, cond_v.type(0), name="cmov_cond")
# cur_bb = bld.basic_block
# with bld.if_else(bool_v) as (then, otherwise):
# with then:
# true_bb = bld.basic_block
# with otherwise:
# false_bb = bld.basic_block
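# select expresses the conditional move directly: it picks true_val when the
# condition holds and false_val otherwise, with no branching.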
bld.select(bool_v, true_v, false_v, name="cmov_val")
print(m)
|