| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/contrib/debugger/debug_result.py | Python | apache-2.0 | 6,677 | 0.002396 |
"""Graph debug results dumping class."""
import os
import json
import tvm
GRAPH_DUMP_FILE_NAME = '_tvmdbg_graph_dump.json'
class DebugResult(object):
"""Graph debug data module.
The data dump module manages all the debug data formatting.
Output data and input graphs are formatted and dumped to files.
The frontend reads these data and the graph for visualization.
Parameters
----------
graph_json : str
The graph to be deployed in json format output by nnvm graph. Each operator (tvm_op)
in the graph will have a one to one mapping with the symbol in libmod which is used
to construct a "PackedFunc" .
dump_path : str
Output data path is read/provided from frontend
"""
def __init__(self, graph_json, dump_path):
self._dump_path = dump_path
self._output_tensor_list = []
self._time_list = []
self._parse_graph(graph_json)
# dump the json information
self.dump_graph_json(graph_json)
def _parse_graph(self, graph_json):
"""Parse and extract the NNVM graph and update the nodes, shapes and dltype.
Parameters
----------
graph_json : str or graph class
The graph to be deployed in json format output by nnvm graph.
"""
json_obj = json.loads(graph_json)
self._nodes_list = json_obj['nodes']
self._shapes_list = json_obj['attrs']['shape']
self._dtype_list = json_obj['attrs']['dltype']
self._update_graph_json()
def _update_graph_json(self):
"""update the nodes_list with name, shape and data type,
for temporarily storing the output.
"""
nodes_len = len(self._nodes_list)
for i in range(nodes_len):
node = self._nodes_list[i]
input_list = []
for input_node in node['inputs']:
input_list.append(self._nodes_list[input_node[0]]['name'])
node['inputs'] = input_list
dtype = str("type: " + self._dtype_list[1][i])
if 'attrs' not in node:
node['attrs'] = {}
node['op'] = "param"
else:
node['op'] = node['attrs']['func_name']
node['attrs'].update({"T": dtype})
node['shape'] = self._shapes_list[1][i]
def _cleanup_tensors(self):
"""Remove the tensor dump file (graph wont be removed)
"""
for filename in os.listdir(self._dump_path):
if os.path.isfile(filename) and not filename.endswith(".json"):
os.remove(filename)
def get_graph_nodes(self):
"""Return the nodes list
"""
return self._nodes_list
def get_graph_node_shapes(self):
"""Return the nodes shapes list
"""
return self._shapes_list
def get_graph_node_output_num(self, node):
"""Return the number of outputs of a node
"""
return 1 if node['op'] == 'param' else int(node['attrs']['num_outputs'])
def get_graph_node_dtypes(self):
"""Return the nodes dtype list
"""
return self._dtype_list
def dump_output_tensor(self):
"""Dump the outputs to a temporary folder, the tensors are in numpy format
"""
#cleanup existing tensors before dumping
self._cleanup_tensors()
eid = 0
order = 0
output_tensors = {}
for node, time in zip(self._nodes_list, self._time_list):
num_outputs = self.get_graph_node_output_num(node)
for j in range(num_outputs):
order += time[0]
key = node['name'] + "_" + str(j) + "__" + str(order)
output_tensors[key] = self._output_tensor_list[eid]
eid += 1
with open(os.path.join(self._dump_path, "output_tensors.params"), "wb") as param_f:
param_f.write(save_tensors(output_tensors))
def dump_graph_json(self, graph):
"""Dump json formatted graph.
Parameters
----------
graph : json format
json formatted NNVM graph contain list of each node's
name, shape and type.
"""
graph_dump_file_name = GRAPH_DUMP_FILE_NAME
with open(os.path.join(self._dump_path, graph_dump_file_name), 'w') as outfile:
json.dump(graph, outfile, indent=4, sort_keys=False)
def display_debug_result(self):
"""Displays the debugger result"
"""
header = ["Node Name", "Ops", "Time(us)", "Time(%)", "Start Time", \
"End Time", "Shape", "Inputs", "Outputs"]
lines = ["---------", "---", "----
|
----", "-------", "----------", \
"--------", "-----", "------", "-------"]
eid = 0
data = []
total_time = sum(time[0] for time in self._time_list)
for node, time in zip(self._nodes_list, self._time_list):
num_outputs = self.get_graph_node_output_num(node)
for j in range(num_outputs):
op = node['op']
if node['op'] == 'param':
continue
name = node['name']
shape = str(self._output_tensor_list[eid].shape)
time_us = round(time[0] * 1000000, 2)
time_percent = round(((time[0] / total_time) * 100), 2)
inputs = str(node['attrs']['num_inputs'])
outputs = str(node['attrs']['num_outputs'])
node_data = [name, op, time_us, time_percent, str(time[1]), str(time[2]), \
shape, inputs, outputs]
data.append(node_data)
eid += 1
fmt = ""
for i, _ in enumerate(header):
max_len = len(header[i])
for j, _ in enumerate(data):
item_len = len(str(data[j][i]))
if item_len > max_len:
max_len = item_len
fmt = fmt + "{:<" + str(max_len + 2) + "}"
print(fmt.format(*header))
print(fmt.format(*lines))
for row in data:
print(fmt.format(*row))
def save_tensors(params):
"""Save parameter dictionary to binary bytes.
The result binary bytes can be loaded by the
GraphModule with API "load_params".
Parameters
----------
params : dict of str to NDArray
The parameter dictionary.
Returns
-------
param_bytes: bytearray
Serialized parameters.
"""
_save_tensors = tvm.get_global_func("_save_param_dict")
args = []
for k, v in params.items():
args.append(k)
args.append(tvm.nd.array(v))
return _save_tensors(*args)
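# Illustrative usage sketch (not part of the original file). The graph JSON
# string and the dump directory below are hypothetical stand-ins; in practice
# the JSON comes from the graph compiler and the directory must already exist:
#   debug_res = DebugResult(graph_json_str, "/tmp/tvmdbg")
#   print(debug_res.get_graph_nodes())        # parsed node list
#   print(debug_res.get_graph_node_shapes())  # matching shape list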
| richrr/scripts | python/merging-python-script.py | Python | gpl-3.0 | 5,595 | 0.0084 |
import sys
import os
import pandas as pd
import multiprocessing as mp
import csv
# this code is written for the merged file with combined pval & fdr. although it could have been written for the file without comb fisher and fdr,
# it is easier to have the output with the comb pval and fdr and use what we need rather than have to search them in the merged file with comb pval and fdr
# or run the next (create network) command to calc the combined pval and fdr.
# cd /nfs3/PHARM/Morgun_Lab/richrr/Cervical_Cancer/analysis/merged/corr/gexpress/stage-ltest_corr/p1
"""
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 1 2" -m 150G -F 100G -r log_merge-py_1 -q biomed -M rodrrich@oregonstate.edu -P 8
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 2 2" -m 150G -F 100G -r log_merge-py_2 -q biomed -M rodrrich@oregonstate.edu -P 8
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 3 2" -m 150G -F 100G -r log_merge-py_3 -q biomed -M rodrrich@oregonstate.edu -P 8
SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 4 2" -m 150G -F 100G -r log_merge-py_4 -q biomed -M rodrrich@oregonstate.edu -P 8
"""
infile = sys.argv[1]
analysis = "Analys " + sys.argv[2] + " "
numb_datasets = int(sys.argv[3])
# get the header line from the big file and decide which (analysis) columns to use
header_line = ''
with open(infile, 'r') as f:
header_line = f.readline().strip()
selcted_cols = [i for i, s in enumerate(header_line.split(',')) if analysis in s] #[s for s in header_line.split(',') if analysis in s]
# get the lowest and highest and make range out of it
# this way you get the combinedpval and combined fdr cols
selcted_cols = range(min(selcted_cols), max(selcted_cols)+1)
selcted_cols.insert(0, 0) # explicitly adding the row id cols
print selcted_cols
header_for_print = [header_line.split(',')[i] for i in selcted_cols]
print header_for_print
def process(df):
res = list()
for row in df.itertuples():
#print row
corrs = row[1:numb_datasets+1]
corrs_flag = 0
# write some condition to check for NA
pos = sum(float(num) > 0 for num in corrs)
neg = sum(float(num) < 0 for num in corrs)
#print pos, neg
if len(corrs) == pos and not len(corrs) == neg:
#print "pos"
corrs_flag = 1
if len(corrs) == neg and not len(corrs) == pos:
#print "neg"
corrs_flag = 1
if corrs_flag == 1:
res.append(row)
return res
counter=0
pool = mp.Pool(30) # use 30 processes
funclist = []
# http://gouthamanbalaraman.com/blog/distributed-processing-pandas.html
#for chunck_df in pd.read_csv(infile, chunksize=100, usecols=range(5), index_col=0):
for chunck_df in pd.read_csv(infile, chunksize=100000, usecols=selcted_cols, index_col=0):
counter = counter + 1
print counter
#print chunck_df
# process each data frame
f = pool.apply_async(process,[chunck_df])
funclist.append(f)
#result = list()
OUTfile = infile + analysis.replace(" ", "_") + '-same-dir-corrs.csv'
with open(OUTfile, 'w') as of:
writer = csv.writer(of, delimiter=',', lineterminator='\n')
writer.writerow(header_for_print)
for f in funclist:
csvd = f.get(timeout=10000) # timeout in 10000 seconds
#result.extend(csvd)
writer.writerows(csvd)
#print result
# quick and dirty command to get the first column of the file:
cutcmd = "cut -d, -f 1 " + OUTfile + " > " + OUTfile + "-ids.csv"
os.system(cutcmd)
print "Done"
""" # sequential
corrs_dict = dict() # satisfies corr direction
counter = 0
# with open(in_filename) as in_f, open(out_filename, 'w') as out_f
with open(infile) as f:
for line in f:
counter = counter + 1
line = line.strip()
print line
contents = line.split(",")
corrs = contents[1:numb_datasets+1]
corrs_flag = 0
if counter == 1: # move to next iteration
of.write(line)
continue
# write some condition to check for NA
pos = sum(float(num) > 0 for num in corrs)
neg = sum(float(num) < 0 for num in corrs)
#print pos, neg
if len(corrs) == pos and not len(corrs) == neg:
print "pos"
corrs_flag = 1
if len(corrs) == neg and not len(corrs) == pos:
print "neg"
corrs_flag = 1
if corrs_flag == 1:
corrs_dict[contents[0]] = contents[1:]
'''
if corrs_flag == 0: # no point in analyzing pvals, move to next iteration
continue
pvals = contents[numb_datasets+1:]
print pvals
pvals_flag = 0
# write some condition to check for NA
sig = sum(float(num) < 1 for num in pvals)
#print sig
if len(corrs) == sig:
print "sig"
pvals_flag = 1
if corrs_flag == 1 and pvals_flag == 1:
corrs_dict[contents[0]] = contents[1:]
if counter == 5:
sys.exit(0)
'''
print corrs_dict
"""
| ckamm/mirall | shell_integration/nautilus/ownCloud.py | Python | gpl-2.0 | 5,047 | 0.038439 |
#!/usr/bin/python3
import os
import urllib
import socket
from gi.repository import GObject, Nautilus
class ownCloudExtension(GObject.GObject, Nautilus.ColumnProvider, Nautilus.InfoProvider):
nautilusVFSFile_table = {}
registered_paths = {}
remainder = ''
connected = False
watch_id = 0
def __init__(self):
self.connectToOwnCloud()
if not self.connected:
# try again in 5 seconds - attention, logic inverted!
GObject.timeout_add(5000, self.connectToOwnCloud)
def port(self):
return 34001 # Fixme, read from config file.
def connectToOwnCloud(self):
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(("localhost", self.port()))
self.sock.settimeout(5)
self.connected = True
self.watch_id = GObject.io_add_watch(self.sock, GObject.IO_IN, self.handle_notify)
except:
print "Connect could not be established, try again later!"
self.sock.close()
return not self.connected
def sendCommand(self, cmd):
if self.connected:
try:
self.sock.send(cmd)
except:
print "Sending failed."
GObject.source_remove( self.watch_id )
self.connected = False
GObject.timeout_add(5000, self.connectToOwnCloud)
def find_item_for_file( self, path ):
if path in self.nautilusVFSFile_table:
return self.nautilusVFSFile_table[path]
else:
return None
def askForOverlay(self, file):
if os.path.isdir(file):
folderStatus = self.sendCommand("RETRIEVE_FOLDER_STATUS:"+file+"\n");
if os.path.isfile(file):
fileStatus = self.sendCommand("RETRIEVE_FILE_STATUS:"+file+"\n");
def invalidate_items_underneath( self, path ):
update_items = []
for p in self.nautilusVFSFile_table:
if p == path or p.startswith( path ):
item = self.nautilusVFSFile_table[p]
update_items.append(item)
for item in update_items:
item.invalidate_extension_info()
# self.update_file_info(item)
# Handles a single line of server response and sets the emblem
def handle_server_response(self, l):
Emblems = { 'OK' : 'oC_ok',
'SYNC' : 'oC_sync',
'NEW' : 'oC_sync',
'IGNORE' : 'oC_warn',
'ERROR' : 'oC_error',
'OK+SWM' : 'oC_ok_shared',
'SYNC+SWM' : 'oC_sync_shared',
'NEW+SWM' : 'oC_sync_shared',
'IGNORE+SWM': 'oC_warn_shared',
'ERROR+SWM' : 'oC_error_shared',
'NOP' : 'oC_error'
}
print "Server response: "+l
parts = l.split(':')
if len(parts) > 0:
action = parts[0]
# file = parts[1]
# print "Action for " + file + ": "+parts[0]
if action == 'STATUS':
emblem = Emblems[parts[1]]
if emblem:
item = self.find_item_for_file(parts[2])
if item:
item.add_emblem(emblem)
elif action == 'UPDATE_VIEW':
# Search all items underneath this path and invalidate them
if parts[1] in self.registered_paths:
self.invalidate_items_underneath( parts[1] )
elif action == 'REGISTER_PATH':
self.registered_paths[parts[1]] = 1
self.invalidate_items_underneath( parts[1] )
elif action == 'UNREGISTER_PATH':
del self.registered_paths[parts[1]]
self.invalidate_items_underneath( parts[1] )
# check if there are no paths registered any more; if so, it usually
# means that mirall went away. Try to reconnect.
if not self.registered_paths:
self.sock.close()
self.connected = False
GObject.source_remove( self.watch_id )
GObject.timeout_add(5000, self.connectToOwnCloud)
else:
# print "We got unknown action " + action
1
# notify is the raw answer from the socket
def handle_notify(self, source, condition):
data = source.recv(1024)
# prepend the remaining data from last call
if len(self.remainder) > 0:
data = self.remainder+data
self.remainder = ''
if len(data) > 0:
# remember the remainder for next round
lastNL = data.rfind('\n');
if lastNL > -1 and lastNL < len(data):
self.remainder = data[lastNL+1:]
data = data[:lastNL]
for l in data.split('\n'):
self.handle_server_response( l )
else:
return False
return True # run again
def get_local_path(self, path):
return path.replace("file://", "")
def update_file_info(self, item):
if item.get_uri_scheme() != 'file':
return
filename = urllib.unquote(item.get_uri()[7:])
if item.is_directory():
filename += '/'
for reg_path in self.registered_paths:
if filename.startswith(reg_path):
self.nautilusVFSFile_table[filename] = item
# item.add_string_attribute('share_state', "share state")
self.askForOverlay(filename)
break
else:
print "Not in scope:"+filename
| pyroscope/pyroscope | pyroscope/pyroscope/controllers/torrent.py | Python | gpl-2.0 | 1,556 | 0.003213 |
""" PyroScope - Controller "torrent".
Copyright (c) 2009 The PyroScope Project <pyroscope.project@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import logging
from pylons import request, response, session, tmpl_context as c
from pylons.controllers.util import abort, redirect_to
from pyroscope.lib.base import render, PageController
from pyroscope.engines import rtorrent
log = logging.getLogger(__name__)
class TorrentController(PageController):
def __init__(self):
self.proxy = rtorrent.Proxy()
def index(self):
# Redirect to view page
return redirect_to(action="view") #, id="HelpIndex")
def view(self, id):
c.hash = id
c.name = id
c.torrents = list(rtorrent.View(self.proxy, "incomplete").items())
# Return a rendered template
return render("pages/torrent.mako")
| olivierverdier/sfepy | examples/navier_stokes/stokes.py | Python | bsd-3-clause | 4,109 | 0.024093 |
# 24.05.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
from sfepy.fem.periodic import *
filename_mesh = data_dir + '/meshes/2d/special/channels_symm944t.mesh'
if filename_mesh.find( 'symm' ):
region_1 = {
'name' : 'Y1',
'select' : """elements of group 3""",
}
region_2 = {
'name' : 'Y2',
'select' : """elements of group 4 +e elements of group 6
+e elements of group 8""",
}
region_4 = {
'name' : 'Y1Y2',
'select' : """r.Y1 +e r.Y2""",
}
region_5 = {
'name' : 'Walls',
'select' : """r.EBCGamma1 +n r.EBCGamma2""",
}
region_310 = {
'name' : 'EBCGamma1',
'select' : """(elements of group 1 *n elements of group 3)
+n
(elements of group 2 *n elements of group 3)
""",
}
region_320 = {
'name' : 'EBCGamma2',
'select' : """(elements of group 5 *n elements of group 4)
+n
(elements of group 1 *n elements of group 4)
+n
(elements of group 7 *n elements of group 6)
+n
(elements of group 2 *n elements of group 6)
+n
(elements of group 9 *n elements of group 8)
+n
(elements of group 2 *n elements of group 8)
""",
}
w2 = 0.499
# Sides.
region_20 = {
'name' : 'Left',
'select' : 'nodes in (x < %.3f)' % -w2,
}
region_21 = {
'name' : 'Right',
'select' : 'nodes in (x > %.3f)' % w2,
}
region_22 = {
'name' : 'Bottom',
'select' : 'nodes in (y < %.3f)' % -w2,
}
region_23 = {
'name' : 'Top',
'select' : 'nodes in (y > %.3f)' % w2,
}
field_1 = {
'name' : '2_velocity',
'dtype' : 'real',
'shape' : (2,),
'region' : 'Y1Y2',
'approx_order' : 2,
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Y1Y2',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_velocity',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_velocity',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d2',
}
equations = {
'balance' :
"""dw_div_grad.i1.Y1Y2( fluid.viscosity, v, u )
- dw_stokes.i1.Y1Y2( v, p ) = 0""",
'incompressibility' :
"""dw_stokes.i1.Y1Y2( u, q ) = 0""",
}
material_1 = {
'name' : 'fluid',
'values' : {
'viscosity' : 1.0,
'density' : 1e0,
},
}
ebc_1 = {
'name' : 'walls',
'region' : 'Walls',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'top_velocity',
'region' : 'Top',
'dofs' : {'u.1' : -1.0, 'u.0' : 0.0},
}
ebc_10 = {
'name' : 'bottom_pressure',
'region' : 'Bottom',
'dofs' : {'p.0' : 0.0},
}
epbc_1 = {
'name' : 'u_rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_line',
}
functions = {
'match_y_line' : (match_y_line,),
}
##
# FE assembling parameters.
fe = {
'chunk_size' : 100,
'cache_override' : True,
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 2,
'eps_a' : 1e-8,
'eps_r' : 1e-2,
'macheps' : 1e-16,
'lin_red' : 1e-2, # Linear system error < (eps_a * lin_red).
'ls_red' : 0.1,
'ls_red_warp' : 0.001,
'ls_on' : 1.1,
'ls_min' : 1e-5,
'check' : 0,
'delta' : 1e-6,
'is_plot' : False,
'problem' : 'nonlinear', # 'nonlinear' or 'linear' (ignore i_max)
}
save_format = 'hdf5' # 'hdf5' or 'vtk'
| lockerfish/assertj-android | generate-assertions-java.py | Python | apache-2.0 | 4,345 | 0.014499 |
#!/usr/bin/env python
from datetime import date
import os
import re
SRC_DIR = 'src/main/java/'
ABSTRACT = re.compile(r'public abstract class Abstract')
TYPE = re.compile(r'class [A-Za-z0-9]+(<[^>]+?(?: extends ([A-Za-z0-9_]+))?>)?')
TARGET = re.compile(r'\s[A-Z][A-Za-z0-9_]+<[A-Z][A-Za-z0-9_]+(?:<.+?>)?, (([A-Z][A-Za-z0-9_]+).*?)(<.+?>)?(?:, [A-Z])*> {')
IMPORT = re.compile(r'import (?:static )?((?:com\.google\.)?android\..*?);')
ASSERTIONS = 'Assertions.java'
projects = []
for candidate in filter(os.path.isdir, os.listdir('.')):
if candidate.startswith('assertj-android'):
projects.append(candidate)
print('Projects: %s\n' % projects)
def _find_assertions(path):
for root, dirs, files in os.walk(path):
if ASSERTIONS in files:
return os.path.join(root, ASSERTIONS)
raise Exception('Could not locate Assertions.java in %s.' % path)
for project in projects:
src_dir = os.path.join(project, SRC_DIR)
assertions_file = _find_assertions(src_dir)
assertions_dir = os.path.dirname(assertions_file)
classes_package = assertions_dir[len(src_dir):].replace(os.sep, '.')
print('\n' * 3)
print(project)
print('')
print('src_dir = %s' % src_dir)
print('assertions_file = %s' % assertions_file)
print('assertions_dir = %s' % assertions_dir)
print('classes_package = %s' % classes_package)
print('')
assertions = []
for root, dirs, files in os.walk(assertions_dir):
for f in files:
if not f.endswith('Assert.java'):
continue
print('-'*80)
local_package = root[len(src_dir):].replace(os.sep, '.')
package = '%s.%s' % (local_package, f[:-5])
print('package : %s' % package)
with open(os.path.join(root, f)) as j:
java = j.read()
if ABSTRACT.search(java) is not None:
print('SKIP (abstract)')
continue # Abstract class.
target_match = TARGET.search(java)
import_type = target_match.group(2)
target_type = target_match.group(1)
generics = target_match.group(3)
print('import type: %s' % import_type)
print('target type: %s' % target_type)
print('generics : %s' % generics)
for match in IMPORT.finditer(java):
if match.group(1).endswith(import_type):
import_package = match.group(1)
break
else:
raise Exception('Could not find target package for %s' % import_type)
type_match = TYPE.search(java)
bounds_type = type_match.group(1)
bounds_ext = type_match.group(2)
if generics:
print('bounds type: %s' % bounds_type)
print('bounds ext : %s' % bounds_ext)
if bounds_ext:
for match in IMPORT.finditer(java):
if match.group(1).endswith(bounds_ext):
bounds_type = bounds_type.replace(bounds_ext, match.group(1))
break
else:
raise Exception('Could not find target package for %s' % bounds_ext)
print('bounds fqcn: %s' % bounds_type)
target_package = import_package.replace(import_type, target_type)
print('import pkg : %s' % import_package)
print('target pkg : %s' % target_package)
assertions.append(
(package, target_package, bounds_type or '', generics or '')
)
print('-'*80)
with open(assertions_file, 'w') as out:
out.write('// Copyright %s Square, Inc.\n' % date.today().year)
out.write('//\n')
out.write('// This class is generated. Do not modify directly!\n')
out.write('package %s;\n\n' % classes_package)
out.write('/** Assertions for testing Android classes. */\n')
out.write('@SuppressWarnings("deprecation")\n')
out.write('public final class Assertions {')
for package, target_package, bounds_type, generic_keys in sorted(assertions, key=lambda x: x[0]):
out.write('\n')
out.write(' public static %s%s%s assertThat(\n' % (bounds_type + ' ' if bounds_type else '', package, generic_keys))
out.write(' %s%s actual) {\n' % (target_package, generic_keys))
out.write(' return new %s%s(actual);\n' % (package, '<>' if generic_keys else ''))
out.write(' }\n')
out.write('\n')
out.write(' private Assertions() {\n')
out.write(' throw new AssertionError("No instances.");\n')
out.write(' }\n')
out.write('}\n')
print('\nNew Assertions.java files written!\n')
| openego/oeplatform | modelview/migrations/0010_auto_20160301_1032.py | Python | agpl-3.0 | 728 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-01 09:32
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("modelview", "0009_auto_20160301_1030")]
operations = [
migrations.AlterField(
model_name="energyscenario",
name="tools_models",
field=models.ForeignKey(
help_text="Which model(s) and other
|
tools have been used?",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="modelview.Energymodel",
verbose_name="Tools",
),
)
]
| ntt-sic/heat | heat/engine/update.py | Python | apache-2.0 | 7,456 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.db import api as db_api
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
from heat.openstack.common import log as logging
from heat.openstack.common.gettextutils import _
logger = logging.getLogger(__name__)
class StackUpdate(object):
"""
A Task to perform the update of an existing stack to a new template.
"""
def __init__(self, existing_stack, new_stack, previous_stack,
rollback=False):
"""Initialise with the existing stack and the new stack."""
self.existing_stack = existing_stack
self.new_stack = new_stack
self.previous_stack = previous_stack
self.rollback = rollback
self.existing_snippets = dict((n, r.parsed_template())
for n, r in self.existing_stack.items())
def __repr__(self):
if self.rollback:
return '%s Rollback' % str(self.existing_stack)
else:
return '%s Update' % str(self.existing_stack)
@scheduler.wrappertask
def __call__(self):
"""Return a co-routine that updates the stack."""
cleanup_prev = scheduler.DependencyTaskGroup(
self.previous_stack.dependencies,
self._remove_backup_resource,
reverse=True)
update = scheduler.DependencyTaskGroup(self.dependencies(),
self._resource_update)
if not self.rollback:
yield cleanup_prev()
try:
yield update()
finally:
self.previous_stack.reset_dependencies()
def _resource_update(self, res):
if res.name in self.new_stack and self.new_stack[res.name] is res:
return self._process_new_resource_update(res)
else:
return self._process_existing_resource_update(res)
@scheduler.wrappertask
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
logger.debug(_("Deleting backup resource %s") % prev_res.name)
yield prev_res.destroy()
@staticmethod
def _exchange_stacks(existing_res, prev_res):
db_api.resource_exchange_stacks(existing_res.stack.context,
existing_res.id, prev_res.id)
prev_stack, existing_stack = prev_res.stack, existing_res.stack
prev_stack[existing_res.name] = existing_res
existing_stack[prev_res.name] = prev_res
@scheduler.wrappertask
def _create_resource(self, new_res):
res_name = new_res.name
# Clean up previous resource
if res_name in self.previous_stack:
prev_res = self.previous_stack[res_name]
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
logger.debug(_("Swapping in backup Resource %s") %
res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return
logger.debug(_("Deleting backup Resource %s") % res_name)
yield prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
logger.debug(_("Backing up existing Resource %s") % res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack[res_name] = existing_res
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
self.existing_stack[res_name] = new_res
yield new_res.create()
@scheduler.wrappertask
def _process_new_resource_update(self, new_res):
res_name = new_res.name
if res_name in self.existing_stack:
existing_res = self.existing_stack[res_name]
try:
yield self._update_in_place(existing_res,
new_res)
except resource.UpdateReplace:
pass
else:
logger.info(_("Resource %(res_name)s for stack %(stack_name)s"
" updated") % {
'res_name': res_name,
'stack_name': self.existing_stack.name})
return
yield self._create_resource(new_res)
def _update_in_place(self, existing_res, new_res):
# Note the new resource snippet is resolved in the context
# of the existing stack (which is the stack being updated)
existing_snippet = self.existing_snippets[existing_res.name]
new_snippet = self.existing_stack.resolve_runtime_data(new_res.t)
prev_res = self.previous_stack.get(new_res.name)
return existing_res.update(new_snippet, existing_snippet,
prev_resource=prev_res)
@scheduler.wrappertask
def _process_existing_resource_update(self, existing_res):
res_name = existing_res.name
if res_name in self.previous_stack:
yield self._remove_backup_resource(self.previous_stack[res_name])
if res_name in self.new_stack:
new_res = self.new_stack[res_name]
if new_res.state == (new_res.INIT, new_res.COMPLETE):
# Already updated in-place
return
if existing_res.stack is not self.previous_stack:
yield existing_res.destroy()
if res_name not in self.new_stack:
del self.existing_stack[res_name]
def dependencies(self):
'''
Return a Dependencies object representing the dependencies between
update operations to move from an existing stack definition to a new
one.
'''
existing_deps = self.existing_stack.dependencies
new_deps = self.new_stack.dependencies
def edges():
# Create/update the new stack's resources in create order
for e in new_deps.graph().edges():
yield e
# Destroy/cleanup the old stack's resources in delete order
for e in existing_deps.graph(reverse=True).edges():
yield e
# Don't cleanup old resources until after they have been replaced
for name, res in self.existing_stack.iteritems():
if name in self.new_stack:
yield (res, self.new_stack[name])
return dependencies.Dependencies(edges())
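# Illustrative sketch (not part of the original module); the names and wiring
# below are assumptions. In Heat the update task is typically driven through
# the scheduler, roughly:
#   updater = StackUpdate(existing_stack, new_stack, backup_stack)
#   scheduler.TaskRunner(updater)()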
| Shrews/PyGerrit | webapp/django/contrib/localflavor/us/forms.py | Python | apache-2.0 | 4,166 | 0.00312 |
"""
USA-specific Form helpers
"""
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select, EMPTY_VALUES
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
ssn_re = re.compile(r"^(?P<area>\d{3})[-\ ]?(?P<group>\d{2})[-\ ]?(?P<serial>\d{4})$")
class USZipCodeField(RegexField):
default_error_messages = {
'invalid': _('Enter a zip code in the format XXXXX or XXXXX-XXXX.'),
}
def __init__(self, *args, **kwargs):
super(USZipCodeField, self).__init__(r'^\d{5}(?:-\d{4})?$',
max_length=None, min_length=None, *args, **kwargs)
class USPhoneNumberField(Field):
default_error_messages = {
'invalid': u'Phone numbers must be in XXX-XXX-XXXX format.',
}
def clean(self, value):
super(USPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub('(\(|\)|\s+)', '', smart_unicode(value))
m = phone_digits_re.search(value)
if m:
return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
raise ValidationError(self.error_messages['invalid'])
class USSocialSecurityNumberField(Field):
"""
A United States Social Security number.
Checks the following rules to determine whether the number is valid:
* Conforms to the XXX-XX-XXXX format.
* No group consists entirely of zeroes.
* The leading group is not "666" (block "666" will never be allocated).
* The number is not in the promotional block 987-65-4320 through
987-65-4329, which are permanently invalid.
* The number is not one known to be invalid due to otherwise widespread
promotional use or distribution (e.g., the Woolworth's number or the
1962 promotional number).
"""
default_error_messages = {
'invalid': _('Enter a valid U.S. Social Security number in XXX-XX-XXXX format.'),
}
def clean(self, value):
super(USSocialSecurityNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = re.match(ssn_re, value)
if not match:
raise ValidationError(self.error_messages['invalid'])
area, group, serial = match.groupdict()['area'], match.groupdict()['group'], match.groupdict()['serial']
# First pass: no blocks of all zeroes.
if area == '000' or \
group == '00' or \
serial == '0000':
raise ValidationError(self.error_messages['invalid'])
# Second pass: promotional and otherwise permanently invalid numbers.
if area == '666' or \
(area == '987' and group == '65' and 4320 <= int(serial) <= 4329) or \
value == '078-05-1120' or \
value == '219-09-9999':
raise ValidationError(self.error_messages['invalid'])
return u'%s-%s-%s' % (area, group, serial)
class USStateField(Field):
"""
A form field that validates its input is a U.S. state name or abbreviation.
It normalizes the input to the standard two-letter postal service
abbreviation for the given state.
"""
default_error_messages = {
'invalid': u'Enter a U.S. state or territory.',
}
def clean(self, value):
from us_states import STATES_NORMALIZED
super(USStateField, self).clean(value)
if value in EMPTY_VALUES:
return u''
try:
value = value.strip().lower()
except AttributeError:
pass
else:
try:
return STATES_NORMALIZED[value.strip().lower()].decode('ascii')
except KeyError:
pass
raise ValidationError(self.error_messages['invalid'])
class USStateSelect(Select):
"""
A Select widget that uses a list of U.S. states/territories as its choices.
"""
def __init__(self, attrs=None):
from us_states import STATE_CHOICES
super(USStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
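# Illustrative quick check (not part of the original module); the sample values
# below are made up.
if __name__ == "__main__":
    print(USPhoneNumberField().clean(u"(541) 555.1212"))        # u'541-555-1212'
    print(USSocialSecurityNumberField().clean(u"123 45 6789"))  # u'123-45-6789'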
| monouno/site | judge/migrations/0019_og_images.py | Python | agpl-3.0 | 716 | 0.002793 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-19 18:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('judge', '0018_django_1_9'),
]
operations = [
migrations.AddField(
model_name='blogpost',
name='og_image',
field=models.CharField(default=b'', help_text='', max_length=150, verbose_name=b'OpenGraph image'),
),
migrations.AddField(
model_name='contest',
name='og_image',
field=models.CharField(default=b'', help_text='', max_length=150, verbose_name=b'OpenGraph image'),
),
]
| mwcraig/ccdproc | docs/conf.py | Python | bsd-3-clause | 6,953 | 0.001726 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be installed')
sys.exit(1)
# Get configuration information from setup.cfg
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(project)
package = sys.modules[project]
ver = package.__version__
version = '.'.join(ver.split('.'))[:5]
release = ver
# -- Options for HTML output --------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = 'bootstrap-ccdproc'
html_theme_options = {
'logotext1': 'ccd', # white, semi-bold
'logotext2': 'proc', # orange, light
'logotext3': ':docs' # white, light
}
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
from os.path import join
html_favicon = join('_static', 'ccd_proc.ico')
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# Static files to copy after template files
html_static_path = ['_static']
html_style = 'ccdproc.css'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ---------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['sphinx_astropy.ext.edit_on_github']
versionmod = __import__(setup_cfg['name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
# -- Resolving issue number to links in changelog -----------------------------
github_issues_url = 'https://github.com/astropy/ccdproc/issues/'
# -- Turn on nitpicky mode for sphinx (to warn about references not found) ----
#
nitpicky = True
# nitpick_ignore = []
#
# for line in open('nitpick-exceptions'):
# if line.strip() == "" or line.startswith("#"):
# continue
# dtype, target = line.split(None, 1)
# target = target.strip()
# nitpick_ignore.append((dtype, six.u(target)))
| ztane/jaspyx | jaspyx/visitor/binop.py | Python | mit | 1,067 | 0 |
from __future__ import absolute_import, division, print_function
import ast
from jaspyx.ast_util import ast_load, ast_call
from jaspyx.visitor import BaseVisitor
class BinOp(BaseVisitor):
def visit_BinOp(self, node):
attr = getattr(self, 'BinOp_%s' % node.op.__class__.__name__, None)
attr(node.left, node.right)
for key, value in {
'Add': '+',
'Sub': '-',
'Mult': '*',
'Div': '/',
'Mod': '%',
'BitAnd': '&',
'BitOr': '|',
'BitXor': '^',
'LShift': '<<',
'RShift': '>>',
}.items():
def gen_op(op):
def f_op(self, left, right):
self.group([left, op, right])
return f_op
exec('BinOp_%s = gen_op("%s")' % (key, value))
def BinOp_Pow(self, left, right):
pow_func = ast_load('Math.pow')
self.visit(ast_call(pow_func, left, right))
def BinOp_FloorDiv(self, left, right):
floor = ast_load('Math.floor')
self.visit(ast_call(floor, ast.BinOp(left, ast.Div(), right)))
| arenadata/ambari | ambari-server/src/main/resources/common-services/ZEPPELIN/0.7.0/package/scripts/interpreter_json_template.py | Python | apache-2.0 | 15,680 | 0.000255 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
template = '''
{
"interpreterSettings": {
"2CKEKWY8Z": {
"id": "2CKEKWY8Z",
"name": "angular",
"group": "angular",
"properties": {},
"status": "READY",
"interpreterGroup": [
{
"name": "angular",
"class": "org.apache.zeppelin.angular.AngularInterpreter",
"defaultInterpreter": false,
"editor": {
"editOnDblClick": true
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKX8WPU1": {
"id": "2CKX8WPU1",
"name": "spark",
"group": "spark",
"properties": {
"spark.executor.memory": "512m",
"args": "",
"zeppelin.spark.printREPLOutput": "true",
"spark.cores.max": "",
"zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;",
"zeppelin.spark.sql.stacktrace": "false",
"zeppelin.spark.importImplicit": "true",
"zeppelin.spark.concurrentSQL": "false",
"zeppelin.spark.useHiveContext": "true",
"zeppelin.pyspark.python": "python",
"zeppelin.dep.localrepo": "local-repo",
"zeppelin.R.knitr": "true",
"zeppelin.spark.maxResult": "1000",
"master": "yarn-client",
"spark.app.name": "Zeppelin",
"zeppelin.R.image.width": "100%",
"zeppelin.R.render.options": "out.format \u003d \u0027html\u0027, comment \u003d NA, echo \u003d FALSE, results \u003d \u0027asis\u0027, message \u003d F, warning \u003d F",
"zeppelin.R.cmd": "R"
},
"status": "READY",
"interpreterGroup": [
{
"name": "spark",
"class": "org.apache.zeppelin.spark.SparkInterpreter",
"defaultInterpreter": true,
"editor": {
"language": "scala"
}
},
{
"name": "sql",
"class": "org.apache.zeppelin.spark.SparkSqlInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql"
}
},
{
"name": "dep",
"class": "org.apache.zeppelin.spark.DepInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "scala"
}
},
{
"name": "pyspark",
"class": "org.apache.zeppelin.spark.PySparkInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "python"
}
},
{
"name": "r",
"class": "org.apache.zeppelin.spark.SparkRInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "r"
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2C4U48MY3_spark2": {
"id": "2C4U48MY3_spark2",
"name": "spark2",
"group": "spark",
"properties": {
"spark.executor.memory": "",
"args": "",
"zeppelin.spark.printREPLOutput": "true",
"spark.cores.max": "",
"zeppelin.dep.additionalRemoteRepository": "spark-packages,http://dl.bintray.com/spark-packages/maven,false;",
"zeppelin.spark.importImplicit": "true",
"zeppelin.spark.sql.stacktrace": "false",
"zeppelin.spark.concurrentSQL": "false",
"zeppelin.spark.useHiveContext": "true",
"zeppelin.pyspark.python": "python",
"zeppelin.dep.localrepo": "local-repo",
"zeppelin.R.knitr": "true",
"zeppelin.spark.maxResult": "1000",
"master": "local[*]",
"spark.app.name": "Zeppelin",
"zeppelin.R.image.width": "100%",
"zeppelin.R.render.options": "out.format \u003d \u0027html\u0027, comment \u003d NA, echo \u003d FALSE, results \u003d \u0027asis\u0027, message \u003d F, warning \u003d F",
"zeppelin.R.cmd": "R"
},
"status": "READY",
"interpreterGroup": [
{
"name": "spark",
"class": "org.apache.zeppelin.spark.SparkInterpreter",
"defaultInterpreter": true
},
{
"name": "sql",
"class": "org.apache.zeppelin.spark.SparkSqlInterpreter",
"defaultInterpreter": false
},
{
"name": "dep",
"class": "org.apache.zeppelin.spark.DepInterpreter",
"defaultInterpreter": false
},
{
"name": "pyspark",
"class": "org.apache.zeppelin.spark.PySparkInterpreter",
"defaultInterpreter": false
},
{
"name": "r",
"class": "org.apache.zeppelin.spark.SparkRInterpreter",
"defaultInterpreter": false
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNoteSession": false,
"perNoteProcess": false,
"isExistingProcess": false,
"setPermission": false
}
},
"2CK8A9MEG": {
"id": "2CK8A9MEG",
"name": "jdbc",
"group": "jdbc",
"properties": {
"default.password": "",
"zeppelin.jdbc.auth.type": "",
"common.max_count": "1000",
"zeppelin.jdbc.principal": "",
"default.user": "gpadmin",
"default.url": "jdbc:postgresql://localhost:5432/",
"default.driver": "org.postgresql.Driver",
"zeppelin.jdbc.keytab.location": "",
"zeppelin.jdbc.concurrent.use": "true",
"zeppelin.jdbc.concurrent.max_connection": "10"
},
"status": "READY",
"interpreterGroup": [
{
"name": "sql",
"class": "org.apache.zeppelin.jdbc.JDBCInterpreter",
"defaultInterpreter": false,
"editor": {
"language": "sql",
"editOnDblClick": false
}
}
],
"dependencies": [],
"option": {
"remote": true,
"port": -1,
"perNote": "shared",
"perUser": "shared",
"isExistingProcess": false,
"setPermission": false,
"users": [],
"isUserImpersonate": false
}
},
"2CKX6DGQZ": {
"id": "2CKX6DGQZ",
"name": "livy",
"group": "livy",
"properties": {
"zeppelin.li
|
vy.pull_status.interval.millis": "1000",
"livy.spark.executor.memory": "",
"zeppelin.livy.session.create_timeout": "120",
"zeppelin.livy.principal": "",
"zeppelin.livy.spark.sql.maxResult": "1000",
"zeppelin.livy.keytab": "",
"zeppelin.livy.concurrentSQL": "false",
"zeppelin.livy.spark.sql.field.truncate": "true",
"livy.spark.executor.cores": "",
"zeppelin.livy.displayAppInfo": "true",
"zeppelin.livy.url": "http://localhost:8998",
"livy.spark.dynamicAllocation.minExecutors": "",
"livy.spark.driver.cores": "",
"livy.spark.jars.packages": "",
"livy.spark.dynamicAllocation.enabled": "",
"livy.spark.execu
| zhang0137/chromite | lib/cros_test_lib.py | Python | bsd-3-clause | 29,541 | 0.008158 |
#!/usr/bin/python
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cros unit test library, with utility functions."""
from __future__ import print_function
import collections
import contextlib
import cStringIO
import exceptions
import mox
import os
import re
import sys
import unittest
import osutils
import terminal
import cros_build_lib
if 'chromite' not in sys.modules:
# TODO(build): Finish test wrapper (http://crosbug.com/37517).
# Until then, we detect the chromite manipulation not yet having
# occurred, and inject it ourselves.
|
# We cannot just import chromite since this module is still accessed
# from non chromite.lib.cros_test_lib pathways (which will be resolved
# implicitly via 37517).
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.abspath(__file__)), '../third_party'))
import mock
Directory = collections.namedtuple('Directory', ['name', 'contents'])
def _FlattenStructure(base_path, dir_struct):
"""Converts a directory structure to a list of paths."""
flattened = []
for obj in dir_struct:
if isinstance(obj, Directory):
new_base = os.path.join(base_path, obj.name).rstrip(os.sep)
flattened.append(new_base + os.sep)
flattened.extend(_FlattenStructure(new_base, obj.contents))
else:
assert(isinstance(obj, basestring))
flattened.append(os.path.join(base_path, obj))
return flattened
def CreateOnDiskHierarchy(base_path, dir_struct):
"""Creates on-disk representation of an in-memory directory structure.
Arguments:
base_path: The absolute root of the directory structure.
dir_struct: A recursively defined data structure that represents a
directory tree. The basic form is a list. Elements can be file names or
cros_test_lib.Directory objects. The 'contents' attribute of Directory
types is a directory structure representing the contents of the directory.
Examples:
- ['file1', 'file2']
- ['file1', Directory('directory', ['deepfile1', 'deepfile2']), 'file2']
"""
flattened = _FlattenStructure(base_path, dir_struct)
for f in flattened:
f = os.path.join(base_path, f)
if f.endswith(os.sep):
os.mkdir(f)
else:
osutils.Touch(f, makedirs=True)
def _VerifyDirectoryIterables(existing, expected):
"""Compare two iterables representing contents of a directory.
Paths in |existing| and |expected| will be compared for exact match.
Arguments:
existing: An iterable containing paths that exist.
expected: An iterable of paths that are expected.
Raises:
AssertionError when there is any divergence between |existing| and
|expected|.
"""
def FormatPaths(paths):
return '\n'.join(sorted(paths))
existing = set(existing)
expected = set(expected)
unexpected = existing - expected
if unexpected:
raise AssertionError('Found unexpected paths:\n%s'
% FormatPaths(unexpected))
missing = expected - existing
if missing:
raise AssertionError('These files were expected but not found:\n%s'
% FormatPaths(missing))
def _DirectoryIterator(base_path):
"""Iterates through the files and subdirs of a directory."""
for root, dirs, files in os.walk(base_path):
for e in [d + os.sep for d in dirs] + files:
yield os.path.join(root, e)
def VerifyOnDiskHierarchy(base_path, dir_struct):
"""Verify that an on-disk directory tree exactly matches a given structure.
Arguments:
See arguments of CreateOnDiskHierarchy()
Raises:
AssertionError when there is any divergence between the on-disk
structure and the structure specified by 'dir_struct'.
"""
expected = _FlattenStructure(base_path, dir_struct)
_VerifyDirectoryIterables(_DirectoryIterator(base_path), expected)
def VerifyTarball(tarball, dir_struct):
"""Compare the contents of a tarball against a directory structure.
Arguments:
tarball: Path to the tarball.
dir_struct: See CreateOnDiskHierarchy()
Raises:
AssertionError when there is any divergence between the tarball and the
structure specified by 'dir_struct'.
"""
contents = cros_build_lib.RunCommandCaptureOutput(
['tar', '-tf', tarball]).output.splitlines()
normalized = set()
for p in contents:
norm = os.path.normpath(p)
if p.endswith('/'):
norm += '/'
if norm in normalized:
raise AssertionError('Duplicate entry %r found in %r!' % (norm, tarball))
normalized.add(norm)
expected = _FlattenStructure('', dir_struct)
_VerifyDirectoryIterables(normalized, expected)
def _walk_mro_stacking(obj, attr, reverse=False):
iterator = iter if reverse else reversed
methods = (getattr(x, attr, None) for x in iterator(obj.__class__.__mro__))
seen = set()
for x in filter(None, methods):
x = getattr(x, 'im_func', x)
if x not in seen:
seen.add(x)
yield x
def _stacked_setUp(self):
self.__test_was_run__ = False
try:
for target in _walk_mro_stacking(self, '__raw_setUp__'):
target(self)
except:
# TestCase doesn't trigger tearDowns if setUp failed; thus
# manually force it ourselves to ensure cleanup occurs.
_stacked_tearDown(self)
raise
# Now mark the object as fully setUp; this is done so that
# any last minute assertions in tearDown can know if they should
# run or not.
self.__test_was_run__ = True
def _stacked_tearDown(self):
exc_info = None
for target in _walk_mro_stacking(self, '__raw_tearDown__', True):
#pylint: disable=W0702
try:
target(self)
except:
# Preserve the exception, throw it after running
# all tearDowns; we throw just the first also. We suppress
# pylint's warning here since it can't understand that we're
# actually raising the exception, just in a nonstandard way.
if exc_info is None:
exc_info = sys.exc_info()
if exc_info:
# Chuck the saved exception, w/ the same TB from
# when it occurred.
raise exc_info[0], exc_info[1], exc_info[2]
class StackedSetup(type):
"""Metaclass that extracts automatically stacks setUp and tearDown calls.
Basically this exists to make it easier to do setUp *correctly*, while also
suppressing some unittests misbehaviours- for example, the fact that if a
setUp throws an exception the corresponding tearDown isn't run. This sorts
it.
Usage of it is via usual metaclass approach; just set
`__metaclass__ = StackedSetup` .
Note that this metaclass is designed such that because this is a metaclass,
rather than just a scope mutator, all derivative classes derive from this
metaclass; thus all derivative TestCase classes get automatic stacking."""
def __new__(mcs, name, bases, scope):
if 'setUp' in scope:
scope['__raw_setUp__'] = scope.pop('setUp')
scope['setUp'] = _stacked_setUp
if 'tearDown' in scope:
scope['__raw_tearDown__'] = scope.pop('tearDown')
scope['tearDown'] = _stacked_tearDown
return type.__new__(mcs, name, bases, scope)
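# Illustrative sketch (not part of the original file): a test case opts into the
# stacked setUp/tearDown behaviour simply by declaring the metaclass.
class _StackedSetupExample(unittest.TestCase):
    __metaclass__ = StackedSetup

    def setUp(self):
        # Runs after every base-class setUp; a failure here still triggers the
        # stacked tearDowns.
        self.scratch = []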
class EasyAttr(dict):
"""Convenient class for simulating objects with attributes in tests.
An EasyAttr object can be created with any attributes initialized very
easily. Examples:
1) An object with .id=45 and .name="Joe":
testobj = EasyAttr(id=45, name="Joe")
2) An object with .title.text="Big" and .owner.text="Joe":
testobj = EasyAttr(title=EasyAttr(text="Big"), owner=EasyAttr(text="Joe"))
"""
__slots__ = ()
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise AttributeError(attr)
def __delattr__(self, attr):
try:
self.pop(attr)
except KeyError:
raise AttributeError(attr)
def __setattr__(self, attr, value):
self[attr] = value
def __dir__(self):
return self.keys()
class OutputCapturer(object):
"""Class with limited support for capturing test stdout/stderr output.
Class is designed as a 'ContextManager'. Example usage in a test method
of an object of TestCase:
with self.OutputCapturer() as output:
# Capturing
| weissj3/MWTools | Scripts/MakeNormalizedHistogram.py | Python | mit | 1,516 | 0.030343 |
import sys
import math as ma
import numpy as np
params = ["$\\varepsilon_{sph}$ & ", "$q$ & ", "$\\varepsilon$ & ", "$\mu$ & ", "R & ", "$\\theta$ & ", "$\phi$ &", " $\sigma$ & ", "$\\varepsilon$ & ", "$\mu$ & ", "R & ", "$\\theta$ & ", "$\phi$ & ", "$\sigma$ & ", "$\\varepsilon$ & ", "$\mu$ & ", "R & ", "$\\theta$ & ", "$\phi$ & ", "$\sigma$ & "]
count = 0
for i in range(1, len(sys.argv)):
if i == 3 : count += 1; print "\cline{1-9}"
if i == 9 : count += 1; print "\cline{1-1}\cline{4-15}"
if i == 15 : count += 1; print "\cline{1-1} \cline{10-21}"
|
a = [( ("%.2g" % k) if (abs(k) < 1000 and abs(k) > .01) else ("%.1e" % k)) for k in [j/(84046.0) for j in map(float, sys.argv[i].split())] ]
a[i-1] = "\textbf{" + a[i-1] + "}"
placeholder = [ "& ---" for j in range(i+1, len(sys.argv)) ]
if count == 0:
placeholder[2-i] = "& \multicolumn{1}{|c}{---}"
if count == 1:
a[2] = "\multicolumn{1}{|c}{" + a[2] + "} "
placeholder[8-i] = "& \multicolumn{1}{|c}{---}"
if count == 2:
a[8] = "\multicolumn{1}{|c}{" + a[8] + "}"
placeholder[14-i] = "& \multicolumn{1}{|c}{---}"
if count == 3:
a[14] = "\multicolumn{1}{|c}{" + a[14] + "}"
#print str(a[:i]).replace('[', '').replace(',', ' &').replace("'",'').replace(']', ' ')
print params[i-1] + str(a[:i]).replace('[', '').replace(',', ' &').replace("'",'').replace(']', ' ') + str(placeholder).replace('[', '').replace(',', '').replace("'",'').replace(']', '\\\\')
|
k340/SULI
|
setup.py
|
Python
|
bsd-3-clause
| 378
| 0.005291
|
from setuptools import setup
setup(
name="SULI",
packages=['SULI'],
version='0.0.1',
description="SULI project",
author='Kelin Kurzer-Ogul',
author_email='kelin.kurzerogul@gmail.com',
url='https://github.com/k340/SULI',
keywords=['SULI', 'Fermi', 'LAT', 'transients'],
classifiers=[],
install_requires=['numpy', 'astropy'],
)
|
machacek/information-retrieval-project
|
lucene-experiment/collection.py
|
Python
|
unlicense
| 618
| 0.011327
|
import os
from itertools import imap
from multiprocessing import Pool
from config import config
from document_topic import Document, Topic
class Collection(object):
def __init__(self, list_file, prefix=''):
self.names_list = [os.path.join(prefix, file_name.strip()) for file_name in list_file]
def __iter__(self):
pool = Pool(min(config.workers,len(self.names_list)))
return pool.imap(self.type, self.names_list)
#return imap(self.type, self.names_list)
class TopicCollection(Collection):
type = Topic
class DocumentCollection(Collection):
type = Document
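# --- Illustrative usage sketch (not part of the original file) ---
# Iterating a collection yields parsed Topic/Document objects built in a
# worker pool. The file name 'topics.list' and the prefix are assumptions.
if __name__ == '__main__':
    with open('topics.list') as list_file:
        topics = TopicCollection(list_file, prefix='data/topics')
        for topic in topics:
            print(topic)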
|
nett55/caniusepypy
|
caniusepypy/command.py
|
Python
|
apache-2.0
| 1,565
| 0
|
# Copyright 2014 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import setuptools
import caniusepypy as ciu
import caniusepypy.__main__ as ciu_main
from caniusepypy import pypi
class Command(setuptools.Command):
description = """Run caniusepypy over a setup.py file."""
user_options = []
def _dependencies(self):
projects = []
for attr in ('install_requires', 'tests_require'):
requirements = getattr(self.distribution, attr, None) or []
for project in requirements:
if not project:
continue
projects.append(pypi.just_name(project))
extras = getattr(self.distribution, 'extras_require', None) or {}
for value in extras.values():
projects.extend(map(pypi.just_name, value))
return projects
def initialize_options(self):
pass
def run(self):
ciu_main.check(self._dependencies())
def finalize_options(self):
pass
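# --- Illustrative sketch (not part of the original module) ---
# One way a project could expose this command from its own setup.py; the
# project metadata below is invented, and the only real hooks assumed are
# setuptools' cmdclass mapping and the Command class defined above.
#
#   from setuptools import setup
#   import caniusepypy.command
#
#   setup(
#       name='example-project',
#       version='0.1',
#       install_requires=['requests'],
#       cmdclass={'caniusepypy': caniusepypy.command.Command},
#   )
#
# after which `python setup.py caniusepypy` runs the dependency check.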
|
axbaretto/beam
|
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/functional/class_members_py27.py
|
Python
|
apache-2.0
| 1,691
| 0.011236
|
""" Various tests for class members access. """
# pylint: disable=R0903,print-statement,no-absolute-import, metaclass-assignment,import-error,no-init,missing-docstring, wrong-import-order,wrong-import-position
from missing import Missing
class MyClass(object):
"""class docstring"""
def __init__(self):
"""init"""
self.correct = 1
def test(self):
"""test"""
self.correct += 2
self.incorrect += 2 # [no-member]
del self.havenot # [no-member]
self.nonexistent1.truc() # [no-member]
self.nonexistent2[1] = 'hehe' # [no-member]
class XYZMixin(object):
"""access to undefined members should be ignored in mixin classes by
default
"""
def __init__(self):
print self.nonexistent
class NewClass(object):
"""use object.__setattr__"""
def __init__(self):
self.__setattr__('toto', 'tutu')
from abc import ABCMeta
class TestMetaclass(object):
"""
|
Test attribute access for metaclasses. """
__metaclass__ = ABCMeta
class Metaclass(type):
""" metaclass """
@classmethod
def test(mcs):
""" classmethod """
class UsingMetaclass(object):
""" empty """
__metaclass__ = Metaclass
#TestMetaclass.register(int)
#UsingMetaclass.test()
TestMetaclass().register(int) # [no-member]
UsingMetaclass().test() # [no-member]
class NoKnownBases(Missing):
"""Don't emit no-m
|
ember if we don't know the bases of a class."""
NoKnownBases().lalala()
# Should be enabled on astroid > 1.4.0
#class MetaClass(object):
# """Look some methods in the implicit metaclass."""
#
# @classmethod
# def whatever(cls):
# return cls.mro() + cls.missing()
|
madrisan/pyoocs
|
oocs/services.py
|
Python
|
gpl-3.0
| 6,244
| 0.004164
|
# This python module is part of the oocs scanner for Linux.
# Copyright (C) 2015 Davide Madrisan <davide.madrisan.gmail.com>
import glob
from os import sep
from os.path import join
from pwd import getpwuid
from oocs.filesystem import Filesystems, UnixCommand, UnixFile
from oocs.io import Config, message_add, quote, unlist
from oocs._oocsext import runlevel
class Services(object):
module_name = 'services'
def __init__(self, verbose=False):
self.verbose = verbose
self.scan = {
'module' : self.module_name,
'checks' : {},
'status' : {}
}
try:
self.cfg = Config().module(self.module_name)
self.enabled = (self.cfg.get('enable', 1) == 1)
except KeyError:
message_add(self.scan['status'], 'warning',
self.module_name +
' directive not found in the configuration file')
self.cfg = {}
self.required = self.cfg.get("required", [])
self.forbidden = self.cfg.get("forbidden", [])
self.runlevel = self.cfg.get("runlevel", '')
self.enabled = (self.cfg.get('enable', 1) == 1)
self.verbose = (self.cfg.get('verbose', verbose) == 1)
def configuration(self): return self.cfg
def sysv_runlevel(self):
try:
return runlevel()
except:
return ''
class Service(Services):
def __init__(self, service):
"""
Note: service can be a chain of commands as in the following example:
'syslogd|/sbin/rsyslogd'
The service string must match the one displayed by 'ps'.
"""
Services.__init__(self)
self.service = service
self.procfs = Filesystems().procfs
self.state, self.fullstatus = self._status()
def _proc_status_parser(self, pid):
procfile = glob.glob(join(self.procfs, str(pid), 'status'))[0]
rawdata = UnixFile(procfile).readlines() or []
data = {}
for line in rawdata:
cols = line.split(':')
key = cols[0].lower()
values = cols[1].rstrip('\n').split()
data[key] = values
return data
def _status(self):
"""
Return a tuple (state-string, full-status-infos-dict).
state-string will be 'running' or 'down' and will reflect the state of
the 'self.service' process(es).
full-status-infos-dict is a dictionary containing the information
provided by /proc/<pid>/status of each process pid:
srv_full_status[pidnum] = dictionary containing the status of the
process with pid equal to pidnum
"""
cmdlines = glob.glob(join(self.procfs, '*', 'cmdline'))
srv_state = 'down'
srv_full_status = {}
for f in cmdlines:
for srv in self.service.split('|'):
cmdlinefile = UnixFile(f)
if not cmdlinefile.isfile(): continue
if cmdlinefile.readfile().startswith(srv): # FIXME
pid = f.split(sep)[2]
proc_pid_status = self._proc_status_parser(pid)
srv_full_status[pid] = proc_pid_status
srv_state = 'running'
return (srv_state, srv_full_status)
def name(self):
return self.service
def pid(self):
"""Return the list of pid numbers or an empty list when the process
is not running"""
return self.fullstatus.keys()
def ppid(self):
ppids = []
for pid in self.pid():
ppid = self.fullstatus.get(pid)['ppid'][0]
ppids.append(ppid)
return ppids
def state(self):
return self.state
def uid(self):
real_uids = []
for pid in self.pid():
# Real, effective, saved set, and file system UIDs
uids = self.fullstatus.get(pid)['uid']
real_uids.append(uids[0])
return real_uids
def gid(self):
real_gids = []
for pid in self.pid():
# Real, effective, saved set, and file system GIDs
gids = self.fullstatus.get(pid)['gid']
real_gids.append(gids[0])
return real_gids
def owner(self):
owners = []
for uid in self.uid():
owners.append(getpwuid(int(uid)).pw_name)
return owners
def threads(self):
threads_num = 0
for pid in self.pid():
threads_num += int(self.fullstatus.get(pid)['threads'][0])
return threads_num
def check_services(verbose=False):
services = Services(verbose=verbose)
localscan = {}
curr_runlevel = services.sysv_runlevel()
if services.runlevel and curr_runlevel != services.runlevel:
message_add(localscan, 'warning',
'the current runlevel is ' + quote(curr_runlevel) +
' but should be ' + quote(services.runlevel))
if not services.enabled:
if verbose:
message_add(localscan, 'info',
'Skipping ' + quote(services.module_name) +
' (disabled in the configuration)')
return
for srv in services.required:
service = Service(srv)
pids = service.pid()
owners = service.owner()
if pids and services.verbose:
message_add(localscan, 'info',
'the service ' + quote(service.name()) +
' is running (with pid:%s owner:%s)' % (
unlist(pids,sep=','), unlist(owners,sep=',')))
elif not pids:
message_add(localscan, 'critical',
'the service ' + quote(service.name()) + ' is not running')
for srv in services.forbidden:
service = Service(srv)
pids = service.pid()
if pids:
message_add(localscan, 'critical',
'the service ' + quote(service.name()) +
' should not be running')
elif services.verbose:
message_add(localscan, 'info',
'the service ' + quote(service.name()) +
' is not running as required')
message_add(services.scan['checks'], 'running services', localscan)
return services.scan
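# --- Illustrative sketch (not part of the original module) ---
# How a caller might run the check and inspect the scan dictionary whose
# 'module'/'checks'/'status' keys are built above; everything else here is
# an assumption for demonstration.
if __name__ == '__main__':
    scan = check_services(verbose=True)
    if scan:
        print(scan['module'])
        print(scan['checks'].get('running services', {}))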
|
ZeitOnline/zeit.newsletter
|
src/zeit/newsletter/browser/tests/test_form.py
|
Python
|
bsd-3-clause
| 668
| 0
|
import zeit.newsletter.testing
class MetadataTest(zeit.newsletter.testing.SeleniumTestCase):
def test_form_should_save_entered_data_on_blur(self):
|
s = self.selenium
self.open('/repository/newsletter/@@checkout')
s.waitForElementPresent('id=metadata.subject')
s.assertValue('id=metadata.subject', '')
s.type('id=metadata.subject', 'flubber\t')
s.waitForElementNotPresent('css=.field.dirty')
# Re-open the page and verify that the data is still there
s.clickAndWait('link=Edit contents')
s.waitForElementPresent('id=metadata.subject')
s.assertValue('id=metadata.subject', 'flubber')
|
mayblue9/scikit-learn
|
examples/classification/plot_lda_qda.py
|
Python
|
bsd-3-clause
| 5,046
| 0.001585
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
# filled Gaussian at 2 standard deviation
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
|
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# Linear Discriminant Analysis
lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# Quadratic Discriminant Analysis
qda = QuadraticDiscriminantAnalysis()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
|
ooovector/qtlab_replacement
|
tunable_coupling_transmons/Misis_two_qubit_August_2019_setup.py
|
Python
|
gpl-3.0
| 9,950
| 0.003973
|
from qsweepy.instruments import *
from qsweepy import *
from qsweepy import awg_iq_multi
import numpy as np
device_settings = {'vna_address': 'TCPIP0::10.20.61.48::inst0::INSTR',
'lo1_address': 'TCPIP0::10.20.61.59::inst0::INSTR',
'lo1_timeout': 5000, 'rf_switch_address': '10.20.61.224',
'use_rf_switch': True,
'pxi_chassis_id': 0,
'hdawg_address': 'hdawg-dev8108',
'sa_address': 'TCPIP0::10.20.61.56::inst0::INSTR',
'adc_timeout': 10,
'adc_trig_rep_period': 100 * 125, # 10 kHz rate period
'adc_trig_width': 2, # 80 ns trigger length
}
cw_settings = {}
pulsed_settings = {'lo1_power': 18,
'vna_power': 16,
'ex_clock': 2000e6, # 1 GHz - clocks of some devices
'rep_rate': 10e3, # 10 kHz - pulse sequence repetition rate
# 500 ex_clocks - all waves is shorten by this amount of clock cycles
# to verify that M3202 will not miss next trigger
# (awgs are always missing trigger while they are still outputting waveform)
'global_num_points_delta': 800,
'hdawg_ch0_amplitude': 1.0,
'hdawg_ch1_amplitude': 1.0,
'hdawg_ch2_amplitude': 0.8,
'hdawg_ch3_amplitude': 0.8,
'hdawg_ch4_amplitude': 1.0,
'hdawg_ch5_amplitude': 1.0,
'hdawg_ch6_amplitude': 1.0,
'hdawg_ch7_amplitude': 1.0,
'lo1_freq': 3.3e9,#3.70e9,
'pna_freq': 6.06e9,
#'calibrate_delay_nop': 65536,
'calibrate_delay_nums': 200,
'trigger_readout_channel_name': 'ro_trg',
'trigger_readout_length': 200e-9,
'modem_dc_calibration_amplitude': 1.0,
'adc_nop': 1024,
'adc_nums': 10000, ## Do we need control over this? Probably, but not now... WUT THE FUCK MAN
}
class hardware_setup():
def __init__(self, device_settings, pulsed_settings):
self.device_settings = device_settings
self.pulsed_settings = pulsed_settings
self.cw_settings = cw_settings
self.hardware_state = 'undefined'
self.pna = None
self.lo1 = None
self.rf_switch = None
self.sa = None
self.coil_device = None
self.hdawg = None
self.adc_device = None
self.adc = None
self.ro_trg = None
self.coil = None
self.iq_devices = None
def open_devices(self):
# RF switch for making sure we know what sample we are measuring
self.pna = Agilent_N5242A('pna', address=self.device_settings['vna_address'])
self.lo1 = Agilent_E8257D('lo1', address=self.device_settings['lo1_address'])
self.lo1._visainstrument.timeout = self.device_settings['lo1_timeout']
if self.device_settings['use_rf_switch']:
self.rf_switch = nn_rf_switch('rf_switch', address=self.device_settings['rf_switch_address'])
self.sa = Agilent_N9030A('pxa', address=self.device_settings['sa_address'])
self.hdawg = Zurich_HDAWG1808(self.device_settings['hdawg_address'])
self.dummy_awg = dummy_awg.DummyAWG(channels=1)
self.coil_device = self.hdawg
self.adc_device = TSW14J56_evm()
self.adc_device.timeout = self.device_settings['adc_timeout']
self.adc = TSW14J56_evm_reducer(self.adc_device)
self.adc.output_raw = True
self.adc.last_cov = False
self.adc.avg_cov = False
self.adc.resultnumber = False
self.adc_device.set_trig_src_period(self.device_settings['adc_trig_rep_period']) # 10 kHz period rate
self.adc_device.set_trig_src_width(self.device_settings['adc_trig_width']) # 80 ns trigger length
# self.hardware_state = 'undefined'
def set_pulsed_mode(self):
self.lo1.set_status(1) # turn on lo1 output
self.lo1.set_power(self.pulsed_settings['lo1_power'])
self.lo1.set_frequency(self.pulsed_settings['lo1_freq'])
self.pna.set_power(self.pulsed_settings['vna_power'])
self.pna.write("OUTP ON")
self.pna.write("SOUR1:POW1:MODE ON")
self.pna.write("SOUR1:POW2:MODE OFF")
self.pna.set_sweep_mode("CW")
self.pna.set_frequency(self.pulsed_settings['pna_freq'])
self.hdawg.stop()
self.hdawg.set_clock(self.pulsed_settings['ex_clock'])
self.hdawg.set_clock_source(1)
# setting repetition period for slave devices
# 'global_num_points_delay' is needed to verify that M3202A and other slave devices will be free
# when next trigger arrives.
global_num_points = int(np.round(
self.pulsed_settings['ex_clock'] / self.pulsed_settings['rep_rate'] - self.pulsed_settings[
'global_num_points_delta']))
# global_num_points = 20000
self.hdawg.set_nop(global_num_points)
self.hdawg.clear()
|
# the sample length, which is obviously the same thing, has to be set on every AWG.
# good thing there is currently only one of them.
# this is zashkvar WUT THE FUCK MAN
self.hdawg.set_trigger_impedance_1e3()
self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
self.hdawg.set_dig_trig1_slope([1, 1, 1, 1]) # 0 - Level sensitive trigger, 1 - Rising edge trigger,
# 2 - Falling edge trigger, 3 - Rising or falling edge trigger
self.hdawg.set_dig_trig1_source([0, 0, 0, 0])
self.hdawg.set_dig_trig2_slope([1, 1, 1, 1])
self.hdawg.set_trig_level(0.6)
for sequencer in range(4):
self.hdawg.send_cur_prog(sequencer=sequencer)
self.hdawg.set_marker_out(channel=np.int(2 * sequencer), source=4) # set marker 1 to awg mark out 1 for sequencer
self.hdawg.set_marker_out(channel=np.int(2 * sequencer + 1),
source=7) # set marker 2 to awg mark out 2 for sequencer
for channel in range(8):
self.hdawg.set_amplitude(channel=channel, amplitude=self.pulsed_settings['hdawg_ch%d_amplitude'%channel])
self.hdawg.set_offset(channel=channel, offset=0 * 1.0)
self.hdawg.set_digital(channel=channel, marker=[0]*(global_num_points))
self.hdawg.daq.set([['/{}/sigouts/{}/range'.format(self.hdawg.device, channel), 0.6]])
self.hdawg.daq.set([['/{}/sigouts/4/range'.format(self.hdawg.device), 2]])
self.hdawg.set_all_outs()
self.hdawg.run()
self.ro_trg = awg_digital.awg_digital(self.hdawg, 1, delay_tolerance=20e-9) # triggers readout card
self.coil = awg_channel.awg_channel(self.hdawg, 5) # coil control
# ro_trg.mode = 'set_delay' #M3202A
# ro_trg.delay_setter = lambda x: adc.set_trigger_delay(int(x*adc.get_clock()/iq_ex.get_clock()-readout_trigger_delay)) #M3202A
self.ro_trg.mode = 'waveform' # AWG5014C
self.adc.set_nop(self.pulsed_settings['adc_nop'])
self.adc.set_nums(self.pulsed_settings['adc_nums'])
def set_switch_if_not_set(self, value, channel):
if self.rf_switch.do_get_switch(channel=channel) != value:
self.rf_switch.do_set_switch(value, channel=channel)
def setup_iq_channel_connections(self, exdir_db):
# intermediate frequencies for the heterodyne scheme, new:
self.iq_devices = {'iq_ex1': awg_iq_multi.Awg_iq_multi(self.hdawg, self.hdawg, 2, 3, self.lo1, exdir_db=exdir_db),
# M3202A
'iq_ex2': awg_iq_multi.Awg_iq_multi(self.hdawg, self.dummy_awg, 5, 0, self.lo1, exdir_db=exdir_db),
'iq_ex3': awg_iq_multi.Awg_iq_multi(self.hdawg, self.hdawg, 6, 7, self.lo1, exdir_db=exdir_db),
# M3202A
'iq_ro': awg_iq_multi.Awg_iq_multi(self.hdawg, self.hdawg, 0, 1, self
|
bjolivot/ansible
|
lib/ansible/modules/cloud/openstack/os_nova_flavor.py
|
Python
|
gpl-3.0
| 8,477
| 0.004719
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
|
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_nova_flavor
short_description: Manage OpenStack compute flavors
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Add or remove flavors from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(ram), I(vcpus), and I(disk) are all required. There are no
default values for those parameters.
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Flavor name.
required: true
ram:
description:
- Amount of memory, in MB.
required: false
default: null
vcpus:
description:
- Number of virtual CPUs.
required: false
default: null
disk:
description:
- Size of local disk, in GB.
required: false
default: null
ephemeral:
description:
- Ephemeral space size, in GB.
required: false
default: 0
swap:
description:
- Swap space size, in MB.
required: false
default: 0
rxtx_factor:
description:
- RX/TX factor.
required: false
default: 1.0
is_public:
description:
- Make flavor accessible to the public.
required: false
default: true
flavorid:
description:
- ID for the flavor. This is optional as a unique UUID will be
assigned if a value is not specified.
required: false
default: "auto"
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
extra_specs:
description:
- Metadata dictionary
required: false
default: None
version_added: "2.3"
requirements: ["shade"]
'''
EXAMPLES = '''
- name: "Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of local disk, and 10GB of ephemeral."
os_nova_flavor:
cloud: mycloud
state: present
name: tiny
ram: 1024
vcpus: 1
disk: 10
ephemeral: 10
- name: "Delete 'tiny' flavor"
os_nova_flavor:
cloud: mycloud
state: absent
name: tiny
- name: Create flavor with metadata
os_nova_flavor:
cloud: mycloud
state: present
name: tiny
ram: 1024
vcpus: 1
disk: 10
extra_specs:
"quota:disk_read_iops_sec": 5000
"aggregate_instance_extra_specs:pinned": false
'''
RETURN = '''
flavor:
description: Dictionary describing the flavor.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
extra_specs:
description: Flavor metadata
returned: success
type: dict
sample:
"quota:disk_read_iops_sec": 5000
"aggregate_instance_extra_specs:pinned": false
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _system_state_change(module, flavor):
state = module.params['state']
if state == 'present' and not flavor:
return True
if state == 'absent' and flavor:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
state = dict(required=False, default='present',
choices=['absent', 'present']),
name = dict(required=False),
# required when state is 'present'
ram = dict(required=False, type='int'),
vcpus = dict(required=False, type='int'),
disk = dict(required=False, type='int'),
ephemeral = dict(required=False, default=0, type='int'),
swap = dict(required=False, default=0, type='int'),
rxtx_factor = dict(required=False, default=1.0, type='float'),
is_public = dict(required=False, default=True, type='bool'),
flavorid = dict(required=False, default="auto"),
extra_specs = dict(required=False, default=None, type='dict'),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec,
supports_check_mode=True,
required_if=[
('state', 'present', ['ram', 'vcpus', 'disk'])
],
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
extra_specs = module.params['extra_specs'] or {}
try:
cloud = shade.operator_cloud(**module.params)
flavor = cloud.get_flavor(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, flavor))
if state == 'present':
if not flavor:
flavor = cloud.create_flavor(
name=name,
ram=module.params['ram'],
vcpus=module.params['vcpus'],
disk=module.params['disk'],
flavorid=module.params['flavorid'],
ephemeral=module.params['ephemeral'],
swap=module.params['swap'],
rxtx_factor=module.params['rxtx_factor'],
is_public=module.params['is_public']
)
changed=True
else:
changed=False
old_extra_specs = flavor['extra_specs']
new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])
unset_keys = set(flavor['extra_specs'].keys()) - set(extra_specs.keys())
if unset_keys:
cloud.unset_flavor_specs(flavor['id'], unset_keys)
if old_extra_specs != new_extra_specs:
cloud.set_flavor_specs(flavor['id'], extra_specs)
changed = (changed or old_extra_specs != new_extra_specs)
module.exit_json(changed=changed,
flavor=flavor,
id=flavor['id'])
elif state == 'absent':
if flavor:
cloud.delete_flavor(name)
module.exit_json(changed=True)
|
glasnt/voc
|
tests/builtins/test_input.py
|
Python
|
bsd-3-clause
| 687
| 0
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class InputTests(TranspileTestCase):
pass
# FIXME: This test can't run without a redirection for stdin.
# class BuiltinInputFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
# functions = ["input"]
# not_implemented = [
# 'test_bool',
# 'test_bytearray',
# 'test_bytes',
# 'test_class',
# 'test_complex',
# 'test_dict',
# 'test_float',
# 'test_frozenset',
# 'test_int',
# 'test_list',
# 'test_None',
# 'test_NotImplemented',
# 'test_set',
# 'test_str',
# 'test_tuple',
# ]
|
SavinaRoja/npyscreen2
|
npyscreen2/logs.py
|
Python
|
gpl-3.0
| 1,956
| 0.001022
|
# -*- coding: utf-8 -*-
"""
"""
import logging
import logging.handlers
import sys
STANDARD_FORMAT = '%(name)s [%(levelname)s] %(message)s'
MESSAGE_ONLY_FORMAT = '%(message)s'
def get_level(level_string):
"""
Returns an appropriate logging level integer from a string name
"""
levels = {'debug': logging.DEBUG, 'info': logging.INFO,
'warning': logging.WARNING, 'error': logging.ERROR,
'critical': logging.CRITICAL}
try:
level = levels[level_string.lower()]
except KeyError:
sys.exit('{0} is not a recognized logging level'.format(level_string))
else:
return level
def activate_logging(level=None):
log = logging.getLogger('npyscreen2')
if level is None:
log.setLevel(logging.DEBUG)
else:
log.setLevel(get_level(level))
def add_rotating_file_handler(filename,
frmt=None,
level=None,
filtr=None,
max_bytes=0,
backup_count=0,
mode='a'):
log = logging.getLogger('npyscreen2')
handler = logging.handlers.RotatingFileHandler(filename,
maxBytes=max_bytes,
backupCount=backup_count,
encoding='utf-8',
mode=mode)
if level is None:
handler.setLevel(logging.WARNING)
else:
handler.setLevel(get_level(level))
if filtr is not None:
handler.addFilter(logging.Filter(filtr))
if frmt is None:
handler.setFormatter(logging.Formatter(STANDARD_FORMAT))
else:
handler.setFormatter(logging.Formatter(frmt))
log.addHandler(handler)
#def deactivate_logging():
#log = logging.getLogger('npyscreen2')
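# --- Illustrative usage sketch (not part of the original module) ---
# Configure the 'npyscreen2' logger with the two helpers above and emit a
# message; the file name and size limits are assumptions.
if __name__ == '__main__':
    activate_logging(level='debug')
    add_rotating_file_handler('npyscreen2.log',
                              level='info',
                              max_bytes=1024 * 1024,
                              backup_count=3)
    logging.getLogger('npyscreen2').info('logging configured')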
|
Warbo/bugseverywhere
|
libbe/command/merge.py
|
Python
|
gpl-2.0
| 6,074
| 0.001976
|
# Copyright (C) 2008-2012 Chris Ball <cjb@laptop.org>
# Gianluca Montecchi <gian@grys.it>
# W. Trevor King <wking@tremily.us>
#
# This file is part of Bugs Everywhere.
#
# Bugs Everywhere is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
#
# Bugs Everywhere is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Bugs Everywhere. If not, see <http://www.gnu.org/licenses/>.
import copy
import os
import libbe
import libbe.command
import libbe.command.util
class Merge (libbe.command.Command):
"""Merge duplicate bugs
>>> import sys
>>> import libbe.bugdir
>>> import libbe.comment
>>> bd = libbe.bugdir.SimpleBugDir(memory=False)
>>> io = libbe.command.StringInputOutput()
>>> io.stdout = sys.stdout
>>> ui = libbe.command.UserInterface(io=io)
>>> ui.storage_callbacks.set_storage(bd.storage)
>>> cmd = Merge(ui=ui)
>>> a = bd.bug_from_uuid('a')
>>> a.comment_root.time = 0
>>> dummy = a.new_comment('Testing')
>>> dummy.time = 1
>>> dummy = dummy.new_reply('Testing...')
>>> dummy.time = 2
>>> b = bd.bug_from_uuid('b')
>>> b.status = 'open'
>>> b.comment_root.time = 0
>>> dummy = b.new_comment('1 2')
>>> dummy.time = 1
>>> dummy = dummy.new_reply('1 2 3 4')
>>> dummy.time = 2
>>> ret = ui.run(cmd, args=['/a', '/b'])
Merged bugs #abc/a# and #abc/b#
>>> bd.flush_reload()
>>> a = bd.bug_from_uuid('a')
>>> a.load_comments()
>>> a_comments = sorted([c for c in a.comments()],
... cmp=libbe.comment.cmp_time)
>>> mergeA = a_comments[0]
>>> mergeA.time = 3
>>> print a.string(show_comments=True)
... # doctest: +ELLIPSIS, +REPORT_UDIFF
ID : a
Short name : abc/a
Severity : minor
Status : open
Assigned :
Reporter :
Creator : John Doe <jdoe@example.com>
Created : ...
Bug A
--------- Comment ---------
Name: abc/a/...
From: ...
Date: ...
<BLANKLINE>
Testing
--------- Comment ---------
Name: abc/a/...
From: ...
Date: ...
<BLANKLINE>
Testing...
--------- Comment ---------
Name: abc/a/...
From: ...
Date: ...
<BLANKLINE>
Merged from bug #abc/b#
--------- Comment ---------
Name: abc/a/...
From: ...
Date: ...
<BLANKLINE>
1 2
--------- Comment ---------
Name: abc/a/...
From: ...
Date: ...
<BLANKLINE>
1 2 3 4
>>> b = bd.bug_from_uuid('b')
>>> b.load_comments()
>>> b_comments = sorted([c for c in b.comments()],
... libbe.comment.cmp_time)
>>> mergeB = b_comments[0]
>>> mergeB.time = 3
>>> print b.string(show_comments=True)
... # doctest: +ELLIPSIS, +REPORT_UDIFF
ID : b
Short name : abc/b
Severity : minor
Status : closed
Assigned :
Reporter :
Creator : Jane Doe <jdoe@example.com>
Created : ...
Bug B
--------- Comment ---------
Name: abc/b/...
From: ...
Date: ...
<BLANKLINE>
1 2
--------- Comment ---------
Name: abc/b/...
From: ...
Date: ...
<BLANKLINE>
1 2 3 4
--------- Comment ---------
Name: abc/b/...
From: ...
Date: ...
<BLANKLINE>
Merged into bug #abc/a#
>>> print b.status
closed
>>> ui.cleanup()
>>> bd.cleanup()
"""
name = 'merge'
def __init__(self, *args, **kwargs):
libbe.command.Command.__init__(self, *args, **kwargs)
self.args.extend([
|
libbe.command.Argument(
name='bug-id', metavar='BUG-ID', default=None,
completion_callback=libbe.command.util.complete_bug_id),
libbe.command.Argument(
name='bug-id-to-merge', metavar='BUG-ID', default=None,
completion_callback=libbe.command.util.complete_bug_id),
|
])
def _run(self, **params):
storage = self._get_storage()
bugdirs = self._get_bugdirs()
bugdirA,bugA,comment = (
libbe.command.util.bugdir_bug_comment_from_user_id(
bugdirs, params['bug-id']))
bugA.load_comments()
bugdirB,bugB,dummy_comment = (
libbe.command.util.bugdir_bug_comment_from_user_id(
bugdirs, params['bug-id-to-merge']))
bugB.load_comments()
mergeA = bugA.new_comment('Merged from bug #%s#' % bugB.id.long_user())
newCommTree = copy.deepcopy(bugB.comment_root)
for comment in newCommTree.traverse(): # all descendant comments
comment.bug = bugA
# uuids must be unique in storage
if comment.alt_id == None:
comment.storage = None
comment.alt_id = comment.uuid
comment.storage = storage
comment.uuid = libbe.util.id.uuid_gen()
comment.save() # force onto disk under bugA
for comment in newCommTree: # just the child comments
mergeA.add_reply(comment, allow_time_inversion=True)
bugB.new_comment('Merged into bug #%s#' % bugA.id.long_user())
bugB.status = 'closed'
print >> self.stdout, 'Merged bugs #%s# and #%s#' \
% (bugA.id.user(), bugB.id.user())
return 0
def _long_help(self):
return """
The second bug (B) is merged into the first (A). This adds merge
comments to both bugs, closes B, and appends B's comment tree to A's
merge comment.
"""
|
zibawa/zibawa
|
front/views.py
|
Python
|
gpl-3.0
| 12,712
| 0.014553
|
from __future__ import absolute_import
#from urllib.parse import urlparse, urlunparse
from builtins import str
from builtins import range
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,AdminPasswordChangeForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.shortcuts import render
from django.template.response import TemplateResponse
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
#from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.translation import gettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
UserModel = get_user_model()
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.template import loader
import random
import string
#import user
from .forms import UserForm
from stack_configs.stack_functions import createInfluxDB
from stack_configs.ldap_functions import addToLDAPGroup,resetLDAPpassword,createLDAPuser
from stack_configs.grafana_functions import GrafanaUser,testObj
import logging
logger = logging.getLogger(__name__)
# Create your views here.
# Create your views here.
#from django.contrib.auth.forms import UserCreationForm
def index(request):
template = loader.get_template('welcome.html')
result="welcome"
context = {
'content':result,
'has_permission':request.user.is_authenticated,
'is_popup':False,
'title':'welcome!',
'site_title':'zibawa',
'site_url':settings.SITE_URL
}
return HttpResponse(template.render(context, request))
def create_account(request):
template = loader.get_template('admin/base_site.html')
if request.method == "POST":
form = UserForm(request.POST)
if form.is_valid():
password = form.cleaned_data['password']
new_user = User.objects.create_user(**form.cleaned_data)
#new_user.is_staff=True
#new_user.save()
if (createLDAPuser(new_user,password)):
if (addToLDAPGroup(new_user.username,'active')):
if (addToLDAPGroup(new_user.username,'editor')):
result=createAndConfigureGrafana(new_user,password)
if (result.status):
if createInfluxDB(new_user): #creates a user database in influx
return HttpResponseRedirect('/thanks/')
return HttpResponseRedirect('/account_create_error/')
else:
form = UserForm()
context = {
'has_permission':request.user.is_authenticated,
'is_popup':False,
'form':form,
'title':'New User Creation',
'site_title':'zibawa',
'site_url':settings.SITE_URL
}
return render(request,'form.html',context)
def thanks(request):
template = loader.get_template('thanks.html')
context = {
'content':'Thanks. Please log in to your dashboard',
'title':'Your account has been created',
'is_popup':False,
'has_permission':request.user.is_authenticated,
'site_title':'zibawa',
'site_url':settings.SITE_URL
}
return HttpResponse(template.render(context, request))
def account_create_error(request):
template = loader.get_template('admin/base_site.html')
context = {
'content':'Sorry. Something went wrong during the creation of your account. Please contact your administrator',
'title':'Error',
'is_popup':False,
'has_permission':request.user.is_authenticated,
'site_title':'zibawa',
'site_url':settings.SITE_URL
}
return HttpResponse(template.render(context, request))
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
def zibawa_password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
extra_context=None):
"""
ZIBAWA NOTE. THIS VIEW CODE IS COPIED FROM DJANGO DEFAULT VIEW WITH MINOR
MODIFICATIONS TO UPDATE PASSWORD IN LDAP (INSTEAD OF THE DJANGO DATABASE)
https://github.com/django/django/blob/master/django/contrib/auth/views.py
Check the hash in a password reset link and present a form for entering a
new password.
warnings.warn("The password_reset_confirm() view is superseded by the "
"class-based PasswordResetConfirmView().",
RemovedInDjango21Warning, stacklevel=2)"""
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = _('Enter new password')
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
#ZIBAWA MODIFICATIONS START HERE
new_password = form.cleaned_data['new_password1']
if(resetLDAPpassword(user.username,new_password)):
#change Grafana password
grafana_user=GrafanaUser(request.user.id, request.user.username,new_password,request.user.email)
logger.debug('resetting Grafana password for %s',request.user.username)
if not (grafana_user.changeGrafanaPassword()):
#if fails, currently we log but carry on regardless.
logger.warning('couldnt reset Grafana password for %s',request.user.username)
return HttpResponseRedirect(post_reset_redirect)
else:
#if result from LDAP is not what we expect, or if no result
logger.warning('couldnt reset LDAP password')
title = _('Could not reset LDAP password')
#ZIBAWA MODIFICATIONS END HERE
else:
form = set_password_form(user)
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
'is_popup':False,
'has_permission':request.user.is_authenticated,
|
safehammad/mancify
|
mancify/dialects/manc.py
|
Python
|
mit
| 7,080
| 0.053107
|
from __future__ import (
unicode_literals,
absolute_import,
division,
print_function,
)
# Make Py2's str type like Py3's
str = type('')
# Rules that take into account part of speech to alter text
structure_rules = [
((["JJ*","NN*"],),
(["chuffing",0,1],),
0.1),
((["."],),
(["our","kid",0],["init",0],["and","that",0],["and","stuff",0]),
0.1),
((["NN"],),
(["thing"],),
0.05),
((["START"],),
([0,"here","yar","."],),
0.05),
]
# Words to be ignored by the translator
ignores = [ "i","a","be","will" ]
# Direct word substitutions
word_rules = [
(("and",),
("n'",)),
(("of",),
("ov",)),
(("her",),
("'er",)),
(("my",),
("me",)),
(("what",),
("wot",)),
(("our",),
("ah",)),
(("acceptable","ace","awesome","brilliant","excellent","fantastic","good",
"great","likable","lovely","super","smashing","nice","pleasing",
"rad","superior","worthy","admirable","agreeable","commendable",
"congenial","deluxe","honorable","honourable","neat","precious",
"reputable","splendid","stupendous","exceptional","favorable",
"favourable","marvelous","satisfactory","satisfying","valuable",
"wonderful","fine","perfect","special","exciting","amazing","succeeded",
"worked","successful"),
("buzzin'","top","mint","boss","sound","fit","sweet","madferit","safe","raz",
"bob on","bangin'","peach","bazzin'","kewl","quality")),
(("anything",),
("owt",)),
(("nothing","none","zero","blank","null","void","nought",),
("nowt",)),
(("disappointed","unhappy","sad","melancholy",),
("gutted",)),
(("break","damage","smash","crack","destroy","annihilate","obliterate",
"corrupt","ruin","spoil","wreck","trash","fail",),
("knacker","bugger",)),
(("bad","poor","rubbish","broken","errored","damaged","atrocious","awful",
"cheap","crummy","dreadful","lousy","rough","unacceptable",
"garbage","inferior","abominable","amiss","beastly","careless",
"cheesy","crap","crappy","cruddy","defective","deficient",
"erroneous","faulty","incorrect","inadequate","substandard",
"unsatisfactory","dysfunctional","malfunctioning","corrupt","failed",),
("naff","shit","knackered","buggered","pants","pear-shaped","tits up",
"ragged","devilled","out of order","bang out of order","biz","kippered",
"bobbins")),
(("error","mistake","problem",),
("cock up","balls up")),
(("very","exceedingly","mostly","sheer","exceptionally","genuinely",
"especially","really"),
("well","bare","pure","dead","proper",)),
(("numerous","many","all","most",),
("bare","pure",)),
(("mad","crazy","insane","crazed","kooky","nuts","nutty","silly","wacky",
"beserk","cuckoo","potty","batty","bonkers","unhinged","mental",
"idiotic","stupid","moronic","dumb","foolish",),
("barmy",)),
(("delighted","pleased","happy","cheerful","contented","ecstatic","elated",
"glad","joyful","joyous","jubilant","lively","merry","overjoyed",
"peaceful","pleasant","pleased","thrilled","upbeat","blessed",
"blest","blissful","captivated","gleeful","gratified","jolly",
"mirthful","playful","proud",),
("chuffed","buzzin'")),
(("things","stuff","elements","parts","pieces","facts","subjects","situations",
"concepts","concerns","items","materials","objects","files",),
("shit",)),
(("attractive","alluring","beautiful","charming","engaging","enticing",
"glamorous","gorgeous","handsome","inviting","tempting","adorable",
"agreeable","enchanting","enthralling","hunky","pretty","seductive",
"provocative","tantalizing","teasing","stunning",),
("fit",)),
(("any",),
("whatever",)),
(("unattractive","ugly","horrible","nasty","unpleasant","hideous","gross",
"unsightly","horrid","unseemly","grisly","awful","foul","repelling",
"repulsive","repugnant","revolting","uninviting","monstrous",),
("mingin'","rancid","'angin","rank","manky")),
(("fast","quick","swift","brief",),
("rapid",)),
(("pound",),
("quid","squid",)),
(("man",),
("bloke", "fella",)),
(("men",),
("blokes", "fellas",)),
(("mate", "friend"),
("pal","mate",)),
(("hello","greetings","welcome","hi","howdy",),
("arrite","how do","hiya",)),
(("bye","goodbye","farewell",),
("ta-ra",)),
(("kiss",),
("snog",)),
(("sandwich",),
("butty","barm")),
(("sandwiches",),
("butties","barms")),
(("eat","consume","absorb","digest","food","sustinance",),
("scran",)),
(("lunch",),
("dinner",)),
(("dinner",),
("tea",)),
(("you",),
("youse",)),
(("idiot","moron","fool","buffoon","clown","jerk","nerd","nitwit","stooge",
"sucker","twit","clod","cretin","dolt","dope","dunce","oaf","twerp",
"imbecile","ignoramus","loon","ninny","numskull",),
("scrote","muppet","knobber","spanner","gonk","cabbage")),
(("police","law","cop","cops","policeman","policewoman","constable","officer",
"detective","bobby","copper",),
("dibble",)),
(("house","dwelling","appartment","building","home","mansion","residence",
"shack","abode","castle","cave","coop","flat","habitatio
|
n","pad",
"residency","place",),
("gaff",)),
(("was",),
("were",)),
(("were",),
("was",)),
(("yes","ok",),
("aye",)),
(("are",),
("iz",)),
(("no",),
("nah",)),
(("haven't",),
("a
|
in't",)),
(("right",),
("reet",)),
(("the",),
("t'",)),
(("?",),
("eh?","or wot?","yeah?")),
]
# Alterations to the sound of a word based on its consonant and vowel sounds
phoneme_rules = [
((["START","HH"],),
["START","'"]),
((["ER","END"],),
["AA","'","END"]),
((["T","END"],),
["'","END"],),
((["AE","R"],),
["AE"]),
((["AA","R"],),
["AE","R"]),
((["AH1"],),
["UW"],),
((["AO","R","END"],["UH","R","END"],),
["AH","R"]),
((["AO"],),
["AA"],),
((["NG","END"],),
["N","'","END"]),
((["T","UW","END"],),
["T","AH","END"]),
((["START","DH"],),
["START","D"]),
((["TH","END"],),
["F","END"],),
((["DH","END"],),
["V","END"],),
((["START","TH"],),
["START","F"]),
((["VOWEL","T","VOWEL"],),
[0,"R",2]),
]
if __name__ == "__main__":
import re,random,sys
text = sys.argv[1]
for patts, repls in word_rules:
for patt in patts:
text = re.sub(r'\b'+patt+r'\b',lambda m: random.choice(repls),text)
print(text)
|
goddardl/gaffer
|
python/GafferTest/ApplicationTest.py
|
Python
|
bsd-3-clause
| 2,198
| 0.010919
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import Gaffer
import GafferTest
class ApplicationTest( GafferTest.TestCase ) :
def testTaskSchedulerInitDoesntSuppressExceptions( self ) :
def f() :
import Gaffer._Gaffer as _Gaffer
with _Gaffer._tbb_task_scheduler_init( _Gaffer._tbb_task_scheduler_init.automatic ) :
raise Exception( "Woops!")
self.assertRaises( Exception, f )
if __name__ == "__main__":
unittest.main()
|
rajeshgupta14/pathend
|
consultantform/apps.py
|
Python
|
apache-2.0
| 168
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class ConsultantformConfig(AppConfig):
|
name = 'consultantform'
|
lubokkanev/cloud-system
|
core/main/runtimes/runtime_validation_error.py
|
Python
|
gpl-2.0
| 50
| 0
|
class RuntimeValidationError(Exception):
pass
|
jawilson/home-assistant
|
homeassistant/components/alarm_control_panel/const.py
|
Python
|
apache-2.0
| 642
| 0
|
"""Provides the constants needed for component."""
from typing import Final
SUPPORT_ALARM_ARM_HOME: Final = 1
SUPPORT_ALARM_ARM_AWAY: Final = 2
SUPPORT_ALARM_ARM_NIGHT: Final = 4
SUPPORT_ALARM_TRIGGER: Final = 8
SUPPORT_ALARM_ARM_CUSTOM_BYPASS: Final = 16
SUPPORT_ALARM_ARM_VACATION: Final = 32
CONDITION_TRIGGERED: Final = "is_triggered"
CONDITION_DISARMED: Final = "is_disarmed"
CONDITION_ARMED_HOME: Final = "is_armed_home"
CONDITION_ARMED_AWAY: Final = "is_armed_away"
CONDITION_ARMED_NIGHT: Final = "is_armed_night"
CONDITION_ARMED_VACATION: Final = "is_armed_vacation"
CONDITION_ARMED_CUSTOM_BYPASS: Final = "is_armed_custom_bypass"
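# --- Illustrative sketch (not part of the original module) ---
# The SUPPORT_* constants are bit flags, so an entity's supported_features
# mask is combined and tested with bitwise operators. The example mask below
# is an assumption for demonstration only.
_example_features = SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
assert _example_features & SUPPORT_ALARM_ARM_AWAY      # away arming supported
assert not _example_features & SUPPORT_ALARM_TRIGGER   # trigger not supported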
|
crdoconnor/cookiecutter-django
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/views.py
|
Python
|
bsd-3-clause
| 1,459
| 0
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.core.urlresolvers import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
fields = ['name', ]
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
|
vivekanand1101/fedmsg
|
fedmsg/tests/__init__.py
|
Python
|
lgpl-2.1
| 822
| 0
|
# This file is part of fedmsg.
# Copyright (C) 2012 Red Hat, Inc.
#
# fedmsg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <rbean@redhat.com>
#
|
mikequentel/sarrus_cramer
|
sclinear.py
|
Python
|
mit
| 4,841
| 0.026854
|
#!/usr/bin/python
# System of three linear equations
# ax + by + cz = j
# dx + ey + fz = k
# gx + hy + iz = l
# System of three linear equations in matrix notation
# - - - - - -
# | a b c | | x | | j |
# | | | | | |
# | d e f | | y | = | k |
# | | | | | |
# | g h i | | z | | l |
# - - - - - -
# Matrix of Coefficients
# a b c
# d e f
# g h i
# Matrix of Variables
# x
# y
# z
# Matrix of Resulting Values
# j
# k
# l
# Rule of Sarrus
# a b c|a b
# d e f|d e
# g h i|g h
# Rule of Sarrus Index Values
# 0 1 2|0 1
# 3 4 5|3 4
# 6 7 8|6 7
# Determinant
# det(M) = aei + bfg + cdh - gec - hfa - idb
# Cramer's Rule
# | j b c | | a j c | | a b j |
# | k e f | | d k f | | d e k |
# | l h i | | g l i | | g h l |
# ---------, ---------, ---------
# | a b c | | a b c | | a b c |
# | d e f | | d e f | | d e f |
# | g h i | | g h i | | g h i |
import sys
def main():
inputs_dict = {'a':int(raw_input("a:")), 'b':int(raw_input("b:")), 'c':int(raw_input("c:")), 'j':int(raw_input("j:")),
'd':int(raw_input("d:")), 'e':int(raw_input("e:")), 'f':int(raw_input("f:")), 'k':int(raw_input("k:")),
'g':int(raw_input("g:")), 'h':int(raw_input("h:")), 'i':int(raw_input("i:")), 'l':int(raw_input("l:"))}
coeffs_matrix = {'a':inputs_dict['a'], 'b':inputs_dict['b'], 'c':inputs_dict['c'],
'd':inputs_dict['d'], 'e':inputs_dict['e'], 'f':inputs_dict['f'],
'g':inputs_dict['g'], 'h':inputs_dict['h'], 'i':inputs_dict['i']}
x_numerator_matrix = {'j':inputs_dict['j'], 'b':inputs_dict['b'], 'c':inputs_dict['c'],
'k':inputs_dict['k'], 'e':inputs_dict['e'], 'f':inputs_dict['f'],
'l':inputs_dict['l'], 'h':inputs_dict['h'], 'i':inputs_dict['i']}
y_numerator_matrix = {'a':inputs_dict['a'], 'j':inputs_dict['j'], 'c':inputs_dict['c'],
'd':inputs_dict['d'], 'k':inputs_dict['k'], 'f':inputs_dict['f'],
'g':inputs_dict['g'], 'l':inputs_dict['l'], 'i':inputs_dict['i']}
z_numerator_matrix = {'a':inputs_dict['a'], 'b':inputs_dict['b'], 'j':inputs_dict['j'],
'd':inputs_dict['d'], 'e':inputs_dict['e'], 'k':inputs_dict['k'],
'g':inputs_dict['g'], 'h':inputs_dict['h'], 'l':inputs_dict['l']}
# Rule of Sarrus for det_coeffs_matrix
# a b c|a b
# d e f|d e
# g h i|g h
#
det_coeffs_matrix = (coeffs_matrix['a'] * coeffs_matrix['e'] * coeffs_matrix['i'] +
coeffs_matrix['b'] * coeffs_matrix['f'] * coeffs_matrix['g'] +
coeffs_matrix['c'] * coeffs_matrix['d'] * coeffs_matrix['h'] -
coeffs_matrix['g'] * coeffs_matrix['e'] * coeffs_matrix['c'] -
coeffs_matrix['h'] * coeffs_matrix['f'] * coeffs_matrix['a'] -
coeffs_matrix['i'] * coeffs_matrix['d'] * coeffs_matrix['b'])
# Rule of Sarrus for det_x_numerator_matrix
# j b c|j b
# k e f|k e
# l h i|l h
#
det_x_numerator_matrix = (x_numerator_matrix['j'] * x_numerator_matrix['e'] * x_numerator_matrix['i'] +
x_numerator_matrix['b'] * x_numerator_matrix['f'] * x_numerator_matrix['l'] +
x_numerator_matrix['c'] * x_numerator_matrix['k'] * x_numerator_matrix['h'] -
x_numerator_matrix['l'] * x_numerator_matrix['e'] * x_numerator_matrix['c'] -
x_numerator_matrix['h'] * x_numerator_matrix['f'] * x_numerator_matrix['j'] -
x_numerator_matrix['i'] * x_numerator_matrix['k'] * x_numerator_matrix['b'] )
# Rule of Sarrus for det_y_numerator_matrix
# a j c|a j
# d k f|d k
# g l i|g l
#
det_y_numerator_matrix = (y_numerator_matrix['a'] * y_numerator_matrix['k'] * y_numerator_matrix['i'] +
y_numerator_matrix['j'] * y_numerator_matrix['f'] * y_numerator_matrix['g'] +
y_numerator_matrix['c'] * y_numerator_matrix['d'] * y_numerator_matrix['l'] -
y_numerator_matrix['g'] * y_numerator_matrix['k'] * y_numerator_matrix['c'] -
y_numerator_matrix['l'] * y_numerator_matrix['f'] * y_numerator_matrix['a'] -
y_numerator_matrix['i'] * y_numerator_matrix['d'] * y_numerator_matrix['j'])
# Rule of Sarrus for det_z_numerator_matrix
# a b j|a b
# d e k|d e
# g h l|g h
#
det_z_numerator_matrix = (z_numerator_matrix['a'] * z_numerator_matrix['e'] * z_numerator_matrix['l'] +
z_numerator_matrix['b'] * z_numerator_matrix['k'] * z_numerator_matrix['g'] +
z_numerator_matrix['j'] * z_numerator_matrix['d'] * z_numerator_matrix['h'] -
z_numerator_matrix['g'] * z_numerator_matrix['e'] * z_numerator_matrix['j'] -
z_numerator_matrix['h'] * z_numerator_matrix['k'] * z_numerator_matrix['a'] -
z_numerator_matrix['l'] * z_numerator_matrix['d'] * z_numerator_matrix['b'])
x = det_x_numerator_matrix/det_coeffs_matrix
y = det_y_numerator_matrix/det_coeffs_matrix
z = det_z_numerator_matrix/det_coeffs_matrix
print
print "results: "
print "x = " + str(x)
print "y = " + str(y)
print "z = " + str(z)
# Specifies name of main function.
if __name__ == "__main__":
sys.exit(main())
|
morissette/devopsdays-hackathon-2016
|
venv/lib/python2.7/site-packages/aniso8601/resolution.py
|
Python
|
gpl-3.0
| 359
| 0.005571
|
# -*- coding: utf-8 -*-
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from aniso8601 import compat
class DateResolution(object):
Year, Month, Week, Weekday, Day, Ordinal = list(compat.range(6))
class TimeResolution(object):
Seconds, Minutes, Hours = list(compat.range(3))
|
alexandonian/ptutils
|
ptutils/datastore/query.py
|
Python
|
mit
| 17,240
| 0.001624
|
from key import Key
def _object_getattr(obj, field):
"""Attribute getter for the objects to operate on.
This function can be overridden in classes or instances of Query, Filter, and
Order. Thus, a custom function to extract values to attributes can be
specified, and the system can remain agnostic to the client's data model,
without losing query power.
For example, the default implementation works with attributes and items::
def _object_getattr(obj, field):
# check whether this key is an attribute
if hasattr(obj, field):
value = getattr(obj, field)
# if not, perhaps it is an item (raw dicts, etc)
elif field in obj:
value = obj[field]
# return whatever we've got.
return value
Or consider a more complex, application-specific structure::
def _object_getattr(version, field):
if field in ['key', 'committed', 'created', 'hash']:
return getattr(version, field)
else:
return version.attributes[field]['value']
"""
# TODO: consider changing this to raise an exception if no value is found.
value = None
# check whether this key is an attribute
if hasattr(obj, field):
value = getattr(obj, field)
# if not, perhaps it is an item (raw dicts, etc)
elif field in obj:
value = obj[field]
# return whatever we've got.
return value
def limit_gen(limit, iterable):
"""A generator that applies a count `limit`."""
limit = int(limit)
assert limit >= 0, 'negative limit'
for item in iterable:
if limit <= 0:
break
yield item
limit -= 1
def offset_gen(offset, iterable, skip_signal=None):
"""A generator that applies an `offset`, skipping `offset` elements from
`iterable`. If skip_signal is a callable, it will be called with every
skipped element.
"""
offset = int(offset)
assert offset >= 0, 'negative offset'
for item in iterable:
if offset > 0:
offset -= 1
if callable(skip_signal):
skip_signal(item)
else:
yield item
def chain_gen(iterables):
"""A generator that chains `iterables`."""
for iterable in iterables:
for item in iterable:
yield item
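# A small composition sketch (not part of the original module): these helpers are
# meant to be chained lazily, e.g.
#
#   page = list(limit_gen(10, offset_gen(20, iter(results))))
#
# which yields items 20-29 of the hypothetical `results` iterable without
# materialising the rest.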
class Filter(object):
"""Represents a Filter for a specific field and its value.
Filters are used on queries to narrow down the set of matching objects.
Args:
field: the attribute name (string) on which to apply the filter.
op: the conditional operator to apply (one of
['<', '<=', '=', '!=', '>=', '>']).
value: the attribute value to compare against.
Examples::
Filter('name', '=', 'John Cleese')
Filter('age', '>=', 18)
"""
conditional_operators = ['<', '<=', '=', '!=', '>=', '>']
"""Conditional operators that Filters support."""
_conditional_cmp = {
"<": lambda a, b: a < b,
"<=": lambda a, b: a <= b,
"=": lambda a, b: a == b,
"!=": lambda a, b: a != b,
">=": lambda a, b: a >= b,
">": lambda a, b: a > b
}
object_getattr = staticmethod(_object_getattr)
"""Object attribute getter. Can be overridden to match client data model.
See :py:meth:`datastore.query._object_getattr`.
"""
def __init__(self, field, op, value):
if op not in self.conditional_operators:
raise ValueError(
'"%s" is not a valid filter Conditional Operator' % op)
self.field = field
self.op = op
self.value = value
def __call__(self, obj):
"""Returns whether this object passes this filter.
This method aggressively tries to find the appropriate value.
"""
value = self.object_getattr(obj, self.field)
# TODO: which way should the direction go here? it may make more sense to
# convert the passed-in value instead. Or try both? Or not at all?
if not isinstance(value, self.value.__class__) and not self.value is None and not value is None:
value = self.value.__class__(value)
return self.valuePasses(value)
def valuePasses(self, value):
"""Returns whether this value passes this filter"""
return self._conditional_cmp[self.op](value, self.value)
def __str__(self):
return '%s %s %s' % (self.field, self.op, self.value)
def __repr__(self):
return "Filter('%s', '%s', %s)" % (self.field, self.op, repr(self.value))
def __eq__(self, o):
return self.field == o.field and self.op == o.op and self.value == o.value
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(repr(self))
def generator(self, iterable):
"""Generator function that iteratively filters given `items`."""
for item in iterable:
if self(item):
yield item
@classmethod
def filter(cls, filters, iterable):
"""Returns the elements in `iterable` that pass given `filters`"""
if isinstance(filters, Filter):
filters = [filters]
for filter in filters:
iterable = filter.generator(iterable)
return iterable
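# A short usage sketch (not part of the original module) showing how Filter
# composes with plain dicts; the `people` records are made-up sample data:
#
#   people = [{'name': 'John', 'age': 42}, {'name': 'Ben', 'age': 17}]
#   list(Filter.filter(Filter('age', '>=', 18), people))
#   # -> [{'name': 'John', 'age': 42}]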
class Order(object):
"""Represents an Order upon a specific field, and a direction.
Orders are used on queries to define how they operate on objects
Args:
order: an order in string form. This follows the format: [+-]name
where + is ascending, - is descending, and name is the name
of the field to order by.
Note: if no ordering operator is specified, + is default.
Examples::
Order('+name') # ascending order by name
Order('-age') # descending order by age
Order('score') # ascending order by score
"""
order_operators = ['-', '+']
"""Ordering operators: + is ascending, - is descending."""
object_getattr = staticmethod(_object_getattr)
"""Object attribute getter. Can be overridden to match client data model.
See :py:meth:`datastore.query._object_getattr`.
"""
def __init__(self, order):
self.op = '+'
try:
if order[0] in self.order_operators:
self.op = order[0]
order = order[1:]
except IndexError:
raise ValueError('Order input must be at least two characters long.')
self.field = order
if self.op not in self.order_operators:
raise ValueError('"%s" is not a valid Order Operator.' % self.op)
def __str__(self):
return '%s%s' % (self.op, self.field)
def __repr__(self):
return "Order('%s%s')" % (self.op, self.field)
def __eq__(self, other):
return self.field == other.field and self.op == other.op
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(repr(self))
def isAscending(self):
return self.op == '+'
def isDescending(self):
return not self.isAscending()
def keyfn(self, obj):
"""A key function to be used in pythonic sort operations."""
return self.object_getattr(obj, self.field)
@classmethod
def multipleOrderComparison(cls, orders):
"""Returns a function that will compare two items according to `orders`"""
comparers = [(o.keyfn, 1 if o.isAscending() else -1) for o in orders]
def cmpfn(a, b):
for keyfn, ascOrDesc in comparers:
comparison = cmp(keyfn(a), keyfn(b)) * ascOrDesc
if comparison != 0:
return comparison
return 0
return cmpfn
@classmethod
def sorted(cls, items, orders):
"""Returns the elements in `items` sorted according to `orders`"""
return sorted(items, cmp=cls.multipleOrderComparison(orders))
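# Likewise, a hedged sketch of multi-field ordering with the class above (sample
# data invented; relies on the Python 2 `cmp` keyword used by Order.sorted):
#
#   rows = [{'age': 30, 'name': 'Zoe'}, {'age': 30, 'name': 'Al'}, {'age': 20, 'name': 'Bo'}]
#   Order.sorted(rows, [Order('+age'), Order('-name')])
#   # -> Bo (20) first, then the 30-year-olds with names descending: Zoe, then Al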
class Query(object):
"""A Query describes a set of objects.
Queries are used to retrieve ob
|
EternityForest/KaithemAutomation
|
kaithem/src/thirdparty/iot_devices/devices/NVRPlugin/__init__.py
|
Python
|
gpl-3.0
| 46,929
| 0.006393
|
from multiprocessing import RLock
from sys import path
from mako.lookup import TemplateLookup
from scullery import iceflow,workers
import os
import time
import threading
import logging
import weakref
import traceback
import shutil
import re
import io
import random
logger = logging.Logger("plugins.nvr")
templateGetter = TemplateLookup(os.path.dirname(__file__))
from datetime import date, datetime
from datetime import timezone
defaultSubclassCode = """
class CustomDeviceType(DeviceType):
def onIncomingCall(self,number):
# Uncomment to accept all incoming calls
# self.accept()
pass
"""
path = os.path.abspath(__file__)
path = os.path.dirname(path)
objectDetector = [None,None]
# Only one of these should happen at a time, because we need to limit how much CPU it can burn.
object_detection_lock = threading.RLock()
import numpy
def get_output_layers(net):
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return output_layers
def toImgOpenCV(imgPIL): # Convert imgPIL to imgOpenCV
i = numpy.array(imgPIL) # After mapping from PIL to numpy : [R,G,B,A]
# numpy Image Channel system: [B,G,R,A]
red = i[:,:,0].copy(); i[:,:,0] = i[:,:,2].copy(); i[:,:,2] = red;
return i;
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
import cv2
import numpy as np
iw, ih = image.shape[0:2][::-1]
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw,nh), interpolation=cv2.INTER_CUBIC)
new_image = np.zeros((size[1], size[0], 3), np.uint8)
new_image.fill(128)
dx = (w-nw)//2
dy = (h-nh)//2
new_image[dy:dy+nh, dx:dx+nw,:] = image
return new_image
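# Worked example of the letterboxing math above (illustrative sizes only): a
# 640x480 frame resized into a 320x320 tensor gives scale = min(320/640, 320/480) = 0.5,
# so nw, nh = 320, 240 and the resized frame is pasted at dx, dy = 0, 40 on the
# grey (128-filled) canvas, preserving the aspect ratio.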
# We get the model from here and export it as tflite without any extra quantization:
# https://github.com/google/automl/blob/master/efficientdet/README.md
# Label map: https://github.com/joonb14/TFLiteDetection
def recognize_tflite(i,r):
import tflite_runtime.interpreter as tflite
import cv2
import PIL.Image
import PIL.ImageOps
import PIL.ImageFilter
invoke_time = time.time()
i = PIL.Image.open(io.BytesIO(i))
pilimg=i
i=i.filter(PIL.ImageFilter.GaussianBlur(1))
i=PIL.ImageOps.autocontrast(i, cutoff=(0.1, 0.25))
if not objectDetector[0]:
objectDetector[0]=tflite.Interpreter(num_threads=4, model_path=os.path.join(path,"efficientdet/efficientdet-lite0-f32.tflite"))
objectDetector[0].allocate_tensors()
objectDetector[1]=numpy.loadtxt(os.path.join(path,"labelmap.txt"),dtype = str, delimiter="/n")
interpreter = objectDetector[0]
labels = objectDetector[1]
original_image = toImgOpenCV(i)
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
tensor_w = input_details[0]['shape'][1]
tensor_h= input_details[0]['shape'][2]
image = letterbox_image(original_image,(tensor_w,tensor_h))
image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
input_image = numpy.expand_dims(image,0)
interpreter.set_tensor(input_details[0]['index'], input_image)
original_image_h=original_image.shape[0]
original_image_w=original_image.shape[1]
interpreter.invoke()
t = time.time()-invoke_time
r.lastInferenceTime = t
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
o = interpreter.get_tensor(output_details[0]['index'])[0]
probability = numpy.array([i[5]for i in o])
# Our dynamically chosen confidence threshold meant to pick up things in dim light
p = float(max(min(0.10, float(probability.max())*0.8),0.01))
retval = []
# All this is reverse engineered from looking at the output.
for i in o:
if float(i[5])<p:
continue
if int(i[6])<1:
continue
x,y,x2,y2 = (float((i[2]/tensor_w)*original_image_w),float((i[1]/tensor_h)*original_image_h), float((i[4]/tensor_w)*original_image_w),
float((i[3]/tensor_h)*original_image_h))
x = min(x,x2)
w = max(x,x2)-x
y=min(y,y2)
h = max(y,y2)-y
confidence = float(i[5])
label = labels[int(i[6])-1]
v= {
'x':float(x), 'y':float(y), "w":float(w), 'h': float(h),
'class': label,
'confidence': confidence,
}
if x2 >(original_image_w-20) and confidence<0.2:
continue
if y2>(original_image_h-10) and confidence<0.15:
continue
# For some reason I am consistently getting false positive people detections with y values in the -6 to 15 range
# Could just be my input data. But, things are usually not that high up unless they are big and big means a clear view which means
# you probably would have a higher confidence
if (x> 1 and y>24) or confidence>0.33 :
# If something takes up a very large amount of the frame, we probably have a clear view of it. If we are still not confident the ANN
# Is probably making stuff up. Very large things are going to be uncommon since most cameras like this aren't doing extreme close ups
# and the ones that are probably have good lighting
if ( (w< original_image_w/4) or (confidence >0.18)) and ((h< (original_image_h/3)) or (confidence > 0.15)):
if (w< (original_image_w/1.5) or (confidence >0.32)) and (h< (original_image_h/1.5) or (confidence > 0.32)):
# If the width of this object is such that more than 2/3d is off of the frame, we had better be very confident
# because that seems to be a common pattern of false positives.
if ( ((original_image_w-x) > w/3) or confidence > 0.4 ):
retval.append(v)
else:
pass#print(v, "reject large offscreen")
else:
pass#print(v, "reject to large for confidence 2")
else:
pass#print(v, "reject too large for confidence")
else:
pass#print(v,"reject low xy")
return {'objects':retval,'x-inferencetime':t}
automated_record_uuid = '76241b9c-5b08-4828-9358-37c6a25dd823'
from zeroconf import ServiceBrowser, ServiceStateChange
# very much not thread safe, doesn't matter, it's only for one UI page
httpservices = []
httplock = threading.Lock()
import socket
def on_service_state_change(zeroconf, service_type, name, state_change):
with httplock:
info = zeroconf.get_service_info(service_type, name)
if not info:
return
if state_change is ServiceStateChange.Added:
httpservices.append((tuple(sorted(
[socket.inet_ntoa(i) for i in info.addresses])), service_type, name, info.port))
if len(httpservices) > 2048:
httpservices.pop(0)
else:
try:
httpservices.remove((tuple(sorted(
[socket.inet_ntoa(i) for i in info.addresses])), service_type, name, info.port))
except:
logging.exception("???")
# Not common enough to waste CPU all the time on
#browser = ServiceBrowser(util.zeroconf, "_https._tcp.local.", handlers=[ on_service_state_change])
try:
from src.util import zeroconf as zcinstance
except:
import zeroconf
zcinstance = zeroconf.Zeroconf()
browser2 = ServiceBrowser(zcinstance, "_http._tcp.local.", handlers=[
on_service_state_change])
mediaFolders = weakref.WeakValueDictionary()
class Pipeline(iceflow.GstreamerPipeline):
def onMotionBegin(self, *a, **k):
self.mcb(True)
def onMotionEnd(self, *a, **k):
self.mcb(False)
def onPresenceValue(self, v):
self.presenceval(v)
def onVideoAnalyze(self, *a, **k):
self.acb(*a)
def onBarcode(self, *a, **k)
|
xlcteam/pynxc
|
pynxc/tests/in/_issue_5.py
|
Python
|
bsd-3-clause
| 54
| 0.018519
|
def main():
    a1="X"
    TextOut(10, LCD_LINE1, a1)
|
NervanaSystems/coach
|
rl_coach/agents/n_step_q_agent.py
|
Python
|
apache-2.0
| 6,933
| 0.00375
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Union
import numpy as np
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent
from rl_coach.agents.value_optimization_agent import ValueOptimizationAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import QHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, AgentParameters, NetworkParameters
from rl_coach.core_types import EnvironmentSteps
from rl_coach.exploration_policies.e_greedy import EGreedyParameters
from rl_coach.memories.episodic.single_episode_buffer import SingleEpisodeBufferParameters
from rl_coach.utils import last_sample
class NStepQNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [QHeadParameters()]
self.optimizer_type = 'Adam'
self.async_training = True
self.shared_optimizer = True
self.create_target_network = True
class NStepQAlgorithmParameters(AlgorithmParameters):
"""
:param num_steps_between_copying_online_weights_to_target: (StepMethod)
The number of steps between copying the online network weights to the target network weights.
:param apply_gradients_every_x_episodes: (int)
The number of episodes between applying the accumulated gradients to the network. After every
num_steps_between_gradient_updates steps, the agent will calculate the gradients for the collected data,
it will then accumulate them in internal accumulators, and will only apply them to the network once in every
apply_gradients_every_x_episodes episodes.
:param num_steps_between_gradient_updates: (int)
The number of steps between calculating gradients for the collected data. In the A3C paper, this parameter is
called t_max. Since this algorithm is on-policy, only the steps collected between each two gradient calculations
are used in the batch.
:param targets_horizon: (str)
Should be either 'N-Step' or '1-Step', and defines the length for which to bootstrap the network values over.
Essentially, 1-Step follows the regular 1 step bootstrapping Q learning update. For more information,
please refer to the original paper (https://arxiv.org/abs/1602.01783)
"""
def __init__(self):
super().__init__()
self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(10000)
self.apply_gradients_every_x_episodes = 1
self.num_steps_between_gradient_updates = 5 # this is called t_max in all the papers
self.targets_horizon = 'N-Step'
class NStepQAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=NStepQAlgorithmParameters(),
exploration=EGreedyParameters(),
memory=SingleEpisodeBufferParameters(),
networks={"main": NStepQNetworkParameters()})
@property
def path(self):
return 'rl_coach.agents.n_step_q_agent:NStepQAgent'
# N Step Q Learning Agent - https://arxiv.org/abs/1602.01783
class NStepQAgent(ValueOptimizationAgent, PolicyOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.last_gradient_update_step_idx = 0
self.q_values = self.register_signal('Q Values')
self.value_loss = self.register_signal('Value Loss')
@property
def is_on_policy(self) -> bool:
return False
def learn_from_batch(self, batch):
# batch contains a list of episodes to learn from
network_keys = self.ap.network_wrappers['main'].input_embedders_parameters.keys()
# get the values for the current states
state_value_head_targets = self.networks['main'].online_network.predict(batch.states(network_keys))
# the targets for the state value estimator
if self.ap.algorithm.targets_horizon == '1-Step':
# 1-Step Q learning
q_st_plus_1 = self.networks['main'].target_network.predict(batch.next_states(network_keys))
for i in reversed(range(batch.size)):
state_value_head_targets[i][batch.actions()[i]] = \
batch.rewards()[i] \
+ (1.0 - batch.game_overs()[i]) * self.ap.algorithm.discount * np.max(q_st_plus_1[i], 0)
elif self.ap.algorithm.targets_horizon == 'N-Step':
# N-Step Q learning
if batch.game_overs()[-1]:
R = 0
else:
R = np.max(self.networks['main'].target_network.predict(last_sample(batch.next_states(network_keys))))
for i in reversed(range(batch.size)):
R = batch.rewards()[i] + self.ap.algorithm.discount * R
state_value_head_targets[i][batch.actions()[i]] = R
else:
assert False, 'The available values for targets_horizon are: 1-Step, N-Step'
# add Q value samples for logging
self.q_values.add_sample(state_value_head_targets)
# train
result = self.networks['main'].online_network.accumulate_gradients(batch.states(network_keys), [state_value_head_targets])
# logging
total_loss, losses, unclipped_grads = result[:3]
self.value_loss.add_sample(losses[0])
return total_loss, losses, unclipped_grads
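# Worked example of the N-Step recursion above (illustrative numbers, not from the
# paper): with discount 0.9, rewards [1, 0, 2] on a non-terminal slice and a
# bootstrap value max_a Q_target(s_T, a) = 5, the reversed loop produces
# R_2 = 2 + 0.9*5 = 6.5, R_1 = 0 + 0.9*6.5 = 5.85, R_0 = 1 + 0.9*5.85 = 6.265,
# which become the Q-head targets for the corresponding (state, action) pairs.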
def train(self):
# update the target network of every network that has a target network
if any([network.has_target for network in self.networks.values()]) \
and self._should_update_online_weights_to_target():
for network in self.networks.values():
network.update_target_network(self.ap.algorithm.rate_for_copying_weights_to_target)
self.agent_logger.create_signal_value('Update Target Network', 1)
else:
self.agent_logger.create_signal_value('Update Target Network', 0, overwrite=False)
return PolicyOptimizationAgent.train(self)
|
SINGROUP/pycp2k
|
pycp2k/classes/_each438.py
|
Python
|
lgpl-3.0
| 1,114
| 0.001795
|
from pycp2k.inputsection import InputSection
class _each438(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Just_energy = None
self.Powell_opt = None
self.Qs_scf = None
self.Xas_scf = None
self.Md = None
self.Pint = None
self.Metadynamics = None
self.Geo_opt = None
self.Rot_opt = None
self.Cell_opt = None
self.Band = None
self.Ep_lin_solver = None
self.Spline_find_coeffs = None
self.Replica_eval = None
self.Bsse = None
self.Shell_opt = None
self.Tddft_scf = None
self._name = "EACH"
self._keywords = {'Bsse': 'BSSE', 'Cell_opt': 'CELL_OPT', 'Just_energy': 'JUST_ENERGY', 'Band': 'BAND', 'Xas_scf': 'XAS_SCF', 'Rot_opt': 'ROT_OPT', 'Replica_eval': 'REPLICA_EVAL', 'Tddft_scf': 'TDDFT_SCF', 'Shell_opt': 'SHELL_OPT', 'Md': 'MD', 'Pint': 'PINT', 'Metadynamics': 'METADYNAMICS', 'Geo_opt': 'GEO_OPT', 'Spline_find_coeffs': 'SPLINE_FIND_COEFFS', 'Powell_opt': 'POWELL_OPT', 'Qs_scf': 'QS_SCF', 'Ep_lin_solver': 'EP_LIN_SOLVER'}
|
jstar88/wotmods
|
files/uncompyled/tests/overriding/b.py
|
Python
|
gpl-2.0
| 341
| 0.01173
|
# 2015.09.05 18:13:46 ora legale Europa occidentale
# Embedded file name: b.py
from a import x
def x():
print 'b'
x()
from a import x
x()
# okay decompyling C:\Users\nicola user\wotmods\files\originals\tests\overriding\b.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.09.05 18:13:46 ora legale Europa occidentale
|
tchellomello/home-assistant
|
homeassistant/components/knx/schema.py
|
Python
|
apache-2.0
| 14,928
| 0.000335
|
"""Voluptuous schemas for the KNX integration."""
import voluptuous as vol
from xknx.devices.climate import SetpointShiftMode
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_TYPE,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
OPERATION_MODES,
PRESET_MODES,
ColorTempModes,
)
class ConnectionSchema:
"""Voluptuous schema for KNX connection."""
CONF_KNX_LOCAL_IP = "local_ip"
TUNNELING_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_KNX_LOCAL_IP): cv.string,
vol.Optional(CONF_PORT): cv.port,
}
)
ROUTING_SCHEMA = vol.Schema({vol.Optional(CONF_KNX_LOCAL_IP): cv.string})
class CoverSchema:
"""Voluptuous schema for KNX covers."""
CONF_MOVE_LONG_ADDRESS = "move_long_address"
CONF_MOVE_SHORT_ADDRESS = "move_short_address"
CONF_STOP_ADDRESS = "stop_address"
CONF_POSITION_ADDRESS = "position_address"
CONF_POSITION_STATE_ADDRESS = "position_state_address"
CONF_ANGLE_ADDRESS = "angle_address"
CONF_ANGLE_STATE_ADDRESS = "angle_state_address"
CONF_TRAVELLING_TIME_DOWN = "travelling_time_down"
CONF_TRAVELLING_TIME_UP = "travelling_time_up"
CONF_INVERT_POSITION = "invert_position"
CONF_INVERT_ANGLE = "invert_angle"
DEFAULT_TRAVEL_TIME = 25
DEFAULT_NAME = "KNX Cover"
SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MOVE_LONG_ADDRESS): cv.string,
vol.Optional(CONF_MOVE_SHORT_ADDRESS): cv.string,
vol.Optional(CONF_STOP_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_ADDRESS): cv.string,
vol.Optional(CONF_POSITION_STATE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_ADDRESS): cv.string,
vol.Optional(CONF_ANGLE_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_TRAVELLING_TIME_DOWN, default=DEFAULT_TRAVEL_TIME
): cv.positive_int,
vol.Optional(
CONF_TRAVELLING_TIME_UP, default=DEFAULT_TRAVEL_TIME
): cv.positive_int,
vol.Optional(CONF_INVERT_POSITION, default=False): cv.boolean,
vol.Optional(CONF_INVERT_ANGLE, default=False): cv.boolean,
}
)
class BinarySensorSchema:
"""Voluptuous schema for KNX binary sensors."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_SYNC_STATE = CONF_SYNC_STATE
CONF_IGNORE_INTERNAL_STATE = "ignore_internal_state"
CONF_AUTOMATION = "automation"
CONF_HOOK = "hook"
CONF_DEFAULT_HOOK = "on"
CONF_COUNTER = "counter"
CONF_DEFAULT_COUNTER = 1
CONF_ACTION = "action"
CONF_RESET_AFTER = "reset_after"
DEFAULT_NAME = "KNX Binary Sensor"
AUTOMATION_SCHEMA = vol.Schema(
{
vol.Optional(CONF_HOOK, default=CONF_DEFAULT_HOOK): cv.string,
vol.Optional(CONF_COUNTER, default=CONF_DEFAULT_COUNTER): cv.port,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
}
)
AUTOMATIONS_SCHEMA = vol.All(cv.ensure_list, [AUTOMATION_SCHEMA])
SCHEMA = vol.All(
cv.deprecated("significant_bit"),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SYNC_STATE, default=True): vol.Any(
vol.All(vol.Coerce(int), vol.Range(min=2, max=1440)),
cv.boolean,
cv.string,
),
vol.Optional(CONF_IGNORE_INTERNAL_STATE, default=False): cv.boolean,
vol.Required(CONF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Optional(CONF_RESET_AFTER): cv.positive_int,
vol.Optional(CONF_AUTOMATION): AUTOMATIONS_SCHEMA,
}
),
)
class LightSchema:
"""Voluptuous schema for KNX lights."""
CONF_STATE_ADDRESS = CONF_STATE_ADDRESS
CONF_BRIGHTNESS_ADDRESS = "brightness_address"
CONF_BRIGHTNESS_STATE_ADDRESS = "brightness_state_address"
CONF_COLOR_ADDRESS = "color_address"
CONF_COLOR_STATE_ADDRESS = "color_state_address"
CONF_COLOR_TEMP_ADDRESS = "color_temperature_address"
CONF_COLOR_TEMP_STATE_ADDRESS = "color_temperature_state_address"
CONF_COLOR_TEMP_MODE = "color_temperature_mode"
CONF_RGBW_ADDRESS = "rgbw_address"
CONF_RGBW_STATE_ADDRESS = "rgbw_state_address"
CONF_MIN_KELVIN = "min_kelvin"
CONF_MAX_KELVIN = "max_kelvin"
DEFAULT_NAME = "KNX Light"
DEFAULT_COLOR_TEMP_MODE = "absolute"
DEFAULT_MIN_KELVIN = 2700 # 370 mireds
DEFAULT_MAX_KELVIN = 6000 # 166 mireds
SCHEMA = vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_STATE_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_ADDRESS): cv.string,
vol.Optional(CONF_BRIGHTNESS_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_STATE_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_ADDRESS): cv.string,
vol.Optional(CONF_COLOR_TEMP_STATE_ADDRESS): cv.string,
vol.Optional(
CONF_COLOR_TEMP_MODE, default=DEFAULT_COLOR_TEMP_MODE
): cv.enum(ColorTempModes),
vol.Optional(CONF_RGBW_ADDRESS): cv.string,
vol.Optional(CONF_RGBW_STATE_ADDRESS): cv.string,
vol.Optional(CONF_MIN_KELVIN, default=DEFAULT_MIN_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_MAX_KELVIN, default=DEFAULT_MAX_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
)
class ClimateSchema:
"""Voluptuous schema for KNX climate devices."""
CONF_SETPOINT_SHIFT_ADDRESS = "setpoint_shift_address"
CONF_SETPOINT_SHIFT_STATE_ADDRESS = "setpoint_shift_state_address"
CONF_SETPOINT_SHIFT_MODE = "setpoint_shift_mode"
CONF_SETPOINT_SHIFT_MAX = "setpoint_shift_max"
CONF_SETPOINT_SHIFT_MIN = "setpoint_shift_min"
CONF_TEMPERATURE_ADDRESS = "temperature_address"
CONF_TEMPERATURE_STEP = "temperature_step"
CONF_TARGET_TEMPERATURE_ADDRESS = "target_temperature_address"
CONF_TARGET_TEMPERATURE_STATE_ADDRESS = "target_temperature_state_address"
CONF_OPERATION_MODE_ADDRESS = "operation_mode_address"
CONF_OPERATION_MODE_STATE_ADDRESS = "operation_mode_state_address"
CONF_CONTROLLER_STATUS_ADDRESS = "controller_status_address"
CONF_CONTROLLER_STATUS_STATE_ADDRESS = "controller_status_state_address"
CONF_CONTROLLER_MODE_ADDRESS = "controller_mode_address"
CONF_CONTROLLER_MODE_STATE_ADDRESS = "controller_mode_state_address"
CONF_HEAT_COOL_ADDRESS = "heat_cool_address"
CONF_HEAT_COOL_STATE_ADDRESS = "heat_cool_state_address"
CONF_OPERATION_MODE_FROST_PROTECTION_ADDRESS = (
"operation_mode_frost_protection_address"
)
CONF_OPERATION_MODE_NIGHT_ADDRESS = "operation_mode_night_address"
CONF_OPERATION_MODE_COMFORT_ADDRESS = "operation_mode_comfort_address"
CONF_OPERATION_MODE_STANDBY_ADDRESS = "operation_mode_standby_address"
CONF_OPERATION_MODES = "operation_modes"
CONF_ON_OFF_ADDRESS = "on_off_address"
CONF_ON_OFF_STATE_ADDRESS = "on_off_state_address"
CONF_ON_OFF_INVERT = "on_off_invert"
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
DEFAULT_NAME = "KNX Climate"
DEFAULT_SETPOINT_SHIFT_MODE = "DPT6010"
DEFAULT_SETPOINT_SHIFT_MAX = 6
DEFAULT_SETPOINT_SHIFT_MIN = -6
DEFAULT_TEMPERATURE_STEP = 0.1
DEFAULT_ON_OFF_INVERT = False
SCHEMA = vol.All(
cv.deprecated("setpoint_shift_step", replacement_key=CONF_TEMPERATURE_STEP),
vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.
|
azlanismail/prismgames
|
examples/games/car/networkx/algorithms/flow/__init__.py
|
Python
|
gpl-2.0
| 98
| 0.010204
|
from networkx.algorithms.flow.maxflow import *
from networkx.algorithms.flow.mincost import *
|
clawpack/clawpack-4.x
|
doc/sphinx/conf.py
|
Python
|
bsd-3-clause
| 6,603
| 0.005755
|
# -*- coding: utf-8 -*-
#
# Clawpack documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 25 12:07:14 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('../..'))
sys.path.append(os.path.abspath('./ext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx','plot_directive','only_directives',
'sphinx.ext.inheritance_diagram']
# extensions.append('sphinx.ext.jsmath')
extensions.append('sphinx.ext.pngmath')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clawpack'
copyright = u'2009, Randall J. LeVeque and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.6'
# The full version, including alpha/beta/rc tags.
release = '4.6.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['users']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = 'math'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# html_style = 'mpl.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'clawlogo.jpg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'clawicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clawpackdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Clawpack.tex', ur'Clawpack Documentation',
ur'RJL', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/dev': None}
jsmath_path = 'jsmath/easy/load.js'
# jsmath_path = '_static/jsMath/easy/load.js'
keep_warnings = 'True'
|
edmorley/django
|
tests/forms_tests/widget_tests/test_hiddeninput.py
|
Python
|
bsd-3-clause
| 603
| 0.001658
|
from django.forms import HiddenInput
from .base import WidgetTest
class HiddenInputTest(WidgetTest):
widget = HiddenInput()
def test_render(self):
self.check_html(self.widget, 'email', '', html='<input type="hidden" name="email" />')
def test_use_required_attribute(self):
# Always False to avoid browser validation on inputs hidden from the
# user.
self.assertIs(self.widget.use_required_attribute(None), False)
self.assertIs(self.widget.use_required_attribute(''), False)
self.assertIs(self.widget.use_required_attribute('foo'), False)
|
FoldingAtHome/fah-control
|
fah/db/Database.py
|
Python
|
gpl-3.0
| 5,550
| 0.013694
|
################################################################################
# #
# Folding@Home Client Control (FAHControl) #
# Copyright (C) 2016-2020 foldingathome.org #
# Copyright (C) 2010-2016 Stanford University #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
#   (at your option) any later version.                                        #
# #
#   This program is distributed in the hope that it will be useful,            #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from fah.db import Column, Table
import sqlite3
class Database:
tables = [
Table('config',
[
Column('name', 'Text', 'NOT NULL'),
Column('value', 'Text', 'NOT NULL'),
],
'PRIMARY KEY (name)'),
Table('clients',
[
Column('name', 'Text', 'NOT NULL'),
Column('address', 'Text', 'NOT NULL'),
Column('port', 'Integer', 'NOT NULL'),
Column('password', 'Text', 'NOT NULL'),
],
'PRIMARY KEY (name)'),
]
def __init__(self, filename):
self.filename = filename
self.conn = sqlite3.connect(filename)
self.conn.row_factory = sqlite3.Row
self.queue = {}
def get_table(self, name):
for table in self.tables:
if table.name == name: return table
raise Exception('Table "%s" not found' % name)
def get_version(self):
return 6
def get_current_version(self):
return int(self.execute_one('PRAGMA user_version')[0])
def set_current_version(self, version):
self.write('PRAGMA user_version=%d' % version, True)
def set(self, name, value, commit = True, queue = False):
if queue: self.queue[name] = value
else:
self.insert('config', name = name, value = value)
if commit: self.commit()
def clear(self, name, commit = True):
self.delete('config', name = name)
if commit: self.commit()
def get(self, name):
c = self.get_table('config').select(self, 'value', name = name)
result = c.fetchone()
c.close()
if result: return result[0]
def has(self, name):
return self.get(name) != None
def default(self, name, default, commit = True):
if not self.has(name): self.set(name, default, commit)
def flush_queued(self):
if len(self.queue) == 0: return
for name, value in self.queue.items():
self.set(name, value, commit = False)
self.commit()
self.queue.clear()
def execute(self, sql):
#print 'SQL:', sql
c = self.conn.cursor()
c.execute(sql)
return c
def execute_one(self, sql):
c = self.execute(sql)
result = c.fetchone()
c.close()
return result
def write(self, sql, commit = False):
self.execute(sql).close()
if commit: self.commit()
def commit(self):
self.conn.commit()
def rollback(self):
self.conn.rollback()
def insert(self, table, **kwargs):
self.get_table(table).insert(self, **kwargs)
def delete(self, table, **kwargs):
self.get_table(table).delete(self, **kwargs)
def select(self, table, cols = None, **kwargs):
return self.get_table(table).select(self, cols, **kwargs)
def create(self):
for table in self.tables:
table.create(self)
self.commit()
def validate(self):
current = self.get_current_version()
if self.get_version() < current:
raise Exception('Configuration database "%s" version %d is newer than is supported %d'
% (self.filename, current, self.get_version()))
elif self.get_version() != current:
# Create or upgrade DB
if current == 0: self.create()
else:
if current <= 2:
# Just drop and recreate the clients table
self.execute('DROP TABLE IF EXISTS clients')
for table in self.tables:
if table.name == 'clients': table.create(self)
if current <= 5:
self.execute('DROP TABLE IF EXISTS projects')
self.set_current_version(self.get_version())
self.commit()
|
danaukes/popupcad
|
api_examples/switch_subdesign.py
|
Python
|
mit
| 1,814
| 0.011577
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes and CONTRIBUTORS
Email: danaukes<at>asu.edu.
Please see LICENSE for full license.
"""
import sys
import popupcad
import qt.QtCore as qc
import qt.QtGui as qg
if __name__=='__main__':
app = qg.QApplication(sys.argv[0])
filename_from = 'C:/Users/danaukes/Dropbox/zhis sentinal 11 files/modified/sentinal 11 manufacturing_R08.cad'
filename_to = 'C:/Users/danaukes/Dropbox/zhis sentinal 11 files/modified/sentinal 11 manufacturing_R09.cad'
d = popupcad.filetypes.design.Design.load_yaml(filename_from)
widget = qg.QDialog()
layout = qg.QVBoxLayout()
layout1 = qg.QHBoxLayout()
layout2 = qg.QHBoxLayout()
list1 = qg.QListWidget()
list2 = qg.QListWidget()
button_ok = qg.QPushButton('Ok')
button_cancel = qg.QPushButton('Cancel')
subdesign_list = list(d.subdesigns.values())
for item in subdesign_list:
list1.addItem(str(item))
list2.addItem(str(item))
layout1.addWidget(list1)
layout1.addWidget(list2)
layout2.addWidget(button_ok)
layout2.addWidget(button_cancel)
layout.addLayout(layout1)
layout.addLayout(layout2)
widget.setLayout(layout)
button_ok.pressed.connect(widget.accept)
button_cancel.pressed.connect(widget.reject)
if widget.exec_():
if len(list1.selectedIndexes())==1 and len(list2.selectedIndexes())==1:
ii_from = list1.selectedIndexes()[0].row()
ii_to = list2.selectedIndexes()[0].row()
print(ii_from,ii_to)
d.replace_subdesign_refs(subdesign_list[ii_from].id,subdesign_list[ii_to].id)
d.subdesigns.pop(subdesign_list[ii_from].id)
d.save_yaml(filename_to)
sys.exit(app.exec_())
|
terryjbates/test-driven-development-with-python
|
myflaskapp/autoapp.py
|
Python
|
mit
| 278
| 0
|
# -*- coding: utf-8 -*-
"""Create an application instance."""
from flask.helpers import get_debug_flag
from myflaskapp.app import create_app
from myflaskapp.settings import DevConfig, ProdConfig
CONFIG = DevConfig if get_debug_flag() else ProdConfig
app = create_app(CONFIG)
|
jorisvandenbossche/DS-python-data-analysis
|
notebooks/_solutions/case1_bike_count21.py
|
Python
|
bsd-3-clause
| 53
| 0.018868
|
df_monthly = df.resample('M').sum()
df_monthly.plot()
|
Tumetsu/FMI-weather-downloader
|
tests/fmiapi/test_fmiparser.py
|
Python
|
gpl-2.0
| 2,344
| 0.00128
|
import copy
from fmiapi.fmixmlparser import FMIxmlParser
from tests.testUtils import *
from tests.fmiapi.testdata.expected_data import *
def describe_fmi_xml_parser():
parser = FMIxmlParser()
def describe_daily_data():
test_data1 = load_xml('./tests/fmiapi/testdata/daily_12_days.xml')
test_data2 = load_xml('./tests/fmiapi/testdata/daily_4_days.xml')
test_data3 = load_xml('./tests/fmiapi/testdata/daily_14_days.xml')
test_1965 = load_xml('./tests/fmiapi/testdata/daily_11_days_1965.xml')
def should_parse_xml():
result = parser.parse([test_data1])
assert_equal(12, len(result['time']))
assert 'time' in result
assert 'rrday' in result
assert 'tday' in result
assert 'snow' in result
assert 'tmin' in result
assert 'tmax' in result
assert 'place' in result
verify_dataframe(result, EXPECTED_DAILY_12_DAYS)
def should_parse_dates_before_1970_correctly():
result = parser.parse([test_1965])
assert_equal(11, len(result['time']))
verify_dataframe(result, EXPECTED_DAILY_1965)
def should_parse_multipart_request_correctly():
result = parser.parse([test_data1, test_data2, test_data3])
assert_equal(30, len(result['time']))
# concat three different dicts to one df
expected_df = copy.deepcopy(EXPECTED_DAILY_12_DAYS)
for key in EXPECTED_DAILY_4_DAYS:
expected_df[key] = expected_df[key] + EXPECTED_DAILY_4_DAYS[key]
for key in EXPECTED_DAILY_14_DAYS:
expected_df[key] = expected_df[key] + EXPECTED_DAILY_14_DAYS[key]
verify_dataframe(result, expected_df)
def describe_realtime_data():
test_data1 = load_xml('./tests/fmiapi/testdata/realtime_1_day.xml')
def should_parse_xml_and_remove_full_nan_columns():
result = parser.parse([test_data1])
assert_equal(153, len(result['time']))
assert 'time' in result
assert 't2m' in result
assert 'rh' in result
assert 'td' in result
assert 'snow_aws' in result
assert 'place' in result
verify_dataframe(result, EXPECTED_REALTIME_1_DAY)
|
tommyip/zulip
|
zerver/migrations/0207_multiuseinvite_invited_as.py
|
Python
|
apache-2.0
| 482
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-06 21:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0206_stream_rendered_description'),
]
operations = [
migrations.AddField(
model_name='multiuseinvite',
name='invited_as',
field=models.PositiveSmallIntegerField(default=1),
),
]
|
gavyur/vkbuddy
|
config.py
|
Python
|
gpl-3.0
| 2,070
| 0.000966
|
# -*- coding: utf-8 -*-
# Config file handling module
# Copyright (C) 2014 Yury Gavrilov <yuriy@igavrilov.ru>
# This file is part of VKBuddy.
# VKBuddy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# VKBuddy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with VKBuddy. If not, see <http://www.gnu.org/licenses/>.
import yaml
class IncorrectConfig(Exception): pass
class BareConfig:
def __init__(self):
self.config = {}
self.required_list = []
def add_parameter(self, name, required=False, description='',
default=None, typ=str):
if required:
self.required_list.append(name)
self.config[name] = {
'description': description,
'default': default,
'type': typ
}
class Config:
def __init__(self, filename, bare):
cfile = open(filename, 'r')
self.__config = yaml.load(cfile)
cfile.close()
self.bare = bare
if not self.__config:
self.__config = {}
for param in bare.required_list:
if not param in self.__config:
raise IncorrectConfig(
'Required parameter \'{}\' not found'.format(param)
)
def __getitem__(self, item):
if item in self.__config:
if item in self.bare.config:
return self.bare.config[item]['type'](self.__config[item])
else:
return self.__config[item]
elif item in self.bare.config:
return self.bare.config[item]['default']
else:
raise KeyError(item)
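# A hedged usage sketch (file name and parameters invented for illustration):
#
#   bare = BareConfig()
#   bare.add_parameter('token', required=True, description='VK API token')
#   bare.add_parameter('port', default=8080, typ=int)
#   config = Config('vkbuddy.yaml', bare)
#   config['port']  # value from the YAML file coerced to int, or the default 8080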
|
brianmhunt/SIWorldMap
|
werkzeug/__init__.py
|
Python
|
mit
| 7,097
| 0.001832
|
# -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
# the version. Usually set automatically by a script.
__version__ = '0.8.1'
# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.
# import mapping to objects in other modules
all_by_module = {
'werkzeug.debug': ['DebuggedApplication'],
'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy',
'LocalStack', 'release_local'],
'werkzeug.templates': ['Template'],
'werkzeug.serving': ['run_simple'],
'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
'run_wsgi_app'],
'werkzeug.testapp': ['test_app'],
'werkzeug.exceptions': ['abort', 'Aborter'],
'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
'url_quote_plus', 'url_unquote',
'url_unquote_plus', 'url_fix', 'Href',
'iri_to_uri', 'uri_to_iri'],
'werkzeug.formparser': ['parse_form_data'],
'werkzeug.utils': ['escape', 'environ_property',
'append_slash_redirect', 'redirect',
'cached_property', 'import_string',
'dump_cookie', 'parse_cookie', 'unescape',
'format_string', 'find_modules', 'header_property',
'html', 'xhtml', 'HTMLBuilder',
'validate_arguments', 'ArgumentValidationError',
'bind_arguments', 'secure_filename'],
'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
'peek_path_info', 'SharedDataMiddleware',
'DispatcherMiddleware', 'ClosingIterator',
'FileWrapper', 'make_line_iter', 'LimitedStream',
'responder', 'wrap_file', 'extract_path_info'],
'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
'EnvironHeaders', 'ImmutableList',
'ImmutableDict', 'ImmutableMultiDict',
'TypeConversionDict', 'ImmutableTypeConversionDict',
'Accept', 'MIMEAccept', 'CharsetAccept',
'LanguageAccept', 'RequestCacheControl',
'ResponseCacheControl', 'ETags', 'HeaderSet',
'WWWAuthenticate', 'Authorization',
'FileMultiDict', 'CallbackDict', 'FileStorage',
'OrderedMultiDict', 'ImmutableOrderedMultiDict'],
'werkzeug.useragents': ['UserAgent'],
'werkzeug.http': ['parse_etags', 'parse_date', 'http_date',
'cookie_date', 'parse_cache_control_header',
'is_resource_modified', 'parse_accept_header',
'parse_set_header', 'quote_etag', 'unquote_etag',
'generate_etag', 'dump_header',
'parse_list_header', 'parse_dict_header',
'parse_authorization_header',
'parse_www_authenticate_header',
'remove_entity_headers', 'is_entity_header',
'remove_hop_by_hop_headers', 'parse_options_header',
'dump_options_header', 'is_hop_by_hop_header',
'unquote_header_value',
'quote_header_value', 'HTTP_STATUS_CODES'],
'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request',
'Response', 'AcceptMixin', 'ETagRequestMixin',
'ETagResponseMixin', 'ResponseStreamMixin',
'CommonResponseDescriptorsMixin',
'UserAgentMixin', 'AuthorizationMixin',
'WWWAuthenticateMixin',
'CommonRequestDescriptorsMixin'],
'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
# the undocumented easteregg ;-)
'werkzeug._internal': ['_easteregg']
}
# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])
object_origins = {}
for module, items in all_by_module.iteritems():
for item in items:
object_origins[item] = module
class module(ModuleType):
"""Automatically import objects from the modules."""
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
elif name in attribute_modules:
__import__('werkzeug.' + name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
"""Just show what we want to show."""
result = list(new_module.__all__)
result.extend(('__file__', '__path__', '__doc__', '__all__',
'__docformat__', '__name__', '__path__',
'__package__', '__version__'))
return result
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']
# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
'__file__': __file__,
'__package__': 'werkzeug',
'__path__': __path__,
'__doc__': __doc__,
'__version__': __version__,
'__all__': tuple(object_origins) + tuple(attribute_modules),
'__docformat__': 'restructuredtext en'
})
|
romana/networking-romana
|
networking_romana/driver/ipam_romana.py
|
Python
|
apache-2.0
| 15,559
| 0.001928
|
# Copyright (c) 2016 Pani Networks Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import MySQLdb
import netaddr
from oslo_config import cfg
from oslo_log import log
from oslo_utils import uuidutils
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import utils as common_utils
from neutron.common import exceptions
from neutron.ipam import driver as ipam_base
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
from neutron import manager
from six.moves.urllib.parse import urljoin
from six.moves.urllib.parse import urlparse
from networking_romana.driver import exceptions
from networking_romana.driver import utils
LOG = log.getLogger(__name__)
class RomanaDbSubnet(ipam_base.Subnet):
"""
Manage IP addresses for Romana IPAM driver.
"""
def __init__(self, internal_id, ctx, cidr=None,
gateway_ip=None, tenant_id=None,
subnet_id=None):
"""Initialize RomanaDbSubnet."""
LOG.debug("RomanaDbSubnet.__init__()")
self._cidr = cidr
self._pools = []
self._gateway_ip = gateway_ip
self._tenant_id = tenant_id
self._subnet_id = subnet_id
self._context = ctx
self._neutron_id = internal_id
config = cfg.CONF.romana
LOG.debug("RomanaDbSubnet.__init__()")
if not config:
raise ipam_exc.exceptions.InvalidConfigurationOption(
{'opt_name': 'romana', 'opt_value': 'missing'})
self.romana_url = config.url
if not self.romana_url:
raise ipam_exc.exceptions.InvalidConfigurationOption(
{'opt_name': 'url', 'opt_value': 'missing'})
LOG.debug("romana_url: %s" % self.romana_url)
@classmethod
def create_from_subnet_request(cls, subnet_request, ctx):
"""Create from a subnet request."""
LOG.debug("RomanaDbSubnet.create_from_subnet_request()")
ipam_subnet_id = uuidutils.generate_uuid()
# Create subnet resource
me = cls(ipam_subnet_id,
ctx,
cidr=subnet_request.subnet_cidr,
gateway_ip=subnet_request.gateway_ip,
tenant_id=subnet_request.tenant_id,
subnet_id=subnet_request.subnet_id)
# Workaround for creating the 10/8 subnet
if subnet_request.subnet_cidr.prefixlen > 8:
me.allocate_segment()
return me
def allocate_segment(self):
"""This is a no-op in Romana model."""
LOG.debug("RomanaDbSubnet.allocate_segment()")
pass
@classmethod
def load(cls, neutron_subnet_id, ctx):
"""Load an IPAM subnet from the database given its neutron ID."""
LOG.debug("RomanaDbSubnet.load()")
neutron_subnet = cls._fetch_subnet(ctx, neutron_subnet_id)
retval = cls(neutron_subnet_id,
ctx,
cidr=neutron_subnet['cidr'],
gateway_ip=neutron_subnet['gateway_ip'],
tenant_id=neutron_subnet['tenant_id'],
subnet_id=neutron_subnet_id)
LOG.debug("IPAM subnet loaded: %s" % retval)
return retval
@classmethod
def _fetch_subnet(cls, context, id):
LOG.debug("RomanaDbSubnet._fetch_subnet()")
plugin = manager.NeutronManager.get_plugin()
return plugin._get_subnet(context, id)
def allocate(self, address_request):
"""Allocate Address by calling Romana IPAM Agent."""
LOG.debug("RomanaDbSubnet.allocate(%s)" % address_request)
if isinstance(address_request, ipam_req.SpecificAddressRequest):
msg = "Specific address allocation not supported by Romana."
raise exceptions.RomanaException(msg)
if isinstance(address_request, RomanaDhcpAddressRequest):
host_name = address_request.host_name
host_info = utils.find_host_info(self.romana_url, host_name)
ip = host_info.get("ip")
LOG.debug("Romana IPAM: To DHCP agent on host %s, assigning %s", host_name, ip)
return ip
ten_lookup = { 'external_id': address_request.tenant_id }
romana_tenant_id = utils.find_romana_id(self.romana_url, 'tenant', ten_lookup)
seg_lookup = { 'name' : address_request.segment_name,
'tenant_id' : romana_tenant_id}
romana_segment_id = utils.find_romana_id(self.romana_url, 'segment', seg_lookup)
host_lookup = { 'name' : address_request.host_name }
romana_host_id = utils.find_romana_id(self.romana_url, 'host', host_lookup)
ipam_service_url = utils.find_romana_service_url(self.romana_url,
'ipam')
url = urljoin(ipam_service_url, "/endpoints")
endpoint = {'tenant_id' : str(romana_tenant_id),
'segment_id' : str(romana_segment_id),
'host_id' : str(romana_host_id)}
try:
resp = utils.http_call("POST", url, endpoint)
ip = resp['ip']
except Exception as e:
LOG.error(e)
raise exceptions.RomanaException("Error allocating: %s" % e)
return ip
def deallocate(self, address):
"""Deallocate an IP Address. Really, it's a noop, here we are not doing anything.
The logic lives in ML2 driver.
"""
pass
def update_allocation_pools(self, pools):
"""Update Allocation Pools."""
LOG.debug("RomanaDbSubnet.update_allocation_pools()")
pass
def get_details(self):
"""Return subnet data as a SpecificSubnetRequest."""
LOG.debug("RomanaDbSubnet.get_details()")
return ipam_req.SpecificSubnetRequest(
self._tenant_id, self._neutron_id,
self._cidr, self._gateway_ip, self._pools)
class RomanaDhcpAddressRequest(ipam_req.AnyAddressRequest):
def __init__(self, host_name):
super(ipam_req.AnyAddressRequest, self).__init__()
self.host_name = host_name
class RomanaAnyAddressRequest(ipam_req.AnyAddressRequest):
"""Used to request any available address from the pool."""
def __init__(self, host_name, tenant_id, segment_name):
"""Initialize RomanaAnyAddressRequest."""
super(ipam_req.AnyAddressRequest, self).__init__()
self.host_name = host_name
self.tenant_id = tenant_id
self.segment_name = segment_name
class RomanaAddressRequestFactory(ipam_req.AddressRequestFactory):
"""Builds address request using ip information."""
_db_url = None
_db_conn_dict = None
@classmethod
def get_request(cls, context, port, ip_dict):
"""Get a prepared Address Request.
:param context: context
:param port: port dict
:param ip_dict: dict that can contain 'ip_address', 'mac' and
'subnet_cidr' keys. Request to generate is selected depending on
this ip_dict keys.
:return: returns prepared AddressRequest (specific or any)
"""
mac = port['mac_address']
owner = port.get('device_owner')
LOG.debug("AAA: \tTenant %s, is admin %s\n\tdevice owner: %s\n\t%s\n\t%s", context.tenant, context.is_admin, owner, port, ip_dict)
if owner == constants.DEVICE_OWNER_DHCP:
return RomanaDhcpAddressRequest(port.get('binding:host_id'))
# Lazily instantiate DB connection info.
if cls._db_url is None:
cls._db_url = cfg.CONF.
|
mishbahr/staticgen-demo
|
staticgen_demo/blog/conf.py
|
Python
|
bsd-3-clause
| 347
| 0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from appconf import AppConf
class BlogConf(AppConf):
DISQUS_SHORTNAME = 'django-staticgen'
    POST_IDENTIFIER = 'current_post'
class Meta:
prefix = 'blog'
|
welchbj/tt
|
tt/satisfiability/__init__.py
|
Python
|
mit
| 59
| 0
|
"""Functionality
|
for determining logic satisfiasbility.""
|
"
|
szha/mxnet
|
python/mxnet/api.py
|
Python
|
apache-2.0
| 938
| 0
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Functions defined in MXNet.
Acknowledgement: This file originates from incubator-tvm"""
from ._ffi.function import _init_api
_init_api("mxnet.api")
|
kacper1095/asl-translator
|
api/src/scripts/train_simple_network.py
|
Python
|
gpl-3.0
| 3,211
| 0.005294
|
import os
import argparse
import datetime
import yaml
import api.src.common.initial_environment_config
from ..models.dense import create_model
from ..data_processing.data_generator import DataGenerator
from ..common.config import TrainingConfig, DataConfig, Config
from ..common.utils import print_info, ensure_dir
from .plot_trainings import get_description_string
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, LearningRateScheduler, EarlyStopping
RUNNING_TIME = datetime.datetime.now().strftime("%H_%M_%d_%m_%y")
def train(num_epochs, batch_size, input_size, num_workers):
if not Config.NO_SAVE:
ensure_dir(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME))
model = create_model((2592,))
callbacks = [
ModelCheckpoint(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'weights.h5'), save_best_only=True, monitor=TrainingConfig.callbacks_monitor),
CSVLogger(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'history.csv')),
LearningRateScheduler(TrainingConfig.schedule),
EarlyStopping(patience=5)
]if not Config.NO_SAVE else []
if not Config.NO_SAVE:
introduced_change = input("What new was introduced?: ")
with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'change.txt'), 'w') as f:
f.write(introduced_change)
with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'config.yml'), 'w') as f:
yaml.dump(list([TrainingConfig.get_config(), Config.get_config(), DataConfig.get_config()]), f, default_flow_style=False)
with open(os.path.join(TrainingConfig.PATHS['MODELS'], RUNNING_TIME, 'model.txt'), 'w') as f:
f.write(get_description_string(model))
optimizer = TrainingConfig.optimizer
data_generator_train = DataGenerator(DataConfig.PATHS['TRAINING_PROCESSED_DATA'], batch_size, input_size, False, True)
data_generator_valid = DataGenerator(DataConfig.PATHS['VALID_PROCESSED_DATA'], batch_size, input_size, True, True)
model.compile(optimizer, TrainingConfig.loss, metrics=TrainingConfig.metrics)
model.fit_generator(data_generator_train, samples_per_epoch=data_generator_train.samples_per_epoch, nb_epoch=num_epochs,
validation_data=data_generator_valid, nb_val_samples=data_generator_valid.samples_per_epoch,
callbacks=callbacks)
def main(args):
print_info("Training")
train(args.num_epochs, args.batch_size, args.input_size, args.num_workers)
print_info("Finished")
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Script performing training')
    argparser.add_argument('--num_epochs', default=TrainingConfig.NB_EPOCHS, type=int, help='Number of training epochs')
argparser.add_argument('--num_workers', type=int, default=TrainingConfig.NUM_WORKERS, help='Number of workers during training')
argparser.add_argument('--batch_size', type=int, default=TrainingConfig.BATCH_SIZE, help='Batch size')
    argparser.add_argument('--input_size', type=int, default=Config.IMAGE_SIZE, help='Image size to input')
arguments = argparser.parse_args()
main(arguments)
|
town-hall-pinball/project-omega
|
pin/lib/score.py
|
Python
|
mit
| 4,429
| 0.001129
|
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from pin.lib import p, ui, util
class Classic(object):
initial = True
def __init__(self, handler):
self.handler = handler
self.display = ui.Panel()
self.player = ui.Text(top=4)
self.players = [
ui.Text(left=0, top=0),
ui.Text(right=0, top=0, x_align="right"),
ui.Text(left=0, bottom=7),
ui.Text(right=0, bottom=7, x_align="right")
]
self.ball = ui.Text(bottom=0, left=0, width=54, font="bm3", x_align="right")
self.credits_right = ui.Text(bottom=0, left=64, font="bm3")
self.credits_center = ui.Text(bottom=0, font="bm3")
self.display.add((self.player, self.players[0], self.players[1],
self.players[2], self.players[3], self.ball,
self.credits_right, self.credits_center))
self.handler.on("data_credits", self.update)
self.handler.on("add_player", self.update)
self.handler.on("next_player", self.next_player)
self.handler.on("player_score", self.score)
self.update()
def next_player(self):
self.initial = True
self.update()
def score(self):
self.initial = False
self.update()
def update(self, *args, **kwargs):
self.update_score(self.player, 0, single=True)
for index, player in enumerate(self.players):
self.update_score(player, index, single=False)
if p.game:
self.ball.show("BALL {}".format(p.game.ball))
self.credits_right.show(util.credits_string())
self.credits_center.hide()
else:
self.ball.hide()
self.credits_right.hide()
self.credits_center.show(util.credits_string())
def update_score(self, text, index, single):
show = True
if single and len(p.players) > 1:
show = False
if not single and len(p.players) == 1:
show = False
if index >= len(p.players):
show = False
if show:
score = p.players[index]["score"]
self.update_score_size(text, single, index)
text.show(util.format_score(score))
if index == p.player["index"] and self.initial and p.game:
text.effect("blink", duration=0.15, repeat=True)
else:
text.effect_cancel()
else:
text.hide()
# Adapted from
# https://github.com/preble/pyprocgame/blob/master/procgame/modes/scoredisplay.py#L104
def update_score_size(self, text, single, index):
score = p.players[index]["score"]
if single:
if score < 1e9:
text.update(font="bm10w")
elif score < 1e10:
text.update(font="bm10")
else:
text.update(font="bm10n")
elif not single and p.game and p.player["index"] == index:
if score < 1e6:
text.update(font="bm8w")
elif score < 1e7:
text.update(font="bm8")
else:
text.update(font="bm8n")
else:
if score < 1e6:
text.update(font="bm5w")
elif score < 1e7:
text.update(font="bm5")
else:
text.update(font="bm5n")
|
Saethlin/astrotools
|
fit_transit.py
|
Python
|
mit
| 3,396
| 0.001767
|
import numpy as np
import scipy.optimize
from scipy.signal import lombscargle
import batman
import ctools
import matplotlib.pyplot as plt
def periodogram(time, data, periods):
freq = 1/periods
nfactor = 2/(data.size * np.var(data))
power = nfactor * lombscargle(time, data-np.mean(data), freq*2*np.pi)
return power
def phase_dispersion_minimization(time, data, period):
mask = time > period
mtime = time.copy()
mtime[mask] = time[mask] % period
inds = np.argsort(mtime, kind='mergesort')
data = data[inds]
val = np.sum(np.abs(data[1:] - data[:-1]))
return val
def fit_transit(time, flux, period=None):
if period is None:
time_range = time.max()-time.min()
avg_spacing = time_range/time.size
start = avg_spacing
stop = time_range
periods = np.arange(start, stop, avg_spacing)
phase_dispersion = ctools.phase_dispersion(time, flux, periods)
power = periodogram(time, flux, periods)
period = 25.0
time %= period
inds = np.argsort(time, kind='mergesort')
time = time[inds]
flux = flux[inds]
flux /= np.median(flux) # Data must be normalized to use the rp parameter
in_transit = flux < 1-(1-flux.min())/2
    # Estimate planet radius from the transit depth
planet_radius = np.sqrt(1-np.median(flux[in_transit]))
# Estimate the location of the only dip
t0 = np.median(time[in_transit])
# Estimate semi-major axis from transit duration
duration = time[in_transit].max()-time[in_transit].min()
semi_major_axis = 1 / np.sin(duration * np.pi / period)
def transit_model_partial(time, *params):
return transit_model(time, period, t0, *params)
# Assume inclination of 90, with 0 eccentricity
p0 = [planet_radius, semi_major_axis, 90.0, 0.0, 90.0, 0.1, 0.3]
plt.plot(time, flux, 'k.')
plt.plot(time, transit_model_partial(time, *p0))
|
plt.show()
    p, cov = scipy.optimize.curve_fit(transit_model_partial, time, flux, p0=p0)
p0 = [period, t0] + list(p)
p, cov = scipy.optimize.curve_fit(transit_model, time, flux, p0=p0)
#plt.plot(time, flux, 'k.')
#plt.plot(time, transit_model(time, *p0))
#plt.plot(time, transit_model(time, *p))
#plt.show()
return p
def transit_model(time, period, t0, planet_radius, semi_major_axis,
inclination, eccentricity, longitude_of_periastron,
limb_linear, limb_quadratic):
params = batman.TransitParams()
params.per = period
params.t0 = t0
params.rp = planet_radius
params.a = semi_major_axis
params.inc = inclination
params.ecc = abs(eccentricity) % 1
params.w = longitude_of_periastron
params.u = [limb_linear, limb_quadratic]
params.limb_dark = 'quadratic'
model = batman.TransitModel(params, time)
return model.light_curve(params)
if __name__ == '__main__':
np.random.seed(1)
params = batman.TransitParams()
params.t0 = 1.0
params.per = 25.0
params.rp = 0.1
params.a = 15.0
params.inc = 90.0
params.ecc = 0.0
params.w = 90.0
params.u = [0.1, 0.3]
params.limb_dark = 'quadratic'
time = np.linspace(0, 100, 10000)
model = batman.TransitModel(params, time)
flux = model.light_curve(params)
flux += np.random.randn(time.size) * 0.001
print(fit_transit(time, flux))
|
cloudera/ibis
|
ibis/backends/tests/test_column.py
|
Python
|
apache-2.0
| 1,384
| 0
|
import numpy as np
import pandas as pd
import pytest
ROWID_ZERO_INDEXED_BACKENDS = ('omniscidb',)
@pytest.mark.parametrize(
'column',
[
'string_col',
'double_col',
'date_string_col',
pytest.param('timestamp_col', marks=pytest.mark.skip(reason='hangs')),
],
)
@pytest.mark.xfail_unsupported
def test_distinct_column(backend, alltypes, df, column):
expr = alltypes[column].distinct()
result = expr.execute()
expected = df[column].unique()
assert set(result) == set(expected)
@pytest.mark.xfail_unsupported
def test_rowid(con, backend):
t = con.table('functional_alltypes')
result = t[t.rowid()].execute()
first_value = 0 if backend.name() in ROWID_ZERO_INDEXED_BACKENDS else 1
expected = pd.Series(
range(first_value, first_value + len(result)),
dtype=np.int64,
name='rowid',
|
)
pd.testing.assert_series_equal(result.iloc[:, 0], expected)
@pytest.mark.xfail_unsupported
def test_named_rowid(con, backend):
t = con.table('functional_alltypes')
result = t[t.rowid().name('number')].execute()
    first_value = 0 if backend.name() in ROWID_ZERO_INDEXED_BACKENDS else 1
expected = pd.Series(
range(first_value, first_value + len(result)),
dtype=np.int64,
name='number',
)
pd.testing.assert_series_equal(result.iloc[:, 0], expected)
|
Nodraak/Prologin2015
|
4_Expert-itinerant.py
|
Python
|
gpl-2.0
| 3,054
| 0.005239
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# @Author: Adrien Chardon
# @Date: 2014-11-16 14:29:10
# @Last Modified by: Adrien Chardon
# @Last Modified time: 2014-11-16 16:31:32
import time as t
################################################################################
# Useful functions
################################################################################
# big number
infinity = 1000
# wrapper
def getObjectInList(nodes, id):
return nodes[id-1]
# Return all child nodes that can be directly accessed from the node
def getChildOfNode(data, node):
ret = []
for dic in data:
if dic['start'] == node:
ret.append(dic['end'])
return ret
# Return the time for traveling directly from node1 to node2
def getDist(data, node1, node2):
for dic in data:
if (dic['start'] == node1) and (dic['end'] == node2):
return dic['time']
return infinity
################################################################################
# main
################################################################################
# get the time for one request
def expert_itinerant_one(nb_node, nb_link, nb_request, data, start, end):
nodes = []
for _ in range(nb_node):
tmp = {
'distanceFromStart': infinity,
'origin': -1,
}
nodes.append(tmp)
getObjectInList(nodes, start)['distanceFromStart'] = 0
getObjectInList(nodes, start)['origin'] = 0
notVisited = [start]
while notVisited != []:
cur_id = notVisited.pop()
cur = getObjectInList(nodes, cur_id)
for child_id in getChildOfNode(data, cur_id):
child = getObjectInList(nodes, child_id)
            if (child['origin'] == -1) or (cur['distanceFromStart'] + getDist(data, cur_id, child_id) < child['distanceFromStart']):
                child['distanceFromStart'] = cur['distanceFromStart'] + getDist(data, cur_id, child_id)
                child['origin'] = cur_id
if child_id not in notVisited:
notVisited.append(child_id)
return getObjectInList(nodes, end)['distanceFromStart']
def expert_itinerant(nb_node, nb_link, nb_request, data, request):
for dic in request:
print expert_itinerant_one(nb_node, nb_link, nb_request, data, dic['start'], dic['end'])
if __name__ == '__main__':
nb_node, nb_link, nb_request = (int(i) for i in raw_input().split())
data = []
for _ in range(nb_link):
start, end, time = (int(i) for i in raw_input().split())
tmp = {
'start': start,
'end': end,
'time': time,
}
data.append(tmp)
data.sort(key=lambda tup: tup['start'])
request = []
for _ in range(nb_request):
start, end = (int(i) for i in raw_input().split())
tmp = {
'start': start,
'end': end,
}
request.append(tmp)
expert_itinerant(nb_node, nb_link, nb_request, data, request)
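# Illustrative usage sketch (not part of the original file): the solver above can
# also be called directly with an in-memory edge list instead of stdin. The sample
# graph below is hypothetical.
#
#   sample_data = [
#       {'start': 1, 'end': 2, 'time': 5},
#       {'start': 2, 'end': 3, 'time': 7},
#       {'start': 1, 'end': 3, 'time': 20},
#   ]
#   # Shortest travel time from node 1 to node 3 is 12 (via node 2), not the direct 20.
#   print expert_itinerant_one(3, 3, 1, sample_data, 1, 3)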
|
realityone/flaskbb
|
flaskbb/management/views.py
|
Python
|
bsd-3-clause
| 24,085
| 0.000166
|
# -*- coding: utf-8 -*-
"""
flaskbb.management.views
~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the management views.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import sys
from flask import (Blueprint, current_app, request, redirect, url_for, flash,
jsonify, __version__ as flask_version)
from flask_login import current_user, login_fresh
from flask_plugins import get_all_plugins, get_plugin, get_plugin_from_all
from flask_babelplus import gettext as _
from flask_allows import Permission, Not
from flaskbb import __version__ as flaskbb_version
from flaskbb._compat import iteritems
from flaskbb.forum.forms import UserSearchForm
from flaskbb.utils.settings import flaskbb_config
from flaskbb.utils.requirements import (IsAtleastModerator, IsAdmin,
CanBanUser, CanEditUser,
IsAtleastSuperModerator)
from flaskbb.extensions import db, allows
from flaskbb.utils.helpers import (render_template, time_diff, time_utcnow,
get_online_users)
from flaskbb.user.models import Guest, User, Group
from flaskbb.forum.models import Post, Topic, Forum, Category, Report
from flaskbb.management.models import Setting, SettingsGroup
from flaskbb.management.forms import (AddUserForm, EditUserForm, AddGroupForm,
EditGroupForm, EditForumForm,
AddForumForm, CategoryForm)
management = Blueprint("management", __name__)
@management.before_request
def check_fresh_login():
"""Checks if the login is fresh for the current user, otherwise the user
has to reauthenticate."""
if not login_fresh():
return current_app.login_manager.needs_refresh()
@management.route("/")
@allows.requires(IsAtleastModerator)
def overview():
# user and group stats
banned_users = User.query.filter(
Group.banned == True,
Group.id == User.primary_group_id
).count()
if not current_app.config["REDIS_ENABLED"]:
online_users = User.query.filter(User.lastseen >= time_diff()).count()
else:
online_users = len(get_online_users())
stats = {
# user stats
"all_users": User.query.count(),
"banned_users": banned_users,
"online_users": online_users,
"all_groups": Group.query.count(),
# forum stats
"report_count": Report.query.count(),
"topic_count": Topic.query.count(),
"post_count": Post.query.count(),
# misc stats
"plugins": get_all_plugins(),
"python_version": "%s.%s" % (sys.version_info[0], sys.version_info[1]),
"flask_version": flask_version,
"flaskbb_version": flaskbb_version
}
return render_template("management/overview.html", **stats)
@management.route("/settings", methods=["GET", "POST"])
@management.route("/settings/<path:slug>", methods=["GET", "POST"])
@allows.requires(IsAdmin)
def settings(slug=None):
slug = slug if slug else "general"
# get the currently active group
active_group = SettingsGroup.query.filter_by(key=slug).first_or_404()
# get all groups - used to build the navigation
all_groups = SettingsGroup.query.all()
SettingsForm = Setting.get_form(active_group)
old_settings = Setting.get_settings(active_group)
new_settings = {}
form = SettingsForm()
if form.validate_on_submit():
for key, values in iteritems(old_settings):
try:
# check if the value has changed
if values['value'] == form[key].data:
continue
else:
|
new_settings[key] = form[key].data
except KeyError:
pass
Setting.update(settings=new_settings, app=current_app)
flash(_("Settings saved."), "success")
else:
for key, values in iteritems(old_settings):
try:
form[key].data = values['value']
except (KeyError, ValueError):
pass
    return render_template("management/settings.html", form=form,
all_groups=all_groups, active_group=active_group)
# Users
@management.route("/users", methods=['GET', 'POST'])
@allows.requires(IsAtleastModerator)
def users():
page = request.args.get("page", 1, type=int)
search_form = UserSearchForm()
if search_form.validate():
users = search_form.get_results().\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template("management/users.html", users=users,
search_form=search_form)
users = User.query. \
order_by(User.id.asc()).\
paginate(page, flaskbb_config['USERS_PER_PAGE'], False)
return render_template("management/users.html", users=users,
search_form=search_form)
@management.route("/users/<int:user_id>/edit", methods=["GET", "POST"])
@allows.requires(IsAtleastModerator)
def edit_user(user_id):
user = User.query.filter_by(id=user_id).first_or_404()
if not Permission(CanEditUser, identity=current_user):
flash(_("You are not allowed to edit this user."), "danger")
return redirect(url_for("management.users"))
member_group = db.and_(*[db.not_(getattr(Group, p)) for p in
['admin', 'mod', 'super_mod', 'banned', 'guest']])
filt = db.or_(
Group.id.in_(g.id for g in current_user.groups), member_group
)
if Permission(IsAtleastSuperModerator, identity=current_user):
filt = db.or_(filt, Group.mod)
if Permission(IsAdmin, identity=current_user):
filt = db.or_(filt, Group.admin, Group.super_mod)
if Permission(CanBanUser, identity=current_user):
filt = db.or_(filt, Group.banned)
group_query = Group.query.filter(filt)
form = EditUserForm(user)
form.primary_group.query = group_query
form.secondary_groups.query = group_query
if form.validate_on_submit():
form.populate_obj(user)
user.primary_group_id = form.primary_group.data.id
# Don't override the password
if form.password.data:
user.password = form.password.data
user.save(groups=form.secondary_groups.data)
flash(_("User updated."), "success")
return redirect(url_for("management.edit_user", user_id=user.id))
return render_template("management/user_form.html", form=form,
title=_("Edit User"))
@management.route("/users/delete", methods=["POST"])
@management.route("/users/<int:user_id>/delete", methods=["POST"])
@allows.requires(IsAdmin)
def delete_user(user_id=None):
# ajax request
if request.is_xhr:
ids = request.get_json()["ids"]
data = []
for user in User.query.filter(User.id.in_(ids)).all():
# do not delete current user
if current_user.id == user.id:
continue
if user.delete():
data.append({
"id": user.id,
"type": "delete",
"reverse": False,
"reverse_name": None,
"reverse_url": None
})
return jsonify(
message="{} users deleted.".format(len(data)),
category="success",
data=data,
status=200
)
user = User.query.filter_by(id=user_id).first_or_404()
if current_user.id == user.id:
flash(_("You cannot delete yourself.", "danger"))
return redirect(url_for("management.users"))
user.delete()
flash(_("User deleted."), "success")
return redirect(url_for("management.users"))
@management.route("/users/add", methods=["GET", "POST"])
@allows.requires(IsAdmin)
def add_user():
form = AddUserForm()
if form.validate_on_submit():
form.save()
flash(_("User added."), "success")
return redirect(url_for("management.users"))
return render_template("management/user_form.html", form=form,
|
glynjackson/django-oscar-sagepay
|
sagepay/templatetags/checkout_mode_tag.py
|
Python
|
mit
| 613
| 0.001631
|
from classytags.helpers import InclusionTag
from django import template
from django.conf import settings
from django.template.loader import render_to_string
register = template.Library()
class Banner(InclusionTag):
"""
Displays a checkout mode banner.
"""
    template = 'sagepay/checkout_mode_banner.html'
def render_tag(self, context, **kwargs):
template = self.get_template(context, **kwargs)
if settings.SAGEPAY_MODE == "Live":
            return ''
data = self.get_context(context, **kwargs)
return render_to_string(template, data)
register.tag(Banner)
|
kernow/ansible-modules-core
|
cloud/amazon/route53.py
|
Python
|
gpl-3.0
| 22,225
| 0.009719
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: route53
version_added: "1.3"
short_description: add or delete entries in Amazon's Route53 DNS service
description:
  - Creates and deletes DNS records in Amazon's Route53 service
options:
command:
description:
- Specifies the action to take.
required: true
choices: [ 'get', 'create', 'delete' ]
zone:
description:
- The DNS zone to modify
required: true
hosted_zone_id:
description:
- The Hosted Zone ID of the DNS zone to modify
required: false
version_added: "2.0"
default: null
record:
description:
- The full DNS record to create or delete
required: true
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
type:
description:
- The type of DNS record to create
required: true
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS', 'SOA' ]
alias:
description:
- Indicates if this is an alias record.
required: false
version_added: "1.9"
default: False
choices: [ 'True', 'False' ]
alias_hosted_zone_id:
description:
- The hosted zone identifier.
required: false
version_added: "1.9"
default: null
alias_evaluate_target_health:
description:
- Whether or not to evaluate an alias target health. Useful for aliases to Elastic Load Balancers.
required: false
version_added: "2.1"
default: false
value:
description:
- The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it.
required: false
default: null
overwrite:
description:
- Whether an existing record should be overwritten on create if values do not match
required: false
default: null
retry_interval:
description:
- In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long.
required: false
default: 500
private_zone:
description:
- If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone.
required: false
default: false
version_added: "1.9"
identifier:
description:
- Have to be specified for Weighted, latency-based and failover resource record sets only. An identifier
that differentiates among multiple resource record sets that have the
same combination of DNS name and type.
required: false
default: null
version_added: "2.0"
weight:
description:
- Weighted resource record sets only. Among resource record sets that
have the same combination of DNS name and type, a value that
determines what portion of traffic for the current resource record set
is routed to the associated location.
required: false
default: null
version_added: "2.0"
region:
description:
      - Latency-based resource record sets only. Among resource record sets
that have the same combination of DNS name and type, a value that
determines which region this should be associated with for the
latency-based routing
required: false
default: null
version_added: "2.0"
health_check:
description:
- Health check to associate with this record
required: false
default: null
version_added: "2.0"
failover:
description:
- Failover resource record sets only. Whether this is the primary or
secondary resource record set. Allowed values are PRIMARY and SECONDARY
required: false
default: null
version_added: "2.0"
vpc_id:
description:
- "When used in conjunction with private_zone: true, this will only modify records in the private hosted zone attached to this VPC."
- This allows you to have multiple private hosted zones, all with the same name, attached to different VPCs.
required: false
default: null
version_added: "2.0"
wait:
description:
- Wait until the changes have been replicated to all Amazon Route 53 DNS servers.
required: false
default: no
version_added: "2.1"
wait_timeout:
description:
- How long to wait for the changes to be replicated, in seconds.
required: false
default: 300
version_added: "2.1"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Mike Buzzetti <mike.buzzetti@gmail.com>"
extends_documentation_fragment: aws
'''
# FIXME: the command stuff should have a more state like configuration alias -- MPD
EXAMPLES = '''
# Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
- route53:
command: create
zone: foo.com
record: new.foo.com
type: A
ttl: 7200
value: 1.1.1.1,2.2.2.2,3.3.3.3
wait: yes
# Retrieve the details for new.foo.com
- route53:
command: get
zone: foo.com
record: new.foo.com
type: A
register: rec
# Delete new.foo.com A record using the results from the get command
- route53:
command: delete
zone: foo.com
record: "{{ rec.set.record }}"
ttl: "{{ rec.set.ttl }}"
type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
# Add an AAAA record. Note that because there are colons in the value
# that the entire parameter list must be quoted:
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "AAAA"
ttl: "7200"
value: "::1"
# Add a SRV record with multiple fields for a service on port 22222
# For more information on SRV records see:
# https://en.wikipedia.org/wiki/SRV_record
- route53:
command: "create"
"zon
|
e": "foo.com"
"record": "_example-service._tcp.foo.com"
"type": "SRV"
"value": ["0 0 22222 host1.foo.com", "0 0 22222 host2.foo.com"]
# Add a TXT record. Note that TXT and SPF records must be surrounded
# by quotes when sent to Route 53:
- route53:
command: "create"
zone: "foo.com"
record: "localhost.foo.com"
type: "TXT"
ttl: "720
|
0"
value: '"bar"'
# Add an alias record that points to an Amazon ELB:
- route53:
command=create
zone=foo.com
record=elb.foo.com
type=A
value="{{ elb_dns_name }}"
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
# Retrieve the details for elb.foo.com
- route53:
command: get
zone: foo.com
record: elb.foo.com
type: A
register: rec
# Delete an alias record using the results from the get command
- route53:
command: delete
zone: foo.com
record: "{{ rec.set.record }}"
ttl: "{{ rec.set.ttl }}"
type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
alias: True
alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
# Add an alias record that points to an Amazon ELB and evaluates it health:
- route53:
command=create
zone=foo.com
record=elb.foo.com
type=A
value="{{ elb_dns_name }}"
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
alias_evaluate_target_health=True
# Add an AAAA record with Hosted Zone ID. Note that because there are colons in the value
# that the entire parameter list must be quote
|
CoderBotOrg/coderbotsrv
|
server/lib/engineauth/utils.py
|
Python
|
gpl-3.0
| 948
| 0.003165
|
def load_config(default_values, user_values):
    if user_values is None:
        return default_values
config = {}
for k, v in user_values.items():
if k in default_values:
if isinstance(v, dict):
cloned = user_values[k].copy()
for key, value in default_values[k].items():
if key is not None and key not in user_values[k] \
or user_values[k][key] == '':
cloned[key] = value
config[k] = cloned
else:
config[k] = v
else:
config[k] = v
for k, v in default_values.items():
if k not in config:
config[k] = v
return config
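# Illustrative example (not part of the original file) of how load_config() merges
# nested defaults: user values win, except that keys missing from a nested dict, or
# set to an empty string, fall back to the defaults. The dicts below are hypothetical.
#
#   defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
#   user = {'db': {'host': 'example.com', 'port': ''}, 'extra': 1}
#   load_config(defaults, user)
#   # -> {'db': {'host': 'example.com', 'port': 5432}, 'extra': 1, 'debug': False}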
def import_class(full_path):
path_split = full_path.split('.')
path = ".".join(path_split[:-1])
klass = path_split[-1:]
mod = __import__(path, fromlist=[klass])
return getattr(mod, klass[0])
|
philgyford/django-ditto
|
devproject/devproject/urls.py
|
Python
|
mit
| 795
| 0.001258
|
from django.conf import settings
from django.conf.urls import static
from django.urls import include, path, re_path
from django.contrib import admin
urlpatterns = [
path(r"admin/", admin.site.urls),
path(r"flickr/", include("ditto.flickr.urls")),
path(r"lastfm/", include("ditto.lastfm.urls")),
|
path(r"pinboard/", include("ditto.pinboard.urls")),
path(r"twitter/", include("ditto.twitter.urls")),
path(r"", include("ditto.core.urls")),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
re_path(r"^__debug__/", include(debug_toolbar.urls)),
]
urlpatterns += static.static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static.static(
    settings.STATIC_URL, document_root=settings.STATIC_ROOT
)
|
nephila/cmsplugin-filer
|
cmsplugin_filer_file/south_migrations/0005_auto__chg_field_filerfile_file.py
|
Python
|
bsd-3-clause
| 10,023
| 0.007982
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FilerFile.file'
db.alter_column(u'cmsplugin_filer_file_filerfile', 'file_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'], null=True, on_delete=models.SET_NULL))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'FilerFile.file'
raise RuntimeError("Cannot reverse this migration. 'FilerFile.file' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'FilerFile.file'
db.alter_column(u'cmsplugin_filer_file_filerfile', 'file_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'cmsplugin_filer_file.filerfile': {
'Meta': {'object_name': 'FilerFile', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'style': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'target_blank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.rel
|
awsdocs/aws-doc-sdk-examples
|
python/test_tools/pinpoint_stubber.py
|
Python
|
apache-2.0
| 7,368
| 0.001629
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Stub functions that are used by the Amazon Pinpoint unit tests.
When tests are run against an actual AWS account, the stubber class does not
set up stubs and passes all calls through to the Boto 3 client.
"""
from test_tools.example_stubber import ExampleStubber
class PinpointStubber(ExampleStubber):
"""
A class that implements a variety of stub functions that are used by the
Amazon Pinpoint unit tests.
The stubbed functions all expect certain parameters to be passed to them as
part of the tests, and will raise errors when the actual parameters differ from
the expected.
"""
def __init__(self, client, use_stubs=True):
"""
Initializes the object with a specific client and configures it for
stubbing or AWS passthrough.
:param client: A Boto 3 Pinpoint client.
:param use_stubs: When True, use stubs to intercept requests. Otherwise,
pass requests through to AWS.
"""
super().__init__(client, use_stubs)
def stub_create_app(self, name):
self.add_response(
'create_app',
expected_params={'CreateApplicationRequest': {'Name': name}},
service_response={
'ApplicationResponse': {
'Arn': 'arn:aws:mobiletargeting:us-west-2:111122223333:apps/d41d8cd98f00b204e9800998ecf8427e',
'Id': 'd41d8cd98f00b204e9800998ecf8427e',
'Name': name
}
}
)
def stub_create_app_error(self, name, error_code):
self.add_client_error(
'create_app',
expected_params={'CreateApplicationRequest': {'Name': name}},
service_error_code=error_code
)
def stub_get_apps(self, apps):
self.add_response(
'get_apps',
expected_params={},
service_response={'ApplicationsResponse': {'Item': apps}}
)
def stub_get_apps_error(self, error_code):
self.add_client_error(
'get_apps',
expected_params={},
service_error_code=error_code
)
def stub_delete_app(self, app):
self.add_response(
'delete_app',
expected_params={'ApplicationId': app['Id']},
service_response={'ApplicationResponse': app}
)
def stub_delete_app_error(self, app, error_code):
self.add_client_error(
'delete_app',
expected_params={'ApplicationId': app['Id']},
service_error_code=error_code
)
def stub_send_email_messages(
self, app_id, sender, to_addresses, char_set, subject,
html_message, text_message, message_ids, error_code=None):
expected_params = {
'ApplicationId': app_id,
'MessageRequest': {
'Addresses': {
to_address: {'ChannelType': 'EMAIL'} for to_address in to_addresses
},
'MessageConfiguration': {
'EmailMessage': {
'FromAddress': sender,
'SimpleEmail': {
'Subject': {'Charset': char_set, 'Data': subject},
'HtmlPart': {'Charset': char_set, 'Data': html_message},
'TextPart': {'Charset': char_set, 'Data': text_message}}}}}}
response = {
'MessageResponse': {
'ApplicationId': app_id,
'Result': {
to_address: {
'MessageId': message_id,
'DeliveryStatus': 'SUCCESSFUL',
'StatusCode': 200
}
for to_address, message_id in zip(to_addresses, message_ids)
}
}
}
self._stub_bifurcator(
'send_messages', expected_params, response, error_code=error_code)
def stub_send_templated_email_messages(
self, app_id, sender, to_addresses, template_name, template_version,
message_ids, error_code=None):
expected_params = {
'ApplicationId': app_id,
'MessageRequest': {
'Addresses': {
to_address: {'ChannelType': 'EMAIL'} for to_address in to_addresses
},
'MessageConfiguration': {'EmailMessage': {'FromAddress': sender}},
'TemplateConfiguration': {
'EmailTemplate': {
'Name': template_name, 'Version': template_version}}}}
response = {
            'MessageResponse': {
'ApplicationId': app_id,
'Result': {
to_address: {
'MessageId': message_id,
                        'DeliveryStatus': 'SUCCESSFUL',
'StatusCode': 200
}
for to_address, message_id in zip(to_addresses, message_ids)
}
}
}
self._stub_bifurcator(
'send_messages', expected_params, response, error_code=error_code)
def stub_send_sms_message(
self, app_id, origination_number, destination_number, message, message_type,
message_id, error_code=None):
expected_params = {
'ApplicationId': app_id,
'MessageRequest': {
'Addresses': {destination_number: {'ChannelType': 'SMS'}},
'MessageConfiguration': {
'SMSMessage': {
'Body': message,
'MessageType': message_type,
'OriginationNumber': origination_number}}}}
response = {'MessageResponse': {
'ApplicationId': app_id,
'Result': {
destination_number: {
'DeliveryStatus': 'SUCCESSFUL',
'StatusCode': 200,
'MessageId': message_id}}}}
self._stub_bifurcator(
'send_messages', expected_params, response, error_code=error_code)
def stub_send_templated_sms_message(
self, app_id, origination_number, destination_number, message_type,
template_name, template_version, message_id, error_code=None):
expected_params = {
'ApplicationId': app_id,
'MessageRequest': {
'Addresses': {destination_number: {'ChannelType': 'SMS'}},
'MessageConfiguration': {
'SMSMessage': {
'MessageType': message_type,
'OriginationNumber': origination_number}},
'TemplateConfiguration': {
'SMSTemplate': {
'Name': template_name, 'Version': template_version}}}}
response = {'MessageResponse': {
'ApplicationId': app_id,
'Result': {
destination_number: {
'DeliveryStatus': 'SUCCESSFUL',
'StatusCode': 200,
'MessageId': message_id}}}}
self._stub_bifurcator(
'send_messages', expected_params, response, error_code=error_code)
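# Illustrative usage sketch (not part of the original file), assuming the behaviour
# described in the module docstring above; the client wiring and names below are
# hypothetical:
#
#   import boto3
#   pinpoint_client = boto3.client('pinpoint')
#   stubber = PinpointStubber(pinpoint_client, use_stubs=True)
#   stubber.stub_create_app('test-app')
#   # The stub only matches this exact request shape; any other call raises.
#   response = pinpoint_client.create_app(
#       CreateApplicationRequest={'Name': 'test-app'})
#   assert response['ApplicationResponse']['Name'] == 'test-app'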
|
stalker314314/nasa_mala_kladionica
|
tests/unit/tests_view_results.py
|
Python
|
gpl-3.0
| 772
| 0.001295
|
# -*- coding: utf-8 -*-
from django.test import Client
from django.urls import reverse
from nmkapp import views
from .nmk_unit_test_case import NmkUnitTestCase
class ResultsTests(NmkUnitTestCase):
|
def test_anon_user(self):
"""
Test result view with anonymous user
"""
self.client = Client()
response = self.client.get(reverse(views.results))
self.assertEqual(response.status_code, 302)
def test_regular_user(self):
"""
Test result view with logged user
"""
self.client = Client()
        self.assertTrue(self.client.login(username='kokan@mail.com', password='12345'))
response = self.client.get(reverse(views.results))
self.assertEqual(response.status_code, 200)
|
sysadminmatmoz/account-invoicing
|
account_invoice_check_total/__manifest__.py
|
Python
|
agpl-3.0
| 607
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2016 Acsone SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
'name': 'Account Invoice Check Total',
'summary': """
Check if the verification total is equal to the bill's total""",
'version': '10.0.1.0.0',
'license': 'AGPL-3',
'author': 'Acsone SA/NV,Odoo Community Association (OCA)',
'website': 'https://acsone.eu/',
    'depends': [
'account',
],
'data': [
'views/account_config_settings.xml',
'security/account_invoice_security.xml',
'views/account_invoice.xml',
],
}
|
ljean/coop_cms
|
coop_cms/forms/fields.py
|
Python
|
bsd-3-clause
| 371
| 0.002695
|
# -*- coding: utf-8 -*-
"""forms"""
import floppyforms.__future__ as floppyforms
class HidableMultipleChoiceField(floppyforms.MultipleChoiceField):
"""
    The MultipleChoiceField doesn't return an <input type="hidden"> when hidden, but an empty string
Overload this field to restore an <input type="hidden">
|
"""
hidden_widget = floppyforms.HiddenInput
|
sniemi/SamPy
|
sandbox/src1/examples/subplot_toolbar.py
|
Python
|
bsd-2-clause
| 222
| 0.018018
|
#!/usr/bin/env python
from pylab import *
fig = figure()
subplot(221)
imshow(rand(100,100))
subplot(222)
imshow(rand(100,100))
subplot(223)
imshow(rand(100,100))
subplot(224)
imshow(rand(100,100))
subplot_tool()
show()
|
vi/enki
|
tests/test_core/test_uisavefiles.py
|
Python
|
gpl-2.0
| 2,949
| 0.001695
|
#!/usr/bin/env python
import unittest
import os.path
import sys
import time
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
import base
from PyQt4.QtTest import QTest
from enki.core.core import core
class Test(base.TestCase):
def _verifyText(self, fileName, text):
with open(os.path.join(self.TEST_FILE_DIR, fileName)) as file_:
actualText = file_.read()
self.assertEqual(text, actualText)
def test_1(self):
# Close all, no modified files
self.createFile('file1.rb', 'asdf\nfdsa')
self.createFile('file2.rb', 'asdf\nfdsa')
self.keyClick('Ctrl+Shift+W')
self.assertIsNone(core.workspace().currentDocument())
@base.inMainLoop
def test_2(self):
# Close all, do not save
self.createFile('file1.rb', 'asdf\nfdsa')
self.createFile('file2.rb', 'fdsa')
self.keyClick('Ctrl+Enter')
self.keyClicks('new text')
self.openDialog(lambda: self.keyClick('Ctrl+Shift+W'),
lambda dialog: self.keyClick('w'))
self.assertIsNone(core.workspace().currentDocument())
self._verifyText('file2.rb', 'fdsa')
@base.inMainLoop
def test_3(self):
# Close all, cancel close
self.createFile('file1.rb', 'asdf\nfdsa')
self.createFile('file2.rb', 'fdsa')
self.keyClick('Ctrl+Enter')
self.keyClicks('new text')
self.openDialog(lambda: self.keyClick('Ctrl+Shift+W'),
lambda dialog: self.keyClick('c'))
self.assertIsNotNone(core.workspace().currentDocument())
@base.inMainLoop
def test_4(self):
# Close all, save
self.createFile('file1.rb', 'asdf\nfdsa')
self.createFile('file2.rb', 'fdsa')
self.keyClick('Ctrl+Enter')
self.keyClicks('new text+')
self.openDialog(lambda: self.keyClick('Ctrl+Shift+W'),
|
lambda dialog: self.keyClick('s'))
self.assertIsNone(core.workspace().currentDocument())
self._verifyText('file2.rb', 'new text+fdsa\n')
@base.inMainLoop
def test_5(self):
# Close all, reject save dialog
self.createFile('file1.rb', 'asdf\nfdsa')
self.createFile('file2.rb', 'fdsa')
        self.keyClick('Ctrl+N') # file without name
self.keyClicks('new text') # but modified
def inUiSaveFilesDialog(dialog):
# open and reject save dialog for file without name
def inSaveFileDialog(saveDialog):
QTest.qWait(4000)
self.keyClick('Esc')
self.openDialog(lambda: self.keyClick('s'),
inSaveFileDialog)
self.openDialog(lambda: self.keyClick('Ctrl+Shift+W'),
inUiSaveFilesDialog)
self.assertIsNotNone(core.workspace().currentDocument())
if __name__ == '__main__':
unittest.main()
|
gpodder/mygpo
|
mygpo/podcasts/migrations/0006_auto_20140614_0836.py
|
Python
|
agpl-3.0
| 701
| 0
|
# encoding: utf8
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [("podcasts", "0005_auto_20140610_1854")]
operations = [
        migrations.AlterField(
model_name="episode",
name="outdated",
            field=models.BooleanField(default=False, db_index=True),
),
migrations.AlterField(
model_name="podcast",
name="outdated",
field=models.BooleanField(default=False, db_index=True),
),
migrations.AlterField(
model_name="episode",
name="guid",
field=models.CharField(max_length=100, null=True),
),
]
|
GroestlCoin/p2pool-grs
|
p2pool/util/p2protocol.py
|
Python
|
gpl-3.0
| 4,140
| 0.006039
|
'''
Generic message-based protocol used by Bitcoin and P2Pool for P2P communication
'''
import hashlib
import struct
from twisted.internet import protocol
from twisted.python import log
import p2pool
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import datachunker, variable
class TooLong(Exception):
pass
class Protocol(protocol.Protocol):
def __init__(self, message_prefix, max_payload_length, traffic_happened=variable.Event(), ignore_trailing_payload=False):
self._message_prefix = message_prefix
self._max_payload_length = max_payload_length
self.dataReceived2 = datachunker.DataChunker(self.dataReceiver())
self.traffic_happened = traffic_happened
self.ignore_trailing_payload = ignore_trailing_payload
def dataReceived(self, data):
self.traffic_happened.happened('p2p/in', len(data))
self.dataReceived2(data)
def dataReceiver(self):
while True:
start = ''
while start != self._message_prefix:
start = (start + (yield 1))[-len(self._message_prefix):]
command = (yield 12).rstrip('\0')
length, = struct.unpack('<I', (yield 4))
if length > self._max_payload_length:
print 'length too large'
continue
checksum = yield 4
payload = yield length
if bitcoin_data.grshash(payload)[:4] != checksum:
print 'invalid hash for', self.transport.getPeer().host, repr(command), length, checksum.encode('hex')
if p2pool.DEBUG:
print __import__('groestlcoin_hash').getHash(payload, len(payload))[:4].encode('hex'), payload.encode('hex')
self.badPeerHappened()
continue
type_ = getattr(self, 'message_' + command, None)
if type_ is None:
if p2pool.DEBUG:
print 'no type for', repr(command)
continue
try:
self.packetReceived(command, type_.unpack(payload, self.ignore_trailing_payload))
except:
print 'RECV', command, payload[:100].encode('hex') + ('...' if len(payload) > 100 else '')
log.err(None, 'Error handling message: (see RECV line)')
self.disconnect()
def packetReceived(self, command, payload2):
handler = getattr(self, 'handle_' + command, None)
if handler is None:
if p2pool.DEBUG:
print 'no handler for', repr(command)
return
if getattr(self, 'connected', True) and not getattr(self, 'disconnecting', False):
handler(**payload2)
def disconnect(self):
if hasattr(self.transport, 'abortConnection'):
# Available since Twisted 11.1
            self.transport.abortConnection()
else:
# This doesn't always close timed out connections! warned about in main
self.transport.loseConnection()
    def badPeerHappened(self):
self.disconnect()
def sendPacket(self, command, payload2):
if len(command) >= 12:
raise ValueError('command too long')
type_ = getattr(self, 'message_' + command, None)
if type_ is None:
raise ValueError('invalid command')
#print 'SEND', command, repr(payload2)[:500]
payload = type_.pack(payload2)
if len(payload) > self._max_payload_length:
raise TooLong('payload too long')
data = self._message_prefix + struct.pack('<12sI', command, len(payload)) + bitcoin_data.grshash(payload)[:4] + payload
self.traffic_happened.happened('p2p/out', len(data))
self.transport.write(data)
def __getattr__(self, attr):
prefix = 'send_'
if attr.startswith(prefix):
command = attr[len(prefix):]
return lambda **payload2: self.sendPacket(command, payload2)
#return protocol.Protocol.__getattr__(self, attr)
raise AttributeError(attr)
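# Illustrative sketch of how a concrete protocol would hang off this class
# (the message type and field names below are hypothetical; the real p2pool
# protocols define message_* attributes whose objects expose pack()/unpack()):
#
#   class ExampleProtocol(Protocol):
#       message_ping = some_packable_type    # assumed: exposes .pack(dict) / .unpack(bytes)
#       def handle_ping(self, nonce):        # called by packetReceived() with the unpacked fields
#           self.send_ping(nonce=nonce)      # __getattr__ turns send_ping into sendPacket('ping', ...)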
|
alonisser/Open-Knesset
|
laws/admin.py
|
Python
|
bsd-3-clause
| 5,183
| 0.001929
|
from django.db.models import Q
from import_export.admin import ImportExportModelAdmin
from models import Vote, Law, PrivateProposal, KnessetProposal, GovProposal, Bill, GovLegislationCommitteeDecision
from laws.management.commands.scrape_votes import Command as ScrapeVotesCommand
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
class MissingDataVotesFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = _('Missing data votes')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'is_missing_data_vote'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('is_missing_data_vote', _('Vote has missing data')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
# Compare the requested value
# to decide how to filter the queryset.
if self.value() == 'is_missing_data_vote':
return queryset.filter(Q(votes_count=0) | Q(votes_count=None))
else:
return queryset
class VoteAdmin(ImportExportModelAdmin):
# filter_horizontal = ('voted_for','voted_against','voted_abstain','didnt_vote')
list_display = (
'__unicode__', 'short_summary', 'full_text_link', 'votes_count', 'for_votes_count', 'against_votes_count',
'abstain_votes_count')
search_fields = ('title', 'summary', 'full_text', 'id', 'src_id')
list_filter = (MissingDataVotesFilter, )
def update_vote(self, request, queryset):
vote_count = queryset.count()
for vote in queryset:
vote.update_vote_properties()
self.message_user(request, "successfully updated {0} votes".format(vote_count))
update_vote.short_description = 'update vote properties and calculations'
def recreate_vote(self, request, queryset):
recreated_votes = ScrapeVotesCommand().recreate_objects(queryset.values_list('pk', flat=True))
recreated_vote_ids_string = ', '.join([str(v.pk) for v in recreated_votes])
self.message_user(request, "su
|
ccessfully recreated {0} votes: {1}".format(len(recreated_votes),
|
recreated_vote_ids_string))
recreate_vote.short_description = "recreate vote by deleting and then getting fresh data from knesset api"
actions = ['update_vote', 'recreate_vote']
admin.site.register(Vote, VoteAdmin)
class LawAdmin(ImportExportModelAdmin):
search_fields = ('title',)
list_display = ('title', 'merged_into')
admin.site.register(Law, LawAdmin)
class PrivateProposalAdmin(admin.ModelAdmin):
pass
admin.site.register(PrivateProposal, PrivateProposalAdmin)
class KnessetProposalAdmin(admin.ModelAdmin):
pass
admin.site.register(KnessetProposal, KnessetProposalAdmin)
class GovProposalAdmin(admin.ModelAdmin):
search_fields = ('title', 'booklet_number')
list_display = ('bill', 'booklet_number', 'knesset_id', 'date')
list_filter = ('knesset_id',)
admin.site.register(GovProposal, GovProposalAdmin)
class MissingLawListFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = _('Missing Laws')
# Parameter for the filter that will be used in the URL query.
parameter_name = 'is_missing_law'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('missing_law', _('Has Missing Law')),
# ('90s', _('in the nineties')),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
# Compare the requested value (either '80s' or '90s')
# to decide how to filter the queryset.
if self.value() == 'missing_law':
return queryset.filter(law=None)
else:
return queryset
class BillAdmin(admin.ModelAdmin):
list_display = ('law', 'title', 'stage')
search_fields = ('title',)
list_filter = ('stage', MissingLawListFilter)
admin.site.register(Bill, BillAdmin)
class GovLegislationCommitteeDecisionAdmin(admin.ModelAdmin):
pass
admin.site.register(GovLegislationCommitteeDecision, GovLegislationCommitteeDecisionAdmin)
|
kambysese/mne-python
|
mne/__init__.py
|
Python
|
bsd-3-clause
| 5,932
| 0.000337
|
"""MNE software for MEG and EEG data analysis."""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.devN' where N is an integer.
#
from ._version import __version__
# have to import verbose first since it's needed by many things
from .utils import (set_log_level, set_log_file, verbose, set_config,
get_config, get_config_path, set_cache_dir,
set_memmap_min_size, grand_average, sys_info, open_docs)
from .io.pick import (pick_types, pick_channels,
pick_channels_regexp, pick_channels_forward,
pick_types_forward, pick_channels_cov,
pick_channels_evoked, pick_info,
channel_type, channel_indices_by_type)
from .io.base import concatenate_raws
from .io.meas_info import create_info, Info
from .io.proj import Projection
from .io.kit import read_epochs_kit
from .io.eeglab import read_epochs_eeglab
from .io.reference import (set_eeg_reference, set_bipolar_reference,
add_reference_channels)
from .io.what import what
from .bem import (make_sphere_model, make_bem_model, make_bem_solution,
read_bem_surfaces, write_bem_surfaces, write_head_bem,
read_bem_solution, write_bem_solution)
from .cov import (read_cov, write_cov, Covariance, compute_raw_covariance,
compute_covariance, whiten_evoked, make_ad_hoc_cov)
from .event import (read_events, write_events, find_events, merge_events,
pick_events, make_fixed_length_events, concatenate_events,
find_stim_steps, AcqParserFIF)
from .forward import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, Forward,
write_forward_solution, make_forward_solution,
convert_forward_solution, make_field_map,
make_forward_dipole, use_coil_def)
from .source_estimate import (read_source_estimate,
SourceEstimate, VectorSourceEstimate,
VolSourceEstimate, VolVectorSourceEstimate,
MixedSourceEstimate, MixedVectorSourceEstimate,
grade_to_tris,
spatial_src_adjacency,
spatial_tris_adjacency,
spatial_dist_adjacency,
spatial_inter_hemi_adjacency,
spatio_temporal_src_adjacency,
spatio_temporal_tris_adjacency,
spatio_temporal_dist_adjacency,
extract_label_time_course, stc_near_sensors)
from .surface import (read_surface, write_surface, decimate_surface, read_tri,
read_morph_map, get_head_surf, get_meg_helmet_surf,
dig_mri_distances)
from .morph import (SourceMorph, read_source_morph, grade_to_vertices,
compute_source_morph)
from .source_space import (read_source_spaces, vertex_to_mni,
head_to_mni, head_to_mri, read_talxfm,
write_source_spaces, setup_source_space,
setup_volume_source_space, SourceSpaces,
add_source_space_distances, morph_source_spaces,
get_volume_labels_from_aseg,
get_volume_labels_from_src, read_freesurfer_lut)
from .annotations import (Annotations, read_annotations, annotations_from_events,
events_from_annotations)
from .epochs import (BaseEpochs, Epochs, EpochsArray, read_epochs,
concatenate_epochs, make_fixed_length_epochs)
from .evoked import Evoked, EvokedArray, read_evokeds, write_evokeds, combine_evoked
from .label import (read_label, label_sign_flip,
write_label, stc_to_label, grow_labels, Label, split_label,
BiHemiLabel, read_labels_from_annot, write_labels_to_annot,
random_parcellation, morph_labels, labels_to_stc)
from .misc import parse_config, read_reject_parameters
from .coreg import (create_default_subject, scale_bem, scale_mri, scale_labels,
scale_source_space)
from .transforms import (read_trans, write_trans,
transform_surface_to, Transform)
from .proj import (read_proj, write_proj, compute_proj_epochs,
compute_proj_evoked, compute_proj_raw, sensitivity_map)
from .dipole import read_dipole, Dipole, DipoleFixed, fit_dipole
from .channels import (equalize_channels, rename_channels, find_layout,
read_vectorview_selection)
from .report import Report, open_report
from .io import read_epochs_fieldtrip, read_evoked_fieldtrip, read_evokeds_mff
from .rank import compute_rank
from . import beamformer
from . import channels
from . import chpi
from . import commands
from . import connectivity
from . import coreg
from . import cuda
from . import datasets
from . import dipole
from . import epochs
from . import event
from . import externals
from . import io
from . import filter
from . import gui
from . import inverse_sparse
from . import minimum_norm
from . import preprocessing
from . import simulation
from . import stats
from . import surface
from . import time_frequency
from . import viz
from . import decoding
# deprecations
from .utils import deprecated_alias
deprecated_alias('read_selection', read_vectorview_selection)
# initialize logging
set_log_level(None, False)
set_log_file()
|
bsmr-eve/Pyfa
|
eos/effects/subsystembonusgallentepropulsion2agility.py
|
Python
|
gpl-3.0
| 331
| 0.003021
|
# subsystemBonusGallentePropulsion2Agility
#
# Used by:
# Subsystem: Proteus Propulsion - Hyperspatial Optimization
type = "passive"
def handler(fit, src, context):
    fit.ship.boostItemAttr("agility", src.getModifiedItemAttr("subsystemBonusGallentePropulsion2"),
skill="Gallente Propulsion Systems")
|
crhaithcock/RushHour
|
RushHourPy/numpy_utilities.py
|
Python
|
cc0-1.0
| 413
| 0.031477
|
import numpy as np
vec_bitstring_3 = np.vectorize(lambda x: np.binary_repr(x,width=3) )
def board_to_int(v):
t = vec_bitstring_3(v)
return int(''.join(np.apply_along_axis(lambda x: ''.join(x), 1,t)),2)
def int_to_board(i):
#i = '154444257952488798331863040'
s = bin(int(i))[2:].zfill(108)
v = np.array([int(s[i:i+3],2) for i in range(0,len(s),3)],dtype=int)
return v.reshape((6,6))
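# --- Illustrative round-trip check (not part of the original module) ---
# A 6x6 board of small ints (0-7) packs into a single 108-bit integer and back.
if __name__ == '__main__':
    example = np.zeros((6, 6), dtype=int)
    example[0, 0] = 5
    example[5, 5] = 7
    packed = board_to_int(example)
    assert (int_to_board(packed) == example).all()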
|
ElitosGon/medgoproject
|
medgointranet/migrations/0013_atencion_formulario.py
|
Python
|
apache-2.0
| 507
| 0.001976
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-07 02:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('medgointranet', '0012_motivoanulacion'),
]
operations = [
        migrations.AddField(
model_name='atencion',
name='formulario',
            field=models.BooleanField(default=False, verbose_name='¿Doctor completo formulario?'),
),
]
|
yeti-platform/yeti
|
plugins/feeds/public/alienvault_ip_reputation.py
|
Python
|
apache-2.0
| 1,699
| 0
|
import logging
from datetime import timedelta
from core import Feed
from core.errors import ObservableValidationError
from core.observables import Ip
class AlienVaultIPReputation(Feed):
default_values = {
"frequency": timedelta(hours=4),
"name": "AlienVaultIPReputation",
"source": "http://reputation.alienvault.com/reputation.data",
"description": "Reputation IP generated by Alienvault",
}
def update(self):
for index, line in self.update_csv(
delimiter="#",
comment=None,
header=None,
names=[
"IP",
"number_1",
"number_2",
"Tag",
"Country",
"City",
"Coord",
"number_3",
],
):
self.analyze(line)
def analyze(self, item):
try:
context = dict(source=self.name)
ip_str = item["IP"]
category = item["Tag"]
country = item["Country"]
ip = None
try:
ip = Ip.get_or_create(value=ip_str)
except ObservableValidationError as e:
logging.error(e)
return False
ip.add_source(self.name)
context["country"] = country
context["threat"] = category
context["reliability"] = item["number_1"]
context["risk"] = item["number_2"]
ip.tag(category)
ip.add_context(context)
except Exception as e:
logging.error("Error to process the item %s %s" % (item, e))
return False
return True
|
airelil/pywinauto-64
|
pywinauto/unittests/test_common_controls.py
|
Python
|
lgpl-2.1
| 33,312
| 0.005734
|
# GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from __future__ import print_function
"Tests for classes in controls\common_controls.py"
__revision__ = "$Revision: 234 $"
import sys
import ctypes
import unittest
import time
import pprint
import pdb
import os
sys.path.append(".")
from pywinauto import six
from pywinauto.controls import common_controls
from pywinauto.controls.common_controls import *
from pywinauto.win32structures import RECT
from pywinauto.controls import WrapHandle
#from pywinauto.controls.HwndWrapper import HwndWrapper
from pywinauto import findbestmatch
from pywinauto.SendKeysCtypes import is_x64
from pywinauto.RemoteMemoryBlock import AccessDenied
from pywinauto.RemoteMemoryBlock import RemoteMemoryBlock
controlspy_folder = os.path.join(
os.path.dirname(__file__), "..\..\controlspy0998")
if is_x64():
controlspy_folder = os.path.join(controlspy_folder, 'x64')
class RemoteMemoryBlockTestCases(unittest.TestCase):
def test__init__fail(self):
self.assertRaises(AccessDenied, RemoteMemoryBlock, 0)
def test__init__fail(self):
self.assertRaises(AccessDenied, RemoteMemoryBlock, 0)
class ListViewTestCases(unittest.TestCase):
"Unit tests for the ListViewWrapper class"
def setUp(self):
"""Start the application set some data and ensure the application
is in the state we want it."""
# start the application
from pywinauto.application import Application
app = Application()
app_path = os.path.join(controlspy_folder, "List View.exe")
app.start_(app_path)
#print('app_path: ' + app_path)
self.texts = [
("Mercury", '57,910,000', '4,880', '3.30e23'),
("Venus", '108,200,000', '12,103.6', '4.869e24'),
("Earth", '149,600,000', '12,756.3', '5.9736e24'),
("Mars", '227,940,000', '6,794', '6.4219e23'),
("Jupiter", '778,330,000', '142,984', '1.900e27'),
("Saturn", '1,429,400,000', '120,536', '5.68e26'),
("Uranus", '2,870,990,000', '51,118', '8.683e25'),
("Neptune", '4,504,000,000', '49,532', '1.0247e26'),
("Pluto", '5,913,520,000', '2,274', '1.27e22'),
]
self.app = app
self.dlg = app.MicrosoftControlSpy #top_window_()
self.ctrl = app.MicrosoftControlSpy.ListView.WrapperObject()
#self.dlg.MenuSelect("Styles")
# select show selection always!
#app.ControlStyles.ListBox1.TypeKeys("{UP}" * 26 + "{SPACE}")
#self.app.ControlStyles.ListBox1.Select("LVS_SHOWSELALWAYS")
#self.app.ControlStyles.ApplyStylesSetWindowLong.Click()
#self.app.ControlStyles.SendMessage(win32defines.WM_CLOSE)
def tearDown(self):
"Close the application after tests"
# close the application
self.dlg.SendMessage(win32defines.WM_CLOSE)
def testFriendlyClass(self):
"Make sure the ListView friendly class is set correctly"
self.assertEquals (self.ctrl.FriendlyClassName(), "ListView")
def testColumnCount(self):
"Test the ListView ColumnCount method"
self.assertEquals (self.ctrl.ColumnCount(), 4)
def testItemCount(self):
"Test the ListView ItemCount method"
self.assertEquals (self.ctrl.ItemCount(), 9)
def testItemText(self):
"Test the ListView item.Text property"
item = self.ctrl.GetItem(1)
self.assertEquals(item['text'], "Venus")
def testItems(self):
"Test the ListView Items method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
for i, item in enumerate(self.ctrl.Items()):
self.assertEquals(item['text'], flat_texts[i])
def testTexts(self):
"Test the ListView Texts method"
flat_texts = []
for row in self.texts:
flat_texts.extend(row)
self.assertEquals(flat_texts, self.ctrl.Texts()[1:])
def testGetItem(self):
"Test the ListView GetItem method"
for row in range(self.ctrl.ItemCount()):
for col in range(self.ctrl.ColumnCount()):
self.assertEquals(
self.ctrl.GetItem(row, col)['text'], self.texts[row][col])
def testGetItemText(self):
"Test the ListView GetItem method - with text this time"
for text in [row[0] for row in self.texts]:
self.assertEquals(
self.ctrl.GetItem(text)['text'], text)
self.assertRaises(ValueError, self.ctrl.GetItem, "Item not in this list")
def testColumn(self):
"Test the ListView Columns method"
cols = self.ctrl.Columns()
self.assertEqual (len(cols), self.ctrl.ColumnCount())
# TODO: add more checking of column values
#for col in cols:
# print(col)
def testGetSelectionCount(self):
"Test the ListView GetSelectedCount method"
self.assertEquals(self.ctrl.GetSelectedCount(), 0)
self.ctrl.Select(1)
self.ctrl.Select(7)
self.assertEquals(self.ctrl.GetSelectedCount(), 2)
# def testGetSelectionCount(self):
# "Test the ListView GetSelectedCount method"
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 0)
#
# self.ctrl.Select(1)
# self.ctrl.Select(7)
#
# self.assertEquals(self.ctrl.GetSelectedCount(), 2)
def testIsSelected(self):
"Test ListView IsSelected for some items"
# ensure that the item is not selected
self.assertEquals(self.ctrl.IsSelected(1), False)
# select an item
self.ctrl.Select(1)
# now ensure that the item is selected
self.assertEquals(self.ctrl.IsSelected(1), True)
def _testFocused(self):
"Test checking the focus of some ListView items"
print("Select something quick!!")
import time
time.sleep(3)
#self.ctrl.Select(1)
print(self.ctrl.IsFocused(0))
print(self.ctrl.IsFocused(1))
print(self.ctrl.IsFocused(2))
print(self.ctrl.IsFocused(3))
print(self.ctrl.IsFocused(4))
print(self.ctrl.IsFocused(5))
#for col in cols:
# print(col)
def testSelect(self):
"Test ListView Selecting some items"
self.ctrl.Select(1)
self.ctrl.Select(3)
self.ctrl.Select(4)
self.assertRaises(IndexError, self.ctrl.Deselect, 23)
self.assertEquals(self.ctrl.GetSelectedCount(), 3)
def testSelectText(self):
"Test ListView Selecting some items"
self.ctrl.Select("Venus")
self.ctrl.Select("Jupiter")
self.ctrl.Select("Uranus")
self.assertRaises(ValueError, self.ctrl.Deselect, "Item not in list")
self.assertEquals(self.ctrl.GetSelectedCount(), 3)
def testDeselect(self):
"Test ListView Selecting some items"
self.ctrl.Select(1)
self.ctrl.Select(4)
self.ctrl.Deselect(3)
self.ctrl.Deselect(4)
self.assertRaises(IndexError, self.ctrl.Deselect, 23)
self.assertEquals(self.ctrl.GetSelectedCount(), 1)
def testGetProperties(self):
"Test getting the properties for the listview control"
props = self.ctrl.GetProperties()
self.assertEquals(
"ListView", props['FriendlyClassName'])
self.assertEquals(
|
knowmetools/km-api
|
km_api/functional_tests/know_me/profile/profile_topics/test_profile_topic_list.py
|
Python
|
apache-2.0
| 2,774
| 0
|
from rest_framework import status
from test_utils import serialized_time
def test_get_profile_topics(
api_client, enable_premium_requirement, profile_topic_factory, user_factory
):
"""
Premium users should be able to list their own profile topics.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
    api_client.log_in(user.primary_email.email, password)
topic = profile_topic_factory(profile__km_user__user=user)
url = f"/know-me/profile/profiles/{topic.profile.pk}/topics/"
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
assert response.json() == [
{
"id": topic.pk,
"url": api_client.bui
|
ld_full_url(
f"/know-me/profile/profile-topics/{topic.pk}/"
),
"created_at": serialized_time(topic.created_at),
"updated_at": serialized_time(topic.updated_at),
"is_detailed": topic.is_detailed,
"items_url": api_client.build_full_url(
f"/know-me/profile/profile-topics/{topic.pk}/items/"
),
"name": topic.name,
"permissions": {"read": True, "write": True},
"profile_id": topic.profile.pk,
}
]
def test_post_create_topic(
api_client, enable_premium_requirement, profile_factory, user_factory
):
"""
Premium users should be able to add new topics to their own
profiles.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
profile = profile_factory(km_user__user=user)
url = f"/know-me/profile/profiles/{profile.pk}/topics/"
data = {"name": "Test Topic"}
response = api_client.post(url, data)
assert response.status_code == status.HTTP_201_CREATED
assert response.json()["name"] == data["name"]
def test_put_topic_order(
api_client, enable_premium_requirement, profile_topic_factory, user_factory
):
"""
Premium users should be able to sort their own profile topics with
respect to the parent profile.
"""
password = "password"
user = user_factory(has_premium=True, password=password)
api_client.log_in(user.primary_email.email, password)
t1 = profile_topic_factory(profile__km_user__user=user)
t2 = profile_topic_factory(profile=t1.profile)
url = f"/know-me/profile/profiles/{t1.profile.pk}/topics/"
data = {"order": [t2.pk, t1.pk]}
response = api_client.put(url, data)
assert response.status_code == status.HTTP_200_OK
# The collection should now be sorted
topics = api_client.get(url).json()
assert list(map(lambda topic: topic["id"], topics)) == data["order"]
|
facebook/fbthrift
|
thrift/test/py/TestSerializationSorted.py
|
Python
|
apache-2.0
| 3,066
| 0.000326
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import textwrap
import unittest
# The sorted one.
from SortKeys.ttypes import SortedStruct, NegativeId
from SortSets.ttypes import SortedSetStruct
from thrift.protocol import TSimpleJSONProtocol
from thrift.transport.TTransport import TMemoryBuffer
def writeToJSON(obj):
trans = TMemoryBuffer()
proto = TSimpleJSONProtocol.TSimpleJSONProtocol(trans)
obj.write(proto)
return trans.getvalue()
def readStructFromJSON(jstr, struct_type):
stuff = struct_type()
trans = TMemoryBuffer(jstr)
proto = TSimpleJSONProtocol.TSimpleJSONProtocol(trans, struct_type.thrift_spec)
stuff.read(proto)
return stuff
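# Illustrative round trip with the helpers above (the struct value is hypothetical):
#   blob = writeToJSON(SortedStruct(aMap={"a": 1.0}))
#   same = readStructFromJSON(blob, SortedStruct)    # same.aMap == {"a": 1.0}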
class TestSortKeys(unittest.TestCase):
def testSorted(self):
static_struct = SortedStruct(aMap={"b": 1.0, "a": 1.0})
unsorted_blob = b'{\n "aMap": {\n "b": 1.0,\n "a": 1.0\n }\n}'
sorted_blob = b'{\n "aMap": {\n "a": 1.0,\n "b": 1.0\n }\n}'
sorted_struct = readStructFromJSON(unsorted_blob, SortedStruct)
        blob = writeToJSON(sorted_struct)
        self.assertNotEqual(blob, unsorted_blob)
self.assertEqual(blob, sorted_blob)
self.assertEqual(static_struct, sorted_struct)
def testSetSorted(self):
unsorted_set = set(["5", "4", "3", "2", "1", "0"])
static_struct = SortedSetStruct(aSet=unsorted_set)
unsorted_blob = (
textwrap.dedent(
"""\
{{
"aSet": [
"{}"
]
}}"""
)
.format('",\n "'.join(unsorted_set))
.encode()
)
sorted_blob = (
textwrap.dedent(
"""\
{{
"aSet": [
"{}"
]
}}"""
)
.format('",\n "'.join(sorted(unsorted_set)))
.encode()
)
sorted_struct = readStructFromJSON(unsorted_blob, SortedSetStruct)
blob = writeToJSON(sorted_struct)
self.assertNotEqual(blob, unsorted_blob)
self.assertEqual(blob, sorted_blob)
self.assertEqual(static_struct, sorted_struct)
def testNegativeId(self):
obj = NegativeId()
self.assertEqual(obj.field1, 1)
self.assertEqual(obj.field2, 2)
self.assertEqual(obj.field3, 3)
|
jjas0nn/solvem
|
tensorflow/lib/python2.7/site-packages/tensorflow/python/debug/lib/stepper_test.py
|
Python
|
mit
| 32,092
| 0.002586
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests of the tfdbg Stepper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.debug.lib.stepper import NodeStepper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class StepperTest(test_util.TensorFlowTestCase):
def setUp(self):
self.a = variables.Variable(2.0, name="a")
self.b = variables.Variable(3.0, name="b")
self.c = math_ops.multiply(self.a, self.b, name="c") # Should be 6.0.
self.d = math_ops.multiply(self.a, self.a, name="d") # Should be 4.0.
self.e = math_ops.multiply(self.d, self.c, name="e") # Should be 24.0.
self.f_y = constant_op.constant(0.30, name="f_y")
self.f = math_ops.div(self.b, self.f_y, name="f") # Should be 10.0.
    # The three nodes x, y and z form a graph with "cross-links" in it. I.e., x
# and y are both direct inputs to z, but x is also a direct input to y.
self.x = variables.Variable(2.0, name="x") # Should be 2.0
self.y = math_ops.negative(self.x, name="y") # Should be -2.0.
self.z = math_ops.multiply(self.x, self.y, name="z") # Should be -4.0.
self.sess = session.Session()
self.sess.run(variables.global_variables_initializer())
self.sess = session.Session()
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
def testContToFetchNotInTransitiveClosureShouldError(self):
stepper = NodeStepper(self.sess, "e:0")
sorted_nodes = stepper.sorted_nodes()
self.assertEqual(7, len(sorted_nodes))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("a/read"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("b/read"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("c"))
self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("d"))
self.assertLess(sorted_nodes.index("d"), sorted_nodes.index("e"))
self.assertLess(sorted_nodes.index("c"), sorted_nodes.index("e"))
self.assertSetEqual(
{"e:0", "d:0", "c:0", "a/read:0", "b
|
/read:0", "b:0", "a:0"},
set(stepper.closure_elements()))
with self.assertRaisesRegexp(
ValueError,
"Target \"f:0\" is not in the transi
|
tive closure for the fetch of the "
"stepper"):
stepper.cont("f:0")
def testContToNodeNameShouldReturnTensorvalue(self):
stepper = NodeStepper(self.sess, "e:0")
cont_result = stepper.cont("c")
self.assertAllClose(6.0, cont_result)
def testUsingNamesNotUsingIntermediateTensors(self):
stepper = NodeStepper(self.sess, "e:0")
# The first cont() call should have used no feeds.
result = stepper.cont("c:0")
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
# The second cont() call should have used the tensor handle from the
# previous cont() call.
result = stepper.cont("e:0")
self.assertAllClose(24.0, result)
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
def testUsingNodesNotUsingIntermediateTensors(self):
stepper = NodeStepper(self.sess, self.e)
# There should be no handles before any cont() calls.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
# Before the cont() call, the stepper should not have access to the value
# of c:0.
with self.assertRaisesRegexp(
ValueError,
"This stepper instance does not have access to the value of tensor "
"\"c:0\""):
stepper.get_tensor_value("c:0")
# Using the node/tensor itself, instead of the name str, should work on
# cont().
result = stepper.cont(self.c)
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
self.assertEqual(["c:0"], stepper.handle_names())
self.assertEqual({"c"}, stepper.handle_node_names())
# After the cont() call, the stepper should have access to the value of c:0
# via a tensor handle.
self.assertAllClose(6.0, stepper.get_tensor_value("c:0"))
result = stepper.cont(self.e)
self.assertAllClose(24.0, result)
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
def testIsFeedableShouldGiveCorrectAnswers(self):
stepper = NodeStepper(self.sess, self.e)
self.assertTrue(stepper.is_feedable("a/read:0"))
self.assertTrue(stepper.is_feedable("b/read:0"))
self.assertTrue(stepper.is_feedable("c:0"))
self.assertTrue(stepper.is_feedable("d:0"))
def testOverrideValue(self):
stepper = NodeStepper(self.sess, self.e)
result = stepper.cont(self.c)
self.assertAllClose(6.0, result)
self.assertEqual({}, stepper.last_feed_types())
# There should be no overrides before any cont() calls.
self.assertEqual([], stepper.override_names())
# Calling cont() on c again should lead to use of the handle.
result = stepper.cont(self.c)
self.assertAllClose(6.0, result)
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
# Override c:0.
stepper.override_tensor("c:0", 7.0)
# After the overriding, calling get_tensor_value() on c:0 should yield the
# overriding value.
self.assertEqual(7.0, stepper.get_tensor_value("c:0"))
# Now c:0 should have only an override value, but no cached handle, because
# the handle should have been invalidated.
self.assertEqual([], stepper.handle_names())
self.assertSetEqual(set(), stepper.handle_node_names())
self.assertEqual(["c:0"], stepper.override_names())
# Run a downstream tensor after the value override.
result = stepper.cont(self.e)
self.assertAllClose(28.0, result) # Should reflect the overriding value.
# Should use override, instead of the handle.
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_OVERRIDE
}, stepper.last_feed_types())
def testOverrideValueTwice(self):
stepper = NodeStepper(self.sess, self.e)
# Override once.
stepper.override_tensor("c:0", 7.0)
self.assertAllClose(28.0, stepper.cont(self.e))
self.assertEqual({
"c:0": NodeStepper.FEED_TYPE_OVERRIDE
}, stepper.last_feed_types())
self.assertEqual(["e:0"], stepper.handle_names())
self.assertSetEqual({"e"}, stepper.handle_node_names())
self.assertEqual(["c:0"], stepper.override_names())
# Calling cont(self.e) again. This time the cached tensor handle of e
# should be used.
self.assertEqual(28.0, stepper.cont(self.e))
self.assertEqual({
"e:0": NodeStepper.FEED_TYPE_HANDLE
}, stepper.last_feed_types())
# Override c again. This should have invalidated the cache for e.
stepper.override_tensor("c:0", 8.0)
self.assertEqual([], stepper.handle_names())
self.assertEqual(set(), stepper.handle_node_names())
self.assertEqual(["c:0"], stepper.override_names())
self.assertAllClose(32.0, ste
|
jellis18/enterprise
|
enterprise/signals/utils.py
|
Python
|
mit
| 29,066
| 0.001239
|
# utils.py
"""
Utilities module containing various useful
functions for use in other modules.
"""
import logging
import numpy as np
import scipy.linalg as sl
import scipy.sparse as sps
import scipy.special as ss
from pkg_resources import Requirement, resource_filename
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from sksparse.cholmod import cholesky
import enterprise
from enterprise import constants as const
from enterprise import signals as sigs # noqa: F401
from enterprise.signals.gp_bases import ( # noqa: F401
createfourierdesignmatrix_dm,
createfourierdesignmatrix_env,
createfourierdesignmatrix_eph,
createfourierdesignmatrix_ephem,
createfourierdesignmatrix_red,
)
from enterprise.signals.gp_priors import powerlaw, turnover # noqa: F401
from enterprise.signals.parameter import function
logger = logging.getLogger(__name__)
def get_coefficients(pta, params, n=1, phiinv_method="cliques", common_sparse=False):
ret = []
TNrs = pta.get_TNr(params)
TNTs = pta.get_TNT(params)
phiinvs = pta.get_phiinv(params, logdet=False, method=phiinv_method)
# ...repeated code in the two if branches... refactor at will!
if pta._commonsignals:
if common_sparse:
Sigma = sps.block_diag(TNTs, "csc") + sps.csc_matrix(phiinvs)
TNr = np.concatenate(TNrs)
ch = cholesky(Sigma)
mn = ch(TNr)
Li = sps.linalg.inv(ch.L()).toarray()
else:
Sigma = sl.block_diag(*TNTs) + phiinvs
TNr = np.concatenate(TNrs)
u, s, _ = sl.svd(Sigma)
mn = np.dot(u, np.dot(u.T, TNr) / s)
Li = u * np.sqrt(1 / s)
for j in range(n):
b = mn + np.dot(Li, np.random.randn(Li.shape[0]))
pardict, ntot = {}, 0
for i, model in enumerate(pta.pulsarmodels):
for sig in model._signals:
if sig.signal_type in ["basis", "common basis"]:
nb = sig.get_basis(params=params).shape[1]
if nb + ntot > len(b):
raise IndexError(
"Missing some parameters! " "You need to disable GP " "basis column reuse."
)
pardict[sig.name + "_coefficients"] = b[ntot : nb + ntot]
ntot += nb
if len(ret) <= j:
ret.append(params.copy())
ret[j].update(pardict)
return ret[0] if n == 1 else ret
else:
for i, model in enumerate(pta.pulsarmodels):
phiinv, d, TNT = phiinvs[i], TNrs[i], TNTs[i]
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
u, s, _ = sl.svd(Sigma)
mn = np.dot(u, np.dot(u.T, d) / s)
Li = u * np.sqrt(1 / s)
except np.linalg.LinAlgError:
Q, R = sl.qr(Sigma)
Sigi = sl.solve(R, Q.T)
mn = np.dot(Sigi, d)
u, s, _ = sl.svd(Sigi)
Li = u * np.sqrt(1 / s)
for j in range(n):
b = mn + np.dot(Li, np.random.randn(Li.shape[0]))
pardict, ntot = {}, 0
for sig in model._signals:
if sig.signal_type == "basis":
nb = sig.get_basis(params=params).shape[1]
if nb + ntot > len(b):
raise IndexError(
"Missing some parameters! " "You need to disable GP " "basis column reuse."
)
pardict[sig.name + "_coefficients"] = b[ntot : nb + ntot]
ntot += nb
if len(ret) <= j:
ret.append(params.copy())
ret[j].update(pardict)
return ret[0] if n == 1 else ret
class KernelMatrix(np.ndarray):
def __new__(cls, init):
if isinstance(init, int):
ret = np.zeros(init, "d").view(cls)
else:
ret = init.view(cls)
if ret.ndim == 2:
ret._cliques = -1 * np.ones(ret.shape[0])
ret._clcount = 0
return ret
# see PTA._setcliques
def _setcliques(self, idxs):
allidx = set(self._cliques[idxs])
maxidx = max(allidx)
if maxidx == -1:
self._cliques[idxs] = self._clcount
self._clcount = self._clcount + 1
else:
self._cliques[idxs] = maxidx
if len(allidx) > 1:
self._cliques[np.in1d(self._cliques, allidx)] = maxidx
def add(self, other, idx):
if other.ndim == 2 and self.ndim == 1:
self = KernelMatrix(np.diag(self))
if self.ndim == 1:
self[idx] += other
else:
if other.ndim == 1:
self[idx, idx] += other
else:
self._setcliques(idx)
idx = (idx, idx) if isinstance(idx, slice) else (idx[:, None], idx)
self[idx] += other
return self
def set(self, other, idx):
if other.ndim == 2 and self.ndim == 1:
self = KernelMatrix(np.diag(self))
if self.ndim == 1:
self[idx] = other
else:
if other.ndim == 1:
self[idx, idx] = other
else:
self._setcliques(idx)
idx = (idx, idx) if isinstance(idx, slice) else (idx[:, None], idx)
self[idx] = other
return self
def inv(self, logdet=False):
if self.ndim == 1:
inv = 1.0 / self
if logdet:
return inv, np.sum(np.log(self))
else:
return inv
else:
try:
cf = sl.cho_factor(self)
inv = sl.cho_solve(cf, np.identity(cf[0].shape[0]))
if logdet:
ld = 2.0 * np.sum(np.log(np.diag(cf[0])))
except np.linalg.LinAlgError:
u, s, v = np.linalg.svd(self)
inv = np.dot(u / s, u.T)
if logdet:
ld = np.sum(np.log(s))
if logdet:
return inv, ld
else:
return inv
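# Illustrative sketch of KernelMatrix.inv (values are hypothetical, not from the original module):
# a 1-D KernelMatrix behaves as a diagonal kernel, so
#   K = KernelMatrix(np.array([4.0, 9.0]))
#   Kinv, ld = K.inv(logdet=True)   # Kinv == [0.25, 1/9], ld == log(4) + log(9)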
def create_stabletimingdesignmatrix(designmat, fastDesign=True):
"""
Stabilize the timing-model design matrix.
:param designmat: Pulsar timing model design matrix
:param fastDesign: Stabilize the design matrix the fast way [True]
:return: Mm: Stabilized timing model design matrix
"""
Mm = designmat.copy()
if fastDesign:
norm = np.sqrt(np.sum(Mm ** 2, axis=0))
Mm /= norm
else:
        u, s, v = np.linalg.svd(Mm)
Mm = u[:, : len(s)]
return Mm
###################################
# Deterministic GW signal functions
###################################
def make_ecc_interpolant():
"""
Make interpolation function from eccentricity file to
determine number of harmonics to use for a given
eccentricity.
:returns: interpolant
"""
pth = resource_filename(Requirement.parse("libstempo"), "libstempo/ecc_vs_nharm.txt")
fil = np.loadtxt(pth)
return interp1d(fil[:, 0], fil[:, 1])
def get_edot(F, mc, e):
"""
Compute eccentricity derivative from Taylor et al. (2016)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param e: Eccentricity of binary
:returns: de/dt
"""
# chirp mass
mc *= const.Tsun
dedt = -304 / (15 * mc) * (2 * np.pi * mc * F) ** (8 / 3) * e * (1 + 121 / 304 * e ** 2) / ((1 - e ** 2) ** (5 / 2))
return dedt
def get_Fdot(F, mc, e):
"""
Compute frequency derivative from Taylor et al. (2016)
:param F: Orbital frequency [Hz]
:param mc: Chirp mass of binary [Solar Mass]
:param e: Eccentricity of binary
:returns: dF/dt
"""
# chirp mass
mc *= const.Tsun
dFdt = (
48
/ (5 * np.pi * mc ** 2)
* (2 * np.pi * mc * F) ** (11 / 3)
* (1 + 73 / 24 *
|
kirberich/djangae
|
djangae/db/constraints.py
|
Python
|
bsd-3-clause
| 11,037
| 0.003534
|
import datetime
import logging
from django.conf import settings
from django.core.exceptions import NON_FIELD_ERRORS
from google.appengine.api.datastore import Key, Delete, MAX_ALLOWABLE_QUERIES
from google.appengine.datastore.datastore_rpc import TransactionOptions
from google.appengine.ext import db
from .unique_utils import unique_identifiers_from_entity
from .utils import key_exists
from djangae.db.backends.appengine.dbapi import IntegrityError, NotSupportedError
DJANGAE_LOG = logging.getLogger("djangae")
def has_active_unique_constraints(model_or_instance):
"""
Returns true if the model/instance has unique fields or unique_together fields and unique
constraint checking is enabled on the model
"""
django_opts = getattr(model_or_instance, "_meta", None)
# If there are no unique fields on the model, return false
if not django_opts.unique_together and not any(x.unique for x in django_opts.fields):
return False
opts = getattr(model_or_instance, "Djangae", None)
if opts:
if hasattr(opts, "disable_constraint_checks"):
if opts.disable_constraint_checks:
return False
else:
return True
return not getattr(settings, "DJANGAE_DISABLE_CONSTRAINT_CHECKS", False)
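# Illustrative opt-out sketch (model and field names are hypothetical):
#   class Book(models.Model):
#       isbn = models.CharField(max_length=13, unique=True)
#       class Djangae:
#           disable_constraint_checks = True   # makes has_active_unique_constraints() return False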
class KeyProperty(db.Property):
"""A property that stores a datastore.Key reference to another object.
Think of this as a Django GenericForeignKey which returns only the PK value, not the whole
object, or a db.ReferenceProperty which can point to any model kind, and only returns the Key.
"""
def validate(self, value):
if value is None or isinstance(value, Key):
return value
raise ValueError("KeyProperty only accepts datastore.Key or None")
class UniqueMarker(db.Model):
instance = KeyProperty()
created = db.DateTimeProperty(required=True, auto_now_add=True)
@staticmethod
def kind():
return "_djangae_unique_marker"
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def acquire_identifiers(identifiers, entity_key):
return _acquire_identifiers(identifiers, entity_key)
def _acquire_identifiers(identifiers, entity_key):
# This must always be in a cross-group transaction, because even if there's only 1 identifider,
# in the case where that identifier already exists, we then check if its `instance` exists
assert entity_key
namespace = entity_key.namespace() or None
identifier_keys = [
Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace) for identifier in identifiers
]
existing_markers = UniqueMarker.get(identifier_keys)
markers_to_create = []
markers = []
for identifier_key, existing_marker in zip(identifier_keys, existing_markers):
        # Backwards compatibility: we used to create the markers first in an independent transaction
# and then create the entity and update the `instance` on the markers. This meant that it
# was possible that the independent marker creation transaction finished first and the outer
# transaction failed, causing stale markers to be left behind. We no longer do it this way
# but we still want to ignore any old stale markers, hence if instance is None we overwrite.
now = datetime.datetime.utcnow()
if not existing_marker or existing_marker.instance is None:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
elif existing_marker.instance != entity_key and key_exists(existing_marker.instance):
fields_and_values = identifier_key.name().split("|")
table_name = fields_and_values[0]
fields_and_values = fields_and_values[1:]
            fields = [x.split(":")[0] for x in fields_and_values]
raise IntegrityError("Unique constraint violation for kind {} on fields: {}".format(table_name, ", ".join(fields)))
elif existing_marker.instance != entity_key:
markers_to_create.append(UniqueMarker(
key=identifier_key,
instance=entity_key,
created=now
))
else:
# The marker is ours anyway
markers.append(existing_marker)
db.put(markers_to_create)
return markers + markers_to_create
def get_markers_for_update(model, old_entity, new_entity):
"""
Given an old entity state, and the new state, updates the identifiers
appropriately. Should be called before saving the new_state
"""
old_ids = set(unique_identifiers_from_entity(model, old_entity, ignore_pk=True))
new_ids = set(unique_identifiers_from_entity(model, new_entity, ignore_pk=True))
to_release = old_ids - new_ids
to_acquire = new_ids - old_ids
return to_acquire, to_release
def update_instance_on_markers(entity, markers):
# TODO: fix me!
def update(marker, instance):
marker = UniqueMarker.get(marker.key())
if not marker:
return
marker.instance = instance
marker.put()
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_all():
instance = entity.key()
for marker in markers:
update(marker, instance)
update_all()
def acquire(model, entity):
"""
Given a model and entity, this tries to acquire unique marker locks for the instance. If
the locks already exist then an IntegrityError will be thrown.
"""
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
return acquire_identifiers(identifiers, entity.key())
def release_markers(markers):
""" Delete the given UniqueMarker objects. """
# Note that these should all be from the same Django model instance, and therefore there should
# be a maximum of 25 of them (because everything blows up if you have more than that - limitation)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(markers) > 1)
def txn():
Delete([marker.key() for marker in markers])
txn()
def release_identifiers(identifiers, namespace):
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=len(identifiers) > 1)
def txn():
_release_identifiers(identifiers, namespace)
txn()
def _release_identifiers(identifiers, namespace):
keys = [Key.from_path(UniqueMarker.kind(), x, namespace=namespace) for x in identifiers]
Delete(keys)
def release(model, entity):
""" Delete the UniqueMarker objects for the given entity. """
if not has_active_unique_constraints(model):
return
identifiers = unique_identifiers_from_entity(model, entity, ignore_pk=True)
# Key.from_path expects None for an empty namespace, but Key.namespace() returns ''
namespace = entity.key().namespace() or None
release_identifiers(identifiers, namespace=namespace)
@db.transactional(propagation=TransactionOptions.INDEPENDENT, xg=True)
def update_identifiers(to_acquire, to_release, key):
""" A combination of acquire_identifiers and release_identifiers in a combined transaction. """
_acquire_identifiers(to_acquire, key)
_release_identifiers(to_release, key.namespace() or None)
class UniquenessMixin(object):
""" Mixin overriding the methods checking value uniqueness.
For models defining unique constraints this mixin should be inherited from.
When iterable (list or set) fields are marked as unique it must be used.
This is a copy of Django's implementation, save for the part marked by the comment.
"""
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
continue
if f.primary_key and not self._state.adding:
co
|
samueldotj/TeeRISC-Simulator
|
tests/configs/simple-atomic-dummychecker.py
|
Python
|
bsd-3-clause
| 2,321
| 0
|
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
from m5.objects import *
from base_config import *
root = BaseSESystemUniprocessor(mem_mode='atomic',
cpu_class=AtomicSimpleCPU,
checker=True).create_root()
|
obrienadam/boTAB
|
boTAB.py
|
Python
|
gpl-2.0
| 2,667
| 0.003
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
boTAB
=====
This solver uses the popular TAB model to simulate the atomization of droplets
Author: Adam O'Brien
"""
from input import *
from math import exp, cos, sin, sqrt
from fluid import *
from evaporation import *
from TAB import *
from output import *
import copy as cp
def main():
print ""
print "boTAB |"
print "-------"
print " Compute the break-up of a drop in a uniform cross-flow", "\n"
# Open up a configuration file
userInput = readInputFile()
freestream = Freestream()
initialDroplet = Droplet()
dropletInlet = DropletInlet()
    # Set object parameters from the input file
setObjectParametersFromInput(userInput, freestream, initialDroplet, dropletInlet)
# Set-up the simulation parameters in accordance with the input
maxTime = userInput["maxTime"]
    nTimeSteps = userInput["nTimeSteps"]
# Initialize a droplet list, with one copy of the initial droplet
droplets = [cp.deepcopy(initialDroplet)]
# Initialize misc parameters
dt = maxTime/nTimeSteps
t = [0.]
nChildDroplets = 0
# Begin the simulation
print "\nBeginning time-stepping..."
###########################################################################
# #
# Main Iteration Loop #
# #
###########################################################################
for stepNo in range(1, nTimeSteps + 1):
for droplet in droplets:
droplet.advectPredictorCorrector(freestream, dt)
evaporate(freestream, droplets, dt)
nChildDroplets += breakupTab(freestream, droplets, dt)
dropletInlet.addDrops(initialDroplet, droplets, dt)
t.append(t[-1] + dt)
if stepNo%(nTimeSteps/20) == 0:
completionPercentage = float(stepNo)/float(nTimeSteps)*100.
print "-----------------------------------------------------------"
print "Time-stepping completion : %s%%"%(completionPercentage)
print "Number of droplets in domain :", len(droplets)
print "Simulation time elapsed : %s seconds"%(t[-1])
print "Simulation time remaining : %s seconds"%(maxTime - t[-1])
print "Number of child drops :", nChildDroplets
print "\nTime-stepping complete. Finalizing output..."
plotDroplets(droplets)
# Execute the main function
if __name__ == "__main__":
main()
|
CSL-Consulting/asterisk-installation-script
|
asterisk-installation-script.py
|
Python
|
gpl-2.0
| 2,662
| 0.009016
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#####################################################################
# Author: Cristian Segura L. #
# Email: cristian+dot+segura+dot+lepe+arroba+dot+gmail+dot+com      #
# Creation Date: Sat nov 8 20:11:38 CLST 2014 #
# Version: 0.1 #
# License: GPL v2.0 (check LICENSE file) #
# Usage: Installation of Asterisk IP PBX #
# Dependencies: #
# + Python 2.7 #
# + wget #
# Tested on : #
# + Ubuntu Desktop 13.10 amd64 #
#####################################################################
import subprocess
import time
import datetime
import os
# Create working directory to download source code
nowDateTime = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
wrkDir = "%s-asterisk11-sources" % ( nowDateTime )
if not os.path.exists( wrkDir ):
os.makedirs( wrkDir )
else:
print "[+] ERROR: Cannot create directory %s" % (wrkDir)
    exit(-1)
#print str(os.path) + wrkDir
# print str(os.getcwd()) + '/' + wrkDir
print "[+] Changing to directory %s" % (wrkDir)
os.chdir( os.getcwd() + '/' + wrkDir )
# Downloading Files
baseURL = "http://downloads.asterisk.org/pub/telephony"
lAstDir = "asterisk"
lAstTgzFile = "asterisk-11-current.tar.gz"
lAstDownPath = baseURL + '/' + lAstDir + '/' + lAstTgzFile
lPriDir = "libpri"
lPriTgzFile = "libpri-1.4-current.tar.gz"
lPriDownPath = baseURL + '/' + lPriDir + '/' + lPriTgzFile
print ""
print "*****************************************************"
print "* *"
print "* Downloading LIBPRI *"
print "* *"
print "*****************************************************"
print "[+]downloading file: %s using wget" % ( lPriTgzFile )
print ""
subprocess.call( ["wget", lPriDownPath] )
print ""
print "*****************************************************"
print "* *"
print "* Downloading ASTERISK *"
print "* *"
print "*****************************************************"
print "[+]downloading file: %s using wget" % ( lAstTgzFile )
print ""
subprocess.call( ["wget", lAstDownPath] )
|
eJRF/ejrf
|
questionnaire/tests/factories/theme_factory.py
|
Python
|
bsd-3-clause
| 196
| 0.005102
|
import factory
from questionnaire.models import Theme
class ThemeFactory(factory.DjangoModelFactory):
class Meta:
model = Theme
name = "A title"
description = 'Description'
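# Hypothetical usage sketch (assumes a configured Django test database):
#   theme = ThemeFactory()                  # Theme(name="A title", description='Description')
#   themes = ThemeFactory.create_batch(3)   # three Theme rows with the same defaults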
|
csridhar/58A78C12-3B74-48F6-B265-887C33ED5F98-odat-5DD613ED-1FE0-4D6A-8A20-4C26C3F2C95B
|
src/main.py
|
Python
|
mit
| 528
| 0.007576
|
#!/usr/bin/env python
from server import Serve
from utils import get_authenticated_user
import os
import sys
authenticated_user = None
try:
    authenticated_user = get_authenticated_user('server.cfg')
except IOError:
print ("File 'server.cfg' doesn't exist on disk. Please ensure that it"
" does and try again.")
sys.exit(1)
except ValueError:
print ("'server.cfg' is empty. P
|
lease run 'python get_oauth_token.py' prior.")
sys.exit(1)
# Start appserver
app = Serve(__name__, authenticated_user)
|
yuweijun/learning-programming
|
language-python/getitem.py
|
Python
|
mit
| 281
| 0.064057
|
#! /usr/bin/python
class Indexer:
    def __getitem__(self, index):
return index ** 2
x = Indexer()
for i in range(5):
print x[i],
class Stepper:
    def __getitem__(self, index):
return self.data[index]
s = Stepper()
s.data = "spam"
for x in s:
print x,
print s.data[0]
|
dragonfly-science/django-custom-user
|
test_settings/settings_subclass.py
|
Python
|
bsd-3-clause
| 339
| 0
|
DEBUG = True
USE_TZ = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'custom_user',
'test_custom_user_subclass',
]
SECRET_KEY = 'not_random'
AUTH_USER_MODEL = 'test_custom_user_subclass.MyCustomEmailUser'
|
rdegraw/numbers-py
|
prime_requests.py
|
Python
|
unlicense
| 748
| 0.05615
|
#-----------------------------------------------------
#
# Find the next prime number as the user keeps asking
#
#-----------------------------------------------------
import sys
#------------------------
# is the number a prime?
#------------------------
def is_prime( num ):
for i in range( 2, num ):
if num % i == 0:
return False
if num != 1:
return True
else:
return False
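#---------------------------------------------------------
# Illustrative spot checks (not part of the original file):
# is_prime(2) -> True, is_prime(9) -> False, is_prime(1) -> False
#---------------------------------------------------------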
#--------------------
# main
#--------------------
if __name__ == "__main__":
curr_prime = 1 #initialize the prime number
    while True:
        response = raw_input( "Print the next prime? [Y]es " )
if response.upper().startswith('Y'):
while True:
curr_prime += 1
if is_prime(curr_prime):
print curr_prime
break
else:
break
|
bitmazk/django-monitoring
|
monitoring/views.py
|
Python
|
mit
| 2,525
| 0
|
"""Views for the monitoring app."""
from django.contrib.auth.decorators import login_required
from django.db.models import Count
from django.http import Http404
from django.utils.decorators import method_decorator
from django.views.generic import ListView, TemplateView
from .register import monitor
class MonitoringViewMixin(object):
"""Helper methods that all monitoring base views need."""
view_name = None
monitor_title = None
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs): # pragma: no cover
if not request.user.is_staff:
raise Http404
self.request = request
self.monitor_name = request.GET.get('monitor')
return super(MonitoringViewMixin, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(MonitoringViewMixin, self).get_context_data(**kwargs)
ctx.update({
'monitor_title': self.monitor_title,
'monitor_name': self.monitor_name,
})
return ctx
def get_template_names(self):
"""
Returns the template name for the view based on the view's model.
"""
return [self.model.get_template_name(), ]
def get_view_name(self):
"""
Returns the view name based on the view's model.
If you have set the ``view_name`` attribute on the view, that will be
returned instead.
"""
if self.view_name is not None:
return self.view_name
return 'monitoring_{0}'.format(self.model.__name__.lower())
class IntegerCountView(MonitoringViewMixin, ListView):
"""Default view for the ``IntegerCountBase`` monitor model."""
monitor_title = 'Integer Count'
def get_queryset(self):
        qs = super(IntegerCountView, self).get_queryset()
qs = qs.values('date_created').annotate(
count=Count('date_created')).distinct()
return qs
class MonitoringView(TemplateView):
template_name = 'monitoring/index.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not request.user.is_staff:
raise Http404
self.request = request
        return super(MonitoringView, self).dispatch(
request, *args, **kwargs)
def get_context_data(self, **kwargs):
ctx = super(MonitoringView, self).get_context_data(**kwargs)
ctx.update({
'monitor': monitor,
})
return ctx
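The values('date_created').annotate(count=Count('date_created')).distinct() chain in IntegerCountView.get_queryset is Django's GROUP BY idiom: it yields one dict per distinct date_created value together with the number of rows sharing it. A rough usage sketch (SomeIntegerCountModel is a hypothetical concrete subclass of IntegerCountBase, not part of this file):
from django.db.models import Count
rows = SomeIntegerCountModel.objects.values('date_created').annotate(
    count=Count('date_created')).distinct()
for row in rows:
    print(row['date_created'], row['count'])  # one line per distinct date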
|
olexiim/edx-platform
|
common/test/acceptance/pages/studio/users.py
|
Python
|
agpl-3.0
| 6,650
| 0.001955
|
"""
Page classes to test either the Course Team page or the Library Team page.
"""
from bok_choy.promise import EmptyPromise
from bok_choy.page_object import PageObject
from ...tests.helpers import disable_animations
from . import BASE_URL
def wait_for_ajax_or_reload(browser):
"""
Wait for all ajax requests to finish, OR for the page to reload.
Normal wait_for_ajax() chokes on occasion if the pages reloads,
giving "WebDriverException: Message: u'jQuery is not defined'"
"""
def _is_ajax_finished():
""" Wait for jQuery to finish all AJAX calls, if it is present. """
return browser.execute_script("return typeof(jQuery) == 'undefined' || jQuery.active == 0")
EmptyPromise(_is_ajax_finished, "Finished waiting for ajax requests.").fulfill()
class UsersPage(PageObject):
"""
Base class for either the Course Team page or the Library Team page
"""
def __init__(self, browser, locator):
super(UsersPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""
URL to this page - override in subclass
"""
raise NotImplementedError
def is_browser_on_page(self):
"""
Returns True iff the browser has loaded the page.
"""
return self.q(css='body.view-team').present
@property
def users(self):
"""
Return a list of users listed on this page.
"""
return self.q(css='.user-list .user-item').map(
lambda el: UserWrapper(self.browser, el.get_attribute('data-email'))
).results
@property
def has_add_button(self):
"""
Is the "New Team Member" button present?
"""
return self.q(css='.create-user-button').present
def click_add_button(self):
"""
Click on the "New Team Member" button
"""
self.q(css='.create-user-button').click()
@property
def new_user_form_visible(self):
""" Is the new user form visible? """
return self.q(css='.form-create.create-user .user-email-input').visible
def set_new_user_email(self, email):
""" Set the value of the "New User Email Address" field. """
        self.q(css='.form-create.create-user .user-email-input').fill(email)
def click_submit_new_user_form(self):
""" Submit the "New User" form """
self.q(css='.form-create.create-user .action-primary').click()
wait_for_ajax_or_reload(self.browser)
class LibraryUsersPage(UsersPage):
"""
Library Team page in Studio
"""
@property
def url(self):
"""
URL to the "User Access" page for the given library.
"""
return "{}/library/{}/team/".format(BASE_URL, unicode(self.locator))
class UserWrapper(PageObject):
"""
A PageObject representing a wrapper around a user listed on the course/library team page.
"""
url = None
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'save_settings': '.action-save',
}
def __init__(self, browser, email):
super(UserWrapper, self).__init__(browser)
self.email = email
self.selector = '.user-list .user-item[data-email="{}"]'.format(self.email)
def is_browser_on_page(self):
"""
Sanity check that our wrapper element is on the page.
"""
return self.q(css=self.selector).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular user entry's context
"""
return '{} {}'.format(self.selector, selector)
@property
def name(self):
""" Get this user's username, as displayed. """
return self.q(css=self._bounded_selector('.user-username')).text[0]
@property
def role_label(self):
""" Get this user's role, as displayed. """
return self.q(css=self._bounded_selector('.flag-role .value')).text[0]
@property
def is_current_user(self):
""" Does the UI indicate that this is the current user? """
return self.q(css=self._bounded_selector('.flag-role .msg-you')).present
@property
def can_promote(self):
""" Can this user be promoted to a more powerful role? """
return self.q(css=self._bounded_selector('.add-admin-role')).present
@property
def promote_button_text(self):
""" What does the promote user button say? """
return self.q(css=self._bounded_selector('.add-admin-role')).text[0]
def click_promote(self):
""" Click on the button to promote this user to the more powerful role """
self.q(css=self._bounded_selector('.add-admin-role')).click()
wait_for_ajax_or_reload(self.browser)
@property
def can_demote(self):
""" Can this user be demoted to a less powerful role? """
return self.q(css=self._bounded_selector('.remove-admin-role')).present
@property
def demote_button_text(self):
""" What does the demote user button say? """
return self.q(css=self._bounded_selector('.remove-admin-role')).text[0]
def click_demote(self):
""" Click on the button to demote this user to the less powerful role """
self.q(css=self._bounded_selector('.remove-admin-role')).click()
wait_for_ajax_or_reload(self.browser)
@property
def can_delete(self):
""" Can this user be deleted? """
return self.q(css=self._bounded_selector('.action-delete:not(.is-disabled) .remove-user')).present
def click_delete(self):
""" Click the button to delete this user. """
disable_animations(self)
self.q(css=self._bounded_selector('.remove-user')).click()
# We can't use confirm_prompt because its wait_for_ajax is flaky when the page is expected to reload.
self.wait_for_element_visibility('.prompt', 'Prompt is visible')
self.wait_for_element_visibility('.prompt .action-primary', 'Confirmation button is visible')
self.q(css='.prompt .action-primary').click()
wait_for_ajax_or_reload(self.browser)
@property
def has_no_change_warning(self):
""" Does this have a warning in place of the promote/demote buttons? """
return self.q(css=self._bounded_selector('.notoggleforyou')).present
@property
def no_change_warning_text(self):
""" Text of the warning seen in place of the promote/demote buttons. """
return self.q(css=self._bounded_selector('.notoggleforyou')).text[0]
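These page objects are meant to be driven from bok-choy acceptance tests; a hypothetical usage sketch using only the methods defined above (browser and library_locator are assumed test fixtures, not part of this file):
page = LibraryUsersPage(browser, library_locator)
page.visit()
if page.has_add_button:
    page.click_add_button()
    page.set_new_user_email('new.author@example.com')
    page.click_submit_new_user_form()
for user in page.users:
    print(user.name, user.role_label, user.is_current_user)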
|