Dataset schema (column name, observed string lengths or number of distinct classes):

| column | type | lengths / classes |
|---|---|---|
| commit | string | 40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |
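Each record below lists its field values in the column order above, separated by lines containing only `|`. The `prompt`, `response`, `text`, and `*_tagged` columns are derived from the base commit fields: in the tagged variants, the old contents, commit message, and new contents are joined with `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers, as can be seen in the rows below. As a minimal sketch of how the records might be consumed, the snippet below loads the dataset with the Hugging Face `datasets` library and rebuilds the tagged text from the base fields; the dataset id `your-org/commit-dataset` is a placeholder, not the real identifier, and the tagged layout is inferred from the rows shown here.

```python
# Minimal sketch, assuming a placeholder dataset id and the tagged layout
# inferred from the rows below (not an official loader for this dataset).
from datasets import load_dataset

ds = load_dataset("your-org/commit-dataset", split="train")  # hypothetical id

def build_text_tagged(row):
    # Pattern observed in the rows:
    # <commit_before>{old_contents}<commit_msg>{message}<commit_after>{new_contents}
    return (
        "<commit_before>" + row["old_contents"]
        + "<commit_msg>" + row["message"]
        + "<commit_after>" + row["new_contents"]
    )

row = ds[0]
# Expected to hold if the tagged layout matches the pattern above.
assert build_text_tagged(row) == row["text_tagged"]
print(row["commit"], row["new_file"], row["lang"], row["license"])
```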
60068d4deeba541b9518579d6d8473c4300e189d
|
tests/functional/test_crash.py
|
tests/functional/test_crash.py
|
import os.path
from os import unlink
from utils.launcher import Launcher
from utils.entries import Entries
from utils.loop import CounterLoop, BooleanLoop
from utils.files import generate, checksum
from utils.tempdirs import TempDirs
launcher = None
dirs = TempDirs()
rep1, rep2 = dirs.create(), dirs.create()
json_file = 'test_crash.json'
def setup_module(module):
global launcher
entries = Entries()
entries.add('local_storage', 'rep1', {'root': rep1})
entries.add('local_storage', 'rep2', {'root': rep2})
entries.save(json_file)
launcher = Launcher(json_file)
def teardown_module(module):
launcher.kill()
unlink(json_file)
dirs.delete()
def launcher_startup():
loop = CounterLoop(3)
launcher.on_referee_started(loop.check)
launcher.on_driver_started(loop.check, driver='rep1')
launcher.on_driver_started(loop.check, driver='rep2')
launcher()
loop.run(timeout=5)
def test_crach():
filename = 'crash'
loop = BooleanLoop()
launcher.on_transfer_started(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
generate(os.path.join(rep1, filename), 1000)
loop.run(timeout=5)
launcher.kill()
launcher.unset_all_events()
loop = BooleanLoop()
launcher.on_transfer_ended(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
loop.run(timeout=5)
assert(checksum(os.path.join(rep1, filename)) ==
checksum(os.path.join(rep2, filename)))
launcher.kill()
|
Test killing onitu during a transfer
|
Test killing onitu during a transfer
|
Python
|
mit
|
onitu/onitu,onitu/onitu,onitu/onitu
|
Test killing onitu during a transfer
|
import os.path
from os import unlink
from utils.launcher import Launcher
from utils.entries import Entries
from utils.loop import CounterLoop, BooleanLoop
from utils.files import generate, checksum
from utils.tempdirs import TempDirs
launcher = None
dirs = TempDirs()
rep1, rep2 = dirs.create(), dirs.create()
json_file = 'test_crash.json'
def setup_module(module):
global launcher
entries = Entries()
entries.add('local_storage', 'rep1', {'root': rep1})
entries.add('local_storage', 'rep2', {'root': rep2})
entries.save(json_file)
launcher = Launcher(json_file)
def teardown_module(module):
launcher.kill()
unlink(json_file)
dirs.delete()
def launcher_startup():
loop = CounterLoop(3)
launcher.on_referee_started(loop.check)
launcher.on_driver_started(loop.check, driver='rep1')
launcher.on_driver_started(loop.check, driver='rep2')
launcher()
loop.run(timeout=5)
def test_crach():
filename = 'crash'
loop = BooleanLoop()
launcher.on_transfer_started(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
generate(os.path.join(rep1, filename), 1000)
loop.run(timeout=5)
launcher.kill()
launcher.unset_all_events()
loop = BooleanLoop()
launcher.on_transfer_ended(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
loop.run(timeout=5)
assert(checksum(os.path.join(rep1, filename)) ==
checksum(os.path.join(rep2, filename)))
launcher.kill()
|
<commit_before><commit_msg>Test killing onitu during a transfer<commit_after>
|
import os.path
from os import unlink
from utils.launcher import Launcher
from utils.entries import Entries
from utils.loop import CounterLoop, BooleanLoop
from utils.files import generate, checksum
from utils.tempdirs import TempDirs
launcher = None
dirs = TempDirs()
rep1, rep2 = dirs.create(), dirs.create()
json_file = 'test_crash.json'
def setup_module(module):
global launcher
entries = Entries()
entries.add('local_storage', 'rep1', {'root': rep1})
entries.add('local_storage', 'rep2', {'root': rep2})
entries.save(json_file)
launcher = Launcher(json_file)
def teardown_module(module):
launcher.kill()
unlink(json_file)
dirs.delete()
def launcher_startup():
loop = CounterLoop(3)
launcher.on_referee_started(loop.check)
launcher.on_driver_started(loop.check, driver='rep1')
launcher.on_driver_started(loop.check, driver='rep2')
launcher()
loop.run(timeout=5)
def test_crach():
filename = 'crash'
loop = BooleanLoop()
launcher.on_transfer_started(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
generate(os.path.join(rep1, filename), 1000)
loop.run(timeout=5)
launcher.kill()
launcher.unset_all_events()
loop = BooleanLoop()
launcher.on_transfer_ended(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
loop.run(timeout=5)
assert(checksum(os.path.join(rep1, filename)) ==
checksum(os.path.join(rep2, filename)))
launcher.kill()
|
Test killing onitu during a transferimport os.path
from os import unlink
from utils.launcher import Launcher
from utils.entries import Entries
from utils.loop import CounterLoop, BooleanLoop
from utils.files import generate, checksum
from utils.tempdirs import TempDirs
launcher = None
dirs = TempDirs()
rep1, rep2 = dirs.create(), dirs.create()
json_file = 'test_crash.json'
def setup_module(module):
global launcher
entries = Entries()
entries.add('local_storage', 'rep1', {'root': rep1})
entries.add('local_storage', 'rep2', {'root': rep2})
entries.save(json_file)
launcher = Launcher(json_file)
def teardown_module(module):
launcher.kill()
unlink(json_file)
dirs.delete()
def launcher_startup():
loop = CounterLoop(3)
launcher.on_referee_started(loop.check)
launcher.on_driver_started(loop.check, driver='rep1')
launcher.on_driver_started(loop.check, driver='rep2')
launcher()
loop.run(timeout=5)
def test_crach():
filename = 'crash'
loop = BooleanLoop()
launcher.on_transfer_started(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
generate(os.path.join(rep1, filename), 1000)
loop.run(timeout=5)
launcher.kill()
launcher.unset_all_events()
loop = BooleanLoop()
launcher.on_transfer_ended(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
loop.run(timeout=5)
assert(checksum(os.path.join(rep1, filename)) ==
checksum(os.path.join(rep2, filename)))
launcher.kill()
|
<commit_before><commit_msg>Test killing onitu during a transfer<commit_after>import os.path
from os import unlink
from utils.launcher import Launcher
from utils.entries import Entries
from utils.loop import CounterLoop, BooleanLoop
from utils.files import generate, checksum
from utils.tempdirs import TempDirs
launcher = None
dirs = TempDirs()
rep1, rep2 = dirs.create(), dirs.create()
json_file = 'test_crash.json'
def setup_module(module):
global launcher
entries = Entries()
entries.add('local_storage', 'rep1', {'root': rep1})
entries.add('local_storage', 'rep2', {'root': rep2})
entries.save(json_file)
launcher = Launcher(json_file)
def teardown_module(module):
launcher.kill()
unlink(json_file)
dirs.delete()
def launcher_startup():
loop = CounterLoop(3)
launcher.on_referee_started(loop.check)
launcher.on_driver_started(loop.check, driver='rep1')
launcher.on_driver_started(loop.check, driver='rep2')
launcher()
loop.run(timeout=5)
def test_crach():
filename = 'crash'
loop = BooleanLoop()
launcher.on_transfer_started(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
generate(os.path.join(rep1, filename), 1000)
loop.run(timeout=5)
launcher.kill()
launcher.unset_all_events()
loop = BooleanLoop()
launcher.on_transfer_ended(
loop.stop, d_from='rep1', d_to='rep2', filename=filename
)
launcher_startup()
loop.run(timeout=5)
assert(checksum(os.path.join(rep1, filename)) ==
checksum(os.path.join(rep2, filename)))
launcher.kill()
|
|
2a3b89f42cde7088b304a3f224eaf52894f544ec
|
misc/utils/LSL_Tests/RecieveAppStatistics.py
|
misc/utils/LSL_Tests/RecieveAppStatistics.py
|
"""Example program to show how to read a multi-channel time series from LSL."""
from pylsl import StreamInlet, resolve_stream
import sys
# first resolve an EEG stream on the lab network
print("looking for an Unity3D.AppStatistics stream...")
streams = resolve_stream('type', 'Unity3D.FPS.FT')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
# get a new sample (you can also omit the timestamp part if you're not
# interested in it)
sample, timestamp = inlet.pull_sample()
print '\r' + str(round(timestamp)) + '\t' + str(sample),
sys.stdout.flush()
|
Add an python example for stream testing
|
Add an python example for stream testing
|
Python
|
mit
|
xfleckx/BeMoBI,xfleckx/BeMoBI
|
Add an python example for stream testing
|
"""Example program to show how to read a multi-channel time series from LSL."""
from pylsl import StreamInlet, resolve_stream
import sys
# first resolve an EEG stream on the lab network
print("looking for an Unity3D.AppStatistics stream...")
streams = resolve_stream('type', 'Unity3D.FPS.FT')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
# get a new sample (you can also omit the timestamp part if you're not
# interested in it)
sample, timestamp = inlet.pull_sample()
print '\r' + str(round(timestamp)) + '\t' + str(sample),
sys.stdout.flush()
|
<commit_before><commit_msg>Add an python example for stream testing<commit_after>
|
"""Example program to show how to read a multi-channel time series from LSL."""
from pylsl import StreamInlet, resolve_stream
import sys
# first resolve an EEG stream on the lab network
print("looking for an Unity3D.AppStatistics stream...")
streams = resolve_stream('type', 'Unity3D.FPS.FT')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
# get a new sample (you can also omit the timestamp part if you're not
# interested in it)
sample, timestamp = inlet.pull_sample()
print '\r' + str(round(timestamp)) + '\t' + str(sample),
sys.stdout.flush()
|
Add an python example for stream testing"""Example program to show how to read a multi-channel time series from LSL."""
from pylsl import StreamInlet, resolve_stream
import sys
# first resolve an EEG stream on the lab network
print("looking for an Unity3D.AppStatistics stream...")
streams = resolve_stream('type', 'Unity3D.FPS.FT')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
# get a new sample (you can also omit the timestamp part if you're not
# interested in it)
sample, timestamp = inlet.pull_sample()
print '\r' + str(round(timestamp)) + '\t' + str(sample),
sys.stdout.flush()
|
<commit_before><commit_msg>Add an python example for stream testing<commit_after>"""Example program to show how to read a multi-channel time series from LSL."""
from pylsl import StreamInlet, resolve_stream
import sys
# first resolve an EEG stream on the lab network
print("looking for an Unity3D.AppStatistics stream...")
streams = resolve_stream('type', 'Unity3D.FPS.FT')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
# get a new sample (you can also omit the timestamp part if you're not
# interested in it)
sample, timestamp = inlet.pull_sample()
print '\r' + str(round(timestamp)) + '\t' + str(sample),
sys.stdout.flush()
|
|
6ad72a0c624abdda0df8d5c49366bfc597a12340
|
cptm/tests/test_utils_experiment.py
|
cptm/tests/test_utils_experiment.py
|
from nose.tools import assert_equal, assert_false
from os import remove
from os.path import join
from json import dump
from cptm.utils.experiment import load_config, add_parameter, thetaFileName, \
topicFileName, opinionFileName, tarFileName, experimentName
def setup():
global jsonFile
global config
global nTopics
jsonFile = 'config.json'
# create cofig.json
params = {}
with open(jsonFile, 'wb') as f:
dump(params, f, sort_keys=True, indent=4)
config = load_config(jsonFile)
nTopics = 100
def teardown():
remove(jsonFile)
def test_load_config_default_values():
params = {}
params['inputData'] = None
params['outDir'] = '/{}'
params['testSplit'] = 20
params['minFreq'] = None
params['removeTopTF'] = None
params['removeTopDF'] = None
params['nIter'] = 200
params['beta'] = 0.02
params['beta_o'] = 0.02
params['expNumTopics'] = range(20, 201, 20)
params['nTopics'] = None
params['nProcesses'] = None
params['topicLines'] = [0]
params['opinionLines'] = [1]
params['sampleEstimateStart'] = None
params['sampleEstimateEnd'] = None
for p, v in params.iteritems():
yield assert_equal, v, params[p]
def test_add_parameter():
pName = 'nTopics'
yield assert_false, hasattr(config, pName)
add_parameter(pName, nTopics, jsonFile)
config2 = load_config(jsonFile)
yield assert_equal, config2[pName], nTopics
def test_thetaFileName():
config['nTopics'] = nTopics
fName = thetaFileName(config)
assert_equal(fName, '/theta_{}.csv'.format(nTopics))
def test_topicFileName():
config['nTopics'] = nTopics
fName = topicFileName(config)
assert_equal(fName, '/topics_{}.csv'.format(nTopics))
def test_opinionFileName():
config['nTopics'] = nTopics
return join(params.get('outDir').format(''),
'opinions_{}_{}.csv'.format(name, nTopics))
#def experimentName(params):
# fName = params.get('outDir')
# fName = fName.replace('/{}', '')
# _p, name = os.path.split(fName)
# return name
#def tarFileName(params):
# nTopics = params.get('nTopics')
# name = experimentName(params)
# return os.path.join(params.get('outDir').format(''),
# '{}_{}.tgz'.format(name, nTopics))
|
Add tests for utils experiment module
|
Add tests for utils experiment module
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add tests for utils experiment module
|
from nose.tools import assert_equal, assert_false
from os import remove
from os.path import join
from json import dump
from cptm.utils.experiment import load_config, add_parameter, thetaFileName, \
topicFileName, opinionFileName, tarFileName, experimentName
def setup():
global jsonFile
global config
global nTopics
jsonFile = 'config.json'
# create cofig.json
params = {}
with open(jsonFile, 'wb') as f:
dump(params, f, sort_keys=True, indent=4)
config = load_config(jsonFile)
nTopics = 100
def teardown():
remove(jsonFile)
def test_load_config_default_values():
params = {}
params['inputData'] = None
params['outDir'] = '/{}'
params['testSplit'] = 20
params['minFreq'] = None
params['removeTopTF'] = None
params['removeTopDF'] = None
params['nIter'] = 200
params['beta'] = 0.02
params['beta_o'] = 0.02
params['expNumTopics'] = range(20, 201, 20)
params['nTopics'] = None
params['nProcesses'] = None
params['topicLines'] = [0]
params['opinionLines'] = [1]
params['sampleEstimateStart'] = None
params['sampleEstimateEnd'] = None
for p, v in params.iteritems():
yield assert_equal, v, params[p]
def test_add_parameter():
pName = 'nTopics'
yield assert_false, hasattr(config, pName)
add_parameter(pName, nTopics, jsonFile)
config2 = load_config(jsonFile)
yield assert_equal, config2[pName], nTopics
def test_thetaFileName():
config['nTopics'] = nTopics
fName = thetaFileName(config)
assert_equal(fName, '/theta_{}.csv'.format(nTopics))
def test_topicFileName():
config['nTopics'] = nTopics
fName = topicFileName(config)
assert_equal(fName, '/topics_{}.csv'.format(nTopics))
def test_opinionFileName():
config['nTopics'] = nTopics
return join(params.get('outDir').format(''),
'opinions_{}_{}.csv'.format(name, nTopics))
#def experimentName(params):
# fName = params.get('outDir')
# fName = fName.replace('/{}', '')
# _p, name = os.path.split(fName)
# return name
#def tarFileName(params):
# nTopics = params.get('nTopics')
# name = experimentName(params)
# return os.path.join(params.get('outDir').format(''),
# '{}_{}.tgz'.format(name, nTopics))
|
<commit_before><commit_msg>Add tests for utils experiment module<commit_after>
|
from nose.tools import assert_equal, assert_false
from os import remove
from os.path import join
from json import dump
from cptm.utils.experiment import load_config, add_parameter, thetaFileName, \
topicFileName, opinionFileName, tarFileName, experimentName
def setup():
global jsonFile
global config
global nTopics
jsonFile = 'config.json'
# create cofig.json
params = {}
with open(jsonFile, 'wb') as f:
dump(params, f, sort_keys=True, indent=4)
config = load_config(jsonFile)
nTopics = 100
def teardown():
remove(jsonFile)
def test_load_config_default_values():
params = {}
params['inputData'] = None
params['outDir'] = '/{}'
params['testSplit'] = 20
params['minFreq'] = None
params['removeTopTF'] = None
params['removeTopDF'] = None
params['nIter'] = 200
params['beta'] = 0.02
params['beta_o'] = 0.02
params['expNumTopics'] = range(20, 201, 20)
params['nTopics'] = None
params['nProcesses'] = None
params['topicLines'] = [0]
params['opinionLines'] = [1]
params['sampleEstimateStart'] = None
params['sampleEstimateEnd'] = None
for p, v in params.iteritems():
yield assert_equal, v, params[p]
def test_add_parameter():
pName = 'nTopics'
yield assert_false, hasattr(config, pName)
add_parameter(pName, nTopics, jsonFile)
config2 = load_config(jsonFile)
yield assert_equal, config2[pName], nTopics
def test_thetaFileName():
config['nTopics'] = nTopics
fName = thetaFileName(config)
assert_equal(fName, '/theta_{}.csv'.format(nTopics))
def test_topicFileName():
config['nTopics'] = nTopics
fName = topicFileName(config)
assert_equal(fName, '/topics_{}.csv'.format(nTopics))
def test_opinionFileName():
config['nTopics'] = nTopics
return join(params.get('outDir').format(''),
'opinions_{}_{}.csv'.format(name, nTopics))
#def experimentName(params):
# fName = params.get('outDir')
# fName = fName.replace('/{}', '')
# _p, name = os.path.split(fName)
# return name
#def tarFileName(params):
# nTopics = params.get('nTopics')
# name = experimentName(params)
# return os.path.join(params.get('outDir').format(''),
# '{}_{}.tgz'.format(name, nTopics))
|
Add tests for utils experiment modulefrom nose.tools import assert_equal, assert_false
from os import remove
from os.path import join
from json import dump
from cptm.utils.experiment import load_config, add_parameter, thetaFileName, \
topicFileName, opinionFileName, tarFileName, experimentName
def setup():
global jsonFile
global config
global nTopics
jsonFile = 'config.json'
# create cofig.json
params = {}
with open(jsonFile, 'wb') as f:
dump(params, f, sort_keys=True, indent=4)
config = load_config(jsonFile)
nTopics = 100
def teardown():
remove(jsonFile)
def test_load_config_default_values():
params = {}
params['inputData'] = None
params['outDir'] = '/{}'
params['testSplit'] = 20
params['minFreq'] = None
params['removeTopTF'] = None
params['removeTopDF'] = None
params['nIter'] = 200
params['beta'] = 0.02
params['beta_o'] = 0.02
params['expNumTopics'] = range(20, 201, 20)
params['nTopics'] = None
params['nProcesses'] = None
params['topicLines'] = [0]
params['opinionLines'] = [1]
params['sampleEstimateStart'] = None
params['sampleEstimateEnd'] = None
for p, v in params.iteritems():
yield assert_equal, v, params[p]
def test_add_parameter():
pName = 'nTopics'
yield assert_false, hasattr(config, pName)
add_parameter(pName, nTopics, jsonFile)
config2 = load_config(jsonFile)
yield assert_equal, config2[pName], nTopics
def test_thetaFileName():
config['nTopics'] = nTopics
fName = thetaFileName(config)
assert_equal(fName, '/theta_{}.csv'.format(nTopics))
def test_topicFileName():
config['nTopics'] = nTopics
fName = topicFileName(config)
assert_equal(fName, '/topics_{}.csv'.format(nTopics))
def test_opinionFileName():
config['nTopics'] = nTopics
return join(params.get('outDir').format(''),
'opinions_{}_{}.csv'.format(name, nTopics))
#def experimentName(params):
# fName = params.get('outDir')
# fName = fName.replace('/{}', '')
# _p, name = os.path.split(fName)
# return name
#def tarFileName(params):
# nTopics = params.get('nTopics')
# name = experimentName(params)
# return os.path.join(params.get('outDir').format(''),
# '{}_{}.tgz'.format(name, nTopics))
|
<commit_before><commit_msg>Add tests for utils experiment module<commit_after>from nose.tools import assert_equal, assert_false
from os import remove
from os.path import join
from json import dump
from cptm.utils.experiment import load_config, add_parameter, thetaFileName, \
topicFileName, opinionFileName, tarFileName, experimentName
def setup():
global jsonFile
global config
global nTopics
jsonFile = 'config.json'
# create cofig.json
params = {}
with open(jsonFile, 'wb') as f:
dump(params, f, sort_keys=True, indent=4)
config = load_config(jsonFile)
nTopics = 100
def teardown():
remove(jsonFile)
def test_load_config_default_values():
params = {}
params['inputData'] = None
params['outDir'] = '/{}'
params['testSplit'] = 20
params['minFreq'] = None
params['removeTopTF'] = None
params['removeTopDF'] = None
params['nIter'] = 200
params['beta'] = 0.02
params['beta_o'] = 0.02
params['expNumTopics'] = range(20, 201, 20)
params['nTopics'] = None
params['nProcesses'] = None
params['topicLines'] = [0]
params['opinionLines'] = [1]
params['sampleEstimateStart'] = None
params['sampleEstimateEnd'] = None
for p, v in params.iteritems():
yield assert_equal, v, params[p]
def test_add_parameter():
pName = 'nTopics'
yield assert_false, hasattr(config, pName)
add_parameter(pName, nTopics, jsonFile)
config2 = load_config(jsonFile)
yield assert_equal, config2[pName], nTopics
def test_thetaFileName():
config['nTopics'] = nTopics
fName = thetaFileName(config)
assert_equal(fName, '/theta_{}.csv'.format(nTopics))
def test_topicFileName():
config['nTopics'] = nTopics
fName = topicFileName(config)
assert_equal(fName, '/topics_{}.csv'.format(nTopics))
def test_opinionFileName():
config['nTopics'] = nTopics
return join(params.get('outDir').format(''),
'opinions_{}_{}.csv'.format(name, nTopics))
#def experimentName(params):
# fName = params.get('outDir')
# fName = fName.replace('/{}', '')
# _p, name = os.path.split(fName)
# return name
#def tarFileName(params):
# nTopics = params.get('nTopics')
# name = experimentName(params)
# return os.path.join(params.get('outDir').format(''),
# '{}_{}.tgz'.format(name, nTopics))
|
|
e6e5fbb671c2539f4f82c6eaca51fbf400133482
|
utils/Target/ARM/analyze-match-table.py
|
utils/Target/ARM/analyze-match-table.py
|
#!/usr/bin/env python
def analyze_match_table(path):
# Extract the instruction table.
data = open(path).read()
start = data.index("static const MatchEntry MatchTable")
end = data.index("\n};\n", start)
lines = data[start:end].split("\n")[1:]
# Parse the instructions.
insns = []
for ln in lines:
ln = ln.split("{", 1)[1]
ln = ln.rsplit("}", 1)[0]
a,bc = ln.split("{", 1)
b,c = bc.split("}", 1)
code, string, converter, _ = [s.strip()
for s in a.split(",")]
items = [s.strip() for s in b.split(",")]
_,features = [s.strip() for s in c.split(",")]
assert string[0] == string[-1] == '"'
string = string[1:-1]
insns.append((code,string,converter,items,features))
# For every mnemonic, compute whether or not it can have a carry setting
# operand and whether or not it can have a predication code.
mnemonic_flags = {}
for insn in insns:
mnemonic = insn[1]
items = insn[3]
flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
flags.update(items)
mnemonics = set(mnemonic_flags)
ccout_mnemonics = set(m for m in mnemonics
if 'MCK_CCOut' in mnemonic_flags[m])
condcode_mnemonics = set(m for m in mnemonics
if 'MCK_CondCode' in mnemonic_flags[m])
noncondcode_mnemonics = mnemonics - condcode_mnemonics
print ' || '.join('Mnemonic == "%s"' % m
for m in ccout_mnemonics)
print ' || '.join('Mnemonic == "%s"' % m
for m in noncondcode_mnemonics)
def main():
import sys
if len(sys.argv) == 1:
import os
from lit.Util import capture
llvm_obj_root = capture(["llvm-config", "--obj-root"])
file = os.path.join(llvm_obj_root,
"lib/Target/ARM/ARMGenAsmMatcher.inc")
elif len(sys.argv) == 2:
file = sys.argv[1]
else:
raise NotImplementedError
analyze_match_table(file)
if __name__ == '__main__':
main()
|
Write a silly Python script to compute some hard coded info from the generated ARM match table, which is substantially more efficient than dealing with tblgen.
|
McARM: Write a silly Python script to compute some hard coded info from the
generated ARM match table, which is substantially more efficient than dealing
with tblgen.
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@123252 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
GPUOpen-Drivers/llvm,chubbymaggie/asap,dslab-epfl/asap,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,dslab-epfl/asap,apple/swift-llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,chubbymaggie/asap,apple/swift-llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,chubbymaggie/asap,dslab-epfl/asap,dslab-epfl/asap,chubbymaggie/asap,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,dslab-epfl/asap,chubbymaggie/asap,apple/swift-llvm,chubbymaggie/asap,dslab-epfl/asap,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,dslab-epfl/asap
|
McARM: Write a silly Python script to compute some hard coded info from the
generated ARM match table, which is substantially more efficient than dealing
with tblgen.
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@123252 91177308-0d34-0410-b5e6-96231b3b80d8
|
#!/usr/bin/env python
def analyze_match_table(path):
# Extract the instruction table.
data = open(path).read()
start = data.index("static const MatchEntry MatchTable")
end = data.index("\n};\n", start)
lines = data[start:end].split("\n")[1:]
# Parse the instructions.
insns = []
for ln in lines:
ln = ln.split("{", 1)[1]
ln = ln.rsplit("}", 1)[0]
a,bc = ln.split("{", 1)
b,c = bc.split("}", 1)
code, string, converter, _ = [s.strip()
for s in a.split(",")]
items = [s.strip() for s in b.split(",")]
_,features = [s.strip() for s in c.split(",")]
assert string[0] == string[-1] == '"'
string = string[1:-1]
insns.append((code,string,converter,items,features))
# For every mnemonic, compute whether or not it can have a carry setting
# operand and whether or not it can have a predication code.
mnemonic_flags = {}
for insn in insns:
mnemonic = insn[1]
items = insn[3]
flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
flags.update(items)
mnemonics = set(mnemonic_flags)
ccout_mnemonics = set(m for m in mnemonics
if 'MCK_CCOut' in mnemonic_flags[m])
condcode_mnemonics = set(m for m in mnemonics
if 'MCK_CondCode' in mnemonic_flags[m])
noncondcode_mnemonics = mnemonics - condcode_mnemonics
print ' || '.join('Mnemonic == "%s"' % m
for m in ccout_mnemonics)
print ' || '.join('Mnemonic == "%s"' % m
for m in noncondcode_mnemonics)
def main():
import sys
if len(sys.argv) == 1:
import os
from lit.Util import capture
llvm_obj_root = capture(["llvm-config", "--obj-root"])
file = os.path.join(llvm_obj_root,
"lib/Target/ARM/ARMGenAsmMatcher.inc")
elif len(sys.argv) == 2:
file = sys.argv[1]
else:
raise NotImplementedError
analyze_match_table(file)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>McARM: Write a silly Python script to compute some hard coded info from the
generated ARM match table, which is substantially more efficient than dealing
with tblgen.
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@123252 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
#!/usr/bin/env python
def analyze_match_table(path):
# Extract the instruction table.
data = open(path).read()
start = data.index("static const MatchEntry MatchTable")
end = data.index("\n};\n", start)
lines = data[start:end].split("\n")[1:]
# Parse the instructions.
insns = []
for ln in lines:
ln = ln.split("{", 1)[1]
ln = ln.rsplit("}", 1)[0]
a,bc = ln.split("{", 1)
b,c = bc.split("}", 1)
code, string, converter, _ = [s.strip()
for s in a.split(",")]
items = [s.strip() for s in b.split(",")]
_,features = [s.strip() for s in c.split(",")]
assert string[0] == string[-1] == '"'
string = string[1:-1]
insns.append((code,string,converter,items,features))
# For every mnemonic, compute whether or not it can have a carry setting
# operand and whether or not it can have a predication code.
mnemonic_flags = {}
for insn in insns:
mnemonic = insn[1]
items = insn[3]
flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
flags.update(items)
mnemonics = set(mnemonic_flags)
ccout_mnemonics = set(m for m in mnemonics
if 'MCK_CCOut' in mnemonic_flags[m])
condcode_mnemonics = set(m for m in mnemonics
if 'MCK_CondCode' in mnemonic_flags[m])
noncondcode_mnemonics = mnemonics - condcode_mnemonics
print ' || '.join('Mnemonic == "%s"' % m
for m in ccout_mnemonics)
print ' || '.join('Mnemonic == "%s"' % m
for m in noncondcode_mnemonics)
def main():
import sys
if len(sys.argv) == 1:
import os
from lit.Util import capture
llvm_obj_root = capture(["llvm-config", "--obj-root"])
file = os.path.join(llvm_obj_root,
"lib/Target/ARM/ARMGenAsmMatcher.inc")
elif len(sys.argv) == 2:
file = sys.argv[1]
else:
raise NotImplementedError
analyze_match_table(file)
if __name__ == '__main__':
main()
|
McARM: Write a silly Python script to compute some hard coded info from the
generated ARM match table, which is substantially more efficient than dealing
with tblgen.
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@123252 91177308-0d34-0410-b5e6-96231b3b80d8#!/usr/bin/env python
def analyze_match_table(path):
# Extract the instruction table.
data = open(path).read()
start = data.index("static const MatchEntry MatchTable")
end = data.index("\n};\n", start)
lines = data[start:end].split("\n")[1:]
# Parse the instructions.
insns = []
for ln in lines:
ln = ln.split("{", 1)[1]
ln = ln.rsplit("}", 1)[0]
a,bc = ln.split("{", 1)
b,c = bc.split("}", 1)
code, string, converter, _ = [s.strip()
for s in a.split(",")]
items = [s.strip() for s in b.split(",")]
_,features = [s.strip() for s in c.split(",")]
assert string[0] == string[-1] == '"'
string = string[1:-1]
insns.append((code,string,converter,items,features))
# For every mnemonic, compute whether or not it can have a carry setting
# operand and whether or not it can have a predication code.
mnemonic_flags = {}
for insn in insns:
mnemonic = insn[1]
items = insn[3]
flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
flags.update(items)
mnemonics = set(mnemonic_flags)
ccout_mnemonics = set(m for m in mnemonics
if 'MCK_CCOut' in mnemonic_flags[m])
condcode_mnemonics = set(m for m in mnemonics
if 'MCK_CondCode' in mnemonic_flags[m])
noncondcode_mnemonics = mnemonics - condcode_mnemonics
print ' || '.join('Mnemonic == "%s"' % m
for m in ccout_mnemonics)
print ' || '.join('Mnemonic == "%s"' % m
for m in noncondcode_mnemonics)
def main():
import sys
if len(sys.argv) == 1:
import os
from lit.Util import capture
llvm_obj_root = capture(["llvm-config", "--obj-root"])
file = os.path.join(llvm_obj_root,
"lib/Target/ARM/ARMGenAsmMatcher.inc")
elif len(sys.argv) == 2:
file = sys.argv[1]
else:
raise NotImplementedError
analyze_match_table(file)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>McARM: Write a silly Python script to compute some hard coded info from the
generated ARM match table, which is substantially more efficient than dealing
with tblgen.
git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@123252 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>#!/usr/bin/env python
def analyze_match_table(path):
# Extract the instruction table.
data = open(path).read()
start = data.index("static const MatchEntry MatchTable")
end = data.index("\n};\n", start)
lines = data[start:end].split("\n")[1:]
# Parse the instructions.
insns = []
for ln in lines:
ln = ln.split("{", 1)[1]
ln = ln.rsplit("}", 1)[0]
a,bc = ln.split("{", 1)
b,c = bc.split("}", 1)
code, string, converter, _ = [s.strip()
for s in a.split(",")]
items = [s.strip() for s in b.split(",")]
_,features = [s.strip() for s in c.split(",")]
assert string[0] == string[-1] == '"'
string = string[1:-1]
insns.append((code,string,converter,items,features))
# For every mnemonic, compute whether or not it can have a carry setting
# operand and whether or not it can have a predication code.
mnemonic_flags = {}
for insn in insns:
mnemonic = insn[1]
items = insn[3]
flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
flags.update(items)
mnemonics = set(mnemonic_flags)
ccout_mnemonics = set(m for m in mnemonics
if 'MCK_CCOut' in mnemonic_flags[m])
condcode_mnemonics = set(m for m in mnemonics
if 'MCK_CondCode' in mnemonic_flags[m])
noncondcode_mnemonics = mnemonics - condcode_mnemonics
print ' || '.join('Mnemonic == "%s"' % m
for m in ccout_mnemonics)
print ' || '.join('Mnemonic == "%s"' % m
for m in noncondcode_mnemonics)
def main():
import sys
if len(sys.argv) == 1:
import os
from lit.Util import capture
llvm_obj_root = capture(["llvm-config", "--obj-root"])
file = os.path.join(llvm_obj_root,
"lib/Target/ARM/ARMGenAsmMatcher.inc")
elif len(sys.argv) == 2:
file = sys.argv[1]
else:
raise NotImplementedError
analyze_match_table(file)
if __name__ == '__main__':
main()
|
|
aab833a4a267ed46e83a5968e87d357ae3a5a12b
|
utils/LSL_Tests/RecieveDemoStream.py
|
utils/LSL_Tests/RecieveDemoStream.py
|
"""Example program to show how to read a marker time series from LSL."""
import sys
sys.path.append('./pylsl') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream
# first resolve an EEG stream on the lab network
targetStreamType = 'Unity.Quaternion'
print 'looking for an stream of type ' + targetStreamType
streams = resolve_stream('type', targetStreamType)
streamsFound = len(streams)
if (streamsFound > 0):
print 'found ' + str(streamsFound)
else:
print 'found none',
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
sample, timestamp = inlet.pull_sample()
if(sample):
print "\033[K", str(timestamp) + ' Quaternion: ' + ' '.join(str(sample[x]) for x in range(0,len(sample))), "\r",
sys.stdout.flush()
|
Add new DemoStream example corresponding to the LSL4Unity Project
|
Add new DemoStream example corresponding to the LSL4Unity Project
|
Python
|
mit
|
xfleckx/BeMoBI_Tools,xfleckx/BeMoBI_Tools,xfleckx/BeMoBI_Tools
|
Add new DemoStream example corresponding to the LSL4Unity Project
|
"""Example program to show how to read a marker time series from LSL."""
import sys
sys.path.append('./pylsl') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream
# first resolve an EEG stream on the lab network
targetStreamType = 'Unity.Quaternion'
print 'looking for an stream of type ' + targetStreamType
streams = resolve_stream('type', targetStreamType)
streamsFound = len(streams)
if (streamsFound > 0):
print 'found ' + str(streamsFound)
else:
print 'found none',
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
sample, timestamp = inlet.pull_sample()
if(sample):
print "\033[K", str(timestamp) + ' Quaternion: ' + ' '.join(str(sample[x]) for x in range(0,len(sample))), "\r",
sys.stdout.flush()
|
<commit_before><commit_msg>Add new DemoStream example corresponding to the LSL4Unity Project<commit_after>
|
"""Example program to show how to read a marker time series from LSL."""
import sys
sys.path.append('./pylsl') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream
# first resolve an EEG stream on the lab network
targetStreamType = 'Unity.Quaternion'
print 'looking for an stream of type ' + targetStreamType
streams = resolve_stream('type', targetStreamType)
streamsFound = len(streams)
if (streamsFound > 0):
print 'found ' + str(streamsFound)
else:
print 'found none',
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
sample, timestamp = inlet.pull_sample()
if(sample):
print "\033[K", str(timestamp) + ' Quaternion: ' + ' '.join(str(sample[x]) for x in range(0,len(sample))), "\r",
sys.stdout.flush()
|
Add new DemoStream example corresponding to the LSL4Unity Project"""Example program to show how to read a marker time series from LSL."""
import sys
sys.path.append('./pylsl') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream
# first resolve an EEG stream on the lab network
targetStreamType = 'Unity.Quaternion'
print 'looking for an stream of type ' + targetStreamType
streams = resolve_stream('type', targetStreamType)
streamsFound = len(streams)
if (streamsFound > 0):
print 'found ' + str(streamsFound)
else:
print 'found none',
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
sample, timestamp = inlet.pull_sample()
if(sample):
print "\033[K", str(timestamp) + ' Quaternion: ' + ' '.join(str(sample[x]) for x in range(0,len(sample))), "\r",
sys.stdout.flush()
|
<commit_before><commit_msg>Add new DemoStream example corresponding to the LSL4Unity Project<commit_after>"""Example program to show how to read a marker time series from LSL."""
import sys
sys.path.append('./pylsl') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream
# first resolve an EEG stream on the lab network
targetStreamType = 'Unity.Quaternion'
print 'looking for an stream of type ' + targetStreamType
streams = resolve_stream('type', targetStreamType)
streamsFound = len(streams)
if (streamsFound > 0):
print 'found ' + str(streamsFound)
else:
print 'found none',
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
sample, timestamp = inlet.pull_sample()
if(sample):
print "\033[K", str(timestamp) + ' Quaternion: ' + ' '.join(str(sample[x]) for x in range(0,len(sample))), "\r",
sys.stdout.flush()
|
|
2e0fbcb3ec1c2f0311d7ee4bbfeac33662f66089
|
monitor_process.py
|
monitor_process.py
|
import subprocess
""" If the program is running "ps -ef | grep program" will return 2 or more rows
(one with the program itself and the second one with "grep program").
Otherwise, it will only return one row ("grep program")
You can trigger the alert on this if required.
"""
def monitor_process(name):
args=['ps','-ef']
args1=['grep','-c','%s' %name]
process_ps = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
process_monitor = subprocess.Popen(args1, stdin=process_ps.stdout, stdout=subprocess.PIPE, shell=False)
# Allow process_ps to receive a SIGPIPE if process_monitor exits.
process_ps.stdout.close()
return process_monitor.communicate()[0]
if __name__== "__main__":
print monitor_process('firefox')
|
Monitor process using subprocess module
|
Monitor process using subprocess module
|
Python
|
apache-2.0
|
PSJoshi/python_scripts
|
Monitor process using subprocess module
|
import subprocess
""" If the program is running "ps -ef | grep program" will return 2 or more rows
(one with the program itself and the second one with "grep program").
Otherwise, it will only return one row ("grep program")
You can trigger the alert on this if required.
"""
def monitor_process(name):
args=['ps','-ef']
args1=['grep','-c','%s' %name]
process_ps = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
process_monitor = subprocess.Popen(args1, stdin=process_ps.stdout, stdout=subprocess.PIPE, shell=False)
# Allow process_ps to receive a SIGPIPE if process_monitor exits.
process_ps.stdout.close()
return process_monitor.communicate()[0]
if __name__== "__main__":
print monitor_process('firefox')
|
<commit_before><commit_msg>Monitor process using subprocess module<commit_after>
|
import subprocess
""" If the program is running "ps -ef | grep program" will return 2 or more rows
(one with the program itself and the second one with "grep program").
Otherwise, it will only return one row ("grep program")
You can trigger the alert on this if required.
"""
def monitor_process(name):
args=['ps','-ef']
args1=['grep','-c','%s' %name]
process_ps = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
process_monitor = subprocess.Popen(args1, stdin=process_ps.stdout, stdout=subprocess.PIPE, shell=False)
# Allow process_ps to receive a SIGPIPE if process_monitor exits.
process_ps.stdout.close()
return process_monitor.communicate()[0]
if __name__== "__main__":
print monitor_process('firefox')
|
Monitor process using subprocess moduleimport subprocess
""" If the program is running "ps -ef | grep program" will return 2 or more rows
(one with the program itself and the second one with "grep program").
Otherwise, it will only return one row ("grep program")
You can trigger the alert on this if required.
"""
def monitor_process(name):
args=['ps','-ef']
args1=['grep','-c','%s' %name]
process_ps = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
process_monitor = subprocess.Popen(args1, stdin=process_ps.stdout, stdout=subprocess.PIPE, shell=False)
# Allow process_ps to receive a SIGPIPE if process_monitor exits.
process_ps.stdout.close()
return process_monitor.communicate()[0]
if __name__== "__main__":
print monitor_process('firefox')
|
<commit_before><commit_msg>Monitor process using subprocess module<commit_after>import subprocess
""" If the program is running "ps -ef | grep program" will return 2 or more rows
(one with the program itself and the second one with "grep program").
Otherwise, it will only return one row ("grep program")
You can trigger the alert on this if required.
"""
def monitor_process(name):
args=['ps','-ef']
args1=['grep','-c','%s' %name]
process_ps = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
process_monitor = subprocess.Popen(args1, stdin=process_ps.stdout, stdout=subprocess.PIPE, shell=False)
# Allow process_ps to receive a SIGPIPE if process_monitor exits.
process_ps.stdout.close()
return process_monitor.communicate()[0]
if __name__== "__main__":
print monitor_process('firefox')
|
|
ca9ed2756a12a2587f5b4d021597d2229196da50
|
api/common/migrations/0007_add_china_region.py
|
api/common/migrations/0007_add_china_region.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-24 21:52
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
Region = apps.get_model('common.Region')
region_to_add = 'China'
try:
Region.objects.get(name=region_to_add)
except Region.DoesNotExist:
Region.objects.create(name=region_to_add)
class Migration(migrations.Migration):
dependencies = [
('common', '0006_emailrecord'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
Add migration to add china region
|
Add migration to add china region
|
Python
|
apache-2.0
|
prattl/teamfinder,prattl/teamfinder,prattl/teamfinder,prattl/teamfinder
|
Add migration to add china region
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-24 21:52
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
Region = apps.get_model('common.Region')
region_to_add = 'China'
try:
Region.objects.get(name=region_to_add)
except Region.DoesNotExist:
Region.objects.create(name=region_to_add)
class Migration(migrations.Migration):
dependencies = [
('common', '0006_emailrecord'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add migration to add china region<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-24 21:52
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
Region = apps.get_model('common.Region')
region_to_add = 'China'
try:
Region.objects.get(name=region_to_add)
except Region.DoesNotExist:
Region.objects.create(name=region_to_add)
class Migration(migrations.Migration):
dependencies = [
('common', '0006_emailrecord'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
Add migration to add china region# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-24 21:52
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
Region = apps.get_model('common.Region')
region_to_add = 'China'
try:
Region.objects.get(name=region_to_add)
except Region.DoesNotExist:
Region.objects.create(name=region_to_add)
class Migration(migrations.Migration):
dependencies = [
('common', '0006_emailrecord'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Add migration to add china region<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-24 21:52
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
Region = apps.get_model('common.Region')
region_to_add = 'China'
try:
Region.objects.get(name=region_to_add)
except Region.DoesNotExist:
Region.objects.create(name=region_to_add)
class Migration(migrations.Migration):
dependencies = [
('common', '0006_emailrecord'),
]
operations = [
migrations.RunPython(forwards, migrations.RunPython.noop)
]
|
|
680b2cb1488f83aef5b45476e23bd93a90069872
|
herd-code/herd-tools/herd-content-loader/herdcl/hook-otags.py
|
herd-code/herd-tools/herd-content-loader/herdcl/hook-otags.py
|
hiddenimports = [
'numpy',
'pandas._libs.tslibs.timedeltas',
'pandas._libs.tslibs.nattype',
'pandas._libs.tslibs.np_datetime',
'pandas._libs.skiplist'
]
|
Create Content Loader app to Herd/DM standards - Configure Pyinstaller
|
DM-12166: Create Content Loader app to Herd/DM standards
- Configure Pyinstaller
|
Python
|
apache-2.0
|
FINRAOS/herd,FINRAOS/herd,FINRAOS/herd,FINRAOS/herd,FINRAOS/herd
|
DM-12166: Create Content Loader app to Herd/DM standards
- Configure Pyinstaller
|
hiddenimports = [
'numpy',
'pandas._libs.tslibs.timedeltas',
'pandas._libs.tslibs.nattype',
'pandas._libs.tslibs.np_datetime',
'pandas._libs.skiplist'
]
|
<commit_before><commit_msg>DM-12166: Create Content Loader app to Herd/DM standards
- Configure Pyinstaller<commit_after>
|
hiddenimports = [
'numpy',
'pandas._libs.tslibs.timedeltas',
'pandas._libs.tslibs.nattype',
'pandas._libs.tslibs.np_datetime',
'pandas._libs.skiplist'
]
|
DM-12166: Create Content Loader app to Herd/DM standards
- Configure Pyinstallerhiddenimports = [
'numpy',
'pandas._libs.tslibs.timedeltas',
'pandas._libs.tslibs.nattype',
'pandas._libs.tslibs.np_datetime',
'pandas._libs.skiplist'
]
|
<commit_before><commit_msg>DM-12166: Create Content Loader app to Herd/DM standards
- Configure Pyinstaller<commit_after>hiddenimports = [
'numpy',
'pandas._libs.tslibs.timedeltas',
'pandas._libs.tslibs.nattype',
'pandas._libs.tslibs.np_datetime',
'pandas._libs.skiplist'
]
|
|
56d14e7b0386588afd39f2413fafe0b9ba41806d
|
tests/app/soc/modules/gsoc/views/test_slot_transfer_admin.py
|
tests/app/soc/modules/gsoc/views/test_slot_transfer_admin.py
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for slot transfer admin view."""
from tests import profile_utils
from tests import test_utils
class SlotsTransferAdminPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for SlotsTransferAdminPage class."""
def setUp(self):
self.init()
self.url = '/gsoc/admin/slots/transfer/%s' % self.gsoc.key().name()
def testLoneUserAccessForbidden(self):
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testStudentAccessForbidden(self):
self.data.createStudent()
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testMentorAccessForbidden(self):
self.data.createMentor(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testOrgAdminAccessForbidden(self):
self.data.createOrgAdmin(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testHostAccessGranted(self):
self.data.createHost()
response = self.get(self.url)
self.assertResponseOK(response)
|
Access checking unit tests for SlotsTransferAdminPage.
|
Access checking unit tests for SlotsTransferAdminPage.
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Access checking unit tests for SlotsTransferAdminPage.
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for slot transfer admin view."""
from tests import profile_utils
from tests import test_utils
class SlotsTransferAdminPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for SlotsTransferAdminPage class."""
def setUp(self):
self.init()
self.url = '/gsoc/admin/slots/transfer/%s' % self.gsoc.key().name()
def testLoneUserAccessForbidden(self):
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testStudentAccessForbidden(self):
self.data.createStudent()
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testMentorAccessForbidden(self):
self.data.createMentor(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testOrgAdminAccessForbidden(self):
self.data.createOrgAdmin(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testHostAccessGranted(self):
self.data.createHost()
response = self.get(self.url)
self.assertResponseOK(response)
|
<commit_before><commit_msg>Access checking unit tests for SlotsTransferAdminPage.<commit_after>
|
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for slot transfer admin view."""
from tests import profile_utils
from tests import test_utils
class SlotsTransferAdminPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for SlotsTransferAdminPage class."""
def setUp(self):
self.init()
self.url = '/gsoc/admin/slots/transfer/%s' % self.gsoc.key().name()
def testLoneUserAccessForbidden(self):
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testStudentAccessForbidden(self):
self.data.createStudent()
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testMentorAccessForbidden(self):
self.data.createMentor(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testOrgAdminAccessForbidden(self):
self.data.createOrgAdmin(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testHostAccessGranted(self):
self.data.createHost()
response = self.get(self.url)
self.assertResponseOK(response)
|
Access checking unit tests for SlotsTransferAdminPage.# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for slot transfer admin view."""
from tests import profile_utils
from tests import test_utils
class SlotsTransferAdminPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for SlotsTransferAdminPage class."""
def setUp(self):
self.init()
self.url = '/gsoc/admin/slots/transfer/%s' % self.gsoc.key().name()
def testLoneUserAccessForbidden(self):
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testStudentAccessForbidden(self):
self.data.createStudent()
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testMentorAccessForbidden(self):
self.data.createMentor(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testOrgAdminAccessForbidden(self):
self.data.createOrgAdmin(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testHostAccessGranted(self):
self.data.createHost()
response = self.get(self.url)
self.assertResponseOK(response)
|
<commit_before><commit_msg>Access checking unit tests for SlotsTransferAdminPage.<commit_after># Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for slot transfer admin view."""
from tests import profile_utils
from tests import test_utils
class SlotsTransferAdminPageTest(test_utils.GSoCDjangoTestCase):
"""Unit tests for SlotsTransferAdminPage class."""
def setUp(self):
self.init()
self.url = '/gsoc/admin/slots/transfer/%s' % self.gsoc.key().name()
def testLoneUserAccessForbidden(self):
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testStudentAccessForbidden(self):
self.data.createStudent()
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testMentorAccessForbidden(self):
self.data.createMentor(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testOrgAdminAccessForbidden(self):
self.data.createOrgAdmin(self.org)
response = self.get(self.url)
self.assertResponseForbidden(response)
self.assertErrorTemplatesUsed(response)
def testHostAccessGranted(self):
self.data.createHost()
response = self.get(self.url)
self.assertResponseOK(response)
|
|
2299343d8b10658cc6682b23dbf9be9d5fd290f6
|
tests/testdata.py
|
tests/testdata.py
|
import ConfigParser
import csv
import unittest
class DataTest(unittest.TestCase):
def setUp(self):
config = ConfigParser.RawConfigParser()
config.read('../app.config')
# Load the data from the csv into an array
self.data = []
with open('../data/%s' % config.get('data', 'filename'), 'rb') as csvfile:
reader = csv.reader(csvfile)
# Skip header and parse data
reader.next()
for row in reader:
self.data.append([s.strip() for s in row])
def test_complete(self):
'''Ensure there are no day/country pairs missing data'''
date_country = dict()
dates = set()
countries = set()
for date, country, video_id in self.data:
dates.add(date)
countries.add(country)
date_country[date] = date_country.get(date, {})
date_country[date][country] = date_country[date].get(country, 0) + 1
for date in dates:
for country in countries:
count = date_country.get(date,{}).get(country,0)
self.assertNotEqual((date, country, count), (date, country, 0))
if __name__ == '__main__':
unittest.main()
|
Add unit test for data integrity.
|
Add unit test for data integrity.
|
Python
|
bsd-3-clause
|
c4fcm/WhatWeWatch-Analysis,c4fcm/WhatWeWatch-Analysis,c4fcm/WhatWeWatch-Analysis
|
Add unit test for data integrity.
|
import ConfigParser
import csv
import unittest
class DataTest(unittest.TestCase):
def setUp(self):
config = ConfigParser.RawConfigParser()
config.read('../app.config')
# Load the data from the csv into an array
self.data = []
with open('../data/%s' % config.get('data', 'filename'), 'rb') as csvfile:
reader = csv.reader(csvfile)
# Skip header and parse data
reader.next()
for row in reader:
self.data.append([s.strip() for s in row])
def test_complete(self):
'''Ensure there are no day/country pairs missing data'''
date_country = dict()
dates = set()
countries = set()
for date, country, video_id in self.data:
dates.add(date)
countries.add(country)
date_country[date] = date_country.get(date, {})
date_country[date][country] = date_country[date].get(country, 0) + 1
for date in dates:
for country in countries:
count = date_country.get(date,{}).get(country,0)
self.assertNotEqual((date, country, count), (date, country, 0))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for data integrity.<commit_after>
|
import ConfigParser
import csv
import unittest
class DataTest(unittest.TestCase):
def setUp(self):
config = ConfigParser.RawConfigParser()
config.read('../app.config')
# Load the data from the csv into an array
self.data = []
with open('../data/%s' % config.get('data', 'filename'), 'rb') as csvfile:
reader = csv.reader(csvfile)
# Skip header and parse data
reader.next()
for row in reader:
self.data.append([s.strip() for s in row])
def test_complete(self):
'''Ensure there are no day/country pairs missing data'''
date_country = dict()
dates = set()
countries = set()
for date, country, video_id in self.data:
dates.add(date)
countries.add(country)
date_country[date] = date_country.get(date, {})
date_country[date][country] = date_country[date].get(country, 0) + 1
for date in dates:
for country in countries:
count = date_country.get(date,{}).get(country,0)
self.assertNotEqual((date, country, count), (date, country, 0))
if __name__ == '__main__':
unittest.main()
|
Add unit test for data integrity.import ConfigParser
import csv
import unittest
class DataTest(unittest.TestCase):
def setUp(self):
config = ConfigParser.RawConfigParser()
config.read('../app.config')
# Load the data from the csv into an array
self.data = []
with open('../data/%s' % config.get('data', 'filename'), 'rb') as csvfile:
reader = csv.reader(csvfile)
# Skip header and parse data
reader.next()
for row in reader:
self.data.append([s.strip() for s in row])
def test_complete(self):
'''Ensure there are no day/country pairs missing data'''
date_country = dict()
dates = set()
countries = set()
for date, country, video_id in self.data:
dates.add(date)
countries.add(country)
date_country[date] = date_country.get(date, {})
date_country[date][country] = date_country[date].get(country, 0) + 1
for date in dates:
for country in countries:
count = date_country.get(date,{}).get(country,0)
self.assertNotEqual((date, country, count), (date, country, 0))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for data integrity.<commit_after>import ConfigParser
import csv
import unittest
class DataTest(unittest.TestCase):
def setUp(self):
config = ConfigParser.RawConfigParser()
config.read('../app.config')
# Load the data from the csv into an array
self.data = []
with open('../data/%s' % config.get('data', 'filename'), 'rb') as csvfile:
reader = csv.reader(csvfile)
# Skip header and parse data
reader.next()
for row in reader:
self.data.append([s.strip() for s in row])
def test_complete(self):
'''Ensure there are no day/country pairs missing data'''
date_country = dict()
dates = set()
countries = set()
for date, country, video_id in self.data:
dates.add(date)
countries.add(country)
date_country[date] = date_country.get(date, {})
date_country[date][country] = date_country[date].get(country, 0) + 1
for date in dates:
for country in countries:
count = date_country.get(date,{}).get(country,0)
self.assertNotEqual((date, country, count), (date, country, 0))
if __name__ == '__main__':
unittest.main()
|
|
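The data-integrity test above builds a nested date-to-country count and then asserts no (date, country) pair is missing. A minimal editorial sketch of the same check against a small made-up in-memory sample (the dates, countries and video ids below are hypothetical, not from the real CSV) looks like this:
import itertools
# Hypothetical sample rows standing in for the CSV fixture used by the test.
sample = [
    ('2013-08-01', 'US', 'vid1'),
    ('2013-08-01', 'BR', 'vid2'),
    ('2013-08-02', 'US', 'vid3'),  # note: no ('2013-08-02', 'BR') row
]
date_country = {}
for date, country, video_id in sample:
    date_country.setdefault(date, {})
    date_country[date][country] = date_country[date].get(country, 0) + 1
dates = {row[0] for row in sample}
countries = {row[1] for row in sample}
missing = [(d, c) for d, c in itertools.product(dates, countries)
           if date_country.get(d, {}).get(c, 0) == 0]
print(missing)  # the deliberately missing pair: [('2013-08-02', 'BR')]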
422390ff7eb4d97eaf0c5c1a1b250010ee766ec7
|
tools/cleanPYC.py
|
tools/cleanPYC.py
|
import re
import os
import sys
print("%s path\n" % sys.argv[0])
path = sys.argv[1]
for root, dirs, files in os.walk(path):
for file_ in files:
if re.match(".*.pyc$", file_):
abs_file = os.path.join(root, file_)
print("Clean %s" % abs_file)
os.remove(abs_file)
|
Add tool for clean pyc files
|
Add tool for clean pyc files
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com>
|
Python
|
apache-2.0
|
xcgspring/AXUI,xcgspring/AXUI,xcgspring/AXUI
|
Add tool for clean pyc files
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com>
|
import re
import os
import sys
print("%s path\n" % sys.argv[0])
path = sys.argv[1]
for root, dirs, files in os.walk(path):
for file_ in files:
if re.match(".*.pyc$", file_):
abs_file = os.path.join(root, file_)
print("Clean %s" % abs_file)
os.remove(abs_file)
|
<commit_before><commit_msg>Add tool for clean pyc files
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com><commit_after>
|
import re
import os
import sys
print("%s path\n" % sys.argv[0])
path = sys.argv[1]
for root, dirs, files in os.walk(path):
for file_ in files:
if re.match(".*.pyc$", file_):
abs_file = os.path.join(root, file_)
print("Clean %s" % abs_file)
os.remove(abs_file)
|
Add tool for clean pyc files
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com>
import re
import os
import sys
print("%s path\n" % sys.argv[0])
path = sys.argv[1]
for root, dirs, files in os.walk(path):
for file_ in files:
if re.match(".*.pyc$", file_):
abs_file = os.path.join(root, file_)
print("Clean %s" % abs_file)
os.remove(abs_file)
|
<commit_before><commit_msg>Add tool for clean pyc files
Signed-off-by: xcgspring <8f4f8d15922e4269158d45cde01dc3497961f40d@126.com><commit_after>
import re
import os
import sys
print("%s path\n" % sys.argv[0])
path = sys.argv[1]
for root, dirs, files in os.walk(path):
for file_ in files:
if re.match(".*.pyc$", file_):
abs_file = os.path.join(root, file_)
print("Clean %s" % abs_file)
os.remove(abs_file)
|
|
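The cleanup tool above walks the tree with os.walk and the pattern ".*.pyc$", whose unescaped dot also matches names like "foopyc". As a design note, a minimal editorial sketch (an alternative, not part of the commit) of the same sweep with pathlib avoids that and the manual join:
import sys
from pathlib import Path
# Editorial sketch: recursively remove *.pyc files under the path given on the CLI.
for pyc in Path(sys.argv[1]).rglob('*.pyc'):
    print("Clean %s" % pyc)
    pyc.unlink()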
5ed7db70874f3ebfe9c946d38ccf12228dacac3a
|
tests/test_git.py
|
tests/test_git.py
|
from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import raises
from pyolite.git import Git
class TestGit(TestCase):
@raises(ValueError)
def test_commit_with_no_message(self):
mock_repo = MagicMock()
mock_index = MagicMock()
mock_remotes = MagicMock()
mock_repo.index = mock_index
mock_repo.remotes.origin = mock_remotes
with patch.multiple('pyolite.git', Repo=mock_repo):
git = Git('~/path/to/repo')
objects = ['simple_object', 'more_complex_one']
git.commit(objects, '')
|
Test if we tried to commit with an empty message, it should raise a ValueError
|
Test if we tried to commit with an empty message, it should raise a ValueError
|
Python
|
bsd-2-clause
|
PressLabs/pyolite,shawkinsl/pyolite
|
Test if we tried to commit with an empty message, it should raise a ValueError
|
from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import raises
from pyolite.git import Git
class TestGit(TestCase):
@raises(ValueError)
def test_commit_with_no_message(self):
mock_repo = MagicMock()
mock_index = MagicMock()
mock_remotes = MagicMock()
mock_repo.index = mock_index
mock_repo.remotes.origin = mock_remotes
with patch.multiple('pyolite.git', Repo=mock_repo):
git = Git('~/path/to/repo')
objects = ['simple_object', 'more_complex_one']
git.commit(objects, '')
|
<commit_before><commit_msg>Test if we tried to commit with an empty message, it should raise a ValueError<commit_after>
|
from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import raises
from pyolite.git import Git
class TestGit(TestCase):
@raises(ValueError)
def test_commit_with_no_message(self):
mock_repo = MagicMock()
mock_index = MagicMock()
mock_remotes = MagicMock()
mock_repo.index = mock_index
mock_repo.remotes.origin = mock_remotes
with patch.multiple('pyolite.git', Repo=mock_repo):
git = Git('~/path/to/repo')
objects = ['simple_object', 'more_complex_one']
git.commit(objects, '')
|
Test if we tried to commit with an empty message, it should raise a ValueErrorfrom unittest import TestCase
from mock import MagicMock, patch
from nose.tools import raises
from pyolite.git import Git
class TestGit(TestCase):
@raises(ValueError)
def test_commit_with_no_message(self):
mock_repo = MagicMock()
mock_index = MagicMock()
mock_remotes = MagicMock()
mock_repo.index = mock_index
mock_repo.remotes.origin = mock_remotes
with patch.multiple('pyolite.git', Repo=mock_repo):
git = Git('~/path/to/repo')
objects = ['simple_object', 'more_complex_one']
git.commit(objects, '')
|
<commit_before><commit_msg>Test if we tried to commit with an empty message, it should raise a ValueError<commit_after>from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import raises
from pyolite.git import Git
class TestGit(TestCase):
@raises(ValueError)
def test_commit_with_no_message(self):
mock_repo = MagicMock()
mock_index = MagicMock()
mock_remotes = MagicMock()
mock_repo.index = mock_index
mock_repo.remotes.origin = mock_remotes
with patch.multiple('pyolite.git', Repo=mock_repo):
git = Git('~/path/to/repo')
objects = ['simple_object', 'more_complex_one']
git.commit(objects, '')
|
|
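The empty-commit-message test above relies on nose's @raises decorator. A hedged sketch of the same expectation written with pytest.raises (the Git('~/path/to/repo') and commit(objects, message) calls are copied from the test itself, not taken from pyolite's documentation) could look like:
import pytest
from mock import MagicMock, patch
from pyolite.git import Git
def test_commit_with_no_message_pytest_style():
    # Same mocking idea as above: replace Repo inside pyolite.git.
    with patch.multiple('pyolite.git', Repo=MagicMock()):
        git = Git('~/path/to/repo')
        with pytest.raises(ValueError):
            git.commit(['simple_object', 'more_complex_one'], '')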
fc44d4463045e458796d13b3c97b34cf6ba47f61
|
bluechip/player/createpitchweights.py
|
bluechip/player/createpitchweights.py
|
import random
from player.models import Player, Pitch, PlayerPitchWeight
#TODO: Need to centralize this function call.
random.seed(123456789)
pitch_records = Pitch.objects.all().order_by('id')
pitches_count = pitch_records.count()
for p in Player.objects.all():
weights = []
sum_weights = 0
for _ in xrange(pitches_count):
mu = 1.0 / pitches_count
sigma = (2.0 / 3.0) * mu
w = random.normalvariate(mu, sigma)
w = max(w, 0.0)
weights.append(w)
sum_weights += w
# Normalize weights before creating records
for i in xrange(len(weights)):
weights[i] /= sum_weights
j = 0
for pitch in pitch_records:
ppw = PlayerPitchWeight(player=p, pitch=pitch, weight=weights[j])
ppw.save()
j += 1
|
Add script to create the player pitch weights.
|
Add script to create the player pitch weights.
|
Python
|
mit
|
isuraed/bluechip
|
Add script to create the player pitch weights.
|
import random
from player.models import Player, Pitch, PlayerPitchWeight
#TODO: Need to centralize this function call.
random.seed(123456789)
pitch_records = Pitch.objects.all().order_by('id')
pitches_count = pitch_records.count()
for p in Player.objects.all():
weights = []
sum_weights = 0
for _ in xrange(pitches_count):
mu = 1.0 / pitches_count
sigma = (2.0 / 3.0) * mu
w = random.normalvariate(mu, sigma)
w = max(w, 0.0)
weights.append(w)
sum_weights += w
# Normalize weights before creating records
for i in xrange(len(weights)):
weights[i] /= sum_weights
j = 0
for pitch in pitch_records:
ppw = PlayerPitchWeight(player=p, pitch=pitch, weight=weights[j])
ppw.save()
j += 1
|
<commit_before><commit_msg>Add script to create the player pitch weights.<commit_after>
|
import random
from player.models import Player, Pitch, PlayerPitchWeight
#TODO: Need to centralize this function call.
random.seed(123456789)
pitch_records = Pitch.objects.all().order_by('id')
pitches_count = pitch_records.count()
for p in Player.objects.all():
weights = []
sum_weights = 0
for _ in xrange(pitches_count):
mu = 1.0 / pitches_count
sigma = (2.0 / 3.0) * mu
w = random.normalvariate(mu, sigma)
w = max(w, 0.0)
weights.append(w)
sum_weights += w
# Normalize weights before creating records
for i in xrange(len(weights)):
weights[i] /= sum_weights
j = 0
for pitch in pitch_records:
ppw = PlayerPitchWeight(player=p, pitch=pitch, weight=weights[j])
ppw.save()
j += 1
|
Add script to create the player pitch weights.import random
from player.models import Player, Pitch, PlayerPitchWeight
#TODO: Need to centralize this function call.
random.seed(123456789)
pitch_records = Pitch.objects.all().order_by('id')
pitches_count = pitch_records.count()
for p in Player.objects.all():
weights = []
sum_weights = 0
for _ in xrange(pitches_count):
mu = 1.0 / pitches_count
sigma = (2.0 / 3.0) * mu
w = random.normalvariate(mu, sigma)
w = max(w, 0.0)
weights.append(w)
sum_weights += w
# Normalize weights before creating records
for i in xrange(len(weights)):
weights[i] /= sum_weights
j = 0
for pitch in pitch_records:
ppw = PlayerPitchWeight(player=p, pitch=pitch, weight=weights[j])
ppw.save()
j += 1
|
<commit_before><commit_msg>Add script to create the player pitch weights.<commit_after>import random
from player.models import Player, Pitch, PlayerPitchWeight
#TODO: Need to centralize this function call.
random.seed(123456789)
pitch_records = Pitch.objects.all().order_by('id')
pitches_count = pitch_records.count()
for p in Player.objects.all():
weights = []
sum_weights = 0
for _ in xrange(pitches_count):
mu = 1.0 / pitches_count
sigma = (2.0 / 3.0) * mu
w = random.normalvariate(mu, sigma)
w = max(w, 0.0)
weights.append(w)
sum_weights += w
# Normalize weights before creating records
for i in xrange(len(weights)):
weights[i] /= sum_weights
j = 0
for pitch in pitch_records:
ppw = PlayerPitchWeight(player=p, pitch=pitch, weight=weights[j])
ppw.save()
j += 1
|
|
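The script above interleaves random draws with Django model writes. A small editorial sketch of just the draw-and-normalise step (pitches_count is a made-up value here; the real one comes from the Pitch table) shows that the resulting weights sum to 1:
import random
random.seed(123456789)
pitches_count = 12  # hypothetical; stands in for Pitch.objects.count()
mu = 1.0 / pitches_count
sigma = (2.0 / 3.0) * mu
weights = [max(random.normalvariate(mu, sigma), 0.0) for _ in range(pitches_count)]
total = sum(weights)
weights = [w / total for w in weights]
assert abs(sum(weights) - 1.0) < 1e-6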
d3f68c385da4d2fa864ba748f41785be01c26c34
|
py/student-attendance-record-i.py
|
py/student-attendance-record-i.py
|
class Solution(object):
def checkRecord(self, s):
"""
:type s: str
:rtype: bool
"""
A = False
L = 0
for c in s:
if c == 'L':
L += 1
if L > 2:
return False
else:
L = 0
if c == 'A':
if A:
return False
else:
A = True
return True
|
Add py solution for 551. Student Attendance Record I
|
Add py solution for 551. Student Attendance Record I
551. Student Attendance Record I: https://leetcode.com/problems/student-attendance-record-i/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 551. Student Attendance Record I
551. Student Attendance Record I: https://leetcode.com/problems/student-attendance-record-i/
|
class Solution(object):
def checkRecord(self, s):
"""
:type s: str
:rtype: bool
"""
A = False
L = 0
for c in s:
if c == 'L':
L += 1
if L > 2:
return False
else:
L = 0
if c == 'A':
if A:
return False
else:
A = True
return True
|
<commit_before><commit_msg>Add py solution for 551. Student Attendance Record I
551. Student Attendance Record I: https://leetcode.com/problems/student-attendance-record-i/<commit_after>
|
class Solution(object):
def checkRecord(self, s):
"""
:type s: str
:rtype: bool
"""
A = False
L = 0
for c in s:
if c == 'L':
L += 1
if L > 2:
return False
else:
L = 0
if c == 'A':
if A:
return False
else:
A = True
return True
|
Add py solution for 551. Student Attendance Record I
551. Student Attendance Record I: https://leetcode.com/problems/student-attendance-record-i/class Solution(object):
def checkRecord(self, s):
"""
:type s: str
:rtype: bool
"""
A = False
L = 0
for c in s:
if c == 'L':
L += 1
if L > 2:
return False
else:
L = 0
if c == 'A':
if A:
return False
else:
A = True
return True
|
<commit_before><commit_msg>Add py solution for 551. Student Attendance Record I
551. Student Attendance Record I: https://leetcode.com/problems/student-attendance-record-i/<commit_after>class Solution(object):
def checkRecord(self, s):
"""
:type s: str
:rtype: bool
"""
A = False
L = 0
for c in s:
if c == 'L':
L += 1
if L > 2:
return False
else:
L = 0
if c == 'A':
if A:
return False
else:
A = True
return True
|
|
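A quick usage check of the checkRecord solution above, using the example records commonly cited for this problem plus one extra absence case:
s = Solution()  # the class defined in the solution above
assert s.checkRecord("PPALLP")      # one 'A', at most two consecutive 'L'
assert not s.checkRecord("PPALLL")  # three consecutive 'L'
assert not s.checkRecord("AA")      # more than one 'A'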
a7ccd7bc02476cfad85280ff1e742671453360de
|
migrations/versions/420_dos_is_coming.py
|
migrations/versions/420_dos_is_coming.py
|
"""DOS is coming
Revision ID: 420
Revises: 410_remove_empty_drafts
Create Date: 2015-11-16 14:10:35.814066
"""
# revision identifiers, used by Alembic.
revision = '420'
down_revision = '410_remove_empty_drafts'
from alembic import op
import sqlalchemy as sa
from app.models import Framework
def upgrade():
op.execute("COMMIT")
op.execute("ALTER TYPE framework_enum ADD VALUE IF NOT EXISTS 'dos' after 'gcloud'")
framework = Framework.query.filter(Framework.slug == 'digital-outcomes-and-specialists').first()
if not framework:
op.execute("""
INSERT INTO frameworks (name, framework, status, slug)
values('Digital Outcomes and Specialists', 'dos', 'coming', 'digital-outcomes-and-specialists')
""")
def downgrade():
op.execute("""
DELETE FROM frameworks where slug='digital-outcomes-and-specialists'
""")
|
Add Digital Outcomes and Specialists to frameworks
|
Add Digital Outcomes and Specialists to frameworks
This commit checks to see if:
- the framework exists in the enum before trying to add it
- the framework exists in the table before trying to add
This means that it won’t fall over on environments where DOS has already been
created (eg local dev machines, preview).
The downgrade will fail if data associated with a framework has been created (eg
a supplier has registered interest). We made this trade off because:
- can rollback production if anything goes wrong if we deploy this
- no framework-related data will be created on production until the framework
is open—by which time it will be way too late to rollback this far
_Paired with @TheDoubleK_
|
Python
|
mit
|
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
|
Add Digital Outcomes and Specialists to frameworks
This commit checks to see if:
- the framework exists in the enum before trying to add it
- the framework exists in the table before trying to add
This means that it won’t fall over on environments where DOS has already been
created (eg local dev machines, preview).
The downgrade will fail if data associated with a framework has been created (eg
a supplier has registered interest). We made this trade off because:
- can rollback production if anything goes wrong if we deploy this
- no framework-related data will be created on production until the framework
is open—by which time it will be way too late to rollback this far
_Paired with @TheDoubleK_
|
"""DOS is coming
Revision ID: 420
Revises: 410_remove_empty_drafts
Create Date: 2015-11-16 14:10:35.814066
"""
# revision identifiers, used by Alembic.
revision = '420'
down_revision = '410_remove_empty_drafts'
from alembic import op
import sqlalchemy as sa
from app.models import Framework
def upgrade():
op.execute("COMMIT")
op.execute("ALTER TYPE framework_enum ADD VALUE IF NOT EXISTS 'dos' after 'gcloud'")
framework = Framework.query.filter(Framework.slug == 'digital-outcomes-and-specialists').first()
if not framework:
op.execute("""
INSERT INTO frameworks (name, framework, status, slug)
values('Digital Outcomes and Specialists', 'dos', 'coming', 'digital-outcomes-and-specialists')
""")
def downgrade():
op.execute("""
DELETE FROM frameworks where slug='digital-outcomes-and-specialists'
""")
|
<commit_before><commit_msg>Add Digital Outcomes and Specialists to frameworks
This commit checks to see if:
- the framework exists in the enum before trying to add it
- the framework exists in the table before trying to add
This means that it won’t fall over on environments where DOS has already been
created (eg local dev machines, preview).
The downgrade will fail if data associated with a framework has been created (eg
a supplier has registered interest). We made this trade off because:
- can rollback production if anything goes wrong if we deploy this
- no framework-related data will be created on production until the framework
is open—by which time it will be way too late to rollback this far
_Paired with @TheDoubleK_<commit_after>
|
"""DOS is coming
Revision ID: 420
Revises: 410_remove_empty_drafts
Create Date: 2015-11-16 14:10:35.814066
"""
# revision identifiers, used by Alembic.
revision = '420'
down_revision = '410_remove_empty_drafts'
from alembic import op
import sqlalchemy as sa
from app.models import Framework
def upgrade():
op.execute("COMMIT")
op.execute("ALTER TYPE framework_enum ADD VALUE IF NOT EXISTS 'dos' after 'gcloud'")
framework = Framework.query.filter(Framework.slug == 'digital-outcomes-and-specialists').first()
if not framework:
op.execute("""
INSERT INTO frameworks (name, framework, status, slug)
values('Digital Outcomes and Specialists', 'dos', 'coming', 'digital-outcomes-and-specialists')
""")
def downgrade():
op.execute("""
DELETE FROM frameworks where slug='digital-outcomes-and-specialists'
""")
|
Add Digital Outcomes and Specialists to frameworks
This commit checks to see if:
- the framework exists in the enum before trying to add it
- the framework exists in the table before trying to add
This means that it won’t fall over on environments where DOS has already been
created (eg local dev machines, preview).
The downgrade will fail if data associated with a framework has been created (eg
a supplier has registered interest). We made this trade off because:
- can rollback production if anything goes wrong if we deploy this
- no framework-related data will be created on production until the framework
is open—by which time it will be way too late to rollback this far
_Paired with @TheDoubleK_"""DOS is coming
Revision ID: 420
Revises: 410_remove_empty_drafts
Create Date: 2015-11-16 14:10:35.814066
"""
# revision identifiers, used by Alembic.
revision = '420'
down_revision = '410_remove_empty_drafts'
from alembic import op
import sqlalchemy as sa
from app.models import Framework
def upgrade():
op.execute("COMMIT")
op.execute("ALTER TYPE framework_enum ADD VALUE IF NOT EXISTS 'dos' after 'gcloud'")
framework = Framework.query.filter(Framework.slug == 'digital-outcomes-and-specialists').first()
if not framework:
op.execute("""
INSERT INTO frameworks (name, framework, status, slug)
values('Digital Outcomes and Specialists', 'dos', 'coming', 'digital-outcomes-and-specialists')
""")
def downgrade():
op.execute("""
DELETE FROM frameworks where slug='digital-outcomes-and-specialists'
""")
|
<commit_before><commit_msg>Add Digital Outcomes and Specialists to frameworks
This commit checks to see if:
- the framework exists in the enum before trying to add it
- the framework exists in the table before trying to add
This means that it won’t fall over on environments where DOS has already been
created (eg local dev machines, preview).
The downgrade will fail if data associated with a framework has been created (eg
a supplier has registered interest). We made this trade off because:
- can rollback production if anything goes wrong if we deploy this
- no framework-related data will be created on production until the framework
is open—by which time it will be way too late to rollback this far
_Paired with @TheDoubleK_<commit_after>"""DOS is coming
Revision ID: 420
Revises: 410_remove_empty_drafts
Create Date: 2015-11-16 14:10:35.814066
"""
# revision identifiers, used by Alembic.
revision = '420'
down_revision = '410_remove_empty_drafts'
from alembic import op
import sqlalchemy as sa
from app.models import Framework
def upgrade():
op.execute("COMMIT")
op.execute("ALTER TYPE framework_enum ADD VALUE IF NOT EXISTS 'dos' after 'gcloud'")
framework = Framework.query.filter(Framework.slug == 'digital-outcomes-and-specialists').first()
if not framework:
op.execute("""
INSERT INTO frameworks (name, framework, status, slug)
values('Digital Outcomes and Specialists', 'dos', 'coming', 'digital-outcomes-and-specialists')
""")
def downgrade():
op.execute("""
DELETE FROM frameworks where slug='digital-outcomes-and-specialists'
""")
|
|
c5dfcffdf743e2c26b8dba6e3be8aee7d7aaa608
|
test/test_join_bytes.py
|
test/test_join_bytes.py
|
import re
import linesep
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# Based on <https://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios>
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for scenario in metafunc.module.scenarios:
idlist.append(scenario[0])
argvalues.append([scenario[1][argname] for argname in metafunc.fixturenames])
metafunc.parametrize(metafunc.fixturenames, argvalues, ids=idlist, scope="module")
scenarios = [
('empty', {
"entries": [],
"sep": b'\n',
"preceded": b'',
"terminated": b'',
"separated": b'',
}),
('empty_str', {
"entries": [b''],
"sep": b'\n',
"preceded": b'\n',
"terminated": b'\n',
"separated": b'',
}),
]
def test_join_preceded(entries, sep, preceded):
assert linesep.join_preceded(entries, sep) == preceded
def test_join_terminated(entries, sep, terminated):
assert linesep.join_terminated(entries, sep) == terminated
def test_join_separated(entries, sep, separated):
assert linesep.join_separated(entries, sep) == separated
def test_write_preceded(entries, sep, preceded):
fp = BytesIO()
linesep.write_preceded(fp, entries, sep)
assert fp.getvalue() == preceded
def test_write_terminated(entries, sep, terminated):
fp = BytesIO()
linesep.write_terminated(fp, entries, sep)
assert fp.getvalue() == terminated
def test_write_separated(entries, sep, separated):
fp = BytesIO()
linesep.write_separated(fp, entries, sep)
assert fp.getvalue() == separated
|
Test `write_*` and `join_*` on bytes
|
Test `write_*` and `join_*` on bytes
|
Python
|
mit
|
jwodder/linesep
|
Test `write_*` and `join_*` on bytes
|
import re
import linesep
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# Based on <https://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios>
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for scenario in metafunc.module.scenarios:
idlist.append(scenario[0])
argvalues.append([scenario[1][argname] for argname in metafunc.fixturenames])
metafunc.parametrize(metafunc.fixturenames, argvalues, ids=idlist, scope="module")
scenarios = [
('empty', {
"entries": [],
"sep": b'\n',
"preceded": b'',
"terminated": b'',
"separated": b'',
}),
('empty_str', {
"entries": [b''],
"sep": b'\n',
"preceded": b'\n',
"terminated": b'\n',
"separated": b'',
}),
]
def test_join_preceded(entries, sep, preceded):
assert linesep.join_preceded(entries, sep) == preceded
def test_join_terminated(entries, sep, terminated):
assert linesep.join_terminated(entries, sep) == terminated
def test_join_separated(entries, sep, separated):
assert linesep.join_separated(entries, sep) == separated
def test_write_preceded(entries, sep, preceded):
fp = BytesIO()
linesep.write_preceded(fp, entries, sep)
assert fp.getvalue() == preceded
def test_write_terminated(entries, sep, terminated):
fp = BytesIO()
linesep.write_terminated(fp, entries, sep)
assert fp.getvalue() == terminated
def test_write_separated(entries, sep, separated):
fp = BytesIO()
linesep.write_separated(fp, entries, sep)
assert fp.getvalue() == separated
|
<commit_before><commit_msg>Test `write_*` and `join_*` on bytes<commit_after>
|
import re
import linesep
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# Based on <https://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios>
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for scenario in metafunc.module.scenarios:
idlist.append(scenario[0])
argvalues.append([scenario[1][argname] for argname in metafunc.fixturenames])
metafunc.parametrize(metafunc.fixturenames, argvalues, ids=idlist, scope="module")
scenarios = [
('empty', {
"entries": [],
"sep": b'\n',
"preceded": b'',
"terminated": b'',
"separated": b'',
}),
('empty_str', {
"entries": [b''],
"sep": b'\n',
"preceded": b'\n',
"terminated": b'\n',
"separated": b'',
}),
]
def test_join_preceded(entries, sep, preceded):
assert linesep.join_preceded(entries, sep) == preceded
def test_join_terminated(entries, sep, terminated):
assert linesep.join_terminated(entries, sep) == terminated
def test_join_separated(entries, sep, separated):
assert linesep.join_separated(entries, sep) == separated
def test_write_preceded(entries, sep, preceded):
fp = BytesIO()
linesep.write_preceded(fp, entries, sep)
assert fp.getvalue() == preceded
def test_write_terminated(entries, sep, terminated):
fp = BytesIO()
linesep.write_terminated(fp, entries, sep)
assert fp.getvalue() == terminated
def test_write_separated(entries, sep, separated):
fp = BytesIO()
linesep.write_separated(fp, entries, sep)
assert fp.getvalue() == separated
|
Test `write_*` and `join_*` on bytesimport re
import linesep
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# Based on <https://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios>
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for scenario in metafunc.module.scenarios:
idlist.append(scenario[0])
argvalues.append([scenario[1][argname] for argname in metafunc.fixturenames])
metafunc.parametrize(metafunc.fixturenames, argvalues, ids=idlist, scope="module")
scenarios = [
('empty', {
"entries": [],
"sep": b'\n',
"preceded": b'',
"terminated": b'',
"separated": b'',
}),
('empty_str', {
"entries": [b''],
"sep": b'\n',
"preceded": b'\n',
"terminated": b'\n',
"separated": b'',
}),
]
def test_join_preceded(entries, sep, preceded):
assert linesep.join_preceded(entries, sep) == preceded
def test_join_terminated(entries, sep, terminated):
assert linesep.join_terminated(entries, sep) == terminated
def test_join_separated(entries, sep, separated):
assert linesep.join_separated(entries, sep) == separated
def test_write_preceded(entries, sep, preceded):
fp = BytesIO()
linesep.write_preceded(fp, entries, sep)
assert fp.getvalue() == preceded
def test_write_terminated(entries, sep, terminated):
fp = BytesIO()
linesep.write_terminated(fp, entries, sep)
assert fp.getvalue() == terminated
def test_write_separated(entries, sep, separated):
fp = BytesIO()
linesep.write_separated(fp, entries, sep)
assert fp.getvalue() == separated
|
<commit_before><commit_msg>Test `write_*` and `join_*` on bytes<commit_after>import re
import linesep
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# Based on <https://pytest.org/latest/example/parametrize.html#a-quick-port-of-testscenarios>
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for scenario in metafunc.module.scenarios:
idlist.append(scenario[0])
argvalues.append([scenario[1][argname] for argname in metafunc.fixturenames])
metafunc.parametrize(metafunc.fixturenames, argvalues, ids=idlist, scope="module")
scenarios = [
('empty', {
"entries": [],
"sep": b'\n',
"preceded": b'',
"terminated": b'',
"separated": b'',
}),
('empty_str', {
"entries": [b''],
"sep": b'\n',
"preceded": b'\n',
"terminated": b'\n',
"separated": b'',
}),
]
def test_join_preceded(entries, sep, preceded):
assert linesep.join_preceded(entries, sep) == preceded
def test_join_terminated(entries, sep, terminated):
assert linesep.join_terminated(entries, sep) == terminated
def test_join_separated(entries, sep, separated):
assert linesep.join_separated(entries, sep) == separated
def test_write_preceded(entries, sep, preceded):
fp = BytesIO()
linesep.write_preceded(fp, entries, sep)
assert fp.getvalue() == preceded
def test_write_terminated(entries, sep, terminated):
fp = BytesIO()
linesep.write_terminated(fp, entries, sep)
assert fp.getvalue() == terminated
def test_write_separated(entries, sep, separated):
fp = BytesIO()
linesep.write_separated(fp, entries, sep)
assert fp.getvalue() == separated
|
|
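The two scenarios above only cover empty input. A further scenario one might append (an editorial sketch: the expected byte strings follow the convention the existing cases imply, namely the separator before each entry, after each entry, or only between entries) would be:
scenarios.append(
    ('two_entries', {
        "entries": [b'foo', b'bar'],
        "sep": b'\n',
        "preceded": b'\nfoo\nbar',
        "terminated": b'foo\nbar\n',
        "separated": b'foo\nbar',
    })
)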
a30cd68e77242df4efadc75c4390dd8a3ce68612
|
src/ggrc/migrations/versions/20170103101308_42b22b9ca859__fix_audit_empty_status.py
|
src/ggrc/migrations/versions/20170103101308_42b22b9ca859__fix_audit_empty_status.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix audit empty status
Create Date: 2016-12-22 13:53:24.497701
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '42b22b9ca859'
down_revision = '4fcaef05479f'
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE audits SET status='Planned' WHERE status=0")
op.alter_column('audits', 'status', nullable=True, type_=sa.String(250),
existing_type=sa.Enum(*VALID_STATES))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column('audits', 'status', nullable=False,
type_=sa.Enum(*VALID_STATES), existing_type=sa.String)
|
Add data migration for Audit's empty status
|
Add data migration for Audit's empty status
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core
|
Add data migration for Audit's empty status
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix audit empty status
Create Date: 2016-12-22 13:53:24.497701
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '42b22b9ca859'
down_revision = '4fcaef05479f'
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE audits SET status='Planned' WHERE status=0")
op.alter_column('audits', 'status', nullable=True, type_=sa.String(250),
existing_type=sa.Enum(*VALID_STATES))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column('audits', 'status', nullable=False,
type_=sa.Enum(*VALID_STATES), existing_type=sa.String)
|
<commit_before><commit_msg>Add data migration for Audit's empty status<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix audit empty status
Create Date: 2016-12-22 13:53:24.497701
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '42b22b9ca859'
down_revision = '4fcaef05479f'
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE audits SET status='Planned' WHERE status=0")
op.alter_column('audits', 'status', nullable=True, type_=sa.String(250),
existing_type=sa.Enum(*VALID_STATES))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column('audits', 'status', nullable=False,
type_=sa.Enum(*VALID_STATES), existing_type=sa.String)
|
Add data migration for Audit's empty status# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix audit empty status
Create Date: 2016-12-22 13:53:24.497701
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '42b22b9ca859'
down_revision = '4fcaef05479f'
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE audits SET status='Planned' WHERE status=0")
op.alter_column('audits', 'status', nullable=True, type_=sa.String(250),
existing_type=sa.Enum(*VALID_STATES))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column('audits', 'status', nullable=False,
type_=sa.Enum(*VALID_STATES), existing_type=sa.String)
|
<commit_before><commit_msg>Add data migration for Audit's empty status<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Fix audit empty status
Create Date: 2016-12-22 13:53:24.497701
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '42b22b9ca859'
down_revision = '4fcaef05479f'
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute("UPDATE audits SET status='Planned' WHERE status=0")
op.alter_column('audits', 'status', nullable=True, type_=sa.String(250),
existing_type=sa.Enum(*VALID_STATES))
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.alter_column('audits', 'status', nullable=False,
type_=sa.Enum(*VALID_STATES), existing_type=sa.String)
|
|
66b5a1089ed0ce2e615f889f35b5e39db91950ae
|
mezzanine/core/management/commands/runserver.py
|
mezzanine/core/management/commands/runserver.py
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import Http404
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
try:
return super(MezzStaticFilesHandler, self).get_response(request)
except Http404:
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
raise
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
response = super(MezzStaticFilesHandler, self).get_response(request)
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if response.status_code == 404 and request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
|
Fix serving uploaded files during development.
|
Fix serving uploaded files during development.
|
Python
|
bsd-2-clause
|
SoLoHiC/mezzanine,industrydive/mezzanine,jjz/mezzanine,Kniyl/mezzanine,stephenmcd/mezzanine,adrian-the-git/mezzanine,biomassives/mezzanine,emile2016/mezzanine,dekomote/mezzanine-modeltranslation-backport,Cicero-Zhao/mezzanine,nikolas/mezzanine,industrydive/mezzanine,fusionbox/mezzanine,saintbird/mezzanine,douglaskastle/mezzanine,PegasusWang/mezzanine,damnfine/mezzanine,dustinrb/mezzanine,douglaskastle/mezzanine,gradel/mezzanine,viaregio/mezzanine,vladir/mezzanine,ryneeverett/mezzanine,webounty/mezzanine,nikolas/mezzanine,theclanks/mezzanine,batpad/mezzanine,Skytorn86/mezzanine,cccs-web/mezzanine,agepoly/mezzanine,ryneeverett/mezzanine,AlexHill/mezzanine,readevalprint/mezzanine,wyzex/mezzanine,tuxinhang1989/mezzanine,frankier/mezzanine,jjz/mezzanine,Cajoline/mezzanine,Kniyl/mezzanine,geodesign/mezzanine,dustinrb/mezzanine,wyzex/mezzanine,biomassives/mezzanine,frankier/mezzanine,dsanders11/mezzanine,sjdines/mezzanine,wyzex/mezzanine,molokov/mezzanine,PegasusWang/mezzanine,molokov/mezzanine,frankchin/mezzanine,nikolas/mezzanine,gradel/mezzanine,adrian-the-git/mezzanine,sjuxax/mezzanine,stephenmcd/mezzanine,jerivas/mezzanine,damnfine/mezzanine,wbtuomela/mezzanine,christianwgd/mezzanine,sjdines/mezzanine,vladir/mezzanine,dovydas/mezzanine,viaregio/mezzanine,dustinrb/mezzanine,frankier/mezzanine,readevalprint/mezzanine,viaregio/mezzanine,dekomote/mezzanine-modeltranslation-backport,jjz/mezzanine,jerivas/mezzanine,mush42/mezzanine,spookylukey/mezzanine,Skytorn86/mezzanine,joshcartme/mezzanine,promil23/mezzanine,dsanders11/mezzanine,spookylukey/mezzanine,mush42/mezzanine,jerivas/mezzanine,spookylukey/mezzanine,fusionbox/mezzanine,ZeroXn/mezzanine,joshcartme/mezzanine,geodesign/mezzanine,wbtuomela/mezzanine,SoLoHiC/mezzanine,sjuxax/mezzanine,tuxinhang1989/mezzanine,eino-makitalo/mezzanine,industrydive/mezzanine,douglaskastle/mezzanine,saintbird/mezzanine,dovydas/mezzanine,stephenmcd/mezzanine,geodesign/mezzanine,PegasusWang/mezzanine,agepoly/mezzanine,sjdines/mezzanine,molokov/mezzanine,christianwgd/mezzanine,theclanks/mezzanine,adrian-the-git/mezzanine,dekomote/mezzanine-modeltranslation-backport,theclanks/mezzanine,webounty/mezzanine,promil23/mezzanine,ZeroXn/mezzanine,Cajoline/mezzanine,emile2016/mezzanine,eino-makitalo/mezzanine,emile2016/mezzanine,damnfine/mezzanine,readevalprint/mezzanine,eino-makitalo/mezzanine,AlexHill/mezzanine,ryneeverett/mezzanine,Kniyl/mezzanine,mush42/mezzanine,webounty/mezzanine,frankchin/mezzanine,wbtuomela/mezzanine,cccs-web/mezzanine,Cicero-Zhao/mezzanine,Cajoline/mezzanine,agepoly/mezzanine,batpad/mezzanine,biomassives/mezzanine,SoLoHiC/mezzanine,gradel/mezzanine,joshcartme/mezzanine,tuxinhang1989/mezzanine,sjuxax/mezzanine,frankchin/mezzanine,dovydas/mezzanine,christianwgd/mezzanine,vladir/mezzanine,ZeroXn/mezzanine,promil23/mezzanine,dsanders11/mezzanine,saintbird/mezzanine,Skytorn86/mezzanine
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import Http404
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
try:
return super(MezzStaticFilesHandler, self).get_response(request)
except Http404:
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
raise
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
Fix serving uploaded files during development.
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
response = super(MezzStaticFilesHandler, self).get_response(request)
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if response.status_code == 404 and request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
|
<commit_before>
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import Http404
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
try:
return super(MezzStaticFilesHandler, self).get_response(request)
except Http404:
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
raise
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
<commit_msg>Fix serving uploaded files during development.<commit_after>
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
response = super(MezzStaticFilesHandler, self).get_response(request)
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if response.status_code == 404 and request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
|
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import Http404
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
try:
return super(MezzStaticFilesHandler, self).get_response(request)
except Http404:
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
raise
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
Fix serving uploaded files during development.
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
response = super(MezzStaticFilesHandler, self).get_response(request)
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if response.status_code == 404 and request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
|
<commit_before>
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import Http404
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
try:
return super(MezzStaticFilesHandler, self).get_response(request)
except Http404:
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
raise
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
<commit_msg>Fix serving uploaded files during development.<commit_after>
import os
from django.conf import settings
from django.contrib.staticfiles.management.commands import runserver
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandler(StaticFilesHandler):
def get_response(self, request):
response = super(MezzStaticFilesHandler, self).get_response(request)
handled = (settings.STATIC_URL, settings.MEDIA_URL)
if response.status_code == 404 and request.path.startswith(handled):
path = self.file_path(request.path).replace(os.sep, "/")
return serve(request, path, document_root=settings.STATIC_ROOT)
class Command(runserver.Command):
"""
Overrides runserver so that we can serve uploaded files
during development, and not require every single developer on
every single one of their projects to have to set up multiple
web server aliases for serving static content.
See https://code.djangoproject.com/ticket/15199
For ease, we also serve any static files that have been stored
under the project's ``STATIC_ROOT``.
"""
def get_handler(self, *args, **options):
handler = super(Command, self).get_handler(*args, **options)
if settings.DEBUG or options["insecure_serving"]:
handler = MezzStaticFilesHandler(handler)
return handler
|
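One thing to note about the patched handler as recorded above: when the response is not a 404 under STATIC_URL or MEDIA_URL, get_response falls off the end and returns None. An editorial sketch with an explicit fallthrough (an editorial assumption, not part of the recorded commit) would be:
import os
from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.views.static import serve
class MezzStaticFilesHandlerSketch(StaticFilesHandler):
    def get_response(self, request):
        response = super(MezzStaticFilesHandlerSketch, self).get_response(request)
        handled = (settings.STATIC_URL, settings.MEDIA_URL)
        if response.status_code == 404 and request.path.startswith(handled):
            path = self.file_path(request.path).replace(os.sep, "/")
            return serve(request, path, document_root=settings.STATIC_ROOT)
        # Editorial addition: pass every other response through unchanged.
        return response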
2ef9fce02be94f8c4e9b5c52ca04a05cce1b5ede
|
LiSE/LiSE/server/__main__.py
|
LiSE/LiSE/server/__main__.py
|
import cherrypy
from argparse import ArgumentParser
from . import LiSEHandleWebService
parser = ArgumentParser()
parser.add_argument('world', action='store', required=True)
parser.add_argument('-c', '--code', action='store')
args = parser.parse_args()
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')],
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8'
}
}
cherrypy.quickstart(LiSEHandleWebService(args.world, args.code), '/', conf)
|
Allow to start server as a module
|
Allow to start server as a module
|
Python
|
agpl-3.0
|
LogicalDash/LiSE,LogicalDash/LiSE
|
Allow to start server as a module
|
import cherrypy
from argparse import ArgumentParser
from . import LiSEHandleWebService
parser = ArgumentParser()
parser.add_argument('world', action='store', required=True)
parser.add_argument('-c', '--code', action='store')
args = parser.parse_args()
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')],
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8'
}
}
cherrypy.quickstart(LiSEHandleWebService(args.world, args.code), '/', conf)
|
<commit_before><commit_msg>Allow to start server as a module<commit_after>
|
import cherrypy
from argparse import ArgumentParser
from . import LiSEHandleWebService
parser = ArgumentParser()
parser.add_argument('world', action='store', required=True)
parser.add_argument('-c', '--code', action='store')
args = parser.parse_args()
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')],
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8'
}
}
cherrypy.quickstart(LiSEHandleWebService(args.world, args.code), '/', conf)
|
Allow to start server as a moduleimport cherrypy
from argparse import ArgumentParser
from . import LiSEHandleWebService
parser = ArgumentParser()
parser.add_argument('world', action='store', required=True)
parser.add_argument('-c', '--code', action='store')
args = parser.parse_args()
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')],
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8'
}
}
cherrypy.quickstart(LiSEHandleWebService(args.world, args.code), '/', conf)
|
<commit_before><commit_msg>Allow to start server as a module<commit_after>import cherrypy
from argparse import ArgumentParser
from . import LiSEHandleWebService
parser = ArgumentParser()
parser.add_argument('world', action='store', required=True)
parser.add_argument('-c', '--code', action='store')
args = parser.parse_args()
conf = {
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')],
'tools.encode.on': True,
'tools.encode.encoding': 'utf-8'
}
}
cherrypy.quickstart(LiSEHandleWebService(args.world, args.code), '/', conf)
|
|
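A minimal launcher sketch for the server module above, assuming the package is importable as LiSE.server; the world database and rules file names are placeholders, not paths taken from the commit:
# Illustrative only: run the server module in a child process.
import subprocess
import sys
subprocess.run(
    [sys.executable, "-m", "LiSE.server", "world.db", "-c", "rules.py"],
    check=True,
)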
a0124a990b4afe0cd5fd3971bae1e43f417bc1b2
|
corehq/apps/domain/management/commands/find_secure_submission_image_domains.py
|
corehq/apps/domain/management/commands/find_secure_submission_image_domains.py
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
import csv
class Command(BaseCommand):
help = 'Find domains with secure submissions and image questions'
def handle(self, *args, **options):
with open('domain_results.csv', 'wb+') as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
csv_writer.writerow(['domain', 'app', 'domain_creator'])
for domain in Domain.get_all(include_docs=True):
if domain.secure_submissions:
for app in domain.full_applications(include_builds=False):
for module in app.modules:
for form in module.forms:
for question in form.get_questions(app.langs):
if question['type'] == 'Image':
csv_writer.writerow([
domain.name,
app.name,
domain.creating_user
])
|
Add management command to find domains impacted by 502 bug
|
Add management command to find domains impacted by 502 bug
|
Python
|
bsd-3-clause
|
puttarajubr/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,SEL-Columbia/commcare-hq
|
Add management command to find domains impacted by 502 bug
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
import csv
class Command(BaseCommand):
help = 'Find domains with secure submissions and image questions'
def handle(self, *args, **options):
with open('domain_results.csv', 'wb+') as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
csv_writer.writerow(['domain', 'app', 'domain_creator'])
for domain in Domain.get_all(include_docs=True):
if domain.secure_submissions:
for app in domain.full_applications(include_builds=False):
for module in app.modules:
for form in module.forms:
for question in form.get_questions(app.langs):
if question['type'] == 'Image':
csv_writer.writerow([
domain.name,
app.name,
domain.creating_user
])
|
<commit_before><commit_msg>Add management command to find domains impacted by 502 bug<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
import csv
class Command(BaseCommand):
help = 'Find domains with secure submissions and image questions'
def handle(self, *args, **options):
with open('domain_results.csv', 'wb+') as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
csv_writer.writerow(['domain', 'app', 'domain_creator'])
for domain in Domain.get_all(include_docs=True):
if domain.secure_submissions:
for app in domain.full_applications(include_builds=False):
for module in app.modules:
for form in module.forms:
for question in form.get_questions(app.langs):
if question['type'] == 'Image':
csv_writer.writerow([
domain.name,
app.name,
domain.creating_user
])
|
Add management command to find domains impacted by 502 bugfrom django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
import csv
class Command(BaseCommand):
help = 'Find domains with secure submissions and image questions'
def handle(self, *args, **options):
with open('domain_results.csv', 'wb+') as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
csv_writer.writerow(['domain', 'app', 'domain_creator'])
for domain in Domain.get_all(include_docs=True):
if domain.secure_submissions:
for app in domain.full_applications(include_builds=False):
for module in app.modules:
for form in module.forms:
for question in form.get_questions(app.langs):
if question['type'] == 'Image':
csv_writer.writerow([
domain.name,
app.name,
domain.creating_user
])
|
<commit_before><commit_msg>Add management command to find domains impacted by 502 bug<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
import csv
class Command(BaseCommand):
help = 'Find domains with secure submissions and image questions'
def handle(self, *args, **options):
with open('domain_results.csv', 'wb+') as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=',',
quotechar='|',
quoting=csv.QUOTE_MINIMAL
)
csv_writer.writerow(['domain', 'app', 'domain_creator'])
for domain in Domain.get_all(include_docs=True):
if domain.secure_submissions:
for app in domain.full_applications(include_builds=False):
for module in app.modules:
for form in module.forms:
for question in form.get_questions(app.langs):
if question['type'] == 'Image':
csv_writer.writerow([
domain.name,
app.name,
domain.creating_user
])
|
|
278cd37ada508701896c2669a215365785f5a261
|
evalExp.py
|
evalExp.py
|
from keywords import *
from reg import *
from parse import parse
def evalExp():
expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
# expr = transformMacros(expr)
evalFunc = getEvalFunc(expr)
# evalFunc()
# reassign next step
def getEvalFunc(expr):
if isVar(expr):
return compVar
if isNum(expr):
return compNum
# else
tag, *_ = expr
keyword_groups = {
define_keys : evalDef,
ass_keys : evalAss,
lambda_keys : evalLambda,
if_keys : evalIf,
begin_keys : evalBegin,
quote_keys : evalQuote
}
for group in keyword_groups:
if tag in group:
return keyword_groups[group]
# default
return evalApp
def isNum(exp):
try:
return type(int(exp)) == int
except:
return False
def isVar(exp):
return type(exp) == str
|
Add eval dispatch (copied from compyle)
|
Add eval dispatch (copied from compyle)
|
Python
|
mit
|
nickdrozd/ecio-lisp,nickdrozd/ecio-lisp
|
Add eval dispatch (copied from compyle)
|
from keywords import *
from reg import *
from parse import parse
def evalExp():
expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
# expr = transformMacros(expr)
evalFunc = getEvalFunc(expr)
# evalFunc()
# reassign next step
def getEvalFunc(expr):
if isVar(expr):
return compVar
if isNum(expr):
return compNum
# else
tag, *_ = expr
keyword_groups = {
define_keys : evalDef,
ass_keys : evalAss,
lambda_keys : evalLambda,
if_keys : evalIf,
begin_keys : evalBegin,
quote_keys : evalQuote
}
for group in keyword_groups:
if tag in group:
return keyword_groups[group]
# default
return evalApp
def isNum(exp):
try:
return type(int(exp)) == int
except:
return False
def isVar(exp):
return type(exp) == str
|
<commit_before><commit_msg>Add eval dispatch (copied from compyle)<commit_after>
|
from keywords import *
from reg import *
from parse import parse
def evalExp():
expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
# expr = transformMacros(expr)
evalFunc = getEvalFunc(expr)
# evalFunc()
# reassign next step
def getEvalFunc(expr):
if isVar(expr):
return compVar
if isNum(expr):
return compNum
# else
tag, *_ = expr
keyword_groups = {
define_keys : evalDef,
ass_keys : evalAss,
lambda_keys : evalLambda,
if_keys : evalIf,
begin_keys : evalBegin,
quote_keys : evalQuote
}
for group in keyword_groups:
if tag in group:
return keyword_groups[group]
# default
return evalApp
def isNum(exp):
try:
return type(int(exp)) == int
except:
return False
def isVar(exp):
return type(exp) == str
|
Add eval dispatch (copied from compyle)from keywords import *
from reg import *
from parse import parse
def evalExp():
expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
# expr = transformMacros(expr)
evalFunc = getEvalFunc(expr)
# evalFunc()
# reassign next step
def getEvalFunc(expr):
if isVar(expr):
return compVar
if isNum(expr):
return compNum
# else
tag, *_ = expr
keyword_groups = {
define_keys : evalDef,
ass_keys : evalAss,
lambda_keys : evalLambda,
if_keys : evalIf,
begin_keys : evalBegin,
quote_keys : evalQuote
}
for group in keyword_groups:
if tag in group:
return keyword_groups[group]
# default
return evalApp
def isNum(exp):
try:
return type(int(exp)) == int
except:
return False
def isVar(exp):
return type(exp) == str
|
<commit_before><commit_msg>Add eval dispatch (copied from compyle)<commit_after>from keywords import *
from reg import *
from parse import parse
def evalExp():
expr = parse(fetch(EXPR)) # make dedicated fetch_expr()?
# expr = transformMacros(expr)
evalFunc = getEvalFunc(expr)
# evalFunc()
# reassign next step
def getEvalFunc(expr):
if isVar(expr):
return compVar
if isNum(expr):
return compNum
# else
tag, *_ = expr
keyword_groups = {
define_keys : evalDef,
ass_keys : evalAss,
lambda_keys : evalLambda,
if_keys : evalIf,
begin_keys : evalBegin,
quote_keys : evalQuote
}
for group in keyword_groups:
if tag in group:
return keyword_groups[group]
# default
return evalApp
def isNum(exp):
try:
return type(int(exp)) == int
except:
return False
def isVar(exp):
return type(exp) == str
|
|
9bffe981c018213b87d015a20603c092567bbdf4
|
cobaltuoft/cobalt.py
|
cobaltuoft/cobalt.py
|
from .endpoints import Endpoints
from .helpers import get, scrape_filters
class Cobalt:
def __init__(self, api_key=None):
self.host = 'http://cobalt.qas.im/api/1.0'
self.headers = {
'Referer': 'Cobalt-UofT-Python'
}
if not api_key or not self._is_valid_key(api_key):
raise ValueError('Expected valid API key.')
self.headers['Authorization'] = api_key
self.filter_map = scrape_filters()
def _get(self, url, params=None):
return get(url=url, params=params, headers=self.headers)
def _is_valid_key(self, key):
payload = {'key': key}
r = self._get(self.host, params=payload)
return r.reason == 'Not Found' and r.status_code == 404
def _run(self, api, endpoint=None, params=None):
res = Endpoints.run(api=api,
endpoint=endpoint,
params=params,
map=self.filter_map[api],
get=self._get)
return res.json()
def athletics(self, endpoint=None, params=None):
return self._run(api='athletics', endpoint=endpoint, params=params)
def buildings(self, endpoint=None, params=None):
return self._run(api='buildings', endpoint=endpoint, params=params)
def courses(self, endpoint=None, params=None):
return self._run(api='courses', endpoint=endpoint, params=params)
def food(self, endpoint=None, params=None):
return self._run(api='food', endpoint=endpoint, params=params)
def textbooks(self, endpoint=None, params=None):
return self._run(api='textbooks', endpoint=endpoint, params=params)
|
Initialize multiple class setup; add remaining APIs
|
Initialize multiple class setup; add remaining APIs
|
Python
|
mit
|
kshvmdn/cobalt-uoft-python
|
Initialize multiple class setup; add remaining APIs
|
from .endpoints import Endpoints
from .helpers import get, scrape_filters
class Cobalt:
def __init__(self, api_key=None):
self.host = 'http://cobalt.qas.im/api/1.0'
self.headers = {
'Referer': 'Cobalt-UofT-Python'
}
if not api_key or not self._is_valid_key(api_key):
raise ValueError('Expected valid API key.')
self.headers['Authorization'] = api_key
self.filter_map = scrape_filters()
def _get(self, url, params=None):
return get(url=url, params=params, headers=self.headers)
def _is_valid_key(self, key):
payload = {'key': key}
r = self._get(self.host, params=payload)
return r.reason == 'Not Found' and r.status_code == 404
def _run(self, api, endpoint=None, params=None):
res = Endpoints.run(api=api,
endpoint=endpoint,
params=params,
map=self.filter_map[api],
get=self._get)
return res.json()
def athletics(self, endpoint=None, params=None):
return self._run(api='athletics', endpoint=endpoint, params=params)
def buildings(self, endpoint=None, params=None):
return self._run(api='buildings', endpoint=endpoint, params=params)
def courses(self, endpoint=None, params=None):
return self._run(api='courses', endpoint=endpoint, params=params)
def food(self, endpoint=None, params=None):
return self._run(api='food', endpoint=endpoint, params=params)
def textbooks(self, endpoint=None, params=None):
return self._run(api='textbooks', endpoint=endpoint, params=params)
|
<commit_before><commit_msg>Initialize multiple class setup; add remaining APIs<commit_after>
|
from .endpoints import Endpoints
from .helpers import get, scrape_filters
class Cobalt:
def __init__(self, api_key=None):
self.host = 'http://cobalt.qas.im/api/1.0'
self.headers = {
'Referer': 'Cobalt-UofT-Python'
}
if not api_key or not self._is_valid_key(api_key):
raise ValueError('Expected valid API key.')
self.headers['Authorization'] = api_key
self.filter_map = scrape_filters()
def _get(self, url, params=None):
return get(url=url, params=params, headers=self.headers)
def _is_valid_key(self, key):
payload = {'key': key}
r = self._get(self.host, params=payload)
return r.reason == 'Not Found' and r.status_code == 404
def _run(self, api, endpoint=None, params=None):
res = Endpoints.run(api=api,
endpoint=endpoint,
params=params,
map=self.filter_map[api],
get=self._get)
return res.json()
def athletics(self, endpoint=None, params=None):
return self._run(api='athletics', endpoint=endpoint, params=params)
def buildings(self, endpoint=None, params=None):
return self._run(api='buildings', endpoint=endpoint, params=params)
def courses(self, endpoint=None, params=None):
return self._run(api='courses', endpoint=endpoint, params=params)
def food(self, endpoint=None, params=None):
return self._run(api='food', endpoint=endpoint, params=params)
def textbooks(self, endpoint=None, params=None):
return self._run(api='textbooks', endpoint=endpoint, params=params)
|
Initialize multiple class setup; add remaining APIsfrom .endpoints import Endpoints
from .helpers import get, scrape_filters
class Cobalt:
def __init__(self, api_key=None):
self.host = 'http://cobalt.qas.im/api/1.0'
self.headers = {
'Referer': 'Cobalt-UofT-Python'
}
if not api_key or not self._is_valid_key(api_key):
raise ValueError('Expected valid API key.')
self.headers['Authorization'] = api_key
self.filter_map = scrape_filters()
def _get(self, url, params=None):
return get(url=url, params=params, headers=self.headers)
def _is_valid_key(self, key):
payload = {'key': key}
r = self._get(self.host, params=payload)
return r.reason == 'Not Found' and r.status_code == 404
def _run(self, api, endpoint=None, params=None):
res = Endpoints.run(api=api,
endpoint=endpoint,
params=params,
map=self.filter_map[api],
get=self._get)
return res.json()
def athletics(self, endpoint=None, params=None):
return self._run(api='athletics', endpoint=endpoint, params=params)
def buildings(self, endpoint=None, params=None):
return self._run(api='buildings', endpoint=endpoint, params=params)
def courses(self, endpoint=None, params=None):
return self._run(api='courses', endpoint=endpoint, params=params)
def food(self, endpoint=None, params=None):
return self._run(api='food', endpoint=endpoint, params=params)
def textbooks(self, endpoint=None, params=None):
return self._run(api='textbooks', endpoint=endpoint, params=params)
|
<commit_before><commit_msg>Initialize multiple class setup; add remaining APIs<commit_after>from .endpoints import Endpoints
from .helpers import get, scrape_filters
class Cobalt:
def __init__(self, api_key=None):
self.host = 'http://cobalt.qas.im/api/1.0'
self.headers = {
'Referer': 'Cobalt-UofT-Python'
}
if not api_key or not self._is_valid_key(api_key):
raise ValueError('Expected valid API key.')
self.headers['Authorization'] = api_key
self.filter_map = scrape_filters()
def _get(self, url, params=None):
return get(url=url, params=params, headers=self.headers)
def _is_valid_key(self, key):
payload = {'key': key}
r = self._get(self.host, params=payload)
return r.reason == 'Not Found' and r.status_code == 404
def _run(self, api, endpoint=None, params=None):
res = Endpoints.run(api=api,
endpoint=endpoint,
params=params,
map=self.filter_map[api],
get=self._get)
return res.json()
def athletics(self, endpoint=None, params=None):
return self._run(api='athletics', endpoint=endpoint, params=params)
def buildings(self, endpoint=None, params=None):
return self._run(api='buildings', endpoint=endpoint, params=params)
def courses(self, endpoint=None, params=None):
return self._run(api='courses', endpoint=endpoint, params=params)
def food(self, endpoint=None, params=None):
return self._run(api='food', endpoint=endpoint, params=params)
def textbooks(self, endpoint=None, params=None):
return self._run(api='textbooks', endpoint=endpoint, params=params)
|
|
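A hypothetical usage sketch for the Cobalt wrapper above; the import path, API key, and query parameters are illustrative assumptions rather than values taken from the commit:
# Assumed import path based on the file location cobaltuoft/cobalt.py.
from cobaltuoft.cobalt import Cobalt
client = Cobalt(api_key="YOUR_API_KEY")  # placeholder key
# Each helper forwards to the matching Cobalt REST API via _run().
courses = client.courses(endpoint="search", params={"q": "computer science"})
print(courses)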
316a82c5465a13770404b6a302348f192618cd27
|
libqtile/command_interface.py
|
libqtile/command_interface.py
|
# Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple
from libqtile.command_graph import CommandGraphCall
class EagerCommandInterface(metaclass=ABCMeta):
"""
Defines an interface which can be used to eagerly evaluate a given call on
a command graph. The implementations of this may use an IPC call to access
the running qtile instance remotely, or directly access the qtile instance
from within the same process.
"""
@abstractmethod
def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
"""Execute the given call, returning the result of the execution
Perform the given command graph call, calling the function with the
given arguments and keyword arguments.
Parameters
----------
call: CommandGraphCall
The call on the command graph that is to be performed.
args:
The arguments to pass into the command graph call.
kwargs:
The keyword arguments to pass into the command graph call.
"""
pass # pragma: no cover
|
Add an interface for eagerly evaluating command graph elements
|
Add an interface for eagerly evaluating command graph elements
Setup a new interface that evaluates commands on the command graph as
they are issued. This is roughly the interface of the current Client.
|
Python
|
mit
|
soulchainer/qtile,zordsdavini/qtile,ramnes/qtile,qtile/qtile,tych0/qtile,soulchainer/qtile,qtile/qtile,zordsdavini/qtile,ramnes/qtile,tych0/qtile
|
Add an interface for eagerly evaluating command graph elements
Setup a new interface that evaluates commands on the command graph as
they are issued. This is roughly the interface of the current Client.
|
# Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple
from libqtile.command_graph import CommandGraphCall
class EagerCommandInterface(metaclass=ABCMeta):
"""
Defines an interface which can be used to eagerly evaluate a given call on
a command graph. The implementations of this may use an IPC call to access
the running qtile instance remotely, or directly access the qtile instance
from within the same process.
"""
@abstractmethod
def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
"""Execute the given call, returning the result of the execution
Perform the given command graph call, calling the function with the
given arguments and keyword arguments.
Parameters
----------
call: CommandGraphCall
The call on the command graph that is to be performed.
args:
The arguments to pass into the command graph call.
kwargs:
The keyword arguments to pass into the command graph call.
"""
pass # pragma: no cover
|
<commit_before><commit_msg>Add an interface for eagerly evaluating command graph elements
Setup a new interface that evaluates commands on the command graph as
they are issued. This is roughly the interface of the current Client.<commit_after>
|
# Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple
from libqtile.command_graph import CommandGraphCall
class EagerCommandInterface(metaclass=ABCMeta):
"""
Defines an interface which can be used to eagerly evaluate a given call on
a command graph. The implementations of this may use an IPC call to access
the running qtile instance remotely, or directly access the qtile instance
from within the same process.
"""
@abstractmethod
def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
"""Execute the given call, returning the result of the execution
Perform the given command graph call, calling the function with the
given arguments and keyword arguments.
Parameters
----------
call: CommandGraphCall
The call on the command graph that is to be performed.
args:
The arguments to pass into the command graph call.
kwargs:
The keyword arguments to pass into the command graph call.
"""
pass # pragma: no cover
|
Add an interface for eagerly evaluating command graph elements
Setup a new interface that evaluates commands on the command graph as
they are issued. This is roughly the interface of the current Client.# Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple
from libqtile.command_graph import CommandGraphCall
class EagerCommandInterface(metaclass=ABCMeta):
"""
Defines an interface which can be used to eagerly evaluate a given call on
a command graph. The implementations of this may use an IPC call to access
the running qtile instance remotely, or directly access the qtile instance
from within the same process.
"""
@abstractmethod
def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
"""Execute the given call, returning the result of the execution
Perform the given command graph call, calling the function with the
given arguments and keyword arguments.
Parameters
----------
call: CommandGraphCall
The call on the command graph that is to be performed.
args:
The arguments to pass into the command graph call.
kwargs:
The keyword arguments to pass into the command graph call.
"""
pass # pragma: no cover
|
<commit_before><commit_msg>Add an interface for eagerly evaluating command graph elements
Setup a new interface that evaluates commands on the command graph as
they are issued. This is roughly the interface of the current Client.<commit_after># Copyright (c) 2019, Sean Vig. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import abstractmethod, ABCMeta
from typing import Any, Dict, Tuple
from libqtile.command_graph import CommandGraphCall
class EagerCommandInterface(metaclass=ABCMeta):
"""
Defines an interface which can be used to eagerly evaluate a given call on
a command graph. The implementations of this may use an IPC call to access
the running qtile instance remotely, or directly access the qtile instance
from within the same process.
"""
@abstractmethod
def execute(self, call: CommandGraphCall, args: Tuple, kwargs: Dict) -> Any:
"""Execute the given call, returning the result of the execution
Perform the given command graph call, calling the function with the
given arguments and keyword arguments.
Parameters
----------
call: CommandGraphCall
The call on the command graph that is to be performed.
args:
The arguments to pass into the command graph call.
kwargs:
The keyword arguments to pass into the command graph call.
"""
pass # pragma: no cover
|
|
dff1f9176d7ce77a242263bfc9a0760cd31f0585
|
regex_proxy.py
|
regex_proxy.py
|
from regex import *
from regex import compile as raw_compile
_cache = {}
# Wrap regex.compile up so we have a global cache
def compile(s, *args, **kwargs):
global _cache
try:
return _cache[s]
except KeyError:
r = raw_compile(s, *args, **kwargs)
_cache[s] = r
return r
|
Add a prototype for cached regex.compile()
|
Add a prototype for cached regex.compile()
|
Python
|
apache-2.0
|
Charcoal-SE/SmokeDetector,Charcoal-SE/SmokeDetector
|
Add a prototype for cached regex.compile()
|
from regex import *
from regex import compile as raw_compile
_cache = {}
# Wrap regex.compile up so we have a global cache
def compile(s, *args, **kwargs):
global _cache
try:
return _cache[s]
except KeyError:
r = raw_compile(s, *args, **kwargs)
_cache[s] = r
return r
|
<commit_before><commit_msg>Add a prototype for cached regex.compile()<commit_after>
|
from regex import *
from regex import compile as raw_compile
_cache = {}
# Wrap regex.compile up so we have a global cache
def compile(s, *args, **kwargs):
global _cache
try:
return _cache[s]
except KeyError:
r = raw_compile(s, *args, **kwargs)
_cache[s] = r
return r
|
Add a prototype for cached regex.compile()from regex import *
from regex import compile as raw_compile
_cache = {}
# Wrap regex.compile up so we have a global cache
def compile(s, *args, **kwargs):
global _cache
try:
return _cache[s]
except KeyError:
r = raw_compile(s, *args, **kwargs)
_cache[s] = r
return r
|
<commit_before><commit_msg>Add a prototype for cached regex.compile()<commit_after>from regex import *
from regex import compile as raw_compile
_cache = {}
# Wrap regex.compile up so we have a global cache
def compile(s, *args, **kwargs):
global _cache
try:
return _cache[s]
except KeyError:
r = raw_compile(s, *args, **kwargs)
_cache[s] = r
return r
|
|
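A side note on the caching design above: the standard library offers the same per-pattern memoisation via functools.lru_cache. The sketch below is illustrative only and is not part of the commit; the unbounded maxsize and the explicit flags parameter are assumptions:
# Alternative cache sketch built on functools.lru_cache.
import functools
import regex
@functools.lru_cache(maxsize=None)
def cached_compile(pattern, flags=0):
    # Compile once per (pattern, flags) pair and reuse the result.
    return regex.compile(pattern, flags)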
09112412a4814e3727def2547765546bf44c1e7d
|
test/algorithms/refinement/test_cspad_refinement.py
|
test/algorithms/refinement/test_cspad_refinement.py
|
# Test multiple stills refinement.
from __future__ import absolute_import, division, print_function
import os
from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner
def test1(dials_regression, run_in_tmpdir):
"""
Refinement test of 300 CSPAD images, testing auto_reduction, parameter
fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
README in the regression folder for more details.
"""
from scitbx import matrix
data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")
result = procrunner.run_process([
"dials.refine",
os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
os.path.join(data_dir, "refine.phil"),
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
# load results
reg_exp = ExperimentListFactory.from_json_file(
os.path.join(data_dir, "regression_experiments.json"),
check_format=False)
ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
check_format=False)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(b2, wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(d2,
fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
|
Test joint refinement of 300 cspad images using Brewster 2018 methods.
|
Test joint refinement of 300 cspad images using Brewster 2018 methods.
This reproduces the last panel of Figure 3, refining the outer shell of the cspad. It tests auto_reduction, parameter fixing, constraints, SparseLevMar, and sauter_poon outlier rejection.
Test passes on Centos 6.9 and mac 10.13.6.
|
Python
|
bsd-3-clause
|
dials/dials,dials/dials,dials/dials,dials/dials,dials/dials
|
Test joint refinement of 300 cspad images using Brewster 2018 methods.
This reproduces the last panel of Figure 3, refining the outer shell of the cspad. It tests auto_reduction, parameter fixing, constraints, SparseLevMar, and sauter_poon outlier rejection.
Test passes on Centos 6.9 and mac 10.13.6.
|
# Test multiple stills refinement.
from __future__ import absolute_import, division, print_function
import os
from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner
def test1(dials_regression, run_in_tmpdir):
"""
Refinement test of 300 CSPAD images, testing auto_reduction, parameter
fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
README in the regression folder for more details.
"""
from scitbx import matrix
data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")
result = procrunner.run_process([
"dials.refine",
os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
os.path.join(data_dir, "refine.phil"),
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
# load results
reg_exp = ExperimentListFactory.from_json_file(
os.path.join(data_dir, "regression_experiments.json"),
check_format=False)
ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
check_format=False)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(b2, wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(d2,
fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
|
<commit_before><commit_msg>Test joint refinement of 300 cspad images using Brewster 2018 methods.
This reproduces the last panel of Figure 3, refining the outer shell of the cspad. It tests auto_reduction, parameter fixing, constraints, SparseLevMar, and sauter_poon outlier rejection.
Test passes on Centos 6.9 and mac 10.13.6.<commit_after>
|
# Test multiple stills refinement.
from __future__ import absolute_import, division, print_function
import os
from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner
def test1(dials_regression, run_in_tmpdir):
"""
Refinement test of 300 CSPAD images, testing auto_reduction, parameter
fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
README in the regression folder for more details.
"""
from scitbx import matrix
data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")
result = procrunner.run_process([
"dials.refine",
os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
os.path.join(data_dir, "refine.phil"),
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
# load results
reg_exp = ExperimentListFactory.from_json_file(
os.path.join(data_dir, "regression_experiments.json"),
check_format=False)
ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
check_format=False)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(b2, wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(d2,
fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
|
Test joint refinement of 300 cspad images using Brewster 2018 methods.
This reproduces the last panel of Figure 3, refining the outer shell of the cspad. It tests auto_reduction, parameter fixing, constraints, SparseLevMar, and sauter_poon outlier rejection.
Test passes on Centos 6.9 and mac 10.13.6.# Test multiple stills refinement.
from __future__ import absolute_import, division, print_function
import os
from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner
def test1(dials_regression, run_in_tmpdir):
"""
Refinement test of 300 CSPAD images, testing auto_reduction, parameter
fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
README in the regression folder for more details.
"""
from scitbx import matrix
data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")
result = procrunner.run_process([
"dials.refine",
os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
os.path.join(data_dir, "refine.phil"),
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
# load results
reg_exp = ExperimentListFactory.from_json_file(
os.path.join(data_dir, "regression_experiments.json"),
check_format=False)
ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
check_format=False)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(b2, wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(d2,
fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
|
<commit_before><commit_msg>Test joint refinement of 300 cspad images using Brewster 2018 methods.
This reproduces the last panel of Figure 3, refining the outer shell of the cspad. It tests auto_reduction, parameter fixing, constraints, SparseLevMar, and sauter_poon outlier rejection.
Test passes on Centos 6.9 and mac 10.13.6.<commit_after># Test multiple stills refinement.
from __future__ import absolute_import, division, print_function
import os
from dxtbx.model.experiment_list import ExperimentListFactory
import procrunner
def test1(dials_regression, run_in_tmpdir):
"""
Refinement test of 300 CSPAD images, testing auto_reduction, parameter
fixing, constraints, SparseLevMar, and sauter_poon outlier rejection. See
README in the regression folder for more details.
"""
from scitbx import matrix
data_dir = os.path.join(dials_regression, "refinement_test_data", "cspad_refinement")
result = procrunner.run_process([
"dials.refine",
os.path.join(data_dir, "cspad_refined_experiments_step6_level2_300.json"),
os.path.join(data_dir, "cspad_reflections_step7_300.pickle"),
os.path.join(data_dir, "refine.phil"),
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
# load results
reg_exp = ExperimentListFactory.from_json_file(
os.path.join(data_dir, "regression_experiments.json"),
check_format=False)
ref_exp = ExperimentListFactory.from_json_file("refined_experiments.json",
check_format=False)
# compare results
tol = 1e-5
for b1, b2 in zip(reg_exp.beams(), ref_exp.beams()):
assert b1.is_similar_to(b2, wavelength_tolerance=tol,
direction_tolerance=tol,
polarization_normal_tolerance=tol,
polarization_fraction_tolerance=tol)
s0_1 = matrix.col(b1.get_unit_s0())
s0_2 = matrix.col(b2.get_unit_s0())
assert s0_1.accute_angle(s0_2, deg=True) < 0.0057 # ~0.1 mrad
for c1, c2 in zip(reg_exp.crystals(), ref_exp.crystals()):
assert c1.is_similar_to(c2)
for d1, d2 in zip(reg_exp.detectors(), ref_exp.detectors()):
assert d1.is_similar_to(d2,
fast_axis_tolerance=1e-4, slow_axis_tolerance=1e-4, origin_tolerance=1e-2)
|
|
724e86e31b6584012af5afe458e0823b9a2ca7ab
|
myclass/class_create_spark.py
|
myclass/class_create_spark.py
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_save_word_to_database.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
def __init__(self, pyspark_app_name):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))
# Configure Spark
try:
conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
self.sc = SparkContext(conf = conf)
logging.info("Start pyspark successfully.")
except Exception as e:
logging.error("Fail in starting pyspark.")
logging.error(e)
def return_spark_context(self):
return self.sc
def __del__(self):
# Close SparkContext
try:
self.sc.stop()
logging.info("close SparkContext successfully.")
except Exception as e:
logging.error(e)
logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"
SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
"""
|
Create a class named "CreateSpark", which is to solove the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__"
|
Create a class named "CreateSpark", which is to solove the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__"
|
Python
|
apache-2.0
|
ysh329/spam-msg-classifier
|
Create a class named "CreateSpark", which is to solove the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__"
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_save_word_to_database.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
def __init__(self, pyspark_app_name):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))
# Configure Spark
try:
conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
self.sc = SparkContext(conf = conf)
logging.info("Start pyspark successfully.")
except Exception as e:
logging.error("Fail in starting pyspark.")
logging.error(e)
def return_spark_context(self):
return self.sc
def __del__(self):
# Close SparkContext
try:
self.sc.stop()
logging.info("close SparkContext successfully.")
except Exception as e:
logging.error(e)
logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"
SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
"""
|
<commit_before><commit_msg>Create a class named "CreateSpark", which is to solve the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__"<commit_after>
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_save_word_to_database.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
def __init__(self, pyspark_app_name):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))
# Configure Spark
try:
conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
self.sc = SparkContext(conf = conf)
logging.info("Start pyspark successfully.")
except Exception as e:
logging.error("Fail in starting pyspark.")
logging.error(e)
def return_spark_context(self):
return self.sc
def __del__(self):
# Close SparkContext
try:
self.sc.stop()
logging.info("close SparkContext successfully.")
except Exception as e:
logging.error(e)
logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"
SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
"""
|
Create a class named "CreateSpark", which is to solove the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__"# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_save_word_to_database.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
def __init__(self, pyspark_app_name):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))
# Configure Spark
try:
conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
self.sc = SparkContext(conf = conf)
logging.info("Start pyspark successfully.")
except Exception as e:
logging.error("Fail in starting pyspark.")
logging.error(e)
def return_spark_context(self):
return self.sc
def __del__(self):
# Close SparkContext
try:
self.sc.stop()
logging.info("close SparkContext successfully.")
except Exception as e:
logging.error(e)
logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"
SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
"""
|
<commit_before><commit_msg>Create a class named "CreateSpark", which is to solve the problem of "Cannot run multiple SparkContexts at once; existing SparkContext(app=spam-msg-classifier, master=local[8]) created by __init__"<commit_after># -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_save_word_to_database.py
# Description:
#
# Author: Shuai Yuan
# E-mail: ysh329@sina.com
# Create: 2015-11-17 20:43:09
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import logging
import time
from pyspark import SparkContext, SparkConf
################################### PART2 CLASS && FUNCTION ###########################
class CreateSpark(object):
def __init__(self, pyspark_app_name):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = './main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateSpark.__name__))
# Configure Spark
try:
conf = SparkConf().setAppName(pyspark_app_name).setMaster("local[8]")
self.sc = SparkContext(conf = conf)
logging.info("Start pyspark successfully.")
except Exception as e:
logging.error("Fail in starting pyspark.")
logging.error(e)
def return_spark_context(self):
return self.sc
def __del__(self):
# Close SparkContext
try:
self.sc.stop()
logging.info("close SparkContext successfully.")
except Exception as e:
logging.error(e)
logging.info("END CLASS {class_name}.".format(class_name = CreateSpark.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateSpark.__name__, delta_time = self.end))
################################### PART3 CLASS TEST ##################################
"""
# initialization parameter
pyspark_app_name = "spam-msg-classifier"
SparkCreator = CreateSpark(pyspark_app_name = pyspark_app_name)
pyspark_sc = SparkCreator.return_spark_context()
logging.info("sc.version:{0}".format(pyspark_sc.version))
"""
|
|
8632b60718fa353797ffc53281e57a37caf9452f
|
set_address.py
|
set_address.py
|
import zmq
import time
import sys
print sys.argv[1:]
# ZeroMQ Context
context = zmq.Context()
sock_live = context.socket(zmq.PUB)
sock_live.connect("tcp://"+sys.argv[1])
time.sleep(1)
# Send multipart only allows send byte arrays, so we convert everything to strings before sending
# [TODO] add .encode('UTF-8') when we switch to python3.
sock_live.send_multipart(["set-address",'pair',sys.argv[2],"0"])
sock_live.close()
|
Add config command for setting the address of rf sensors.
|
Add config command for setting the address of rf sensors.
|
Python
|
bsd-3-clause
|
geekylou/sensor_net,geekylou/sensor_net,geekylou/sensor_net,geekylou/sensor_net,geekylou/sensor_net
|
Add config command for setting the address of rf sensors.
|
import zmq
import time
import sys
print sys.argv[1:]
# ZeroMQ Context
context = zmq.Context()
sock_live = context.socket(zmq.PUB)
sock_live.connect("tcp://"+sys.argv[1])
time.sleep(1)
# Send multipart only allows send byte arrays, so we convert everything to strings before sending
# [TODO] add .encode('UTF-8') when we switch to python3.
sock_live.send_multipart(["set-address",'pair',sys.argv[2],"0"])
sock_live.close()
|
<commit_before><commit_msg>Add config command for setting the address of rf sensors.<commit_after>
|
import zmq
import time
import sys
print sys.argv[1:]
# ZeroMQ Context
context = zmq.Context()
sock_live = context.socket(zmq.PUB)
sock_live.connect("tcp://"+sys.argv[1])
time.sleep(1)
# Send multipart only allows send byte arrays, so we convert everything to strings before sending
# [TODO] add .encode('UTF-8') when we switch to python3.
sock_live.send_multipart(["set-address",'pair',sys.argv[2],"0"])
sock_live.close()
|
Add config command for setting the address of rf sensors.import zmq
import time
import sys
print sys.argv[1:]
# ZeroMQ Context
context = zmq.Context()
sock_live = context.socket(zmq.PUB)
sock_live.connect("tcp://"+sys.argv[1])
time.sleep(1)
# Send multipart only allows send byte arrays, so we convert everything to strings before sending
# [TODO] add .encode('UTF-8') when we switch to python3.
sock_live.send_multipart(["set-address",'pair',sys.argv[2],"0"])
sock_live.close()
|
<commit_before><commit_msg>Add config command for setting the address of rf sensors.<commit_after>import zmq
import time
import sys
print sys.argv[1:]
# ZeroMQ Context
context = zmq.Context()
sock_live = context.socket(zmq.PUB)
sock_live.connect("tcp://"+sys.argv[1])
time.sleep(1)
# Send multipart only allows send byte arrays, so we convert everything to strings before sending
# [TODO] add .encode('UTF-8') when we switch to python3.
sock_live.send_multipart(["set-address",'pair',sys.argv[2],"0"])
sock_live.close()
|
|
c5a2167a63516c23390263408fcd2c9a4f654fc8
|
webcomix/tests/test_comic_spider.py
|
webcomix/tests/test_comic_spider.py
|
from webcomix.comic_spider import ComicSpider
def test_parse_yields_good_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [
'//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'
]
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 2
assert results[0].get(
'url') == "http://imgs.xkcd.com/comics/tree_cropped_(1).jpg"
assert results[1].url == "http://xkcd.com/3/"
def test_parse_yields_bad_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [None, 'xkcd.com/3/']
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 1
assert results[0].url == "http://xkcd.com/3/"
|
Add tests for the parse method of the spider
|
Add tests for the parse method of the spider
|
Python
|
mit
|
J-CPelletier/webcomix,J-CPelletier/webcomix,J-CPelletier/WebComicToCBZ
|
Add tests for the parse method of the spider
|
from webcomix.comic_spider import ComicSpider
def test_parse_yields_good_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
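    # extract_first() is consumed twice by the spider: first the comic image source, then the next-page link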
mock_selector.extract_first.side_effect = [
'//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'
]
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 2
assert results[0].get(
'url') == "http://imgs.xkcd.com/comics/tree_cropped_(1).jpg"
assert results[1].url == "http://xkcd.com/3/"
def test_parse_yields_bad_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [None, 'xkcd.com/3/']
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 1
assert results[0].url == "http://xkcd.com/3/"
|
<commit_before><commit_msg>Add tests for the parse method of the spider<commit_after>
|
from webcomix.comic_spider import ComicSpider
def test_parse_yields_good_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
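    # extract_first() is consumed twice by the spider: first the comic image source, then the next-page link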
mock_selector.extract_first.side_effect = [
'//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'
]
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 2
assert results[0].get(
'url') == "http://imgs.xkcd.com/comics/tree_cropped_(1).jpg"
assert results[1].url == "http://xkcd.com/3/"
def test_parse_yields_bad_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [None, 'xkcd.com/3/']
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 1
assert results[0].url == "http://xkcd.com/3/"
|
Add tests for the parse method of the spiderfrom webcomix.comic_spider import ComicSpider
def test_parse_yields_good_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
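    # extract_first() is consumed twice by the spider: first the comic image source, then the next-page link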
mock_selector.extract_first.side_effect = [
'//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'
]
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 2
assert results[0].get(
'url') == "http://imgs.xkcd.com/comics/tree_cropped_(1).jpg"
assert results[1].url == "http://xkcd.com/3/"
def test_parse_yields_bad_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [None, 'xkcd.com/3/']
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 1
assert results[0].url == "http://xkcd.com/3/"
|
<commit_before><commit_msg>Add tests for the parse method of the spider<commit_after>from webcomix.comic_spider import ComicSpider
def test_parse_yields_good_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
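    # extract_first() is consumed twice by the spider: first the comic image source, then the next-page link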
mock_selector.extract_first.side_effect = [
'//imgs.xkcd.com/comics/tree_cropped_(1).jpg', 'xkcd.com/3/'
]
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 2
assert results[0].get(
'url') == "http://imgs.xkcd.com/comics/tree_cropped_(1).jpg"
assert results[1].url == "http://xkcd.com/3/"
def test_parse_yields_bad_page(mocker):
mock_response = mocker.patch('scrapy.http.Response')
mock_response.urljoin.return_value = "http://xkcd.com/3/"
mock_response.url = "http://xkcd.com/2/"
mock_selector = mocker.patch('scrapy.selector.SelectorList')
mock_response.xpath.return_value = mock_selector
mock_selector.extract_first.side_effect = [None, 'xkcd.com/3/']
spider = ComicSpider()
result = spider.parse(mock_response)
results = list(result)
assert len(results) == 1
assert results[0].url == "http://xkcd.com/3/"
|
|
510b90d42dbccd0aa1e3ff48ee8dbe7230b65185
|
get_stats_from.py
|
get_stats_from.py
|
import argparse
import csv
from glob import glob
import re
import statistics
import sys
def get_stats_from(files_names, files_content):
for i in range(len(files_content)):
file_name = files_names[i]
file_content = files_content[i]
print("FILE : {0}".format(files_names[i]))
print("\t*MEAN : {0}".format(statistics.mean(file_content)))
print("\t*MEDIAN : {0}".format(statistics.median(file_content)))
try:
print("\t*MOST TYPICAL VALUE : {0}".format(statistics.mode(file_content)))
except:
print("2 most typical values!")
print("\t*STANDARD DEVIATION : {0}".format(statistics.stdev(file_content)))
print("\t*VARIANCE : {0}".format(statistics.variance(file_content)))
def get_global_stats(files_content):
data = []
for sublist in files_content:
data = data + sublist
print("*GLOBAL MEAN : {0}".format(statistics.mean(data)))
print("*GLOBAL MEDIAN : {0}".format(statistics.median(data)))
try:
print("*GLOBAL MOST TYPICAL VALUE : {0}".format(statistics.mode(data)))
except:
print("2 most typical values!")
print("*GLOBAL STANDARD DEVIATION : {0}".format(statistics.stdev(data)))
print("*GLOBAL VARIANCE : {0}".format(statistics.variance(data)))
def main():
parser = argparse.ArgumentParser(description='Get stats from Powertool output')
parser.add_argument('-p', '--path', type=str, default=None, required=True,
help="specify path to your directories")
parser.add_argument('-o', '--output', action="store_true",
help="save the output in the analysed directory")
args = parser.parse_args()
directories = glob(args.path+"*")
if len(directories) == 0:
sys.exit(1)
csv_files = []
for directory in directories:
current_files = [x for x in glob(directory + "/*") if ".csv" in x]
csv_files = csv_files + current_files
files_content = []
for csv_file in csv_files:
with open(csv_file, "r") as csv_content:
csv_reader = csv.reader(csv_content)
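            # Keep only rows whose first column is a plain decimal number (e.g. 12.34); header and malformed rows are skipped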
files_content.append([float(row[0]) for row in csv_reader if not (re.match("^\d+?\.\d+?$", row[0]) is None)])
get_stats_from(directories, files_content)
get_global_stats(files_content)
if __name__ == '__main__':
main()
|
Add script to compute some stats about data from energy consumption measures
|
Add script to compute some stats about data from energy consumption measures
|
Python
|
agpl-3.0
|
SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper,SOMCA/hot-pepper
|
Add script to compute some stats about data from energy consumption measures
|
import argparse
import csv
from glob import glob
import re
import statistics
import sys
def get_stats_from(files_names, files_content):
for i in range(len(files_content)):
file_name = files_names[i]
file_content = files_content[i]
print("FILE : {0}".format(files_names[i]))
print("\t*MEAN : {0}".format(statistics.mean(file_content)))
print("\t*MEDIAN : {0}".format(statistics.median(file_content)))
try:
print("\t*MOST TYPICAL VALUE : {0}".format(statistics.mode(file_content)))
except:
print("2 most typical values!")
print("\t*STANDARD DEVIATION : {0}".format(statistics.stdev(file_content)))
print("\t*VARIANCE : {0}".format(statistics.variance(file_content)))
def get_global_stats(files_content):
data = []
for sublist in files_content:
data = data + sublist
print("*GLOBAL MEAN : {0}".format(statistics.mean(data)))
print("*GLOBAL MEDIAN : {0}".format(statistics.median(data)))
try:
print("*GLOBAL MOST TYPICAL VALUE : {0}".format(statistics.mode(data)))
except:
print("2 most typical values!")
print("*GLOBAL STANDARD DEVIATION : {0}".format(statistics.stdev(data)))
print("*GLOBAL VARIANCE : {0}".format(statistics.variance(data)))
def main():
parser = argparse.ArgumentParser(description='Get stats from Powertool output')
parser.add_argument('-p', '--path', type=str, default=None, required=True,
help="specify path to your directories")
parser.add_argument('-o', '--output', action="store_true",
help="save the output in the analysed directory")
args = parser.parse_args()
directories = glob(args.path+"*")
if len(directories) == 0:
sys.exit(1)
csv_files = []
for directory in directories:
current_files = [x for x in glob(directory + "/*") if ".csv" in x]
csv_files = csv_files + current_files
files_content = []
for csv_file in csv_files:
with open(csv_file, "r") as csv_content:
csv_reader = csv.reader(csv_content)
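            # Keep only rows whose first column is a plain decimal number (e.g. 12.34); header and malformed rows are skipped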
files_content.append([float(row[0]) for row in csv_reader if not (re.match("^\d+?\.\d+?$", row[0]) is None)])
get_stats_from(directories, files_content)
get_global_stats(files_content)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to compute some stats about data from energy consumption measures<commit_after>
|
import argparse
import csv
from glob import glob
import re
import statistics
import sys
def get_stats_from(files_names, files_content):
for i in range(len(files_content)):
file_name = files_names[i]
file_content = files_content[i]
print("FILE : {0}".format(files_names[i]))
print("\t*MEAN : {0}".format(statistics.mean(file_content)))
print("\t*MEDIAN : {0}".format(statistics.median(file_content)))
try:
print("\t*MOST TYPICAL VALUE : {0}".format(statistics.mode(file_content)))
except:
print("2 most typical values!")
print("\t*STANDARD DEVIATION : {0}".format(statistics.stdev(file_content)))
print("\t*VARIANCE : {0}".format(statistics.variance(file_content)))
def get_global_stats(files_content):
data = []
for sublist in files_content:
data = data + sublist
print("*GLOBAL MEAN : {0}".format(statistics.mean(data)))
print("*GLOBAL MEDIAN : {0}".format(statistics.median(data)))
try:
print("*GLOBAL MOST TYPICAL VALUE : {0}".format(statistics.mode(data)))
except:
print("2 most typical values!")
print("*GLOBAL STANDARD DEVIATION : {0}".format(statistics.stdev(data)))
print("*GLOBAL VARIANCE : {0}".format(statistics.variance(data)))
def main():
parser = argparse.ArgumentParser(description='Get stats from Powertool output')
parser.add_argument('-p', '--path', type=str, default=None, required=True,
help="specify path to your directories")
parser.add_argument('-o', '--output', action="store_true",
help="save the output in the analysed directory")
args = parser.parse_args()
directories = glob(args.path+"*")
if len(directories) == 0:
sys.exit(1)
csv_files = []
for directory in directories:
current_files = [x for x in glob(directory + "/*") if ".csv" in x]
csv_files = csv_files + current_files
files_content = []
for csv_file in csv_files:
with open(csv_file, "r") as csv_content:
csv_reader = csv.reader(csv_content)
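            # Keep only rows whose first column is a plain decimal number (e.g. 12.34); header and malformed rows are skipped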
files_content.append([float(row[0]) for row in csv_reader if not (re.match("^\d+?\.\d+?$", row[0]) is None)])
get_stats_from(directories, files_content)
get_global_stats(files_content)
if __name__ == '__main__':
main()
|
Add script to compute some stats about data from energy consumption measuresimport argparse
import csv
from glob import glob
import re
import statistics
import sys
def get_stats_from(files_names, files_content):
for i in range(len(files_content)):
file_name = files_names[i]
file_content = files_content[i]
print("FILE : {0}".format(files_names[i]))
print("\t*MEAN : {0}".format(statistics.mean(file_content)))
print("\t*MEDIAN : {0}".format(statistics.median(file_content)))
try:
print("\t*MOST TYPICAL VALUE : {0}".format(statistics.mode(file_content)))
except:
print("2 most typical values!")
print("\t*STANDARD DEVIATION : {0}".format(statistics.stdev(file_content)))
print("\t*VARIANCE : {0}".format(statistics.variance(file_content)))
def get_global_stats(files_content):
data = []
for sublist in files_content:
data = data + sublist
print("*GLOBAL MEAN : {0}".format(statistics.mean(data)))
print("*GLOBAL MEDIAN : {0}".format(statistics.median(data)))
try:
print("*GLOBAL MOST TYPICAL VALUE : {0}".format(statistics.mode(data)))
except:
print("2 most typical values!")
print("*GLOBAL STANDARD DEVIATION : {0}".format(statistics.stdev(data)))
print("*GLOBAL VARIANCE : {0}".format(statistics.variance(data)))
def main():
parser = argparse.ArgumentParser(description='Get stats from Powertool output')
parser.add_argument('-p', '--path', type=str, default=None, required=True,
help="specify path to your directories")
parser.add_argument('-o', '--output', action="store_true",
help="save the output in the analysed directory")
args = parser.parse_args()
directories = glob(args.path+"*")
if len(directories) == 0:
sys.exit(1)
csv_files = []
for directory in directories:
current_files = [x for x in glob(directory + "/*") if ".csv" in x]
csv_files = csv_files + current_files
files_content = []
for csv_file in csv_files:
with open(csv_file, "r") as csv_content:
csv_reader = csv.reader(csv_content)
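            # Keep only rows whose first column is a plain decimal number (e.g. 12.34); header and malformed rows are skipped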
files_content.append([float(row[0]) for row in csv_reader if not (re.match("^\d+?\.\d+?$", row[0]) is None)])
get_stats_from(directories, files_content)
get_global_stats(files_content)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to compute some stats about data from energy consumption measures<commit_after>import argparse
import csv
from glob import glob
import re
import statistics
import sys
def get_stats_from(files_names, files_content):
for i in range(len(files_content)):
file_name = files_names[i]
file_content = files_content[i]
print("FILE : {0}".format(files_names[i]))
print("\t*MEAN : {0}".format(statistics.mean(file_content)))
print("\t*MEDIAN : {0}".format(statistics.median(file_content)))
try:
print("\t*MOST TYPICAL VALUE : {0}".format(statistics.mode(file_content)))
except:
print("2 most typical values!")
print("\t*STANDARD DEVIATION : {0}".format(statistics.stdev(file_content)))
print("\t*VARIANCE : {0}".format(statistics.variance(file_content)))
def get_global_stats(files_content):
data = []
for sublist in files_content:
data = data + sublist
print("*GLOBAL MEAN : {0}".format(statistics.mean(data)))
print("*GLOBAL MEDIAN : {0}".format(statistics.median(data)))
try:
print("*GLOBAL MOST TYPICAL VALUE : {0}".format(statistics.mode(data)))
except:
print("2 most typical values!")
print("*GLOBAL STANDARD DEVIATION : {0}".format(statistics.stdev(data)))
print("*GLOBAL VARIANCE : {0}".format(statistics.variance(data)))
def main():
parser = argparse.ArgumentParser(description='Get stats from Powertool output')
parser.add_argument('-p', '--path', type=str, default=None, required=True,
help="specify path to your directories")
parser.add_argument('-o', '--output', action="store_true",
help="save the output in the analysed directory")
args = parser.parse_args()
directories = glob(args.path+"*")
if len(directories) == 0:
sys.exit(1)
csv_files = []
for directory in directories:
current_files = [x for x in glob(directory + "/*") if ".csv" in x]
csv_files = csv_files + current_files
files_content = []
for csv_file in csv_files:
with open(csv_file, "r") as csv_content:
csv_reader = csv.reader(csv_content)
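            # Keep only rows whose first column is a plain decimal number (e.g. 12.34); header and malformed rows are skipped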
files_content.append([float(row[0]) for row in csv_reader if not (re.match("^\d+?\.\d+?$", row[0]) is None)])
get_stats_from(directories, files_content)
get_global_stats(files_content)
if __name__ == '__main__':
main()
|
|
20b450c4cd0ff9c57d894fa263056ff4cd2dbf07
|
vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py
|
vim_turing_machine/machines/merge_business_hours/vim_merge_business_hours.py
|
import sys
from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
from vim_turing_machine.vim_machine import VimTuringMachine
if __name__ == '__main__':
merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)
merge_business_hours.run(initial_tape=sys.argv[1], max_steps=50)
|
Add a vim version of merge business hours
|
Add a vim version of merge business hours
|
Python
|
mit
|
ealter/vim_turing_machine,ealter/vim_turing_machine
|
Add a vim version of merge business hours
|
import sys
from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
from vim_turing_machine.vim_machine import VimTuringMachine
if __name__ == '__main__':
merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)
merge_business_hours.run(initial_tape=sys.argv[1], max_steps=50)
|
<commit_before><commit_msg>Add a vim version of merge business hours<commit_after>
|
import sys
from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
from vim_turing_machine.vim_machine import VimTuringMachine
if __name__ == '__main__':
merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)
merge_business_hours.run(initial_tape=sys.argv[1], max_steps=50)
|
Add a vim version of merge business hoursimport sys
from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
from vim_turing_machine.vim_machine import VimTuringMachine
if __name__ == '__main__':
merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)
merge_business_hours.run(initial_tape=sys.argv[1], max_steps=50)
|
<commit_before><commit_msg>Add a vim version of merge business hours<commit_after>import sys
from vim_turing_machine.machines.merge_business_hours.merge_business_hours import merge_business_hours_transitions
from vim_turing_machine.vim_machine import VimTuringMachine
if __name__ == '__main__':
merge_business_hours = VimTuringMachine(merge_business_hours_transitions(), debug=True)
merge_business_hours.run(initial_tape=sys.argv[1], max_steps=50)
|
|
9fdd671d9c0b91dc789ebf3b24226edb3e6a072a
|
sleep/migrations/0002_load_metrics.py
|
sleep/migrations/0002_load_metrics.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.management import call_command
def load_metrics(apps, schema_editor):
    call_command('loaddata', 'metrics.json')
class Migration(migrations.Migration):
    dependencies = [
        ('sleep', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(load_metrics),
    ]
|
Add new migration to load metrics fixtures
|
Add new migration to load metrics fixtures
|
Python
|
mit
|
sleepers-anonymous/zscore,sleepers-anonymous/zscore,sleepers-anonymous/zscore,sleepers-anonymous/zscore
|
Add new migration to load metrics fixtures
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.management import call_command
def load_metrics(apps, schema_editor):
    call_command('loaddata', 'metrics.json')
class Migration(migrations.Migration):
    dependencies = [
        ('sleep', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(load_metrics),
    ]
|
<commit_before><commit_msg>Add new migration to load metrics fixtures<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.management import call_command
def load_metrics(apps, schema_editor):
    call_command('loaddata', 'metrics.json')
class Migration(migrations.Migration):
    dependencies = [
        ('sleep', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(load_metrics),
    ]
|
Add new migration to load metrics fixtures# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.management import call_command
def load_metrics(apps, schema_editor):
    call_command('loaddata', 'metrics.json')
class Migration(migrations.Migration):
    dependencies = [
        ('sleep', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(load_metrics),
    ]
|
<commit_before><commit_msg>Add new migration to load metrics fixtures<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.core.management import call_command
def load_metrics(apps, schema_editor):
    call_command('loaddata', 'metrics.json')
class Migration(migrations.Migration):
    dependencies = [
        ('sleep', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(load_metrics),
    ]
|
|
f032556bf07b37f9544c71ecad7aed472021bc97
|
sql/branch.py
|
sql/branch.py
|
import sys
from gratipay import wireup
db = wireup.db(wireup.env())
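# Select every participant whose current braintree-cc (credit card) exchange route has a recorded error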
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE (
SELECT error
FROM current_exchange_routes er
WHERE er.participant = p.id
AND network = 'braintree-cc'
) <> ''
""")
total = len(participants)
print("%s participants with failing cards" % total)
counter = 1
for p in participants:
sys.stdout.write("\rUpdating (%i/%i)" % (counter, total))
sys.stdout.flush()
counter += 1
p.update_giving_and_teams()
print("Done!")
|
Add script to update giving and teams receiving
|
Add script to update giving and teams receiving
|
Python
|
mit
|
gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com
|
Add script to update giving and teams receiving
|
import sys
from gratipay import wireup
db = wireup.db(wireup.env())
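# Select every participant whose current braintree-cc (credit card) exchange route has a recorded error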
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE (
SELECT error
FROM current_exchange_routes er
WHERE er.participant = p.id
AND network = 'braintree-cc'
) <> ''
""")
total = len(participants)
print("%s participants with failing cards" % total)
counter = 1
for p in participants:
sys.stdout.write("\rUpdating (%i/%i)" % (counter, total))
sys.stdout.flush()
counter += 1
p.update_giving_and_teams()
print("Done!")
|
<commit_before><commit_msg>Add script to update giving and teams receiving<commit_after>
|
import sys
from gratipay import wireup
db = wireup.db(wireup.env())
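# Select every participant whose current braintree-cc (credit card) exchange route has a recorded error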
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE (
SELECT error
FROM current_exchange_routes er
WHERE er.participant = p.id
AND network = 'braintree-cc'
) <> ''
""")
total = len(participants)
print("%s participants with failing cards" % total)
counter = 1
for p in participants:
sys.stdout.write("\rUpdating (%i/%i)" % (counter, total))
sys.stdout.flush()
counter += 1
p.update_giving_and_teams()
print("Done!")
|
Add script to update giving and teams receivingimport sys
from gratipay import wireup
db = wireup.db(wireup.env())
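# Select every participant whose current braintree-cc (credit card) exchange route has a recorded error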
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE (
SELECT error
FROM current_exchange_routes er
WHERE er.participant = p.id
AND network = 'braintree-cc'
) <> ''
""")
total = len(participants)
print("%s participants with failing cards" % total)
counter = 1
for p in participants:
sys.stdout.write("\rUpdating (%i/%i)" % (counter, total))
sys.stdout.flush()
counter += 1
p.update_giving_and_teams()
print("Done!")
|
<commit_before><commit_msg>Add script to update giving and teams receiving<commit_after>import sys
from gratipay import wireup
db = wireup.db(wireup.env())
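# Select every participant whose current braintree-cc (credit card) exchange route has a recorded error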
participants = db.all("""
SELECT p.*::participants
FROM participants p
WHERE (
SELECT error
FROM current_exchange_routes er
WHERE er.participant = p.id
AND network = 'braintree-cc'
) <> ''
""")
total = len(participants)
print("%s participants with failing cards" % total)
counter = 1
for p in participants:
sys.stdout.write("\rUpdating (%i/%i)" % (counter, total))
sys.stdout.flush()
counter += 1
p.update_giving_and_teams()
print("Done!")
|
|
fcf691454b8607fec9d7f5cba43579dc02c26c8b
|
tests/pgi_covergage.py
|
tests/pgi_covergage.py
|
"""
find pgi coverage of all gi.repository modules.
you need to have access to both 'gi' and 'pgi' in the current python
environment.
In a virtualenv this works:
$ pip install pgi
$ pip install vext.gi
$ python pgi_coverage.py
"""
TYPELIB_DIR="/usr/lib/girepository-1.0"
from os.path import basename
from glob import glob
from textwrap import dedent
def test_pgi_coverage(gi_module, pgi_module):
name_width = len(max(dir(gi_module), key=len))
print('%s %s' % (gi_module.__name__.rjust(name_width), pgi_module.__name__))
for name in dir(gi_module):
if name.startswith('_'):
continue
status = 'OK'
try:
getattr(pgi_module, name)
except NotImplementedError as e:
#status = "FAIL: '%s'" % str(e.__class__.__name__)
status = "FAIL"
for line in str(e).splitlines():
if line.startswith('NotImplementedError:'):
status = status + " " + line
print("%s\t%s" % (name.rjust(name_width), status))
print("")
def test_coverage(typelib):
code = dedent("""
from pgi.repository import {0} as PGI_{0}
from gi.repository import {0} as GI_{0}
test_pgi_coverage(GI_{0}, PGI_{0})
""".format(typelib))
try:
print("PGI coverage of %s" % typelib)
exec(code)
except Exception as e:
print("Skipped because of %s during test" % str(e))
def get_typelibs():
typelibs = []
for typelib in glob(TYPELIB_DIR + "/*.typelib"):
fn = basename(typelib).partition("-")[0]
typelibs.append(fn)
return typelibs
if __name__=='__main__':
typelibs = get_typelibs()
for typelib in typelibs:
test_coverage(typelib)
|
Check coverage of pgi, vs gi
|
Check coverage of pgi, vs gi
This loops through all the typelibs it can find and the methods, then tries to call them, to build a report to see coverage.
It's a fairly brute force approach but works.. not sure if this is the place to put this in the repo
Could probably extend this in various ways, maybe html output would be useful for some sort of web page ?
|
Python
|
lgpl-2.1
|
lazka/pgi,lazka/pgi
|
Check coverage of pgi, vs gi
This loops through all the typelibs it can find and the methods, then tries to call them, to build a report to see coverage.
It's a fairly brute force approach but works.. not sure if this is the place to put this in the repo
Could probably extend this in various ways, maybe html output would be useful for some sort of web page ?
|
"""
find pgi coverage of all gi.repository modules.
you need to have access to both 'gi' and 'pgi' in the current python
environment.
In a virtualenv this works:
$ pip install pgi
$ pip install vext.gi
$ python pgi_coverage.py
"""
TYPELIB_DIR="/usr/lib/girepository-1.0"
from os.path import basename
from glob import glob
from textwrap import dedent
def test_pgi_coverage(gi_module, pgi_module):
name_width = len(max(dir(gi_module), key=len))
print('%s %s' % (gi_module.__name__.rjust(name_width), pgi_module.__name__))
for name in dir(gi_module):
if name.startswith('_'):
continue
status = 'OK'
try:
getattr(pgi_module, name)
except NotImplementedError as e:
#status = "FAIL: '%s'" % str(e.__class__.__name__)
status = "FAIL"
for line in str(e).splitlines():
if line.startswith('NotImplementedError:'):
status = status + " " + line
print("%s\t%s" % (name.rjust(name_width), status))
print("")
def test_coverage(typelib):
code = dedent("""
from pgi.repository import {0} as PGI_{0}
from gi.repository import {0} as GI_{0}
test_pgi_coverage(GI_{0}, PGI_{0})
""".format(typelib))
try:
print("PGI coverage of %s" % typelib)
exec(code)
except Exception as e:
print("Skipped because of %s during test" % str(e))
def get_typelibs():
typelibs = []
for typelib in glob(TYPELIB_DIR + "/*.typelib"):
fn = basename(typelib).partition("-")[0]
typelibs.append(fn)
return typelibs
if __name__=='__main__':
typelibs = get_typelibs()
for typelib in typelibs:
test_coverage(typelib)
|
<commit_before><commit_msg>Check coverage of pgi, vs gi
This loops through all the typelibs it can find and the methods, then tries to call them, to build a report to see coverage.
It's a fairly brute force approach but works.. not sure if this is the place to put this in the repo
Could probably extend this in various ways, maybe html output would be useful for some sort of web page ?<commit_after>
|
"""
find pgi coverage of all gi.repository modules.
you need to have access to both 'gi' and 'pgi' in the current python
environment.
In a virtualenv this works:
$ pip install pgi
$ pip install vext.gi
$ python pgi_coverage.py
"""
TYPELIB_DIR="/usr/lib/girepository-1.0"
from os.path import basename
from glob import glob
from textwrap import dedent
def test_pgi_coverage(gi_module, pgi_module):
name_width = len(max(dir(gi_module), key=len))
print('%s %s' % (gi_module.__name__.rjust(name_width), pgi_module.__name__))
for name in dir(gi_module):
if name.startswith('_'):
continue
status = 'OK'
try:
getattr(pgi_module, name)
except NotImplementedError as e:
#status = "FAIL: '%s'" % str(e.__class__.__name__)
status = "FAIL"
for line in str(e).splitlines():
if line.startswith('NotImplementedError:'):
status = status + " " + line
print("%s\t%s" % (name.rjust(name_width), status))
print("")
def test_coverage(typelib):
code = dedent("""
from pgi.repository import {0} as PGI_{0}
from gi.repository import {0} as GI_{0}
test_pgi_coverage(GI_{0}, PGI_{0})
""".format(typelib))
try:
print("PGI coverage of %s" % typelib)
exec(code)
except Exception as e:
print("Skipped because of %s during test" % str(e))
def get_typelibs():
typelibs = []
for typelib in glob(TYPELIB_DIR + "/*.typelib"):
fn = basename(typelib).partition("-")[0]
typelibs.append(fn)
return typelibs
if __name__=='__main__':
typelibs = get_typelibs()
for typelib in typelibs:
test_coverage(typelib)
|
Check coverage of pgi, vs gi
This loops through all the typelibs it can find and the methods, then tries to call them, to build a report to see coverage.
It's a fairly brute force approach but works.. not sure if this is the place to put this in the repo
Could probably extend this in various ways, maybe html output would be useful for some sort of web page ?"""
find pgi coverage of all gi.repository modules.
you need to have access to both 'gi' and 'pgi' in the current python
environment.
In a virtualenv this works:
$ pip install pgi
$ pip install vext.gi
$ python pgi_coverage.py
"""
TYPELIB_DIR="/usr/lib/girepository-1.0"
from os.path import basename
from glob import glob
from textwrap import dedent
def test_pgi_coverage(gi_module, pgi_module):
name_width = len(max(dir(gi_module), key=len))
print('%s %s' % (gi_module.__name__.rjust(name_width), pgi_module.__name__))
for name in dir(gi_module):
if name.startswith('_'):
continue
status = 'OK'
try:
getattr(pgi_module, name)
except NotImplementedError as e:
#status = "FAIL: '%s'" % str(e.__class__.__name__)
status = "FAIL"
for line in str(e).splitlines():
if line.startswith('NotImplementedError:'):
status = status + " " + line
print("%s\t%s" % (name.rjust(name_width), status))
print("")
def test_coverage(typelib):
code = dedent("""
from pgi.repository import {0} as PGI_{0}
from gi.repository import {0} as GI_{0}
test_pgi_coverage(GI_{0}, PGI_{0})
""".format(typelib))
try:
print("PGI coverage of %s" % typelib)
exec(code)
except Exception as e:
print("Skipped because of %s during test" % str(e))
def get_typelibs():
typelibs = []
for typelib in glob(TYPELIB_DIR + "/*.typelib"):
fn = basename(typelib).partition("-")[0]
typelibs.append(fn)
return typelibs
if __name__=='__main__':
typelibs = get_typelibs()
for typelib in typelibs:
test_coverage(typelib)
|
<commit_before><commit_msg>Check coverage of pgi, vs gi
This loops through all the typelibs it can find and the methods, then tries to call them, to build a report to see coverage.
It's a fairly brute force approach but works.. not sure if this is the place to put this in the repo
Could probably extend this in various ways, maybe html output would be useful for some sort of web page ?<commit_after>"""
find pgi coverage of all gi.repository modules.
you need to have access to both 'gi' and 'pgi' in the current python
environment.
In a virtualenv this works:
$ pip install pgi
$ pip install vext.gi
$ python pgi_coverage.py
"""
TYPELIB_DIR="/usr/lib/girepository-1.0"
from os.path import basename
from glob import glob
from textwrap import dedent
def test_pgi_coverage(gi_module, pgi_module):
name_width = len(max(dir(gi_module), key=len))
print('%s %s' % (gi_module.__name__.rjust(name_width), pgi_module.__name__))
for name in dir(gi_module):
if name.startswith('_'):
continue
status = 'OK'
try:
getattr(pgi_module, name)
except NotImplementedError as e:
#status = "FAIL: '%s'" % str(e.__class__.__name__)
status = "FAIL"
for line in str(e).splitlines():
if line.startswith('NotImplementedError:'):
status = status + " " + line
print("%s\t%s" % (name.rjust(name_width), status))
print("")
def test_coverage(typelib):
code = dedent("""
from pgi.repository import {0} as PGI_{0}
from gi.repository import {0} as GI_{0}
test_pgi_coverage(GI_{0}, PGI_{0})
""".format(typelib))
try:
print("PGI coverage of %s" % typelib)
exec(code)
except Exception as e:
print("Skipped because of %s during test" % str(e))
def get_typelibs():
typelibs = []
for typelib in glob(TYPELIB_DIR + "/*.typelib"):
fn = basename(typelib).partition("-")[0]
typelibs.append(fn)
return typelibs
if __name__=='__main__':
typelibs = get_typelibs()
for typelib in typelibs:
test_coverage(typelib)
|
|
5c602a98098bdedeffc2b7359a4b3d8407cb1449
|
scripts/migrate_inconsistent_file_keys.py
|
scripts/migrate_inconsistent_file_keys.py
|
#!/usr/bin/env python
# encoding: utf-8
"""Find all nodes with different sets of keys for `files_current` and
`files_versions`, and ensure that all keys present in the former are also
present in the latter.
"""
from website.models import Node
from website.app import init_app
def find_file_mismatch_nodes():
"""Find nodes with inconsistent `files_current` and `files_versions` field
keys.
"""
return [
node for node in Node.find()
if set(node.files_versions.keys()) != set(node.files_current.keys())
]
def migrate_node(node):
"""Ensure that all keys present in `files_current` are also present in
`files_versions`.
"""
for key, file_id in node.files_current.iteritems():
if key not in node.files_versions:
node.files_versions[key] = [file_id]
else:
if file_id not in node.files_versions[key]:
node.files_versions[key].append(file_id)
node.save()
def main(dry_run=True):
init_app()
nodes = find_file_mismatch_nodes()
print('Migrating {0} nodes'.format(len(nodes)))
if dry_run:
return
for node in nodes:
migrate_node(node)
if __name__ == '__main__':
import sys
dry_run = 'dry' in sys.argv
main(dry_run=dry_run)
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from framework.auth import Auth
class TestMigrateFiles(OsfTestCase):
def clear(self):
Node.remove()
def setUp(self):
super(TestMigrateFiles, self).setUp()
self.clear()
self.nodes = []
for idx in range(3):
node = ProjectFactory()
node.add_file(
Auth(user=node.creator),
'name',
'contents',
len('contents'),
'text/plain',
)
self.nodes.append(node)
self.nodes[-1].files_versions = {}
self.nodes[-1].save()
# Sanity check
assert_in('name', self.nodes[-1].files_current)
assert_not_in('name', self.nodes[-1].files_versions)
def tearDown(self):
super(TestMigrateFiles, self).tearDown()
self.clear()
def test_get_targets(self):
targets = find_file_mismatch_nodes()
assert_equal(len(targets), 1)
assert_equal(targets[0], self.nodes[-1])
def test_migrate(self):
main(dry_run=False)
assert_equal(len(find_file_mismatch_nodes()), 0)
assert_in('name', self.nodes[-1].files_versions)
assert_equal(
self.nodes[-1].files_current['name'],
self.nodes[-1].files_versions['name'][0],
)
|
Add migration to ensure consistency on file keys.
|
Add migration to ensure consistency on file keys.
Resolves https://github.com/CenterForOpenScience/openscienceframework.org/issues/1119
|
Python
|
apache-2.0
|
rdhyee/osf.io,amyshi188/osf.io,sloria/osf.io,zachjanicki/osf.io,mluo613/osf.io,asanfilippo7/osf.io,brandonPurvis/osf.io,doublebits/osf.io,mfraezz/osf.io,laurenrevere/osf.io,pattisdr/osf.io,erinspace/osf.io,bdyetton/prettychart,revanthkolli/osf.io,MerlinZhang/osf.io,caseyrygt/osf.io,TomHeatwole/osf.io,cldershem/osf.io,HalcyonChimera/osf.io,amyshi188/osf.io,HarryRybacki/osf.io,icereval/osf.io,ticklemepierce/osf.io,RomanZWang/osf.io,emetsger/osf.io,revanthkolli/osf.io,fabianvf/osf.io,himanshuo/osf.io,kch8qx/osf.io,jolene-esposito/osf.io,cldershem/osf.io,HalcyonChimera/osf.io,kch8qx/osf.io,saradbowman/osf.io,njantrania/osf.io,lamdnhan/osf.io,MerlinZhang/osf.io,baylee-d/osf.io,cosenal/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,HarryRybacki/osf.io,cslzchen/osf.io,brandonPurvis/osf.io,jnayak1/osf.io,jinluyuan/osf.io,sloria/osf.io,danielneis/osf.io,jnayak1/osf.io,himanshuo/osf.io,binoculars/osf.io,fabianvf/osf.io,samanehsan/osf.io,felliott/osf.io,billyhunt/osf.io,reinaH/osf.io,TomHeatwole/osf.io,haoyuchen1992/osf.io,aaxelb/osf.io,samanehsan/osf.io,Ghalko/osf.io,haoyuchen1992/osf.io,danielneis/osf.io,asanfilippo7/osf.io,mattclark/osf.io,jmcarp/osf.io,monikagrabowska/osf.io,adlius/osf.io,caseyrygt/osf.io,DanielSBrown/osf.io,lamdnhan/osf.io,Ghalko/osf.io,aaxelb/osf.io,jnayak1/osf.io,cosenal/osf.io,cwisecarver/osf.io,RomanZWang/osf.io,jeffreyliu3230/osf.io,GageGaskins/osf.io,sloria/osf.io,doublebits/osf.io,jinluyuan/osf.io,jmcarp/osf.io,brianjgeiger/osf.io,kwierman/osf.io,mfraezz/osf.io,GaryKriebel/osf.io,HalcyonChimera/osf.io,wearpants/osf.io,arpitar/osf.io,icereval/osf.io,hmoco/osf.io,acshi/osf.io,KAsante95/osf.io,jnayak1/osf.io,HarryRybacki/osf.io,leb2dg/osf.io,billyhunt/osf.io,GaryKriebel/osf.io,bdyetton/prettychart,alexschiller/osf.io,rdhyee/osf.io,hmoco/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,TomHeatwole/osf.io,mluo613/osf.io,arpitar/osf.io,acshi/osf.io,DanielSBrown/osf.io,GageGaskins/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,ticklemepierce/osf.io,hmoco/osf.io,KAsante95/osf.io,kch8qx/osf.io,jolene-esposito/osf.io,amyshi188/osf.io,reinaH/osf.io,bdyetton/prettychart,pattisdr/osf.io,abought/osf.io,chennan47/osf.io,ticklemepierce/osf.io,cldershem/osf.io,caneruguz/osf.io,MerlinZhang/osf.io,jeffreyliu3230/osf.io,chennan47/osf.io,jinluyuan/osf.io,ZobairAlijan/osf.io,petermalcolm/osf.io,hmoco/osf.io,sbt9uc/osf.io,dplorimer/osf,ZobairAlijan/osf.io,kwierman/osf.io,njantrania/osf.io,leb2dg/osf.io,zamattiac/osf.io,acshi/osf.io,fabianvf/osf.io,SSJohns/osf.io,ckc6cz/osf.io,crcresearch/osf.io,petermalcolm/osf.io,billyhunt/osf.io,amyshi188/osf.io,acshi/osf.io,HarryRybacki/osf.io,SSJohns/osf.io,kushG/osf.io,chennan47/osf.io,felliott/osf.io,AndrewSallans/osf.io,Johnetordoff/osf.io,GaryKriebel/osf.io,abought/osf.io,leb2dg/osf.io,doublebits/osf.io,chrisseto/osf.io,dplorimer/osf,dplorimer/osf,jolene-esposito/osf.io,cslzchen/osf.io,baylee-d/osf.io,mluke93/osf.io,njantrania/osf.io,alexschiller/osf.io,samchrisinger/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,petermalcolm/osf.io,cldershem/osf.io,AndrewSallans/osf.io,CenterForOpenScience/osf.io,cwisecarver/osf.io,alexschiller/osf.io,reinaH/osf.io,zamattiac/osf.io,GageGaskins/osf.io,jolene-esposito/osf.io,adlius/osf.io,cslzchen/osf.io,mattclark/osf.io,zkraime/osf.io,jeffreyliu3230/osf.io,cwisecarver/osf.io,reinaH/osf.io,Nesiehr/osf.io,icereval/osf.io,acshi/osf.io,GageGaskins/osf.io,kushG/osf.io,saradbowman/osf.io,lamdnhan/osf.io,bdyetton/prettychart,kwierman/osf.io,cwisecarver/osf.io,felliott/osf.io,chrisseto/osf.io,caneruguz/osf.io,samanehsan/osf.io,mfraez
z/osf.io,kwierman/osf.io,Nesiehr/osf.io,chrisseto/osf.io,ckc6cz/osf.io,haoyuchen1992/osf.io,caneruguz/osf.io,TomBaxter/osf.io,SSJohns/osf.io,jmcarp/osf.io,alexschiller/osf.io,arpitar/osf.io,sbt9uc/osf.io,caseyrollins/osf.io,Nesiehr/osf.io,zachjanicki/osf.io,brandonPurvis/osf.io,cosenal/osf.io,rdhyee/osf.io,TomBaxter/osf.io,asanfilippo7/osf.io,ZobairAlijan/osf.io,lyndsysimon/osf.io,njantrania/osf.io,KAsante95/osf.io,asanfilippo7/osf.io,jeffreyliu3230/osf.io,CenterForOpenScience/osf.io,barbour-em/osf.io,kch8qx/osf.io,mluo613/osf.io,dplorimer/osf,haoyuchen1992/osf.io,barbour-em/osf.io,wearpants/osf.io,zachjanicki/osf.io,zkraime/osf.io,monikagrabowska/osf.io,zkraime/osf.io,brandonPurvis/osf.io,billyhunt/osf.io,revanthkolli/osf.io,lamdnhan/osf.io,binoculars/osf.io,Ghalko/osf.io,ckc6cz/osf.io,caneruguz/osf.io,crcresearch/osf.io,felliott/osf.io,lyndsysimon/osf.io,sbt9uc/osf.io,kch8qx/osf.io,abought/osf.io,SSJohns/osf.io,danielneis/osf.io,mluo613/osf.io,zamattiac/osf.io,emetsger/osf.io,mfraezz/osf.io,lyndsysimon/osf.io,himanshuo/osf.io,emetsger/osf.io,lyndsysimon/osf.io,caseyrygt/osf.io,KAsante95/osf.io,GageGaskins/osf.io,mluke93/osf.io,mattclark/osf.io,KAsante95/osf.io,caseyrygt/osf.io,doublebits/osf.io,mluke93/osf.io,caseyrollins/osf.io,petermalcolm/osf.io,Johnetordoff/osf.io,emetsger/osf.io,sbt9uc/osf.io,billyhunt/osf.io,RomanZWang/osf.io,monikagrabowska/osf.io,wearpants/osf.io,caseyrollins/osf.io,adlius/osf.io,chrisseto/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,arpitar/osf.io,monikagrabowska/osf.io,barbour-em/osf.io,samanehsan/osf.io,GaryKriebel/osf.io,cosenal/osf.io,aaxelb/osf.io,baylee-d/osf.io,cslzchen/osf.io,rdhyee/osf.io,crcresearch/osf.io,samchrisinger/osf.io,ZobairAlijan/osf.io,Ghalko/osf.io,adlius/osf.io,brianjgeiger/osf.io,samchrisinger/osf.io,jmcarp/osf.io,erinspace/osf.io,binoculars/osf.io,zachjanicki/osf.io,doublebits/osf.io,zkraime/osf.io,MerlinZhang/osf.io,laurenrevere/osf.io,brandonPurvis/osf.io,ticklemepierce/osf.io,kushG/osf.io,barbour-em/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,revanthkolli/osf.io,TomBaxter/osf.io,kushG/osf.io,laurenrevere/osf.io,DanielSBrown/osf.io,brianjgeiger/osf.io,himanshuo/osf.io,erinspace/osf.io,zamattiac/osf.io,jinluyuan/osf.io,samchrisinger/osf.io,wearpants/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,fabianvf/osf.io,RomanZWang/osf.io,ckc6cz/osf.io,abought/osf.io,danielneis/osf.io,mluke93/osf.io,Nesiehr/osf.io,monikagrabowska/osf.io
|
Add migration to ensure consistency on file keys.
Resolves https://github.com/CenterForOpenScience/openscienceframework.org/issues/1119
|
#!/usr/bin/env python
# encoding: utf-8
"""Find all nodes with different sets of keys for `files_current` and
`files_versions`, and ensure that all keys present in the former are also
present in the latter.
"""
from website.models import Node
from website.app import init_app
def find_file_mismatch_nodes():
"""Find nodes with inconsistent `files_current` and `files_versions` field
keys.
"""
return [
node for node in Node.find()
if set(node.files_versions.keys()) != set(node.files_current.keys())
]
def migrate_node(node):
"""Ensure that all keys present in `files_current` are also present in
`files_versions`.
"""
for key, file_id in node.files_current.iteritems():
if key not in node.files_versions:
node.files_versions[key] = [file_id]
else:
if file_id not in node.files_versions[key]:
node.files_versions[key].append(file_id)
node.save()
def main(dry_run=True):
init_app()
nodes = find_file_mismatch_nodes()
print('Migrating {0} nodes'.format(len(nodes)))
if dry_run:
return
for node in nodes:
migrate_node(node)
if __name__ == '__main__':
import sys
dry_run = 'dry' in sys.argv
main(dry_run=dry_run)
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from framework.auth import Auth
class TestMigrateFiles(OsfTestCase):
def clear(self):
Node.remove()
def setUp(self):
super(TestMigrateFiles, self).setUp()
self.clear()
self.nodes = []
for idx in range(3):
node = ProjectFactory()
node.add_file(
Auth(user=node.creator),
'name',
'contents',
len('contents'),
'text/plain',
)
self.nodes.append(node)
self.nodes[-1].files_versions = {}
self.nodes[-1].save()
# Sanity check
assert_in('name', self.nodes[-1].files_current)
assert_not_in('name', self.nodes[-1].files_versions)
def tearDown(self):
super(TestMigrateFiles, self).tearDown()
self.clear()
def test_get_targets(self):
targets = find_file_mismatch_nodes()
assert_equal(len(targets), 1)
assert_equal(targets[0], self.nodes[-1])
def test_migrate(self):
main(dry_run=False)
assert_equal(len(find_file_mismatch_nodes()), 0)
assert_in('name', self.nodes[-1].files_versions)
assert_equal(
self.nodes[-1].files_current['name'],
self.nodes[-1].files_versions['name'][0],
)
|
<commit_before><commit_msg>Add migration to ensure consistency on file keys.
Resolves https://github.com/CenterForOpenScience/openscienceframework.org/issues/1119<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
"""Find all nodes with different sets of keys for `files_current` and
`files_versions`, and ensure that all keys present in the former are also
present in the latter.
"""
from website.models import Node
from website.app import init_app
def find_file_mismatch_nodes():
"""Find nodes with inconsistent `files_current` and `files_versions` field
keys.
"""
return [
node for node in Node.find()
if set(node.files_versions.keys()) != set(node.files_current.keys())
]
def migrate_node(node):
"""Ensure that all keys present in `files_current` are also present in
`files_versions`.
"""
for key, file_id in node.files_current.iteritems():
if key not in node.files_versions:
node.files_versions[key] = [file_id]
else:
if file_id not in node.files_versions[key]:
node.files_versions[key].append(file_id)
node.save()
def main(dry_run=True):
init_app()
nodes = find_file_mismatch_nodes()
print('Migrating {0} nodes'.format(len(nodes)))
if dry_run:
return
for node in nodes:
migrate_node(node)
if __name__ == '__main__':
import sys
dry_run = 'dry' in sys.argv
main(dry_run=dry_run)
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from framework.auth import Auth
class TestMigrateFiles(OsfTestCase):
def clear(self):
Node.remove()
def setUp(self):
super(TestMigrateFiles, self).setUp()
self.clear()
self.nodes = []
for idx in range(3):
node = ProjectFactory()
node.add_file(
Auth(user=node.creator),
'name',
'contents',
len('contents'),
'text/plain',
)
self.nodes.append(node)
self.nodes[-1].files_versions = {}
self.nodes[-1].save()
# Sanity check
assert_in('name', self.nodes[-1].files_current)
assert_not_in('name', self.nodes[-1].files_versions)
def tearDown(self):
super(TestMigrateFiles, self).tearDown()
self.clear()
def test_get_targets(self):
targets = find_file_mismatch_nodes()
assert_equal(len(targets), 1)
assert_equal(targets[0], self.nodes[-1])
def test_migrate(self):
main(dry_run=False)
assert_equal(len(find_file_mismatch_nodes()), 0)
assert_in('name', self.nodes[-1].files_versions)
assert_equal(
self.nodes[-1].files_current['name'],
self.nodes[-1].files_versions['name'][0],
)
|
Add migration to ensure consistency on file keys.
Resolves https://github.com/CenterForOpenScience/openscienceframework.org/issues/1119#!/usr/bin/env python
# encoding: utf-8
"""Find all nodes with different sets of keys for `files_current` and
`files_versions`, and ensure that all keys present in the former are also
present in the latter.
"""
from website.models import Node
from website.app import init_app
def find_file_mismatch_nodes():
"""Find nodes with inconsistent `files_current` and `files_versions` field
keys.
"""
return [
node for node in Node.find()
if set(node.files_versions.keys()) != set(node.files_current.keys())
]
def migrate_node(node):
"""Ensure that all keys present in `files_current` are also present in
`files_versions`.
"""
for key, file_id in node.files_current.iteritems():
if key not in node.files_versions:
node.files_versions[key] = [file_id]
else:
if file_id not in node.files_versions[key]:
node.files_versions[key].append(file_id)
node.save()
def main(dry_run=True):
init_app()
nodes = find_file_mismatch_nodes()
print('Migrating {0} nodes'.format(len(nodes)))
if dry_run:
return
for node in nodes:
migrate_node(node)
if __name__ == '__main__':
import sys
dry_run = 'dry' in sys.argv
main(dry_run=dry_run)
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from framework.auth import Auth
class TestMigrateFiles(OsfTestCase):
def clear(self):
Node.remove()
def setUp(self):
super(TestMigrateFiles, self).setUp()
self.clear()
self.nodes = []
for idx in range(3):
node = ProjectFactory()
node.add_file(
Auth(user=node.creator),
'name',
'contents',
len('contents'),
'text/plain',
)
self.nodes.append(node)
self.nodes[-1].files_versions = {}
self.nodes[-1].save()
# Sanity check
assert_in('name', self.nodes[-1].files_current)
assert_not_in('name', self.nodes[-1].files_versions)
def tearDown(self):
super(TestMigrateFiles, self).tearDown()
self.clear()
def test_get_targets(self):
targets = find_file_mismatch_nodes()
assert_equal(len(targets), 1)
assert_equal(targets[0], self.nodes[-1])
def test_migrate(self):
main(dry_run=False)
assert_equal(len(find_file_mismatch_nodes()), 0)
assert_in('name', self.nodes[-1].files_versions)
assert_equal(
self.nodes[-1].files_current['name'],
self.nodes[-1].files_versions['name'][0],
)
|
<commit_before><commit_msg>Add migration to ensure consistency on file keys.
Resolves https://github.com/CenterForOpenScience/openscienceframework.org/issues/1119<commit_after>#!/usr/bin/env python
# encoding: utf-8
"""Find all nodes with different sets of keys for `files_current` and
`files_versions`, and ensure that all keys present in the former are also
present in the latter.
"""
from website.models import Node
from website.app import init_app
def find_file_mismatch_nodes():
"""Find nodes with inconsistent `files_current` and `files_versions` field
keys.
"""
return [
node for node in Node.find()
if set(node.files_versions.keys()) != set(node.files_current.keys())
]
def migrate_node(node):
"""Ensure that all keys present in `files_current` are also present in
`files_versions`.
"""
for key, file_id in node.files_current.iteritems():
if key not in node.files_versions:
node.files_versions[key] = [file_id]
else:
if file_id not in node.files_versions[key]:
node.files_versions[key].append(file_id)
node.save()
def main(dry_run=True):
init_app()
nodes = find_file_mismatch_nodes()
print('Migrating {0} nodes'.format(len(nodes)))
if dry_run:
return
for node in nodes:
migrate_node(node)
if __name__ == '__main__':
import sys
dry_run = 'dry' in sys.argv
main(dry_run=dry_run)
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from framework.auth import Auth
class TestMigrateFiles(OsfTestCase):
def clear(self):
Node.remove()
def setUp(self):
super(TestMigrateFiles, self).setUp()
self.clear()
self.nodes = []
for idx in range(3):
node = ProjectFactory()
node.add_file(
Auth(user=node.creator),
'name',
'contents',
len('contents'),
'text/plain',
)
self.nodes.append(node)
self.nodes[-1].files_versions = {}
self.nodes[-1].save()
# Sanity check
assert_in('name', self.nodes[-1].files_current)
assert_not_in('name', self.nodes[-1].files_versions)
def tearDown(self):
super(TestMigrateFiles, self).tearDown()
self.clear()
def test_get_targets(self):
targets = find_file_mismatch_nodes()
assert_equal(len(targets), 1)
assert_equal(targets[0], self.nodes[-1])
def test_migrate(self):
main(dry_run=False)
assert_equal(len(find_file_mismatch_nodes()), 0)
assert_in('name', self.nodes[-1].files_versions)
assert_equal(
self.nodes[-1].files_current['name'],
self.nodes[-1].files_versions['name'][0],
)
|
|
386baa36355b0e9378fff59fe768d1baa7e73fec
|
scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py
|
scripts/examples/Arduino/Portenta-H7/21-Sensor-Control/himax_motion_detection.py
|
# Himax motion detection example.
import sensor, image, time, pyb
from pyb import Pin, ExtInt
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_framerate(15)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
motion_detected = False
def on_motion(line):
global motion_detected
motion_detected = True
led = pyb.LED(3)
ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
if (motion_detected):
led.on()
time.sleep_ms(500)
# Clear motion detection flag
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
motion_detected = False
led.off()
print(clock.fps())
|
Add Himax motion detection example.
|
Add Himax motion detection example.
|
Python
|
mit
|
openmv/openmv,iabdalkader/openmv,openmv/openmv,iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv,kwagyeman/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv
|
Add Himax motion detection example.
|
# Himax motion detection example.
import sensor, image, time, pyb
from pyb import Pin, ExtInt
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_framerate(15)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
motion_detected = False
def on_motion(line):
global motion_detected
motion_detected = True
led = pyb.LED(3)
ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
if (motion_detected):
led.on()
time.sleep_ms(500)
# Clear motion detection flag
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
motion_detected = False
led.off()
print(clock.fps())
|
<commit_before><commit_msg>Add Himax motion detection example.<commit_after>
|
# Himax motion detection example.
import sensor, image, time, pyb
from pyb import Pin, ExtInt
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_framerate(15)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
motion_detected = False
def on_motion(line):
global motion_detected
motion_detected = True
led = pyb.LED(3)
ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
if (motion_detected):
led.on()
time.sleep_ms(500)
# Clear motion detection flag
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
motion_detected = False
led.off()
print(clock.fps())
|
Add Himax motion detection example.# Himax motion detection example.
import sensor, image, time, pyb
from pyb import Pin, ExtInt
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_framerate(15)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
motion_detected = False
def on_motion(line):
global motion_detected
motion_detected = True
led = pyb.LED(3)
ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
if (motion_detected):
led.on()
time.sleep_ms(500)
# Clear motion detection flag
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
motion_detected = False
led.off()
print(clock.fps())
|
<commit_before><commit_msg>Add Himax motion detection example.<commit_after># Himax motion detection example.
import sensor, image, time, pyb
from pyb import Pin, ExtInt
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QVGA)
sensor.set_framerate(15)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_THRESHOLD, 0x01)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_WINDOW, (0, 0, 320, 240))
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
sensor.ioctl(sensor.IOCTL_HIMAX_MD_ENABLE, True)
motion_detected = False
def on_motion(line):
global motion_detected
motion_detected = True
led = pyb.LED(3)
ext = ExtInt(Pin("PC15"), ExtInt.IRQ_RISING, Pin.PULL_DOWN, on_motion)
clock = time.clock()
while(True):
clock.tick()
img = sensor.snapshot()
if (motion_detected):
led.on()
time.sleep_ms(500)
# Clear motion detection flag
sensor.ioctl(sensor.IOCTL_HIMAX_MD_CLEAR)
motion_detected = False
led.off()
print(clock.fps())
|
|
a801deeaa00e443b3c68c1fbcea1e6ff62d90082
|
python/addusers.py
|
python/addusers.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Adds a sequential number of users into a test database
with username: newusern and password passwordn
Not for production usage
"""
import MySQLdb
hostname = ""  # FILL IN
username = ""  # FILL IN
password = ""  # FILL IN
# Simple routine to run a query on a database and print the results:
def doQuery( conn, n_users ) :
cur = conn.cursor()
try:
for i in range(0,n_users):
cur.execute("""CREATE USER \'newuser%i\'@\'localhost\' IDENTIFIED BY \'password%i\'""" % (i,i) )
cur.execute( """GRANT ALL PRIVILEGES ON * . * TO \'newuser%i\'@\'localhost\'""" % i )
cur.execute( """FLUSH PRIVILEGES""" )
except MySQLdb.Error, e:
try:
print ("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
except IndexError:
print ("MySQL Error: %s" % str(e))
if __name__ == '__main__':
    print("Using MySQLdb…")
    myConnection = MySQLdb.connect( host=hostname, user=username, passwd=password )
    doQuery( myConnection, 20 )
myConnection.close()
|
Add Python script to generate users
|
Add Python script to generate users
|
Python
|
mit
|
veekaybee/intro-to-sql,veekaybee/intro-to-sql,veekaybee/intro-to-sql
|
Add Python script to generate users
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Adds a sequential number of users into a test database
with username: newusern and password newusern
Not for production usage
"""
import MySQLdb
hostname = ''  # FILL IN
username = ''  # FILL IN
password = ''  # FILL IN
# Simple routine to run a query on a database and print the results:
def doQuery( conn, n_users ) :
cur = conn.cursor()
try:
for i in range(0,n_users):
cur.execute("""CREATE USER \'newuser%i\'@\'localhost\' IDENTIFIED BY \'password%i\'""" % (i,i) )
cur.execute( """GRANT ALL PRIVILEGES ON * . * TO \'newuser%i\'@\'localhost\'""" % i )
cur.execute( """FLUSH PRIVILEGES""" )
except MySQLdb.Error, e:
try:
print ("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
except IndexError:
print ("MySQL Error: %s" % str(e))
if __name__ == '__main__':
    print("Using MySQLdb…")
    myConnection = MySQLdb.connect( host=hostname, user=username, passwd=password )
    doQuery( myConnection, 20 )
myConnection.close()
|
<commit_before><commit_msg>Add Python script to generate users<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Adds a sequential number of users into a test database
with username: newusern and password newusern
Not for production usage
"""
import MySQLdb
hostname = ''  # FILL IN
username = ''  # FILL IN
password = ''  # FILL IN
# Simple routine to run a query on a database and print the results:
def doQuery( conn, n_users ) :
cur = conn.cursor()
try:
for i in range(0,n_users):
cur.execute("""CREATE USER \'newuser%i\'@\'localhost\' IDENTIFIED BY \'password%i\'""" % (i,i) )
cur.execute( """GRANT ALL PRIVILEGES ON * . * TO \'newuser%i\'@\'localhost\'""" % i )
cur.execute( """FLUSH PRIVILEGES""" )
except MySQLdb.Error, e:
try:
print ("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
except IndexError:
print ("MySQL Error: %s" % str(e))
if __name__ == '__main__':
    print("Using MySQLdb…")
    myConnection = MySQLdb.connect( host=hostname, user=username, passwd=password )
    doQuery( myConnection, 20 )
myConnection.close()
|
Add Python script to generate users#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Adds a sequential number of users into a test database
with username: newusern and password newusern
Not for production usage
"""
import MySQLdb
hostname = ''  # FILL IN
username = ''  # FILL IN
password = ''  # FILL IN
# Simple routine to run a query on a database and print the results:
def doQuery( conn, n_users ) :
cur = conn.cursor()
try:
for i in range(0,n_users):
cur.execute("""CREATE USER \'newuser%i\'@\'localhost\' IDENTIFIED BY \'password%i\'""" % (i,i) )
cur.execute( """GRANT ALL PRIVILEGES ON * . * TO \'newuser%i\'@\'localhost\'""" % i )
cur.execute( """FLUSH PRIVILEGES""" )
except MySQLdb.Error, e:
try:
print ("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
except IndexError:
print ("MySQL Error: %s" % str(e))
if __name__ == '__main__':
    print("Using MySQLdb…")
    myConnection = MySQLdb.connect( host=hostname, user=username, passwd=password )
    doQuery( myConnection, 20 )
myConnection.close()
|
<commit_before><commit_msg>Add Python script to generate users<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Adds a sequential number of users into a test database
with username: newusern and password newusern
Not for production usage
"""
import MySQLdb
hostname = ''  # FILL IN
username = ''  # FILL IN
password = ''  # FILL IN
# Simple routine to run a query on a database and print the results:
def doQuery( conn, n_users ) :
cur = conn.cursor()
try:
for i in range(0,n_users):
cur.execute("""CREATE USER \'newuser%i\'@\'localhost\' IDENTIFIED BY \'password%i\'""" % (i,i) )
cur.execute( """GRANT ALL PRIVILEGES ON * . * TO \'newuser%i\'@\'localhost\'""" % i )
cur.execute( """FLUSH PRIVILEGES""" )
except MySQLdb.Error, e:
try:
print ("MySQL Error [%d]: %s" % (e.args[0], e.args[1]))
except IndexError:
print ("MySQL Error: %s" % str(e))
if __name__ == '__main__':
    print("Using MySQLdb…")
    myConnection = MySQLdb.connect( host=hostname, user=username, passwd=password )
    doQuery( myConnection, 20 )
myConnection.close()
|
|
ff98bdf9ce263648de784183ad5984864f9d387a
|
tests/api/test_refs.py
|
tests/api/test_refs.py
|
async def test_create(spawn_client, test_random_alphanumeric, static_time):
client = await spawn_client(authorize=True, permissions=["create_ref"])
data = {
"name": "Test Viruses",
"description": "A bunch of viruses used for testing",
"data_type": "genome",
"organism": "virus",
"public": True
}
resp = await client.post("/api/refs", data)
assert resp.status == 201
assert resp.headers["Location"] == "/api/refs/" + test_random_alphanumeric.history[0]
assert await resp.json() == dict(
data,
id=test_random_alphanumeric.history[0],
created_at=static_time.iso,
user={
"id": "test"
},
users=[{
"build": True,
"id": "test",
"modify": True,
"modify_kind": True,
"remove": True
}]
)
|
Add ref create api test
|
Add ref create api test
|
Python
|
mit
|
virtool/virtool,igboyes/virtool,virtool/virtool,igboyes/virtool
|
Add ref create api test
|
async def test_create(spawn_client, test_random_alphanumeric, static_time):
client = await spawn_client(authorize=True, permissions=["create_ref"])
data = {
"name": "Test Viruses",
"description": "A bunch of viruses used for testing",
"data_type": "genome",
"organism": "virus",
"public": True
}
resp = await client.post("/api/refs", data)
assert resp.status == 201
assert resp.headers["Location"] == "/api/refs/" + test_random_alphanumeric.history[0]
assert await resp.json() == dict(
data,
id=test_random_alphanumeric.history[0],
created_at=static_time.iso,
user={
"id": "test"
},
users=[{
"build": True,
"id": "test",
"modify": True,
"modify_kind": True,
"remove": True
}]
)
|
<commit_before><commit_msg>Add ref create api test<commit_after>
|
async def test_create(spawn_client, test_random_alphanumeric, static_time):
client = await spawn_client(authorize=True, permissions=["create_ref"])
data = {
"name": "Test Viruses",
"description": "A bunch of viruses used for testing",
"data_type": "genome",
"organism": "virus",
"public": True
}
resp = await client.post("/api/refs", data)
assert resp.status == 201
assert resp.headers["Location"] == "/api/refs/" + test_random_alphanumeric.history[0]
assert await resp.json() == dict(
data,
id=test_random_alphanumeric.history[0],
created_at=static_time.iso,
user={
"id": "test"
},
users=[{
"build": True,
"id": "test",
"modify": True,
"modify_kind": True,
"remove": True
}]
)
|
Add ref create api testasync def test_create(spawn_client, test_random_alphanumeric, static_time):
client = await spawn_client(authorize=True, permissions=["create_ref"])
data = {
"name": "Test Viruses",
"description": "A bunch of viruses used for testing",
"data_type": "genome",
"organism": "virus",
"public": True
}
resp = await client.post("/api/refs", data)
assert resp.status == 201
assert resp.headers["Location"] == "/api/refs/" + test_random_alphanumeric.history[0]
assert await resp.json() == dict(
data,
id=test_random_alphanumeric.history[0],
created_at=static_time.iso,
user={
"id": "test"
},
users=[{
"build": True,
"id": "test",
"modify": True,
"modify_kind": True,
"remove": True
}]
)
|
<commit_before><commit_msg>Add ref create api test<commit_after>async def test_create(spawn_client, test_random_alphanumeric, static_time):
client = await spawn_client(authorize=True, permissions=["create_ref"])
data = {
"name": "Test Viruses",
"description": "A bunch of viruses used for testing",
"data_type": "genome",
"organism": "virus",
"public": True
}
resp = await client.post("/api/refs", data)
assert resp.status == 201
assert resp.headers["Location"] == "/api/refs/" + test_random_alphanumeric.history[0]
assert await resp.json() == dict(
data,
id=test_random_alphanumeric.history[0],
created_at=static_time.iso,
user={
"id": "test"
},
users=[{
"build": True,
"id": "test",
"modify": True,
"modify_kind": True,
"remove": True
}]
)
|
|
c122db5ceda59d786bd550f586ea87d808595ab6
|
pombola/nigeria/management/commands/nigeria_update_lga_boundaries_from_gadm.py
|
pombola/nigeria/management/commands/nigeria_update_lga_boundaries_from_gadm.py
|
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db import transaction
from mapit.management.command_utils import save_polygons, fix_invalid_geos_geometry
from mapit.models import Area, Type
class Command(BaseCommand):
help = "Update the Nigeria boundaries from GADM"
args = '<SHP FILENAME>'
def get_lga_area(self, lga_name, state_name):
lga_name_in_db = {
'Eastern Obolo': 'Eastern O bolo',
}.get(lga_name, lga_name)
# print "state:", state_name
kwargs = {
'type': self.lga_type,
'name__iexact': lga_name_in_db,
'parent_area__name': state_name,
}
try:
area = Area.objects.get(**kwargs)
except Area.DoesNotExist:
del kwargs['parent_area__name']
area = Area.objects.get(**kwargs)
return area
def fix_geometry(self, g):
# Make a GEOS geometry only to check for validity:
geos_g = g.geos
if not geos_g.valid:
geos_g = fix_invalid_geos_geometry(geos_g)
if geos_g is None:
print "The geometry was invalid and couldn't be fixed"
g = None
else:
g = geos_g.ogr
return g
def handle(self, filename, **options):
with transaction.atomic():
self.lga_type = Type.objects.get(code='LGA')
ds = DataSource(filename)
layer = ds[0]
for feature in layer:
lga_name = unicode(feature['NAME_2'])
state_name = unicode(feature['NAME_1'])
print "Updating LGA {0} in state {1}".format(
lga_name, state_name
)
area = self.get_lga_area(lga_name, state_name)
g = feature.geom.transform('4326', clone=True)
g = self.fix_geometry(g)
if g is None:
continue
poly = [g]
save_polygons({area.id: (area, poly)})
|
Add a script to reimport the LGA boundaries from the GADM.org data
|
NG: Add a script to reimport the LGA boundaries from the GADM.org data
The original import of the LGA boundaries from the GADM.org data was
missing some polygons: some LGAs were entirely without geometry. My
assumption (since I don't have the GADM data from that time) is that
these polygons were invalid in some way or simply missing. This
script replaces all the LGA boundaries from a current download of GADM
boundary data, and applies the fix_invalid_geos_geometry method from
MapIt to each. This does seem to fill in the missing polygons in the
LGA data.
This should be run on the NGA_adm2.shp file.
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
NG: Add a script to reimport the LGA boundaries from the GADM.org data
The original import of the LGA boundaries from the GADM.org data was
missing some polygons: some LGAs were entirely without geometry. My
assumption (since I don't have the GADM data from that time) is that
these polygons were invalid in some way or simply missing. This
script replaces all the LGA boundaries from a current download of GADM
boundary data, and applies the fix_invalid_geos_geometry method from
MapIt to each. This does seem to fill in the missing polygons in the
LGA data.
This should be run on the NGA_adm2.shp file.
|
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db import transaction
from mapit.management.command_utils import save_polygons, fix_invalid_geos_geometry
from mapit.models import Area, Type
class Command(BaseCommand):
help = "Update the Nigeria boundaries from GADM"
args = '<SHP FILENAME>'
def get_lga_area(self, lga_name, state_name):
lga_name_in_db = {
'Eastern Obolo': 'Eastern O bolo',
}.get(lga_name, lga_name)
# print "state:", state_name
kwargs = {
'type': self.lga_type,
'name__iexact': lga_name_in_db,
'parent_area__name': state_name,
}
try:
area = Area.objects.get(**kwargs)
except Area.DoesNotExist:
del kwargs['parent_area__name']
area = Area.objects.get(**kwargs)
return area
def fix_geometry(self, g):
# Make a GEOS geometry only to check for validity:
geos_g = g.geos
if not geos_g.valid:
geos_g = fix_invalid_geos_geometry(geos_g)
if geos_g is None:
print "The geometry was invalid and couldn't be fixed"
g = None
else:
g = geos_g.ogr
return g
def handle(self, filename, **options):
with transaction.atomic():
self.lga_type = Type.objects.get(code='LGA')
ds = DataSource(filename)
layer = ds[0]
for feature in layer:
lga_name = unicode(feature['NAME_2'])
state_name = unicode(feature['NAME_1'])
print "Updating LGA {0} in state {1}".format(
lga_name, state_name
)
area = self.get_lga_area(lga_name, state_name)
g = feature.geom.transform('4326', clone=True)
g = self.fix_geometry(g)
if g is None:
continue
poly = [g]
save_polygons({area.id: (area, poly)})
|
<commit_before><commit_msg>NG: Add a script to reimport the LGA boundaries from the GADM.org data
The original import of the LGA boundaries from the GADM.org data was
missing some polygons: some LGAs were entirely without geometry. My
assumption (since I don't have the GADM data from that time) is that
these polygons were invalid in some way or simply missing. This
script replaces all the LGA boundaries from a current download of GADM
boundary data, and applies the fix_invalid_geos_geometry method from
MapIt to each. This does seem to fill in the missing polygons in the
LGA data.
This should be run on the NGA_adm2.shp file.<commit_after>
|
from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db import transaction
from mapit.management.command_utils import save_polygons, fix_invalid_geos_geometry
from mapit.models import Area, Type
class Command(BaseCommand):
help = "Update the Nigeria boundaries from GADM"
args = '<SHP FILENAME>'
def get_lga_area(self, lga_name, state_name):
lga_name_in_db = {
'Eastern Obolo': 'Eastern O bolo',
}.get(lga_name, lga_name)
# print "state:", state_name
kwargs = {
'type': self.lga_type,
'name__iexact': lga_name_in_db,
'parent_area__name': state_name,
}
try:
area = Area.objects.get(**kwargs)
except Area.DoesNotExist:
del kwargs['parent_area__name']
area = Area.objects.get(**kwargs)
return area
def fix_geometry(self, g):
# Make a GEOS geometry only to check for validity:
geos_g = g.geos
if not geos_g.valid:
geos_g = fix_invalid_geos_geometry(geos_g)
if geos_g is None:
print "The geometry was invalid and couldn't be fixed"
g = None
else:
g = geos_g.ogr
return g
def handle(self, filename, **options):
with transaction.atomic():
self.lga_type = Type.objects.get(code='LGA')
ds = DataSource(filename)
layer = ds[0]
for feature in layer:
lga_name = unicode(feature['NAME_2'])
state_name = unicode(feature['NAME_1'])
print "Updating LGA {0} in state {1}".format(
lga_name, state_name
)
area = self.get_lga_area(lga_name, state_name)
g = feature.geom.transform('4326', clone=True)
g = self.fix_geometry(g)
if g is None:
continue
poly = [g]
save_polygons({area.id: (area, poly)})
|
NG: Add a script to reimport the LGA boundaries from the GADM.org data
The original import of the LGA boundaries from the GADM.org data was
missing some polygons: some LGAs were entirely without geometry. My
assumption (since I don't have the GADM data from that time) is that
these polygons were invalid in some way or simply missing. This
script replaces all the LGA boundaries from a current download of GADM
boundary data, and applies the fix_invalid_geos_geometry method from
MapIt to each. This does seem to fill in the missing polygons in the
LGA data.
This should be run on the NGA_adm2.shp file.from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db import transaction
from mapit.management.command_utils import save_polygons, fix_invalid_geos_geometry
from mapit.models import Area, Type
class Command(BaseCommand):
help = "Update the Nigeria boundaries from GADM"
args = '<SHP FILENAME>'
def get_lga_area(self, lga_name, state_name):
lga_name_in_db = {
'Eastern Obolo': 'Eastern O bolo',
}.get(lga_name, lga_name)
# print "state:", state_name
kwargs = {
'type': self.lga_type,
'name__iexact': lga_name_in_db,
'parent_area__name': state_name,
}
try:
area = Area.objects.get(**kwargs)
except Area.DoesNotExist:
del kwargs['parent_area__name']
area = Area.objects.get(**kwargs)
return area
def fix_geometry(self, g):
# Make a GEOS geometry only to check for validity:
geos_g = g.geos
if not geos_g.valid:
geos_g = fix_invalid_geos_geometry(geos_g)
if geos_g is None:
print "The geometry was invalid and couldn't be fixed"
g = None
else:
g = geos_g.ogr
return g
def handle(self, filename, **options):
with transaction.atomic():
self.lga_type = Type.objects.get(code='LGA')
ds = DataSource(filename)
layer = ds[0]
for feature in layer:
lga_name = unicode(feature['NAME_2'])
state_name = unicode(feature['NAME_1'])
print "Updating LGA {0} in state {1}".format(
lga_name, state_name
)
area = self.get_lga_area(lga_name, state_name)
g = feature.geom.transform('4326', clone=True)
g = self.fix_geometry(g)
if g is None:
continue
poly = [g]
save_polygons({area.id: (area, poly)})
|
<commit_before><commit_msg>NG: Add a script to reimport the LGA boundaries from the GADM.org data
The original import of the LGA boundaries from the GADM.org data was
missing some polygons: some LGAs were entirely without geometry. My
assumption (since I don't have the GADM data from that time) is that
these polygons were invalid in some way or simply missing. This
script replaces all the LGA boundaries from a current download of GADM
boundary data, and applies the fix_invalid_geos_geometry method from
MapIt to each. This does seem to fill in the missing polygons in the
LGA data.
This should be run on the NGA_adm2.shp file.<commit_after>from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db import transaction
from mapit.management.command_utils import save_polygons, fix_invalid_geos_geometry
from mapit.models import Area, Type
class Command(BaseCommand):
help = "Update the Nigeria boundaries from GADM"
args = '<SHP FILENAME>'
def get_lga_area(self, lga_name, state_name):
lga_name_in_db = {
'Eastern Obolo': 'Eastern O bolo',
}.get(lga_name, lga_name)
# print "state:", state_name
kwargs = {
'type': self.lga_type,
'name__iexact': lga_name_in_db,
'parent_area__name': state_name,
}
try:
area = Area.objects.get(**kwargs)
except Area.DoesNotExist:
del kwargs['parent_area__name']
area = Area.objects.get(**kwargs)
return area
def fix_geometry(self, g):
# Make a GEOS geometry only to check for validity:
geos_g = g.geos
if not geos_g.valid:
geos_g = fix_invalid_geos_geometry(geos_g)
if geos_g is None:
print "The geometry was invalid and couldn't be fixed"
g = None
else:
g = geos_g.ogr
return g
def handle(self, filename, **options):
with transaction.atomic():
self.lga_type = Type.objects.get(code='LGA')
ds = DataSource(filename)
layer = ds[0]
for feature in layer:
lga_name = unicode(feature['NAME_2'])
state_name = unicode(feature['NAME_1'])
print "Updating LGA {0} in state {1}".format(
lga_name, state_name
)
area = self.get_lga_area(lga_name, state_name)
g = feature.geom.transform('4326', clone=True)
g = self.fix_geometry(g)
if g is None:
continue
poly = [g]
save_polygons({area.id: (area, poly)})
|
|
816872186966186eb463d1fd45bea3a4c6f68e00
|
demoproject/tests_demo.py
|
demoproject/tests_demo.py
|
from demoproject.urls import urlpatterns
from django.test import Client, TestCase
class DemoProject_TestCase(TestCase):
def setUp(self):
self.client = Client()
def test_all_views_load(self):
"""
A simple sanity test to make sure all views from demoproject
still continue to load!
"""
for url in urlpatterns:
address = url._regex
if address.startswith('^'):
address = '/' + address[1:]
if address.endswith('$'):
address = address[:-1]
response = self.client.get(address)
self.assertEqual(response.status_code, 200)
|
Add new sanity test for demoproject views
|
Add new sanity test for demoproject views
helps boost test coverage but most importantly executes the code
paths that were fixed in 9d9033ecd5a8592a12872293cdf6d710cebf894f
|
Python
|
bsd-2-clause
|
pgollakota/django-chartit,pgollakota/django-chartit,pgollakota/django-chartit
|
Add new sanity test for demoproject views
helps boost test coverage but most importantly executes the code
paths that were fixed in 9d9033ecd5a8592a12872293cdf6d710cebf894f
|
from demoproject.urls import urlpatterns
from django.test import Client, TestCase
class DemoProject_TestCase(TestCase):
def setUp(self):
self.client = Client()
def test_all_views_load(self):
"""
A simple sanity test to make sure all views from demoproject
still continue to load!
"""
for url in urlpatterns:
address = url._regex
if address.startswith('^'):
address = '/' + address[1:]
if address.endswith('$'):
address = address[:-1]
response = self.client.get(address)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Add new sanity test for demoproject views
helps boost test coverage but most importantly executes the code
paths that were fixed in 9d9033ecd5a8592a12872293cdf6d710cebf894f<commit_after>
|
from demoproject.urls import urlpatterns
from django.test import Client, TestCase
class DemoProject_TestCase(TestCase):
def setUp(self):
self.client = Client()
def test_all_views_load(self):
"""
A simple sanity test to make sure all views from demoproject
still continue to load!
"""
for url in urlpatterns:
address = url._regex
if address.startswith('^'):
address = '/' + address[1:]
if address.endswith('$'):
address = address[:-1]
response = self.client.get(address)
self.assertEqual(response.status_code, 200)
|
Add new sanity test for demoproject views
helps boost test coverage but most importantly executes the code
paths that were fixed in 9d9033ecd5a8592a12872293cdf6d710cebf894ffrom demoproject.urls import urlpatterns
from django.test import Client, TestCase
class DemoProject_TestCase(TestCase):
def setUp(self):
self.client = Client()
def test_all_views_load(self):
"""
A simple sanity test to make sure all views from demoproject
still continue to load!
"""
for url in urlpatterns:
address = url._regex
if address.startswith('^'):
address = '/' + address[1:]
if address.endswith('$'):
address = address[:-1]
response = self.client.get(address)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Add new sanity test for demoproject views
helps boost test coverage but most importantly executes the code
paths that were fixed in 9d9033ecd5a8592a12872293cdf6d710cebf894f<commit_after>from demoproject.urls import urlpatterns
from django.test import Client, TestCase
class DemoProject_TestCase(TestCase):
def setUp(self):
self.client = Client()
def test_all_views_load(self):
"""
A simple sanity test to make sure all views from demoproject
still continue to load!
"""
for url in urlpatterns:
address = url._regex
if address.startswith('^'):
address = '/' + address[1:]
if address.endswith('$'):
address = address[:-1]
response = self.client.get(address)
self.assertEqual(response.status_code, 200)
|
|
959aecd612f66eee22e179f985227dbb6e63202a
|
__init__.py
|
__init__.py
|
from abaqus_model import *
from abaqus_postproc import *
from continuum_analysis import *
from rayleighritz import RayleighRitzDiscrete
from stiffcalc import *
|
Move buckling calcs to continuum_analysis
|
Move buckling calcs to continuum_analysis
|
Python
|
mit
|
dashdotrobot/bike-wheel-calc
|
Move buckling calcs to continuum_analysis
|
from abaqus_model import *
from abaqus_postproc import *
from continuum_analysis import *
from rayleighritz import RayleighRitzDiscrete
from stiffcalc import *
|
<commit_before><commit_msg>Move buckling calcs to continuum_analysis<commit_after>
|
from abaqus_model import *
from abaqus_postproc import *
from continuum_analysis import *
from rayleighritz import RayleighRitzDiscrete
from stiffcalc import *
|
Move buckling calcs to continuum_analysisfrom abaqus_model import *
from abaqus_postproc import *
from continuum_analysis import *
from rayleighritz import RayleighRitzDiscrete
from stiffcalc import *
|
<commit_before><commit_msg>Move buckling calcs to continuum_analysis<commit_after>from abaqus_model import *
from abaqus_postproc import *
from continuum_analysis import *
from rayleighritz import RayleighRitzDiscrete
from stiffcalc import *
|
|
736093f945ff53c4fe6d9d8d2e0c4afc28d9ace3
|
chimera/py/leetcode_rotate_list.py
|
chimera/py/leetcode_rotate_list.py
|
# coding=utf-8
"""
chimera.leetcode_rotate_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a list, rotate the list to the right by k places, where k is
non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return head
tail = p = head
n = 0
while tail:
n += 1
p = tail
tail = tail.next
p.next = head
rotate = k % n
for i in xrange(n - rotate):
p = head
head = head.next
p.next = None
return head
|
Add answer to leetcode rotate list
|
Add answer to leetcode rotate list
|
Python
|
mit
|
air-upc/chimera,air-upc/chimera
|
Add answer to leetcode rotate list
|
# coding=utf-8
"""
chimera.leetcode_rotate_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a list, rotate the list to the right by k places, where k is
non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return head
tail = p = head
n = 0
while tail:
n += 1
p = tail
tail = tail.next
p.next = head
rotate = k % n
for i in xrange(n - rotate):
p = head
head = head.next
p.next = None
return head
|
<commit_before><commit_msg>Add answer to leetcode rotate list<commit_after>
|
# coding=utf-8
"""
chimera.leetcode_rotate_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a list, rotate the list to the right by k places, where k is
non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return head
tail = p = head
n = 0
while tail:
n += 1
p = tail
tail = tail.next
p.next = head
rotate = k % n
for i in xrange(n - rotate):
p = head
head = head.next
p.next = None
return head
|
Add answer to leetcode rotate list# coding=utf-8
"""
chimera.leetcode_rotate_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a list, rotate the list to the right by k places, where k is
non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return head
tail = p = head
n = 0
while tail:
n += 1
p = tail
tail = tail.next
p.next = head
rotate = k % n
for i in xrange(n - rotate):
p = head
head = head.next
p.next = None
return head
|
<commit_before><commit_msg>Add answer to leetcode rotate list<commit_after># coding=utf-8
"""
chimera.leetcode_rotate_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a list, rotate the list to the right by k places, where k is
non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head:
return head
tail = p = head
n = 0
while tail:
n += 1
p = tail
tail = tail.next
p.next = head
rotate = k % n
for i in xrange(n - rotate):
p = head
head = head.next
p.next = None
return head
|
|
cc19cdc3430df018e3a8fa63abaf796a897a475b
|
Orange/tests/sql/test_naive_bayes.py
|
Orange/tests/sql/test_naive_bayes.py
|
import unittest
from numpy import array
import Orange.classification.naive_bayes as nb
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable
from Orange.data.variable import DiscreteVariable
class NaiveBayesTest(unittest.TestCase):
def test_NaiveBayes(self):
table = SqlTable(host='localhost', database='test', table='iris',
type_hints=dict(iris=DiscreteVariable(
values=['Iris-setosa', 'Iris-versicolor',
'Iris-virginica']),
__class_vars__=['iris']))
table = DiscretizeTable(table)
bayes = nb.BayesLearner()
clf = bayes(table)
# Single instance prediction
self.assertEqual(clf(table[0]), table[0].get_class())
# Table prediction
pred = clf(table)
actual = array([ins.get_class() for ins in table])
ca = pred == actual
ca = ca.sum() / len(ca)
self.assertGreater(ca, 0.95)
self.assertLess(ca, 1.)
|
Add naive bayes SQL test.
|
Add naive bayes SQL test.
|
Python
|
bsd-2-clause
|
kwikadi/orange3,kwikadi/orange3,qPCR4vir/orange3,kwikadi/orange3,cheral/orange3,qusp/orange3,marinkaz/orange3,cheral/orange3,qusp/orange3,kwikadi/orange3,kwikadi/orange3,cheral/orange3,marinkaz/orange3,qPCR4vir/orange3,qPCR4vir/orange3,kwikadi/orange3,marinkaz/orange3,cheral/orange3,qusp/orange3,qPCR4vir/orange3,qPCR4vir/orange3,qusp/orange3,marinkaz/orange3,cheral/orange3,marinkaz/orange3,marinkaz/orange3,qPCR4vir/orange3,cheral/orange3
|
Add naive bayes SQL test.
|
import unittest
from numpy import array
import Orange.classification.naive_bayes as nb
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable
from Orange.data.variable import DiscreteVariable
class NaiveBayesTest(unittest.TestCase):
def test_NaiveBayes(self):
table = SqlTable(host='localhost', database='test', table='iris',
type_hints=dict(iris=DiscreteVariable(
values=['Iris-setosa', 'Iris-versicolor',
'Iris-virginica']),
__class_vars__=['iris']))
table = DiscretizeTable(table)
bayes = nb.BayesLearner()
clf = bayes(table)
# Single instance prediction
self.assertEqual(clf(table[0]), table[0].get_class())
# Table prediction
pred = clf(table)
actual = array([ins.get_class() for ins in table])
ca = pred == actual
ca = ca.sum() / len(ca)
self.assertGreater(ca, 0.95)
self.assertLess(ca, 1.)
|
<commit_before><commit_msg>Add naive bayes SQL test.<commit_after>
|
import unittest
from numpy import array
import Orange.classification.naive_bayes as nb
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable
from Orange.data.variable import DiscreteVariable
class NaiveBayesTest(unittest.TestCase):
def test_NaiveBayes(self):
table = SqlTable(host='localhost', database='test', table='iris',
type_hints=dict(iris=DiscreteVariable(
values=['Iris-setosa', 'Iris-versicolor',
'Iris-virginica']),
__class_vars__=['iris']))
table = DiscretizeTable(table)
bayes = nb.BayesLearner()
clf = bayes(table)
# Single instance prediction
self.assertEqual(clf(table[0]), table[0].get_class())
# Table prediction
pred = clf(table)
actual = array([ins.get_class() for ins in table])
ca = pred == actual
ca = ca.sum() / len(ca)
self.assertGreater(ca, 0.95)
self.assertLess(ca, 1.)
|
Add naive bayes SQL test.import unittest
from numpy import array
import Orange.classification.naive_bayes as nb
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable
from Orange.data.variable import DiscreteVariable
class NaiveBayesTest(unittest.TestCase):
def test_NaiveBayes(self):
table = SqlTable(host='localhost', database='test', table='iris',
type_hints=dict(iris=DiscreteVariable(
values=['Iris-setosa', 'Iris-versicolor',
'Iris-virginica']),
__class_vars__=['iris']))
table = DiscretizeTable(table)
bayes = nb.BayesLearner()
clf = bayes(table)
# Single instance prediction
self.assertEqual(clf(table[0]), table[0].get_class())
# Table prediction
pred = clf(table)
actual = array([ins.get_class() for ins in table])
ca = pred == actual
ca = ca.sum() / len(ca)
self.assertGreater(ca, 0.95)
self.assertLess(ca, 1.)
|
<commit_before><commit_msg>Add naive bayes SQL test.<commit_after>import unittest
from numpy import array
import Orange.classification.naive_bayes as nb
from Orange.data.discretization import DiscretizeTable
from Orange.data.sql.table import SqlTable
from Orange.data.variable import DiscreteVariable
class NaiveBayesTest(unittest.TestCase):
def test_NaiveBayes(self):
table = SqlTable(host='localhost', database='test', table='iris',
type_hints=dict(iris=DiscreteVariable(
values=['Iris-setosa', 'Iris-versicolor',
'Iris-virginica']),
__class_vars__=['iris']))
table = DiscretizeTable(table)
bayes = nb.BayesLearner()
clf = bayes(table)
# Single instance prediction
self.assertEqual(clf(table[0]), table[0].get_class())
# Table prediction
pred = clf(table)
actual = array([ins.get_class() for ins in table])
ca = pred == actual
ca = ca.sum() / len(ca)
self.assertGreater(ca, 0.95)
self.assertLess(ca, 1.)
|
|
f39a640a8d5bf7d4a5d80f94235d1fa7461bd4dc
|
s3stash/stash_single_image.py
|
s3stash/stash_single_image.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import argparse
import logging
import json
from s3stash.nxstashref_image import NuxeoStashImage
def main(argv=None):
parser = argparse.ArgumentParser(description='Produce jp2 version of Nuxeo image file and stash in S3.')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument('--bucket', default='ucldc-private-files/jp2000', help="S3 bucket name")
parser.add_argument('--region', default='us-west-2', help='AWS region')
parser.add_argument('--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
parser.add_argument('--replace', action="store_true", help="replace file on s3 if it already exists")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = argv.path.split('/')[-1]
logfile = "logs/{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# convert and stash jp2
nxstash = NuxeoStashImage(argv.path, argv.bucket, argv.region, argv.pynuxrc, argv.replace)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
if 'already_s3_stashed' in report.keys():
print "already stashed:\t{}".format(report['already_s3_stashed'])
print "converted:\t{}".format(report['converted'])
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
Add code for stashing a single nuxeo image on s3.
|
Add code for stashing a single nuxeo image on s3.
|
Python
|
bsd-3-clause
|
barbarahui/nuxeo-calisphere,barbarahui/nuxeo-calisphere
|
Add code for stashing a single nuxeo image on s3.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import argparse
import logging
import json
from s3stash.nxstashref_image import NuxeoStashImage
def main(argv=None):
parser = argparse.ArgumentParser(description='Produce jp2 version of Nuxeo image file and stash in S3.')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument('--bucket', default='ucldc-private-files/jp2000', help="S3 bucket name")
parser.add_argument('--region', default='us-west-2', help='AWS region')
parser.add_argument('--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
parser.add_argument('--replace', action="store_true", help="replace file on s3 if it already exists")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = argv.path.split('/')[-1]
logfile = "logs/{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# convert and stash jp2
nxstash = NuxeoStashImage(argv.path, argv.bucket, argv.region, argv.pynuxrc, argv.replace)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
if 'already_s3_stashed' in report.keys():
print "already stashed:\t{}".format(report['already_s3_stashed'])
print "converted:\t{}".format(report['converted'])
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add code for stashing a single nuxeo image on s3.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import argparse
import logging
import json
from s3stash.nxstashref_image import NuxeoStashImage
def main(argv=None):
parser = argparse.ArgumentParser(description='Produce jp2 version of Nuxeo image file and stash in S3.')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument('--bucket', default='ucldc-private-files/jp2000', help="S3 bucket name")
parser.add_argument('--region', default='us-west-2', help='AWS region')
parser.add_argument('--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
parser.add_argument('--replace', action="store_true", help="replace file on s3 if it already exists")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = argv.path.split('/')[-1]
logfile = "logs/{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# convert and stash jp2
nxstash = NuxeoStashImage(argv.path, argv.bucket, argv.region, argv.pynuxrc, argv.replace)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
if 'already_s3_stashed' in report.keys():
print "already stashed:\t{}".format(report['already_s3_stashed'])
print "converted:\t{}".format(report['converted'])
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
Add code for stashing a single nuxeo image on s3.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import argparse
import logging
import json
from s3stash.nxstashref_image import NuxeoStashImage
def main(argv=None):
parser = argparse.ArgumentParser(description='Produce jp2 version of Nuxeo image file and stash in S3.')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument('--bucket', default='ucldc-private-files/jp2000', help="S3 bucket name")
parser.add_argument('--region', default='us-west-2', help='AWS region')
parser.add_argument('--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
parser.add_argument('--replace', action="store_true", help="replace file on s3 if it already exists")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = argv.path.split('/')[-1]
logfile = "logs/{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# convert and stash jp2
nxstash = NuxeoStashImage(argv.path, argv.bucket, argv.region, argv.pynuxrc, argv.replace)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
if 'already_s3_stashed' in report.keys():
print "already stashed:\t{}".format(report['already_s3_stashed'])
print "converted:\t{}".format(report['converted'])
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add code for stashing a single nuxeo image on s3.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
import argparse
import logging
import json
from s3stash.nxstashref_image import NuxeoStashImage
def main(argv=None):
parser = argparse.ArgumentParser(description='Produce jp2 version of Nuxeo image file and stash in S3.')
parser.add_argument('path', help="Nuxeo document path")
parser.add_argument('--bucket', default='ucldc-private-files/jp2000', help="S3 bucket name")
parser.add_argument('--region', default='us-west-2', help='AWS region')
parser.add_argument('--pynuxrc', default='~/.pynuxrc', help="rc file for use by pynux")
parser.add_argument('--replace', action="store_true", help="replace file on s3 if it already exists")
if argv is None:
argv = parser.parse_args()
# logging
# FIXME would like to name log with nuxeo UID
filename = argv.path.split('/')[-1]
logfile = "logs/{}.log".format(filename)
print "LOG:\t{}".format(logfile)
logging.basicConfig(filename=logfile, level=logging.INFO, format='%(asctime)s (%(name)s) [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
logger = logging.getLogger(__name__)
# convert and stash jp2
nxstash = NuxeoStashImage(argv.path, argv.bucket, argv.region, argv.pynuxrc, argv.replace)
report = nxstash.nxstashref()
# output report to json file
reportfile = "reports/{}.json".format(filename)
with open(reportfile, 'w') as f:
json.dump(report, f, sort_keys=True, indent=4)
# parse report to give basic stats
print "REPORT:\t{}".format(reportfile)
print "SUMMARY:"
if 'already_s3_stashed' in report.keys():
print "already stashed:\t{}".format(report['already_s3_stashed'])
print "converted:\t{}".format(report['converted'])
print "stashed:\t{}".format(report['stashed'])
print "\nDone."
if __name__ == "__main__":
sys.exit(main())
|
|
9f7bd49350b0d1b8a8986b28db75a5b369bf7bb5
|
py/utf-8-validation.py
|
py/utf-8-validation.py
|
class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
it = iter(data)
while True:
try:
c = it.next() & 0xff
try:
t = 0x80
n = 0
while t > 0:
if t & c:
n += 1
t >>= 1
else:
break
if n == 1 or n > 4:
return False
elif n > 1:
for _ in xrange(n - 1):
c = it.next() & 0xff
if c & 0xc0 != 0x80:
return False
except StopIteration:
return False
except StopIteration:
return True
|
Add py solution for 393. UTF-8 Validation
|
Add py solution for 393. UTF-8 Validation
393. UTF-8 Validation: https://leetcode.com/problems/utf-8-validation/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 393. UTF-8 Validation
393. UTF-8 Validation: https://leetcode.com/problems/utf-8-validation/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.
|
class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
it = iter(data)
while True:
try:
c = it.next() & 0xff
try:
t = 0x80
n = 0
while t > 0:
if t & c:
n += 1
t >>= 1
else:
break
if n == 1 or n > 4:
return False
elif n > 1:
for _ in xrange(n - 1):
c = it.next() & 0xff
if c & 0xc0 != 0x80:
return False
except StopIteration:
return False
except StopIteration:
return True
|
<commit_before><commit_msg>Add py solution for 393. UTF-8 Validation
393. UTF-8 Validation: https://leetcode.com/problems/utf-8-validation/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.<commit_after>
|
class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
it = iter(data)
while True:
try:
c = it.next() & 0xff
try:
t = 0x80
n = 0
while t > 0:
if t & c:
n += 1
t >>= 1
else:
break
if n == 1 or n > 4:
return False
elif n > 1:
for _ in xrange(n - 1):
c = it.next() & 0xff
if c & 0xc0 != 0x80:
return False
except StopIteration:
return False
except StopIteration:
return True
|
Add py solution for 393. UTF-8 Validation
393. UTF-8 Validation: https://leetcode.com/problems/utf-8-validation/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
it = iter(data)
while True:
try:
c = it.next() & 0xff
try:
t = 0x80
n = 0
while t > 0:
if t & c:
n += 1
t >>= 1
else:
break
if n == 1 or n > 4:
return False
elif n > 1:
for _ in xrange(n - 1):
c = it.next() & 0xff
if c & 0xc0 != 0x80:
return False
except StopIteration:
return False
except StopIteration:
return True
|
<commit_before><commit_msg>Add py solution for 393. UTF-8 Validation
393. UTF-8 Validation: https://leetcode.com/problems/utf-8-validation/
Approach:
Observe the first item remaining in each step. The value will be added
1 << step either the remaining count is odd or it's a left-to-right
step. Hence the n | 0x55555.. is the key.<commit_after>class Solution(object):
def validUtf8(self, data):
"""
:type data: List[int]
:rtype: bool
"""
it = iter(data)
while True:
try:
c = it.next() & 0xff
try:
t = 0x80
n = 0
while t > 0:
if t & c:
n += 1
t >>= 1
else:
break
if n == 1 or n > 4:
return False
elif n > 1:
for _ in xrange(n - 1):
c = it.next() & 0xff
if c & 0xc0 != 0x80:
return False
except StopIteration:
return False
except StopIteration:
return True
|
|
100a03003adf3f425d59b69e95078bd0f1e82193
|
test/reopen_screen.py
|
test/reopen_screen.py
|
#!/usr/bin/env python
# Test for bug reported by Jeremy Hill in which re-opening the screen
# would cause a segfault.
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import Screen, Viewport, swap_buffers
import pygame
from pygame.locals import QUIT,KEYDOWN,MOUSEBUTTONDOWN
from VisionEgg.Text import Text
from VisionEgg.Dots import DotArea2D
def run():
screen = Screen()
screen.parameters.bgcolor = (0.0,0.0,0.0) # black (RGB)
dots = DotArea2D( position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
size = ( 300.0 , 300.0 ),
signal_fraction = 0.1,
signal_direction_deg = 180.0,
velocity_pixels_per_sec = 10.0,
dot_lifespan_sec = 5.0,
dot_size = 3.0,
num_dots = 100)
text = Text( text = "Vision Egg dot_simple_loop demo.",
position = (screen.size[0]/2,2),
anchor = 'bottom',
color = (1.0,1.0,1.0))
viewport = Viewport( screen=screen, stimuli=[dots,text] )
# The main loop below is an alternative to using the
# VisionEgg.FlowControl.Presentation class.
quit_now = 0
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
viewport.draw()
swap_buffers()
screen.close()
print "run 1"
run()
print "run 2"
run()
print "done"
|
Add test script for segfault bug reported by Jeremy Hill.
|
Add test script for segfault bug reported by Jeremy Hill.
git-svn-id: 033d166fe8e629f6cbcd3c0e2b9ad0cffc79b88b@1477 3a63a0ee-37fe-0310-a504-e92b6e0a3ba7
|
Python
|
lgpl-2.1
|
visionegg/visionegg,visionegg/visionegg,visionegg/visionegg,visionegg/visionegg,visionegg/visionegg
|
Add test script for segfault bug reported by Jeremy Hill.
git-svn-id: 033d166fe8e629f6cbcd3c0e2b9ad0cffc79b88b@1477 3a63a0ee-37fe-0310-a504-e92b6e0a3ba7
|
#!/usr/bin/env python
# Test for bug reported by Jeremy Hill in which re-opening the screen
# would cause a segfault.
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import Screen, Viewport, swap_buffers
import pygame
from pygame.locals import QUIT,KEYDOWN,MOUSEBUTTONDOWN
from VisionEgg.Text import Text
from VisionEgg.Dots import DotArea2D
def run():
screen = Screen()
screen.parameters.bgcolor = (0.0,0.0,0.0) # black (RGB)
dots = DotArea2D( position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
size = ( 300.0 , 300.0 ),
signal_fraction = 0.1,
signal_direction_deg = 180.0,
velocity_pixels_per_sec = 10.0,
dot_lifespan_sec = 5.0,
dot_size = 3.0,
num_dots = 100)
text = Text( text = "Vision Egg dot_simple_loop demo.",
position = (screen.size[0]/2,2),
anchor = 'bottom',
color = (1.0,1.0,1.0))
viewport = Viewport( screen=screen, stimuli=[dots,text] )
# The main loop below is an alternative to using the
# VisionEgg.FlowControl.Presentation class.
quit_now = 0
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
viewport.draw()
swap_buffers()
screen.close()
print "run 1"
run()
print "run 2"
run()
print "done"
|
<commit_before><commit_msg>Add test script for segfault bug reported by Jeremy Hill.
git-svn-id: 033d166fe8e629f6cbcd3c0e2b9ad0cffc79b88b@1477 3a63a0ee-37fe-0310-a504-e92b6e0a3ba7<commit_after>
|
#!/usr/bin/env python
# Test for bug reported by Jeremy Hill in which re-opening the screen
# would cause a segfault.
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import Screen, Viewport, swap_buffers
import pygame
from pygame.locals import QUIT,KEYDOWN,MOUSEBUTTONDOWN
from VisionEgg.Text import Text
from VisionEgg.Dots import DotArea2D
def run():
screen = Screen()
screen.parameters.bgcolor = (0.0,0.0,0.0) # black (RGB)
dots = DotArea2D( position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
size = ( 300.0 , 300.0 ),
signal_fraction = 0.1,
signal_direction_deg = 180.0,
velocity_pixels_per_sec = 10.0,
dot_lifespan_sec = 5.0,
dot_size = 3.0,
num_dots = 100)
text = Text( text = "Vision Egg dot_simple_loop demo.",
position = (screen.size[0]/2,2),
anchor = 'bottom',
color = (1.0,1.0,1.0))
viewport = Viewport( screen=screen, stimuli=[dots,text] )
# The main loop below is an alternative to using the
# VisionEgg.FlowControl.Presentation class.
quit_now = 0
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
viewport.draw()
swap_buffers()
screen.close()
print "run 1"
run()
print "run 2"
run()
print "done"
|
Add test script for segfault bug reported by Jeremy Hill.
git-svn-id: 033d166fe8e629f6cbcd3c0e2b9ad0cffc79b88b@1477 3a63a0ee-37fe-0310-a504-e92b6e0a3ba7#!/usr/bin/env python
# Test for bug reported by Jeremy Hill in which re-opening the screen
# would cause a segfault.
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import Screen, Viewport, swap_buffers
import pygame
from pygame.locals import QUIT,KEYDOWN,MOUSEBUTTONDOWN
from VisionEgg.Text import Text
from VisionEgg.Dots import DotArea2D
def run():
screen = Screen()
screen.parameters.bgcolor = (0.0,0.0,0.0) # black (RGB)
dots = DotArea2D( position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
size = ( 300.0 , 300.0 ),
signal_fraction = 0.1,
signal_direction_deg = 180.0,
velocity_pixels_per_sec = 10.0,
dot_lifespan_sec = 5.0,
dot_size = 3.0,
num_dots = 100)
text = Text( text = "Vision Egg dot_simple_loop demo.",
position = (screen.size[0]/2,2),
anchor = 'bottom',
color = (1.0,1.0,1.0))
viewport = Viewport( screen=screen, stimuli=[dots,text] )
# The main loop below is an alternative to using the
# VisionEgg.FlowControl.Presentation class.
quit_now = 0
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
viewport.draw()
swap_buffers()
screen.close()
print "run 1"
run()
print "run 2"
run()
print "done"
|
<commit_before><commit_msg>Add test script for segfault bug reported by Jeremy Hill.
git-svn-id: 033d166fe8e629f6cbcd3c0e2b9ad0cffc79b88b@1477 3a63a0ee-37fe-0310-a504-e92b6e0a3ba7<commit_after>#!/usr/bin/env python
# Test for bug reported by Jeremy Hill in which re-opening the screen
# would cause a segfault.
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import Screen, Viewport, swap_buffers
import pygame
from pygame.locals import QUIT,KEYDOWN,MOUSEBUTTONDOWN
from VisionEgg.Text import Text
from VisionEgg.Dots import DotArea2D
def run():
screen = Screen()
screen.parameters.bgcolor = (0.0,0.0,0.0) # black (RGB)
dots = DotArea2D( position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
size = ( 300.0 , 300.0 ),
signal_fraction = 0.1,
signal_direction_deg = 180.0,
velocity_pixels_per_sec = 10.0,
dot_lifespan_sec = 5.0,
dot_size = 3.0,
num_dots = 100)
text = Text( text = "Vision Egg dot_simple_loop demo.",
position = (screen.size[0]/2,2),
anchor = 'bottom',
color = (1.0,1.0,1.0))
viewport = Viewport( screen=screen, stimuli=[dots,text] )
# The main loop below is an alternative to using the
# VisionEgg.FlowControl.Presentation class.
quit_now = 0
while not quit_now:
for event in pygame.event.get():
if event.type in (QUIT,KEYDOWN,MOUSEBUTTONDOWN):
quit_now = 1
screen.clear()
viewport.draw()
swap_buffers()
screen.close()
print "run 1"
run()
print "run 2"
run()
print "done"
|
|
1e7421878e90949abc4f6fac5835bd27b472d2b6
|
example_Knudsen.py
|
example_Knudsen.py
|
import openpnm as op
import numpy as np
import matplotlib.pyplot as plt
# Get Deff w/o including Knudsen effect
spacing = 1.0
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff0 = Mdot * L / A
# Get Deff w/ including Knudsen effect
mdiff = op.models.physics.diffusive_conductance.mixed_diffusivity
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
spacings = np.linspace(1e-9, 1e-4, 20)
spacings = np.logspace(-9, -3, 25)
Deff = []
for spacing in spacings:
np.random.seed(10)
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff.append(Mdot * L / A)
# Plot ratio of Deff w/ Knudsen to that w/o
Deff = np.array(Deff)
plt.figure()
plt.plot(spacings, Deff/Deff0)
plt.xscale("log")
plt.xlabel("spacing (m)")
plt.ylabel("Deff/Deff0")
|
Add example script for the newly added mixed_diffusivity
|
Add example script for the newly added mixed_diffusivity
|
Python
|
mit
|
TomTranter/OpenPNM,PMEAL/OpenPNM
|
Add example script for the newly added mixed_diffusivity
|
import openpnm as op
import numpy as np
import matplotlib.pyplot as plt
# Get Deff w/o including Knudsen effect
spacing = 1.0
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff0 = Mdot * L / A
# Get Deff w/ including Knudsen effect
mdiff = op.models.physics.diffusive_conductance.mixed_diffusivity
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
spacings = np.linspace(1e-9, 1e-4, 20)
spacings = np.logspace(-9, -3, 25)
Deff = []
for spacing in spacings:
np.random.seed(10)
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff.append(Mdot * L / A)
# Plot ratio of Deff w/ Knudsen to that w/o
Deff = np.array(Deff)
plt.figure()
plt.plot(spacings, Deff/Deff0)
plt.xscale("log")
plt.xlabel("spacing (m)")
plt.ylabel("Deff/Deff0")
|
<commit_before><commit_msg>Add example script for the newly added mixed_diffusivity<commit_after>
|
import openpnm as op
import numpy as np
import matplotlib.pyplot as plt
# Get Deff w/o including Knudsen effect
spacing = 1.0
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff0 = Mdot * L / A
# Get Deff w/ including Knudsen effect
mdiff = op.models.physics.diffusive_conductance.mixed_diffusivity
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
spacings = np.linspace(1e-9, 1e-4, 20)
spacings = np.logspace(-9, -3, 25)
Deff = []
for spacing in spacings:
np.random.seed(10)
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff.append(Mdot * L / A)
# Plot ratio of Deff w/ Knudsen to that w/o
Deff = np.array(Deff)
plt.figure()
plt.plot(spacings, Deff/Deff0)
plt.xscale("log")
plt.xlabel("spacing (m)")
plt.ylabel("Deff/Deff0")
|
Add example script for the newly added mixed_diffusivityimport openpnm as op
import numpy as np
import matplotlib.pyplot as plt
# Get Deff w/o including Knudsen effect
spacing = 1.0
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff0 = Mdot * L / A
# Get Deff w/ including Knudsen effect
mdiff = op.models.physics.diffusive_conductance.mixed_diffusivity
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
spacings = np.linspace(1e-9, 1e-4, 20)
spacings = np.logspace(-9, -3, 25)
Deff = []
for spacing in spacings:
np.random.seed(10)
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff.append(Mdot * L / A)
# Plot ratio of Deff w/ Knudsen to that w/o
Deff = np.array(Deff)
plt.figure()
plt.plot(spacings, Deff/Deff0)
plt.xscale("log")
plt.xlabel("spacing (m)")
plt.ylabel("Deff/Deff0")
|
<commit_before><commit_msg>Add example script for the newly added mixed_diffusivity<commit_after>import openpnm as op
import numpy as np
import matplotlib.pyplot as plt
# Get Deff w/o including Knudsen effect
spacing = 1.0
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff0 = Mdot * L / A
# Get Deff w/ including Knudsen effect
mdiff = op.models.physics.diffusive_conductance.mixed_diffusivity
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
spacings = np.linspace(1e-9, 1e-4, 20)
spacings = np.logspace(-9, -3, 25)
Deff = []
for spacing in spacings:
np.random.seed(10)
net = op.network.Cubic(shape=[10, 10, 10], spacing=spacing)
geom = op.geometry.StickAndBall(network=net)
air = op.phases.Air(network=net)
phys = op.physics.Standard(network=net, geometry=geom, phase=air)
phys.add_model(propname="throat.diffusive_conductance", model=mdiff)
fd = op.algorithms.FickianDiffusion(network=net, phase=air)
fd.set_value_BC(pores=net.pores("left"), values=1.0)
fd.set_value_BC(pores=net.pores("right"), values=0.0)
fd.run()
L = (net.shape * net.spacing)[1]
A = (net.shape * net.spacing)[[0, 2]].prod()
Mdot = fd.rate(pores=net.pores("left")).squeeze()
Deff.append(Mdot * L / A)
# Plot ratio of Deff w/ Knudsen to that w/o
Deff = np.array(Deff)
plt.figure()
plt.plot(spacings, Deff/Deff0)
plt.xscale("log")
plt.xlabel("spacing (m)")
plt.ylabel("Deff/Deff0")
|
|
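Aside on the OpenPNM record above: mixed_diffusivity is commonly described as blending bulk and Knudsen diffusivities, which is why Deff/Deff0 drops once the spacing approaches the mean free path of air. The sketch below illustrates that idea with a Bosanquet-style combination for a single cylindrical throat; the function name, the air properties, and the exact mixing rule are assumptions for illustration, not OpenPNM's implementation.

def bosanquet_diffusivity(d_pore, D_bulk=2.1e-5):
    # Illustrative Bosanquet mixing rule (assumed form, not OpenPNM's code).
    # Knudsen diffusivity of a cylindrical pore: D_K = d * v_mean / 3,
    # with v_mean ~ 467 m/s for air at room temperature.
    v_mean = 467.0
    D_knudsen = d_pore * v_mean / 3.0
    return 1.0 / (1.0 / D_bulk + 1.0 / D_knudsen)

# Far above the ~68 nm mean free path of air the result approaches D_bulk;
# well below it the Knudsen term dominates, mirroring the Deff/Deff0 curve.
print(bosanquet_diffusivity(1e-4), bosanquet_diffusivity(1e-8))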
e1772c008d607a2545ddaa05508b1a74473be0ec
|
airflow/migrations/versions/7171349d4c73_add_ti_job_id_index.py
|
airflow/migrations/versions/7171349d4c73_add_ti_job_id_index.py
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 7171349d4c73
Revises: cc1e65623dc7
Create Date: 2017-08-14 18:08:50.196042
"""
# revision identifiers, used by Alembic.
revision = '7171349d4c73'
down_revision = 'cc1e65623dc7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
|
Add TaskInstance index on job_id
|
[AIRFLOW-1495] Add TaskInstance index on job_id
Column job_id is unindexed in TaskInstance, it was
used as
default sort column in TaskInstanceView.
This commit adds the required migration to add the
index on
task_instance.job_id on future db upgrades.
Closes #2520 from edgarRd/erod-ti-jobid-index
|
Python
|
apache-2.0
|
asnir/airflow,janczak10/incubator-airflow,KL-WLCR/incubator-airflow,airbnb/airflow,Acehaidrey/incubator-airflow,jhsenjaliya/incubator-airflow,airbnb/airflow,adamhaney/airflow,mrares/incubator-airflow,Fokko/incubator-airflow,skudriashev/incubator-airflow,mistercrunch/airflow,mtagle/airflow,OpringaoDoTurno/airflow,dhuang/incubator-airflow,Twistbioscience/incubator-airflow,MortalViews/incubator-airflow,cjqian/incubator-airflow,artwr/airflow,Twistbioscience/incubator-airflow,wndhydrnt/airflow,bolkedebruin/airflow,yk5/incubator-airflow,bolkedebruin/airflow,lyft/incubator-airflow,jgao54/airflow,gilt/incubator-airflow,hgrif/incubator-airflow,nathanielvarona/airflow,wooga/airflow,sekikn/incubator-airflow,MortalViews/incubator-airflow,fenglu-g/incubator-airflow,ProstoMaxim/incubator-airflow,yk5/incubator-airflow,gtoonstra/airflow,ProstoMaxim/incubator-airflow,mrkm4ntr/incubator-airflow,skudriashev/incubator-airflow,artwr/airflow,akosel/incubator-airflow,gtoonstra/airflow,criccomini/airflow,spektom/incubator-airflow,yati-sagade/incubator-airflow,zack3241/incubator-airflow,apache/incubator-airflow,janczak10/incubator-airflow,CloverHealth/airflow,dmitry-r/incubator-airflow,Acehaidrey/incubator-airflow,lxneng/incubator-airflow,CloverHealth/airflow,wooga/airflow,owlabs/incubator-airflow,wolfier/incubator-airflow,wooga/airflow,MortalViews/incubator-airflow,adamhaney/airflow,adamhaney/airflow,malmiron/incubator-airflow,yati-sagade/incubator-airflow,CloverHealth/airflow,sid88in/incubator-airflow,janczak10/incubator-airflow,edgarRd/incubator-airflow,MetrodataTeam/incubator-airflow,andyxhadji/incubator-airflow,DinoCow/airflow,OpringaoDoTurno/airflow,yati-sagade/incubator-airflow,yk5/incubator-airflow,cfei18/incubator-airflow,cfei18/incubator-airflow,fenglu-g/incubator-airflow,apache/airflow,mrares/incubator-airflow,mrkm4ntr/incubator-airflow,danielvdende/incubator-airflow,akosel/incubator-airflow,apache/airflow,dhuang/incubator-airflow,yati-sagade/incubator-airflow,wndhydrnt/airflow,sergiohgz/incubator-airflow,andyxhadji/incubator-airflow,gilt/incubator-airflow,dhuang/incubator-airflow,bolkedebruin/airflow,jgao54/airflow,KL-WLCR/incubator-airflow,asnir/airflow,gilt/incubator-airflow,edgarRd/incubator-airflow,asnir/airflow,wolfier/incubator-airflow,wolfier/incubator-airflow,apache/airflow,cjqian/incubator-airflow,edgarRd/incubator-airflow,zack3241/incubator-airflow,cfei18/incubator-airflow,mtagle/airflow,ProstoMaxim/incubator-airflow,dhuang/incubator-airflow,cjqian/incubator-airflow,Tagar/incubator-airflow,mrkm4ntr/incubator-airflow,malmiron/incubator-airflow,Twistbioscience/incubator-airflow,lxneng/incubator-airflow,apache/incubator-airflow,Twistbioscience/incubator-airflow,danielvdende/incubator-airflow,danielvdende/incubator-airflow,r39132/airflow,janczak10/incubator-airflow,subodhchhabra/airflow,jfantom/incubator-airflow,criccomini/airflow,fenglu-g/incubator-airflow,airbnb/airflow,yk5/incubator-airflow,mistercrunch/airflow,apache/incubator-airflow,owlabs/incubator-airflow,RealImpactAnalytics/airflow,DinoCow/airflow,nathanielvarona/airflow,Acehaidrey/incubator-airflow,KL-WLCR/incubator-airflow,apache/airflow,nathanielvarona/airflow,MetrodataTeam/incubator-airflow,MetrodataTeam/incubator-airflow,sergiohgz/incubator-airflow,malmiron/incubator-airflow,zack3241/incubator-airflow,Tagar/incubator-airflow,skudriashev/incubator-airflow,spektom/incubator-airflow,wileeam/airflow,skudriashev/incubator-airflow,spektom/incubator-airflow,RealImpactAnalytics/airflow,jgao54/airflow,lyft/incubator-airflow,mtagle/airflow,sek
ikn/incubator-airflow,wileeam/airflow,Fokko/incubator-airflow,gtoonstra/airflow,artwr/airflow,akosel/incubator-airflow,artwr/airflow,OpringaoDoTurno/airflow,r39132/airflow,andyxhadji/incubator-airflow,RealImpactAnalytics/airflow,Tagar/incubator-airflow,wndhydrnt/airflow,KL-WLCR/incubator-airflow,jfantom/incubator-airflow,jfantom/incubator-airflow,gtoonstra/airflow,jhsenjaliya/incubator-airflow,sergiohgz/incubator-airflow,mrares/incubator-airflow,hgrif/incubator-airflow,jhsenjaliya/incubator-airflow,danielvdende/incubator-airflow,Tagar/incubator-airflow,sekikn/incubator-airflow,danielvdende/incubator-airflow,lxneng/incubator-airflow,sergiohgz/incubator-airflow,mrkm4ntr/incubator-airflow,adamhaney/airflow,dmitry-r/incubator-airflow,edgarRd/incubator-airflow,danielvdende/incubator-airflow,cfei18/incubator-airflow,r39132/airflow,CloverHealth/airflow,sekikn/incubator-airflow,airbnb/airflow,sid88in/incubator-airflow,lyft/incubator-airflow,criccomini/airflow,Acehaidrey/incubator-airflow,sid88in/incubator-airflow,bolkedebruin/airflow,subodhchhabra/airflow,MortalViews/incubator-airflow,akosel/incubator-airflow,ProstoMaxim/incubator-airflow,jfantom/incubator-airflow,zack3241/incubator-airflow,lxneng/incubator-airflow,fenglu-g/incubator-airflow,Acehaidrey/incubator-airflow,jgao54/airflow,MetrodataTeam/incubator-airflow,r39132/airflow,hgrif/incubator-airflow,wolfier/incubator-airflow,cfei18/incubator-airflow,cfei18/incubator-airflow,nathanielvarona/airflow,hgrif/incubator-airflow,malmiron/incubator-airflow,subodhchhabra/airflow,wndhydrnt/airflow,dmitry-r/incubator-airflow,spektom/incubator-airflow,cjqian/incubator-airflow,mrares/incubator-airflow,DinoCow/airflow,wileeam/airflow,mistercrunch/airflow,RealImpactAnalytics/airflow,subodhchhabra/airflow,bolkedebruin/airflow,Fokko/incubator-airflow,asnir/airflow,Fokko/incubator-airflow,mtagle/airflow,wileeam/airflow,owlabs/incubator-airflow,jhsenjaliya/incubator-airflow,nathanielvarona/airflow,apache/airflow,criccomini/airflow,OpringaoDoTurno/airflow,DinoCow/airflow,apache/airflow,dmitry-r/incubator-airflow,sid88in/incubator-airflow,nathanielvarona/airflow,andyxhadji/incubator-airflow,gilt/incubator-airflow,wooga/airflow,Acehaidrey/incubator-airflow,lyft/incubator-airflow,owlabs/incubator-airflow,apache/incubator-airflow,mistercrunch/airflow
|
[AIRFLOW-1495] Add TaskInstance index on job_id
Column job_id is unindexed in TaskInstance, it was
used as
default sort column in TaskInstanceView.
This commit adds the required migration to add the
index on
task_instance.job_id on future db upgrades.
Closes #2520 from edgarRd/erod-ti-jobid-index
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 7171349d4c73
Revises: cc1e65623dc7
Create Date: 2017-08-14 18:08:50.196042
"""
# revision identifiers, used by Alembic.
revision = '7171349d4c73'
down_revision = 'cc1e65623dc7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
|
<commit_before><commit_msg>[AIRFLOW-1495] Add TaskInstance index on job_id
Column job_id is unindexed in TaskInstance, it was
used as
default sort column in TaskInstanceView.
This commit adds the required migration to add the
index on
task_instance.job_id on future db upgrades.
Closes #2520 from edgarRd/erod-ti-jobid-index<commit_after>
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 7171349d4c73
Revises: cc1e65623dc7
Create Date: 2017-08-14 18:08:50.196042
"""
# revision identifiers, used by Alembic.
revision = '7171349d4c73'
down_revision = 'cc1e65623dc7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
|
[AIRFLOW-1495] Add TaskInstance index on job_id
Column job_id is unindexed in TaskInstance, it was
used as
default sort column in TaskInstanceView.
This commit adds the required migration to add the
index on
task_instance.job_id on future db upgrades.
Closes #2520 from edgarRd/erod-ti-jobid-index# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 7171349d4c73
Revises: cc1e65623dc7
Create Date: 2017-08-14 18:08:50.196042
"""
# revision identifiers, used by Alembic.
revision = '7171349d4c73'
down_revision = 'cc1e65623dc7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
|
<commit_before><commit_msg>[AIRFLOW-1495] Add TaskInstance index on job_id
Column job_id is unindexed in TaskInstance, it was
used as
default sort column in TaskInstanceView.
This commit adds the required migration to add the
index on
task_instance.job_id on future db upgrades.
Closes #2520 from edgarRd/erod-ti-jobid-index<commit_after># -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add ti job_id index
Revision ID: 7171349d4c73
Revises: cc1e65623dc7
Create Date: 2017-08-14 18:08:50.196042
"""
# revision identifiers, used by Alembic.
revision = '7171349d4c73'
down_revision = 'cc1e65623dc7'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ti_job_id', 'task_instance', ['job_id'], unique=False)
def downgrade():
op.drop_index('ti_job_id', table_name='task_instance')
|
|
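Aside on the Airflow migration above: after running the upgrade, the presence of the new index can be verified with SQLAlchemy's inspector. This is a generic, hedged sketch; the connection URL is a placeholder and the snippet is not part of the Airflow codebase.

from sqlalchemy import create_engine, inspect

# Placeholder URL; point this at the Airflow metadata database being migrated.
engine = create_engine('sqlite:///airflow.db')
indexes = inspect(engine).get_indexes('task_instance')
# After revision 7171349d4c73 an index named 'ti_job_id' on ['job_id'] should
# be listed; the downgrade drops it again.
print(any(ix['name'] == 'ti_job_id' for ix in indexes))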
e3b7b9e5f8ca1be061c71c764fd62d6aeed3fd43
|
tests/test_bqlmath.py
|
tests/test_bqlmath.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import apsw
import pytest
from bayeslite import bayesdb_open
from bayeslite import bqlmath
from bayeslite.math_util import abserr
from bayeslite.util import cursor_value
def get_python_math_call(name, probe):
func = bqlmath.bqlmath_funcs[name]
if isinstance(probe, tuple):
return func(*probe)
else:
return func(probe)
def get_sql_math_call(name, probe):
if isinstance(probe, tuple):
return 'SELECT %s%s' % (name, str(probe))
else:
return 'SELECT %s(%s)' % (name, probe)
PROBES_FLOAT = [-2.5, -1, -0.1, 0, 0.1, 1, 2.5]
PROBES_TUPLE = itertools.combinations(PROBES_FLOAT, 2)
PROBES = itertools.chain(PROBES_FLOAT, PROBES_TUPLE)
FUNCS = bqlmath.bqlmath_funcs.iterkeys()
@pytest.mark.parametrize('name,probe', itertools.product(FUNCS, PROBES))
def test_math_func_one_param(name, probe):
# Retrieve result from python.
python_value_error = None
python_type_error = None
try:
result_python = get_python_math_call(name, probe)
except ValueError:
python_value_error = True
except TypeError:
python_type_error = True
# Retrieve result from SQL.
sql_value_error = None
sql_type_error = None
try:
with bayesdb_open(':memory') as bdb:
cursor = bdb.execute(get_sql_math_call(name, probe))
result_sql = cursor_value(cursor)
except ValueError:
sql_value_error = True
except (TypeError, apsw.SQLError):
sql_type_error = True
# Domain error on both.
if python_value_error or sql_value_error:
assert python_value_error and sql_value_error
# Arity error on both.
elif python_type_error or sql_type_error:
assert python_type_error and sql_type_error
# Both invocations succeeded, confirm results match.
else:
assert abserr(result_python, result_sql) < 1e-4
|
Add test suite for bqlmath.
|
Add test suite for bqlmath.
|
Python
|
apache-2.0
|
probcomp/bayeslite,probcomp/bayeslite
|
Add test suite for bqlmath.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import apsw
import pytest
from bayeslite import bayesdb_open
from bayeslite import bqlmath
from bayeslite.math_util import abserr
from bayeslite.util import cursor_value
def get_python_math_call(name, probe):
func = bqlmath.bqlmath_funcs[name]
if isinstance(probe, tuple):
return func(*probe)
else:
return func(probe)
def get_sql_math_call(name, probe):
if isinstance(probe, tuple):
return 'SELECT %s%s' % (name, str(probe))
else:
return 'SELECT %s(%s)' % (name, probe)
PROBES_FLOAT = [-2.5, -1, -0.1, 0, 0.1, 1, 2.5]
PROBES_TUPLE = itertools.combinations(PROBES_FLOAT, 2)
PROBES = itertools.chain(PROBES_FLOAT, PROBES_TUPLE)
FUNCS = bqlmath.bqlmath_funcs.iterkeys()
@pytest.mark.parametrize('name,probe', itertools.product(FUNCS, PROBES))
def test_math_func_one_param(name, probe):
# Retrieve result from python.
python_value_error = None
python_type_error = None
try:
result_python = get_python_math_call(name, probe)
except ValueError:
python_value_error = True
except TypeError:
python_type_error = True
# Retrieve result from SQL.
sql_value_error = None
sql_type_error = None
try:
with bayesdb_open(':memory') as bdb:
cursor = bdb.execute(get_sql_math_call(name, probe))
result_sql = cursor_value(cursor)
except ValueError:
sql_value_error = True
except (TypeError, apsw.SQLError):
sql_type_error = True
# Domain error on both.
if python_value_error or sql_value_error:
assert python_value_error and sql_value_error
# Arity error on both.
elif python_type_error or sql_type_error:
assert python_type_error and sql_type_error
# Both invocations succeeded, confirm results match.
else:
assert abserr(result_python, result_sql) < 1e-4
|
<commit_before><commit_msg>Add test suite for bqlmath.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import apsw
import pytest
from bayeslite import bayesdb_open
from bayeslite import bqlmath
from bayeslite.math_util import abserr
from bayeslite.util import cursor_value
def get_python_math_call(name, probe):
func = bqlmath.bqlmath_funcs[name]
if isinstance(probe, tuple):
return func(*probe)
else:
return func(probe)
def get_sql_math_call(name, probe):
if isinstance(probe, tuple):
return 'SELECT %s%s' % (name, str(probe))
else:
return 'SELECT %s(%s)' % (name, probe)
PROBES_FLOAT = [-2.5, -1, -0.1, 0, 0.1, 1, 2.5]
PROBES_TUPLE = itertools.combinations(PROBES_FLOAT, 2)
PROBES = itertools.chain(PROBES_FLOAT, PROBES_TUPLE)
FUNCS = bqlmath.bqlmath_funcs.iterkeys()
@pytest.mark.parametrize('name,probe', itertools.product(FUNCS, PROBES))
def test_math_func_one_param(name, probe):
# Retrieve result from python.
python_value_error = None
python_type_error = None
try:
result_python = get_python_math_call(name, probe)
except ValueError:
python_value_error = True
except TypeError:
python_type_error = True
# Retrieve result from SQL.
sql_value_error = None
sql_type_error = None
try:
with bayesdb_open(':memory') as bdb:
cursor = bdb.execute(get_sql_math_call(name, probe))
result_sql = cursor_value(cursor)
except ValueError:
sql_value_error = True
except (TypeError, apsw.SQLError):
sql_type_error = True
# Domain error on both.
if python_value_error or sql_value_error:
assert python_value_error and sql_value_error
# Arity error on both.
elif python_type_error or sql_type_error:
assert python_type_error and sql_type_error
# Both invocations succeeded, confirm results match.
else:
assert abserr(result_python, result_sql) < 1e-4
|
Add test suite for bqlmath.# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import apsw
import pytest
from bayeslite import bayesdb_open
from bayeslite import bqlmath
from bayeslite.math_util import abserr
from bayeslite.util import cursor_value
def get_python_math_call(name, probe):
func = bqlmath.bqlmath_funcs[name]
if isinstance(probe, tuple):
return func(*probe)
else:
return func(probe)
def get_sql_math_call(name, probe):
if isinstance(probe, tuple):
return 'SELECT %s%s' % (name, str(probe))
else:
return 'SELECT %s(%s)' % (name, probe)
PROBES_FLOAT = [-2.5, -1, -0.1, 0, 0.1, 1, 2.5]
PROBES_TUPLE = itertools.combinations(PROBES_FLOAT, 2)
PROBES = itertools.chain(PROBES_FLOAT, PROBES_TUPLE)
FUNCS = bqlmath.bqlmath_funcs.iterkeys()
@pytest.mark.parametrize('name,probe', itertools.product(FUNCS, PROBES))
def test_math_func_one_param(name, probe):
# Retrieve result from python.
python_value_error = None
python_type_error = None
try:
result_python = get_python_math_call(name, probe)
except ValueError:
python_value_error = True
except TypeError:
python_type_error = True
# Retrieve result from SQL.
sql_value_error = None
sql_type_error = None
try:
with bayesdb_open(':memory') as bdb:
cursor = bdb.execute(get_sql_math_call(name, probe))
result_sql = cursor_value(cursor)
except ValueError:
sql_value_error = True
except (TypeError, apsw.SQLError):
sql_type_error = True
# Domain error on both.
if python_value_error or sql_value_error:
assert python_value_error and sql_value_error
# Arity error on both.
elif python_type_error or sql_type_error:
assert python_type_error and sql_type_error
# Both invocations succeeded, confirm results match.
else:
assert abserr(result_python, result_sql) < 1e-4
|
<commit_before><commit_msg>Add test suite for bqlmath.<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import apsw
import pytest
from bayeslite import bayesdb_open
from bayeslite import bqlmath
from bayeslite.math_util import abserr
from bayeslite.util import cursor_value
def get_python_math_call(name, probe):
func = bqlmath.bqlmath_funcs[name]
if isinstance(probe, tuple):
return func(*probe)
else:
return func(probe)
def get_sql_math_call(name, probe):
if isinstance(probe, tuple):
return 'SELECT %s%s' % (name, str(probe))
else:
return 'SELECT %s(%s)' % (name, probe)
PROBES_FLOAT = [-2.5, -1, -0.1, 0, 0.1, 1, 2.5]
PROBES_TUPLE = itertools.combinations(PROBES_FLOAT, 2)
PROBES = itertools.chain(PROBES_FLOAT, PROBES_TUPLE)
FUNCS = bqlmath.bqlmath_funcs.iterkeys()
@pytest.mark.parametrize('name,probe', itertools.product(FUNCS, PROBES))
def test_math_func_one_param(name, probe):
# Retrieve result from python.
python_value_error = None
python_type_error = None
try:
result_python = get_python_math_call(name, probe)
except ValueError:
python_value_error = True
except TypeError:
python_type_error = True
# Retrieve result from SQL.
sql_value_error = None
sql_type_error = None
try:
with bayesdb_open(':memory') as bdb:
cursor = bdb.execute(get_sql_math_call(name, probe))
result_sql = cursor_value(cursor)
except ValueError:
sql_value_error = True
except (TypeError, apsw.SQLError):
sql_type_error = True
# Domain error on both.
if python_value_error or sql_value_error:
assert python_value_error and sql_value_error
# Arity error on both.
elif python_type_error or sql_type_error:
assert python_type_error and sql_type_error
# Both invocations succeeded, confirm results match.
else:
assert abserr(result_python, result_sql) < 1e-4
|
|
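Aside on the bqlmath test suite above: the underlying pattern — evaluating the same function once in Python and once through SQL, then comparing the results — can be demonstrated without bayeslite by registering a Python callable with sqlite3. The snippet below is an illustrative sketch under that assumption, not part of the bayeslite tests.

import math
import sqlite3

def compare_python_and_sql(probe):
    # Evaluate math.log1p directly in Python.
    expected = math.log1p(probe)
    # Expose the same callable to SQLite and evaluate it through SQL.
    conn = sqlite3.connect(':memory:')
    conn.create_function('log1p', 1, math.log1p)
    (actual,) = conn.execute('SELECT log1p(?)', (probe,)).fetchone()
    conn.close()
    assert abs(expected - actual) < 1e-9

compare_python_and_sql(0.5)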
6be70d01bdf58389db2a6adc4035f82669d02a61
|
cms/plugins/googlemap/cms_plugins.py
|
cms/plugins/googlemap/cms_plugins.py
|
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from cms.plugins.googlemap import settings
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, request.LANGUAGE_CODE),))
plugin_pool.register_plugin(GoogleMapPlugin)
|
from django.conf import settings
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
lang = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE[0:2])
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, lang),))
plugin_pool.register_plugin(GoogleMapPlugin)
|
Allow use of GoogleMaps plugin without Multilingual support
|
Allow use of GoogleMaps plugin without Multilingual support
|
Python
|
bsd-3-clause
|
cyberintruder/django-cms,chmberl/django-cms,owers19856/django-cms,jproffitt/django-cms,vstoykov/django-cms,MagicSolutions/django-cms,isotoma/django-cms,jproffitt/django-cms,chkir/django-cms,stefanw/django-cms,jeffreylu9/django-cms,divio/django-cms,Vegasvikk/django-cms,jrief/django-cms,pbs/django-cms,farhaadila/django-cms,chrisglass/django-cms,kk9599/django-cms,nimbis/django-cms,rsalmaso/django-cms,vad/django-cms,rscnt/django-cms,memnonila/django-cms,intip/django-cms,MagicSolutions/django-cms,stefanfoulis/django-cms,jrclaramunt/django-cms,SinnerSchraderMobileMirrors/django-cms,selecsosi/django-cms,donce/django-cms,liuyisiyisi/django-cms,jeffreylu9/django-cms,pbs/django-cms,liuyisiyisi/django-cms,jrclaramunt/django-cms,divio/django-cms,stefanfoulis/django-cms,astagi/django-cms,datakortet/django-cms,intip/django-cms,DylannCordel/django-cms,frnhr/django-cms,adaptivelogic/django-cms,stefanw/django-cms,qnub/django-cms,timgraham/django-cms,stefanw/django-cms,memnonila/django-cms,mkoistinen/django-cms,jeffreylu9/django-cms,vad/django-cms,AlexProfi/django-cms,intgr/django-cms,ojii/django-cms,sephii/django-cms,philippze/django-cms,rscnt/django-cms,kk9599/django-cms,pbs/django-cms,jsma/django-cms,astagi/django-cms,rscnt/django-cms,czpython/django-cms,jeffreylu9/django-cms,foobacca/django-cms,foobacca/django-cms,selecsosi/django-cms,donce/django-cms,SinnerSchraderMobileMirrors/django-cms,SachaMPS/django-cms,chkir/django-cms,11craft/django-cms,youprofit/django-cms,petecummings/django-cms,wyg3958/django-cms,frnhr/django-cms,foobacca/django-cms,pixbuffer/django-cms,datakortet/django-cms,vxsx/django-cms,datakortet/django-cms,driesdesmet/django-cms,bittner/django-cms,astagi/django-cms,netzkolchose/django-cms,owers19856/django-cms,netzkolchose/django-cms,wuzhihui1123/django-cms,wyg3958/django-cms,cyberintruder/django-cms,SofiaReis/django-cms,benzkji/django-cms,nimbis/django-cms,rryan/django-cms,takeshineshiro/django-cms,robmagee/django-cms,FinalAngel/django-cms,nostalgiaz/django-cms,andyzsf/django-cms,frnhr/django-cms,adaptivelogic/django-cms,dhorelik/django-cms,iddqd1/django-cms,jsma/django-cms,frnhr/django-cms,11craft/django-cms,saintbird/django-cms,adaptivelogic/django-cms,memnonila/django-cms,jrief/django-cms,pbs/django-cms,takeshineshiro/django-cms,MagicSolutions/django-cms,Vegasvikk/django-cms,evildmp/django-cms,AlexProfi/django-cms,jsma/django-cms,divio/django-cms,rsalmaso/django-cms,isotoma/django-cms,petecummings/django-cms,vxsx/django-cms,Livefyre/django-cms,11craft/django-cms,liuyisiyisi/django-cms,jalaziz/django-cms-grappelli-old,keimlink/django-cms,czpython/django-cms,keimlink/django-cms,stefanfoulis/django-cms,petecummings/django-cms,takeshineshiro/django-cms,ScholzVolkmer/django-cms,timgraham/django-cms,chkir/django-cms,foobacca/django-cms,stefanw/django-cms,jalaziz/django-cms-grappelli-old,rryan/django-cms,ScholzVolkmer/django-cms,qnub/django-cms,benzkji/django-cms,ScholzVolkmer/django-cms,kk9599/django-cms,nimbis/django-cms,webu/django-cms,nostalgiaz/django-cms,nostalgiaz/django-cms,vad/django-cms,mkoistinen/django-cms,selecsosi/django-cms,jsma/django-cms,keimlink/django-cms,timgraham/django-cms,yakky/django-cms,isotoma/django-cms,vxsx/django-cms,jrclaramunt/django-cms,intgr/django-cms,pancentric/django-cms,sznekol/django-cms,pixbuffer/django-cms,leture/django-cms,mkoistinen/django-cms,irudayarajisawa/django-cms,czpython/django-cms,intip/django-cms,Jaccorot/django-cms,sephii/django-cms,irudayarajisawa/django-cms,jproffitt/django-cms,wuzhihui1123/django-cms,vstoykov/django-cms,rsalmaso/django
-cms,farhaadila/django-cms,vad/django-cms,SofiaReis/django-cms,360youlun/django-cms,DylannCordel/django-cms,intgr/django-cms,jrief/django-cms,leture/django-cms,SofiaReis/django-cms,yakky/django-cms,Vegasvikk/django-cms,owers19856/django-cms,mkoistinen/django-cms,saintbird/django-cms,yakky/django-cms,jrief/django-cms,robmagee/django-cms,pixbuffer/django-cms,philippze/django-cms,saintbird/django-cms,ojii/django-cms,360youlun/django-cms,webu/django-cms,sznekol/django-cms,dhorelik/django-cms,wuzhihui1123/django-cms,evildmp/django-cms,yakky/django-cms,Livefyre/django-cms,netzkolchose/django-cms,josjevv/django-cms,bittner/django-cms,rryan/django-cms,SmithsonianEnterprises/django-cms,evildmp/django-cms,farhaadila/django-cms,nimbis/django-cms,sznekol/django-cms,ojii/django-cms,sephii/django-cms,Livefyre/django-cms,rryan/django-cms,FinalAngel/django-cms,jalaziz/django-cms-grappelli-old,FinalAngel/django-cms,bittner/django-cms,SachaMPS/django-cms,webu/django-cms,pancentric/django-cms,driesdesmet/django-cms,benzkji/django-cms,divio/django-cms,isotoma/django-cms,VillageAlliance/django-cms,iddqd1/django-cms,intip/django-cms,evildmp/django-cms,chrisglass/django-cms,benzkji/django-cms,360youlun/django-cms,sephii/django-cms,pancentric/django-cms,datakortet/django-cms,dhorelik/django-cms,cyberintruder/django-cms,andyzsf/django-cms,iddqd1/django-cms,andyzsf/django-cms,SachaMPS/django-cms,czpython/django-cms,nostalgiaz/django-cms,robmagee/django-cms,vstoykov/django-cms,chmberl/django-cms,philippze/django-cms,wyg3958/django-cms,intgr/django-cms,andyzsf/django-cms,leture/django-cms,chmberl/django-cms,vxsx/django-cms,Livefyre/django-cms,FinalAngel/django-cms,DylannCordel/django-cms,josjevv/django-cms,11craft/django-cms,jproffitt/django-cms,selecsosi/django-cms,irudayarajisawa/django-cms,youprofit/django-cms,VillageAlliance/django-cms,rsalmaso/django-cms,netzkolchose/django-cms,SinnerSchraderMobileMirrors/django-cms,bittner/django-cms,VillageAlliance/django-cms,Jaccorot/django-cms,driesdesmet/django-cms,SmithsonianEnterprises/django-cms,donce/django-cms,youprofit/django-cms,AlexProfi/django-cms,wuzhihui1123/django-cms,qnub/django-cms,stefanfoulis/django-cms,Jaccorot/django-cms,josjevv/django-cms,SmithsonianEnterprises/django-cms
|
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from cms.plugins.googlemap import settings
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, request.LANGUAGE_CODE),))
plugin_pool.register_plugin(GoogleMapPlugin)Allow use of GoogleMaps plugin without Multilingual support
|
from django.conf import settings
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
lang = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE[0:2])
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, lang),))
plugin_pool.register_plugin(GoogleMapPlugin)
|
<commit_before>from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from cms.plugins.googlemap import settings
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, request.LANGUAGE_CODE),))
plugin_pool.register_plugin(GoogleMapPlugin)<commit_msg>Allow use of GoogleMaps plugin without Multilingual support<commit_after>
|
from django.conf import settings
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
lang = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE[0:2])
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, lang),))
plugin_pool.register_plugin(GoogleMapPlugin)
|
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from cms.plugins.googlemap import settings
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, request.LANGUAGE_CODE),))
plugin_pool.register_plugin(GoogleMapPlugin)Allow use of GoogleMaps plugin without Multilingual supportfrom django.conf import settings
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
lang = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE[0:2])
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, lang),))
plugin_pool.register_plugin(GoogleMapPlugin)
|
<commit_before>from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from cms.plugins.googlemap import settings
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, request.LANGUAGE_CODE),))
plugin_pool.register_plugin(GoogleMapPlugin)<commit_msg>Allow use of GoogleMaps plugin without Multilingual support<commit_after>from django.conf import settings
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from django.utils.translation import ugettext_lazy as _
from cms.plugins.googlemap.models import GoogleMap
from cms.plugins.googlemap.settings import GOOGLE_MAPS_API_KEY
from django.forms.widgets import Media
class GoogleMapPlugin(CMSPluginBase):
model = GoogleMap
name = _("Google Map")
render_template = "cms/plugins/googlemap.html"
def render(self, context, instance, placeholder):
context.update({
'object':instance,
'placeholder':placeholder,
})
return context
def get_plugin_media(self, request, context, plugin):
if 'GOOGLE_MAPS_API_KEY' in context:
key = context['GOOGLE_MAPS_API_KEY']
else:
key = GOOGLE_MAPS_API_KEY
lang = getattr(request, 'LANGUAGE_CODE', settings.LANGUAGE_CODE[0:2])
return Media(js = ('http://maps.google.com/maps?file=api&v=2&key=%s&hl=%s' % (key, lang),))
plugin_pool.register_plugin(GoogleMapPlugin)
|
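Aside on the django-cms change above: the fix relies on getattr falling back to a default when LocaleMiddleware has not populated request.LANGUAGE_CODE. The toy example below shows that behaviour in isolation; the dummy request class is purely illustrative.

class DummyRequest(object):
    """Stand-in for a request object that lacks LANGUAGE_CODE."""
    pass

request = DummyRequest()
# With no multilingual middleware the attribute is missing, so the getattr
# default (here the first two characters of a site-wide language code) is used.
lang = getattr(request, 'LANGUAGE_CODE', 'en-us'[0:2])
print(lang)  # -> 'en'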
c15c4a663c257cad6763cf92c50b7ad706017c74
|
evesrp/views/__init__.py
|
evesrp/views/__init__.py
|
from collections import OrderedDict
from urllib.parse import urlparse
import re
from flask import render_template, redirect, url_for, request, abort, jsonify,\
flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL
from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action
@app.route('/')
@login_required
def index():
return render_template('base.html')
|
from flask import render_template
from flask.ext.login import login_required
from .. import app
@app.route('/')
@login_required
def index():
return render_template('base.html')
|
Remove extraneous imports in the base view package
|
Remove extraneous imports in the base view package
|
Python
|
bsd-2-clause
|
eskwire/evesrp,eskwire/evesrp,paxswill/evesrp,eskwire/evesrp,paxswill/evesrp,paxswill/evesrp,eskwire/evesrp
|
from collections import OrderedDict
from urllib.parse import urlparse
import re
from flask import render_template, redirect, url_for, request, abort, jsonify,\
flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL
from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action
@app.route('/')
@login_required
def index():
return render_template('base.html')
Remove extraneous imports in the base view package
|
from flask import render_template
from flask.ext.login import login_required
from .. import app
@app.route('/')
@login_required
def index():
return render_template('base.html')
|
<commit_before>from collections import OrderedDict
from urllib.parse import urlparse
import re
from flask import render_template, redirect, url_for, request, abort, jsonify,\
flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL
from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action
@app.route('/')
@login_required
def index():
return render_template('base.html')
<commit_msg>Remove extraneous imports in the base view package<commit_after>
|
from flask import render_template
from flask.ext.login import login_required
from .. import app
@app.route('/')
@login_required
def index():
return render_template('base.html')
|
from collections import OrderedDict
from urllib.parse import urlparse
import re
from flask import render_template, redirect, url_for, request, abort, jsonify,\
flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL
from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action
@app.route('/')
@login_required
def index():
return render_template('base.html')
Remove extraneous imports in the base view packagefrom flask import render_template
from flask.ext.login import login_required
from .. import app
@app.route('/')
@login_required
def index():
return render_template('base.html')
|
<commit_before>from collections import OrderedDict
from urllib.parse import urlparse
import re
from flask import render_template, redirect, url_for, request, abort, jsonify,\
flash, Markup, session
from flask.views import View
from flask.ext.login import login_user, login_required, logout_user, \
current_user
from flask.ext.wtf import Form
from flask.ext.principal import identity_changed, AnonymousIdentity
from sqlalchemy.orm.exc import NoResultFound
from wtforms.fields import StringField, PasswordField, SelectField, \
SubmitField, TextAreaField, HiddenField
from wtforms.fields.html5 import URLField, DecimalField
from wtforms.widgets import HiddenInput
from wtforms.validators import InputRequired, ValidationError, AnyOf, URL
from .. import app, auth_methods, db, requests_session, killmail_sources
from ..auth import SubmitRequestsPermission, ReviewRequestsPermission, \
PayoutRequestsPermission, admin_permission
from ..auth.models import User, Group, Division, Pilot
from ..models import Request, Modifier, Action
@app.route('/')
@login_required
def index():
return render_template('base.html')
<commit_msg>Remove extraneous imports in the base view package<commit_after>from flask import render_template
from flask.ext.login import login_required
from .. import app
@app.route('/')
@login_required
def index():
return render_template('base.html')
|
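Aside on the evesrp cleanup above: imports that are no longer used can be found mechanically rather than by eye. A hedged sketch with pyflakes (assuming it is installed) follows; the path is the file from this record.

# pip install pyflakes, then either run `python -m pyflakes <file>` or:
from pyflakes.api import checkPath
checkPath('evesrp/views/__init__.py')  # reports names imported but never used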
295823afe17cedaa1934afbcd19d955974089c63
|
python/send.py
|
python/send.py
|
#!/usr/bin/env python
import pika
# Host in which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# The message to send.
MESSAGE = 'Hi there! This is a test message =)'
# Getting the connection using pika.
# Creating the channel.
# Declaring the queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(HOST))
channel = connection.channel()
channel.queue_declare(queue=QUEUE)
# Sends the 'MESSAGE' to the queue.
# Default empty 'exchange' with 'routing_key' equal to the queue name
# will route the message to that queue.
channel.publish(exchange='', routing_key=QUEUE, body=MESSAGE)
# The connection is closed.
connection.close()
|
Add producer written in Python
|
Add producer written in Python
|
Python
|
apache-2.0
|
jovannypcg/rabbitmq_usage,jovannypcg/rabbitmq_usage
|
Add producer written in Python
|
#!/usr/bin/env python
import pika
# Host in which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# The message to send.
MESSAGE = 'Hi there! This is a test message =)'
# Getting the connection using pika.
# Creating the channel.
# Declaring the queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(HOST))
channel = connection.channel()
channel.queue_declare(queue=QUEUE)
# Sends the 'MESSAGE' to the queue.
# Default empty 'exchange' with 'routing_key' equal to the queue name
# will route the message to that queue.
channel.publish(exchange='', routing_key=QUEUE, body=MESSAGE)
# The connection is closed.
connection.close()
|
<commit_before><commit_msg>Add producer written in Python<commit_after>
|
#!/usr/bin/env python
import pika
# Host in which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# The message to send.
MESSAGE = 'Hi there! This is a test message =)'
# Getting the connection using pika.
# Creating the channel.
# Declaring the queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(HOST))
channel = connection.channel()
channel.queue_declare(queue=QUEUE)
# Sends the 'MESSAGE' to the queue.
# Default empty 'exchange' with 'routing_key' equal to the queue name
# will route the message to that queue.
channel.publish(exchange='', routing_key=QUEUE, body=MESSAGE)
# The connection is closed.
connection.close()
|
Add producer written in Python#!/usr/bin/env python
import pika
# Host in which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# The message to send.
MESSAGE = 'Hi there! This is a test message =)'
# Getting the connection using pika.
# Creating the channel.
# Declaring the queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(HOST))
channel = connection.channel()
channel.queue_declare(queue=QUEUE)
# Sends the 'MESSAGE' to the queue.
# Default empty 'exchange' with 'routing_key' equal to the queue name
# will route the message to that queue.
channel.publish(exchange='', routing_key=QUEUE, body=MESSAGE)
# The connection is closed.
connection.close()
|
<commit_before><commit_msg>Add producer written in Python<commit_after>#!/usr/bin/env python
import pika
# Host in which RabbitMQ is running.
HOST = 'localhost'
# Name of the queue.
QUEUE = 'pages'
# The message to send.
MESSAGE = 'Hi there! This is a test message =)'
# Getting the connection using pika.
# Creating the channel.
# Declaring the queue.
connection = pika.BlockingConnection(pika.ConnectionParameters(HOST))
channel = connection.channel()
channel.queue_declare(queue=QUEUE)
# Sends the 'MESSAGE' to the queue.
# Default empty 'exchange' with 'routing_key' equal to the queue name
# will route the message to that queue.
channel.basic_publish(exchange='', routing_key=QUEUE, body=MESSAGE)
# The connection is closed.
connection.close()
|
|
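For context, a minimal consumer counterpart (not part of the commit above) might look like the sketch below; it assumes pika 1.x and the same broker host and queue name as the producer.
#!/usr/bin/env python
# Illustrative consumer sketch for the producer above (assumes pika 1.x).
import pika
HOST = 'localhost'
QUEUE = 'pages'
def on_message(channel, method, properties, body):
    # The producer sends plain text, so just print the payload.
    print(body)
connection = pika.BlockingConnection(pika.ConnectionParameters(HOST))
channel = connection.channel()
# queue_declare is idempotent, so both producer and consumer may declare it.
channel.queue_declare(queue=QUEUE)
channel.basic_consume(queue=QUEUE, on_message_callback=on_message, auto_ack=True)
channel.start_consuming()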
2aa7a6260d9d5a74ee81677be2bd5f97774f9116
|
calexicon/internal/tests/test_gregorian.py
|
calexicon/internal/tests/test_gregorian.py
|
import unittest
from calexicon.internal.gregorian import is_gregorian_leap_year
class TestGregorian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_gregorian_leap_year(2000))
self.assertTrue(is_gregorian_leap_year(1984))
self.assertFalse(is_gregorian_leap_year(1900))
self.assertFalse(is_gregorian_leap_year(1901))
|
Add tests for internal gregorian functions.
|
Add tests for internal gregorian functions.
|
Python
|
apache-2.0
|
jwg4/calexicon,jwg4/qual
|
Add tests for internal gregorian functions.
|
import unittest
from calexicon.internal.gregorian import is_gregorian_leap_year
class TestGregorian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_gregorian_leap_year(2000))
self.assertTrue(is_gregorian_leap_year(1984))
self.assertFalse(is_gregorian_leap_year(1900))
self.assertFalse(is_gregorian_leap_year(1901))
|
<commit_before><commit_msg>Add tests for internal gregorian functions.<commit_after>
|
import unittest
from calexicon.internal.gregorian import is_gregorian_leap_year
class TestGregorian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_gregorian_leap_year(2000))
self.assertTrue(is_gregorian_leap_year(1984))
self.assertFalse(is_gregorian_leap_year(1900))
self.assertFalse(is_gregorian_leap_year(1901))
|
Add tests for internal gregorian functions.import unittest
from calexicon.internal.gregorian import is_gregorian_leap_year
class TestGregorian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_gregorian_leap_year(2000))
self.assertTrue(is_gregorian_leap_year(1984))
self.assertFalse(is_gregorian_leap_year(1900))
self.assertFalse(is_gregorian_leap_year(1901))
|
<commit_before><commit_msg>Add tests for internal gregorian functions.<commit_after>import unittest
from calexicon.internal.gregorian import is_gregorian_leap_year
class TestGregorian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_gregorian_leap_year(2000))
self.assertTrue(is_gregorian_leap_year(1984))
self.assertFalse(is_gregorian_leap_year(1900))
self.assertFalse(is_gregorian_leap_year(1901))
|
|
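The assertions above pin down the standard Gregorian rule (every fourth year, except centuries not divisible by 400). A minimal sketch consistent with those tests, which may differ from calexicon's actual implementation:
def is_gregorian_leap_year(year):
    # Leap every 4th year, skipping centuries unless divisible by 400.
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
assert is_gregorian_leap_year(2000) and is_gregorian_leap_year(1984)
assert not is_gregorian_leap_year(1900) and not is_gregorian_leap_year(1901)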
db81e8ca0b0321994f188daf45211e6ae2dda4a4
|
dengue/utils/make_titer_strain_control.py
|
dengue/utils/make_titer_strain_control.py
|
from Bio import SeqIO
from pprint import pprint
with open('../../data/dengue_titers.tsv', 'r') as f:
titerstrains = set([ line.split()[0] for line in f ])
with open('../../data/dengue_titers.tsv', 'r') as f:
serastrains = set([ line.split()[1] for line in f ])
autologous = titerstrains.intersection(serastrains)
print len(autologous)
strains_with_titers = [s for s in SeqIO.parse(open('../../data/dengue.fasta', 'r'), 'fasta') if s.description.split('|')[0] in autologous ]
SeqIO.write(strains_with_titers, '../../data/control.fasta', 'fasta')
print 'Found %d strains with autologous titers and sequence data.'%len(strains_with_titers)
|
Make a control dataset that only contains sequences with titer data.
|
Make a control dataset that only contains sequences with titer data.
|
Python
|
agpl-3.0
|
nextstrain/augur,blab/nextstrain-augur,nextstrain/augur,nextstrain/augur
|
Make a control dataset that only contains sequences with titer data.
|
from Bio import SeqIO
from pprint import pprint
with open('../../data/dengue_titers.tsv', 'r') as f:
titerstrains = set([ line.split()[0] for line in f ])
with open('../../data/dengue_titers.tsv', 'r') as f:
serastrains = set([ line.split()[1] for line in f ])
autologous = titerstrains.intersection(serastrains)
print len(autologous)
strains_with_titers = [s for s in SeqIO.parse(open('../../data/dengue.fasta', 'r'), 'fasta') if s.description.split('|')[0] in autologous ]
SeqIO.write(strains_with_titers, '../../data/control.fasta', 'fasta')
print 'Found %d strains with autologous titers and sequence data.'%len(strains_with_titers)
|
<commit_before><commit_msg>Make a control dataset that only contains sequences with titer data.<commit_after>
|
from Bio import SeqIO
from pprint import pprint
with open('../../data/dengue_titers.tsv', 'r') as f:
titerstrains = set([ line.split()[0] for line in f ])
with open('../../data/dengue_titers.tsv', 'r') as f:
serastrains = set([ line.split()[1] for line in f ])
autologous = titerstrains.intersection(serastrains)
print len(autologous)
strains_with_titers = [s for s in SeqIO.parse(open('../../data/dengue.fasta', 'r'), 'fasta') if s.description.split('|')[0] in autologous ]
SeqIO.write(strains_with_titers, '../../data/control.fasta', 'fasta')
print 'Found %d strains with autologous titers and sequence data.'%len(strains_with_titers)
|
Make a control dataset that only contains sequences with titer data.from Bio import SeqIO
from pprint import pprint
with open('../../data/dengue_titers.tsv', 'r') as f:
titerstrains = set([ line.split()[0] for line in f ])
with open('../../data/dengue_titers.tsv', 'r') as f:
serastrains = set([ line.split()[1] for line in f ])
autologous = titerstrains.intersection(serastrains)
print len(autologous)
strains_with_titers = [s for s in SeqIO.parse(open('../../data/dengue.fasta', 'r'), 'fasta') if s.description.split('|')[0] in autologous ]
SeqIO.write(strains_with_titers, '../../data/control.fasta', 'fasta')
print 'Found %d strains with autologous titers and sequence data.'%len(strains_with_titers)
|
<commit_before><commit_msg>Make a control dataset that only contains sequences with titer data.<commit_after>from Bio import SeqIO
from pprint import pprint
with open('../../data/dengue_titers.tsv', 'r') as f:
titerstrains = set([ line.split()[0] for line in f ])
with open('../../data/dengue_titers.tsv', 'r') as f:
serastrains = set([ line.split()[1] for line in f ])
autologous = titerstrains.intersection(serastrains)
print len(autologous)
strains_with_titers = [s for s in SeqIO.parse(open('../../data/dengue.fasta', 'r'), 'fasta') if s.description.split('|')[0] in autologous ]
SeqIO.write(strains_with_titers, '../../data/control.fasta', 'fasta')
print 'Found %d strains with autologous titers and sequence data.'%len(strains_with_titers)
|
|
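The script above reads dengue_titers.tsv twice, once per strain column. A single pass collects both sets at once; this sketch assumes every line has at least two whitespace-separated fields, as the original code already does:
# Single-pass variant of the two set comprehensions above.
titerstrains, serastrains = set(), set()
with open('../../data/dengue_titers.tsv', 'r') as f:
    for line in f:
        fields = line.split()
        titerstrains.add(fields[0])
        serastrains.add(fields[1])
autologous = titerstrains.intersection(serastrains)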
e0b84a97e4c7ad5dcef336080657a884cff603fc
|
tests/gl_test_2.py
|
tests/gl_test_2.py
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
Test two windows drawing GL with different contexts.
|
Test two windows drawing GL with different contexts.
--HG--
extra : convert_revision : svn%3A14d46d22-621c-0410-bb3d-6f67920f7d95/trunk%4045
|
Python
|
bsd-3-clause
|
infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore,infowantstobeseen/pyglet-darwincore
|
Test two windows drawing GL with different contexts.
--HG--
extra : convert_revision : svn%3A14d46d22-621c-0410-bb3d-6f67920f7d95/trunk%4045
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
<commit_before><commit_msg>Test two windows drawing GL with different contexts.
--HG--
extra : convert_revision : svn%3A14d46d22-621c-0410-bb3d-6f67920f7d95/trunk%4045<commit_after>
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
Test two windows drawing GL with different contexts.
--HG--
extra : convert_revision : svn%3A14d46d22-621c-0410-bb3d-6f67920f7d95/trunk%4045#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
<commit_before><commit_msg>Test two windows drawing GL with different contexts.
--HG--
extra : convert_revision : svn%3A14d46d22-621c-0410-bb3d-6f67920f7d95/trunk%4045<commit_after>#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
|
8affb8e4a3744e604b88157a918ef690203cbfa8
|
zerver/migrations/0375_invalid_characters_in_stream_names.py
|
zerver/migrations/0375_invalid_characters_in_stream_names.py
|
import unicodedata
from django.db import connection, migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
# There are 66 Unicode non-characters; see
# https://www.unicode.org/faq/private_use.html#nonchar4
unicode_non_chars = set(
chr(x)
for x in list(range(0xFDD0, 0xFDF0)) # FDD0 through FDEF, inclusive
+ list(range(0xFFFE, 0x110000, 0x10000)) # 0xFFFE, 0x1FFFE, ... 0x10FFFE inclusive
+ list(range(0xFFFF, 0x110000, 0x10000)) # 0xFFFF, 0x1FFFF, ... 0x10FFFF inclusive
)
def character_is_printable(character: str) -> bool:
return not (unicodedata.category(character) in ["Cc", "Cs"] or character in unicode_non_chars)
def fix_stream_names(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
Realm = apps.get_model("zerver", "Realm")
total_fixed_count = 0
realm_ids = Realm.objects.values_list("id", flat=True)
if len(realm_ids) == 0:
return
print("")
for realm_id in realm_ids:
print(f"Processing realm {realm_id}")
realm_stream_dicts = Stream.objects.filter(realm_id=realm_id).values("id", "name")
occupied_stream_names = set(stream_dict["name"] for stream_dict in realm_stream_dicts)
for stream_dict in realm_stream_dicts:
stream_name = stream_dict["name"]
fixed_stream_name = "".join(
[
character if character_is_printable(character) else "\N{REPLACEMENT CHARACTER}"
for character in stream_name
]
)
if fixed_stream_name == stream_name:
continue
if fixed_stream_name == "":
fixed_stream_name = "(no name)"
# The process of stripping invalid characters can lead to collisions,
# with the new stream name being the same as the name of another existing stream.
# We append underscore until the name no longer conflicts.
while fixed_stream_name in occupied_stream_names:
fixed_stream_name += "_"
occupied_stream_names.add(fixed_stream_name)
total_fixed_count += 1
with connection.cursor() as cursor:
cursor.execute(
"UPDATE zerver_stream SET name = %s WHERE id = %s",
[fixed_stream_name, stream_dict["id"]],
)
print(f"Fixed {total_fixed_count} stream names")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0374_backfill_user_delete_realmauditlog"),
]
operations = [
migrations.RunPython(fix_stream_names, reverse_code=migrations.RunPython.noop),
]
|
Remove disallowed characters from stream names.
|
migrations: Remove disallowed characters from stream names.
character_is_printable logic is taken from similar work by @madrix01
|
Python
|
apache-2.0
|
zulip/zulip,andersk/zulip,zulip/zulip,rht/zulip,rht/zulip,andersk/zulip,kou/zulip,zulip/zulip,andersk/zulip,rht/zulip,zulip/zulip,rht/zulip,andersk/zulip,andersk/zulip,zulip/zulip,zulip/zulip,rht/zulip,kou/zulip,kou/zulip,zulip/zulip,kou/zulip,kou/zulip,kou/zulip,andersk/zulip,rht/zulip,rht/zulip,kou/zulip,andersk/zulip
|
migrations: Remove disallowed characters from stream names.
character_is_printable logic is taken from similar work by @madrix01
|
import unicodedata
from django.db import connection, migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
# There are 66 Unicode non-characters; see
# https://www.unicode.org/faq/private_use.html#nonchar4
unicode_non_chars = set(
chr(x)
for x in list(range(0xFDD0, 0xFDF0)) # FDD0 through FDEF, inclusive
+ list(range(0xFFFE, 0x110000, 0x10000)) # 0xFFFE, 0x1FFFE, ... 0x10FFFE inclusive
+ list(range(0xFFFF, 0x110000, 0x10000)) # 0xFFFF, 0x1FFFF, ... 0x10FFFF inclusive
)
def character_is_printable(character: str) -> bool:
return not (unicodedata.category(character) in ["Cc", "Cs"] or character in unicode_non_chars)
def fix_stream_names(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
Realm = apps.get_model("zerver", "Realm")
total_fixed_count = 0
realm_ids = Realm.objects.values_list("id", flat=True)
if len(realm_ids) == 0:
return
print("")
for realm_id in realm_ids:
print(f"Processing realm {realm_id}")
realm_stream_dicts = Stream.objects.filter(realm_id=realm_id).values("id", "name")
occupied_stream_names = set(stream_dict["name"] for stream_dict in realm_stream_dicts)
for stream_dict in realm_stream_dicts:
stream_name = stream_dict["name"]
fixed_stream_name = "".join(
[
character if character_is_printable(character) else "\N{REPLACEMENT CHARACTER}"
for character in stream_name
]
)
if fixed_stream_name == stream_name:
continue
if fixed_stream_name == "":
fixed_stream_name = "(no name)"
# The process of stripping invalid characters can lead to collisions,
# with the new stream name being the same as the name of another existing stream.
# We append underscore until the name no longer conflicts.
while fixed_stream_name in occupied_stream_names:
fixed_stream_name += "_"
occupied_stream_names.add(fixed_stream_name)
total_fixed_count += 1
with connection.cursor() as cursor:
cursor.execute(
"UPDATE zerver_stream SET name = %s WHERE id = %s",
[fixed_stream_name, stream_dict["id"]],
)
print(f"Fixed {total_fixed_count} stream names")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0374_backfill_user_delete_realmauditlog"),
]
operations = [
migrations.RunPython(fix_stream_names, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>migrations: Remove disallowed characters from stream names.
character_is_printable logic is taken from similar work by @madrix01<commit_after>
|
import unicodedata
from django.db import connection, migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
# There are 66 Unicode non-characters; see
# https://www.unicode.org/faq/private_use.html#nonchar4
unicode_non_chars = set(
chr(x)
for x in list(range(0xFDD0, 0xFDF0)) # FDD0 through FDEF, inclusive
+ list(range(0xFFFE, 0x110000, 0x10000)) # 0xFFFE, 0x1FFFE, ... 0x10FFFE inclusive
+ list(range(0xFFFF, 0x110000, 0x10000)) # 0xFFFF, 0x1FFFF, ... 0x10FFFF inclusive
)
def character_is_printable(character: str) -> bool:
return not (unicodedata.category(character) in ["Cc", "Cs"] or character in unicode_non_chars)
def fix_stream_names(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
Realm = apps.get_model("zerver", "Realm")
total_fixed_count = 0
realm_ids = Realm.objects.values_list("id", flat=True)
if len(realm_ids) == 0:
return
print("")
for realm_id in realm_ids:
print(f"Processing realm {realm_id}")
realm_stream_dicts = Stream.objects.filter(realm_id=realm_id).values("id", "name")
occupied_stream_names = set(stream_dict["name"] for stream_dict in realm_stream_dicts)
for stream_dict in realm_stream_dicts:
stream_name = stream_dict["name"]
fixed_stream_name = "".join(
[
character if character_is_printable(character) else "\N{REPLACEMENT CHARACTER}"
for character in stream_name
]
)
if fixed_stream_name == stream_name:
continue
if fixed_stream_name == "":
fixed_stream_name = "(no name)"
# The process of stripping invalid characters can lead to collisions,
# with the new stream name being the same as the name of another existing stream.
# We append underscore until the name no longer conflicts.
while fixed_stream_name in occupied_stream_names:
fixed_stream_name += "_"
occupied_stream_names.add(fixed_stream_name)
total_fixed_count += 1
with connection.cursor() as cursor:
cursor.execute(
"UPDATE zerver_stream SET name = %s WHERE id = %s",
[fixed_stream_name, stream_dict["id"]],
)
print(f"Fixed {total_fixed_count} stream names")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0374_backfill_user_delete_realmauditlog"),
]
operations = [
migrations.RunPython(fix_stream_names, reverse_code=migrations.RunPython.noop),
]
|
migrations: Remove disallowed characters from stream names.
character_is_printable logic is taken from similar work by @madrix01import unicodedata
from django.db import connection, migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
# There are 66 Unicode non-characters; see
# https://www.unicode.org/faq/private_use.html#nonchar4
unicode_non_chars = set(
chr(x)
for x in list(range(0xFDD0, 0xFDF0)) # FDD0 through FDEF, inclusive
+ list(range(0xFFFE, 0x110000, 0x10000)) # 0xFFFE, 0x1FFFE, ... 0x10FFFE inclusive
+ list(range(0xFFFF, 0x110000, 0x10000)) # 0xFFFF, 0x1FFFF, ... 0x10FFFF inclusive
)
def character_is_printable(character: str) -> bool:
return not (unicodedata.category(character) in ["Cc", "Cs"] or character in unicode_non_chars)
def fix_stream_names(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
Realm = apps.get_model("zerver", "Realm")
total_fixed_count = 0
realm_ids = Realm.objects.values_list("id", flat=True)
if len(realm_ids) == 0:
return
print("")
for realm_id in realm_ids:
print(f"Processing realm {realm_id}")
realm_stream_dicts = Stream.objects.filter(realm_id=realm_id).values("id", "name")
occupied_stream_names = set(stream_dict["name"] for stream_dict in realm_stream_dicts)
for stream_dict in realm_stream_dicts:
stream_name = stream_dict["name"]
fixed_stream_name = "".join(
[
character if character_is_printable(character) else "\N{REPLACEMENT CHARACTER}"
for character in stream_name
]
)
if fixed_stream_name == stream_name:
continue
if fixed_stream_name == "":
fixed_stream_name = "(no name)"
# The process of stripping invalid characters can lead to collisions,
# with the new stream name being the same as the name of another existing stream.
# We append underscore until the name no longer conflicts.
while fixed_stream_name in occupied_stream_names:
fixed_stream_name += "_"
occupied_stream_names.add(fixed_stream_name)
total_fixed_count += 1
with connection.cursor() as cursor:
cursor.execute(
"UPDATE zerver_stream SET name = %s WHERE id = %s",
[fixed_stream_name, stream_dict["id"]],
)
print(f"Fixed {total_fixed_count} stream names")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0374_backfill_user_delete_realmauditlog"),
]
operations = [
migrations.RunPython(fix_stream_names, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>migrations: Remove disallowed characters from stream names.
character_is_printable logic is taken from similar work by @madrix01<commit_after>import unicodedata
from django.db import connection, migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
# There are 66 Unicode non-characters; see
# https://www.unicode.org/faq/private_use.html#nonchar4
unicode_non_chars = set(
chr(x)
for x in list(range(0xFDD0, 0xFDF0)) # FDD0 through FDEF, inclusive
+ list(range(0xFFFE, 0x110000, 0x10000)) # 0xFFFE, 0x1FFFE, ... 0x10FFFE inclusive
+ list(range(0xFFFF, 0x110000, 0x10000)) # 0xFFFF, 0x1FFFF, ... 0x10FFFF inclusive
)
def character_is_printable(character: str) -> bool:
return not (unicodedata.category(character) in ["Cc", "Cs"] or character in unicode_non_chars)
def fix_stream_names(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
Realm = apps.get_model("zerver", "Realm")
total_fixed_count = 0
realm_ids = Realm.objects.values_list("id", flat=True)
if len(realm_ids) == 0:
return
print("")
for realm_id in realm_ids:
print(f"Processing realm {realm_id}")
realm_stream_dicts = Stream.objects.filter(realm_id=realm_id).values("id", "name")
occupied_stream_names = set(stream_dict["name"] for stream_dict in realm_stream_dicts)
for stream_dict in realm_stream_dicts:
stream_name = stream_dict["name"]
fixed_stream_name = "".join(
[
character if character_is_printable(character) else "\N{REPLACEMENT CHARACTER}"
for character in stream_name
]
)
if fixed_stream_name == stream_name:
continue
if fixed_stream_name == "":
fixed_stream_name = "(no name)"
# The process of stripping invalid characters can lead to collisions,
# with the new stream name being the same as the name of another existing stream.
# We append underscore until the name no longer conflicts.
while fixed_stream_name in occupied_stream_names:
fixed_stream_name += "_"
occupied_stream_names.add(fixed_stream_name)
total_fixed_count += 1
with connection.cursor() as cursor:
cursor.execute(
"UPDATE zerver_stream SET name = %s WHERE id = %s",
[fixed_stream_name, stream_dict["id"]],
)
print(f"Fixed {total_fixed_count} stream names")
class Migration(migrations.Migration):
atomic = False
dependencies = [
("zerver", "0374_backfill_user_delete_realmauditlog"),
]
operations = [
migrations.RunPython(fix_stream_names, reverse_code=migrations.RunPython.noop),
]
|
|
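To make the replacement rule in the migration above concrete, here is a standalone illustration (not part of the migration itself) in which a control character and a Unicode non-character are both mapped to U+FFFD; the stream name is invented:
import unicodedata
# Same definition as in the migration: the 66 Unicode non-characters.
unicode_non_chars = set(
    chr(x)
    for x in list(range(0xFDD0, 0xFDF0))
    + list(range(0xFFFE, 0x110000, 0x10000))
    + list(range(0xFFFF, 0x110000, 0x10000))
)
def character_is_printable(character):
    return not (unicodedata.category(character) in ["Cc", "Cs"] or character in unicode_non_chars)
name = "weekly\x07sync\ufdd0"  # contains a BEL control character and U+FDD0
fixed = "".join(c if character_is_printable(c) else "\N{REPLACEMENT CHARACTER}" for c in name)
print(fixed)  # both offending characters become the replacement character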
12c483953f39a3bacaab6d49ba17c4920db52179
|
firecares/firestation/management/commands/cleanup_phonenumbers.py
|
firecares/firestation/management/commands/cleanup_phonenumbers.py
|
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
Add script to clean up all FD phone and fax numbers.
|
Add script to clean up all FD phone and fax numbers.
|
Python
|
mit
|
FireCARES/firecares,HunterConnelly/firecares,HunterConnelly/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,FireCARES/firecares,HunterConnelly/firecares,HunterConnelly/firecares
|
Add script to clean up all FD phone and fax numbers.
|
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
<commit_before><commit_msg>Add script to clean up all FD phone and fax numbers.<commit_after>
|
from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
Add script to clean up all FD phone and fax numbers.from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
<commit_before><commit_msg>Add script to clean up all FD phone and fax numbers.<commit_after>from django.core.management.base import BaseCommand
from firecares.firestation.models import FireDepartment
from phonenumber_field.modelfields import PhoneNumber
import re
"""
This command is for cleaning up every phone and fax number in the
database. It removes all non-numeric characters, such as parentheses,
hyphens, spaces, etc. It also removes prefixed 1s. These numbers should
be made human-readable on the client side.
"""
def cleanNumber(no1):
no2 = re.sub('[^0-9]','', no1)
if no2.startswith("1"):
no2 = no2[1:]
return no2
class Command(BaseCommand):
def handle(self, *args, **kwargs):
print("Don't worry, it always takes this long.")
for fd in FireDepartment.objects.all():
# If the FD has a phone number, clean it up
if fd.headquarters_phone and not fd.headquarters_phone.raw_input == "Invalid Input":
newPhone = cleanNumber(fd.headquarters_phone.raw_input)
print(newPhone)
fd.headquarters_phone = newPhone
# If the FD has a fax number, clean it up
if fd.headquarters_fax and not fd.headquarters_fax.raw_input == "Invalid Input":
newFax = cleanNumber(fd.headquarters_fax.raw_input)
print(newFax)
fd.headquarters_fax = newFax
# Save and continue to the next FD (if any)
fd.save()
print("Completed successfully!")
|
|
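A few sample inputs make the behaviour of cleanNumber above concrete; the phone numbers are made up for illustration:
import re
def cleanNumber(no1):
    # Same logic as the management command above.
    no2 = re.sub('[^0-9]', '', no1)
    if no2.startswith("1"):
        no2 = no2[1:]
    return no2
assert cleanNumber("(703) 555-0123") == "7035550123"   # punctuation stripped
assert cleanNumber("1-703-555-0123") == "7035550123"   # leading 1 dropped
assert cleanNumber("+1 703 555 0123") == "7035550123"  # plus sign and spaces stripped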
7d128f2386fd3bbcbff1a407018f9ab9ed580810
|
tests/test_path.py
|
tests/test_path.py
|
from gypsy.path import _join
def test_join():
assert _join('s3://', 'bucket', 'prefix') == 's3://bucket/prefix'
assert _join('s3://bucket', 'prefix') == 's3://bucket/prefix'
assert _join('bucket', 'prefix') == 'bucket/prefix'
|
Add tests for path join
|
Add tests for path join
|
Python
|
mit
|
tesera/pygypsy,tesera/pygypsy
|
Add tests for path join
|
from gypsy.path import _join
def test_join():
assert _join('s3://', 'bucket', 'prefix') == 's3://bucket/prefix'
assert _join('s3://bucket', 'prefix') == 's3://bucket/prefix'
assert _join('bucket', 'prefix') == 'bucket/prefix'
|
<commit_before><commit_msg>Add tests for path join<commit_after>
|
from gypsy.path import _join
def test_join():
assert _join('s3://', 'bucket', 'prefix') == 's3://bucket/prefix'
assert _join('s3://bucket', 'prefix') == 's3://bucket/prefix'
assert _join('bucket', 'prefix') == 'bucket/prefix'
|
Add tests for path joinfrom gypsy.path import _join
def test_join():
assert _join('s3://', 'bucket', 'prefix') == 's3://bucket/prefix'
assert _join('s3://bucket', 'prefix') == 's3://bucket/prefix'
assert _join('bucket', 'prefix') == 'bucket/prefix'
|
<commit_before><commit_msg>Add tests for path join<commit_after>from gypsy.path import _join
def test_join():
assert _join('s3://', 'bucket', 'prefix') == 's3://bucket/prefix'
assert _join('s3://bucket', 'prefix') == 's3://bucket/prefix'
assert _join('bucket', 'prefix') == 'bucket/prefix'
|
|
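The tests above only fix _join's observable behaviour; one sketch that satisfies them (the real pygypsy helper may handle more cases) is:
def _join(*parts):
    # Join path components with '/', keeping a scheme prefix such as 's3://' intact.
    result = parts[0]
    for part in parts[1:]:
        if result.endswith('://'):
            result += part
        else:
            result = result.rstrip('/') + '/' + part
    return result
assert _join('s3://', 'bucket', 'prefix') == 's3://bucket/prefix'
assert _join('s3://bucket', 'prefix') == 's3://bucket/prefix'
assert _join('bucket', 'prefix') == 'bucket/prefix'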
08988d19c712ad4604f0acced71a069c7c20067a
|
zou/app/stores/file_store.py
|
zou/app/stores/file_store.py
|
import flask_fs as fs
from zou.app import app
pictures = fs.Storage("pictures", overwrite=True)
movies = fs.Storage("movies", overwrite=True)
pictures.configure(app)
movies.configure(app)
def make_key(prefix, id):
return "%s-%s" % (prefix, id)
def add_picture(prefix, id, path):
key = make_key(prefix, id)
with open(path, 'rb') as fd:
return pictures.write(key, fd)
def get_picture(prefix, id):
key = make_key(prefix, id)
return pictures.read(key)
def open_picture(prefix, id):
key = make_key(prefix, id)
return pictures.open(key, 'rb')
def exists_picture(prefix, id):
key = make_key(prefix, id)
return pictures.exists(key)
def remove_picture(prefix, id):
key = make_key(prefix, id)
pictures.delete(key)
def add_movie(prefix, id, content):
key = make_key(prefix, id)
return movies.write(key, content)
def get_movie(prefix, id):
key = make_key(prefix, id)
return movies.read(key)
def open_movie(prefix, id):
key = make_key(prefix, id)
return movies.open(key, 'rb')
def exists_movie(prefix, id):
key = make_key(prefix, id)
return movies.exists(key)
def remove_movie(prefix, id):
key = make_key(prefix, id)
movies.delete(key)
|
Add kv store for file storage
|
Add kv store for file storage
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add kv store for file storage
|
import flask_fs as fs
from zou.app import app
pictures = fs.Storage("pictures", overwrite=True)
movies = fs.Storage("movies", overwrite=True)
pictures.configure(app)
movies.configure(app)
def make_key(prefix, id):
return "%s-%s" % (prefix, id)
def add_picture(prefix, id, path):
key = make_key(prefix, id)
with open(path, 'rb') as fd:
return pictures.write(key, fd)
def get_picture(prefix, id):
key = make_key(prefix, id)
return pictures.read(key)
def open_picture(prefix, id):
key = make_key(prefix, id)
return pictures.open(key, 'rb')
def exists_picture(prefix, id):
key = make_key(prefix, id)
return pictures.exists(key)
def remove_picture(prefix, id):
key = make_key(prefix, id)
pictures.delete(key)
def add_movie(prefix, id, content):
key = make_key(prefix, id)
return movies.write(key, content)
def get_movie(prefix, id):
key = make_key(prefix, id)
return movies.read(key)
def open_movie(prefix, id):
key = make_key(prefix, id)
return movies.open(key, 'rb')
def exists_movie(prefix, id):
key = make_key(prefix, id)
return movies.exists(key)
def remove_movie(prefix, id):
key = make_key(prefix, id)
movies.delete(key)
|
<commit_before><commit_msg>Add kv store for file storage<commit_after>
|
import flask_fs as fs
from zou.app import app
pictures = fs.Storage("pictures", overwrite=True)
movies = fs.Storage("movies", overwrite=True)
pictures.configure(app)
movies.configure(app)
def make_key(prefix, id):
return "%s-%s" % (prefix, id)
def add_picture(prefix, id, path):
key = make_key(prefix, id)
with open(path, 'rb') as fd:
return pictures.write(key, fd)
def get_picture(prefix, id):
key = make_key(prefix, id)
return pictures.read(key)
def open_picture(prefix, id):
key = make_key(prefix, id)
return pictures.open(key, 'rb')
def exists_picture(prefix, id):
key = make_key(prefix, id)
return pictures.exists(key)
def remove_picture(prefix, id):
key = make_key(prefix, id)
pictures.delete(key)
def add_movie(prefix, id, content):
key = make_key(prefix, id)
return movies.write(key, content)
def get_movie(prefix, id):
key = make_key(prefix, id)
return movies.read(key)
def open_movie(prefix, id):
key = make_key(prefix, id)
return movies.open(key, 'rb')
def exists_movie(prefix, id):
key = make_key(prefix, id)
return movies.exists(key)
def remove_movie(prefix, id):
key = make_key(prefix, id)
movies.delete(key)
|
Add kv store for file storageimport flask_fs as fs
from zou.app import app
pictures = fs.Storage("pictures", overwrite=True)
movies = fs.Storage("movies", overwrite=True)
pictures.configure(app)
movies.configure(app)
def make_key(prefix, id):
return "%s-%s" % (prefix, id)
def add_picture(prefix, id, path):
key = make_key(prefix, id)
with open(path, 'rb') as fd:
return pictures.write(key, fd)
def get_picture(prefix, id):
key = make_key(prefix, id)
return pictures.read(key)
def open_picture(prefix, id):
key = make_key(prefix, id)
return pictures.open(key, 'rb')
def exists_picture(prefix, id):
key = make_key(prefix, id)
return pictures.exists(key)
def remove_picture(prefix, id):
key = make_key(prefix, id)
pictures.delete(key)
def add_movie(prefix, id, content):
key = make_key(prefix, id)
return movies.write(key, content)
def get_movie(prefix, id):
key = make_key(prefix, id)
return movies.read(key)
def open_movie(prefix, id):
key = make_key(prefix, id)
return movies.open(key, 'rb')
def exists_movie(prefix, id):
key = make_key(prefix, id)
return movies.exists(key)
def remove_movie(prefix, id):
key = make_key(prefix, id)
movies.delete(key)
|
<commit_before><commit_msg>Add kv store for file storage<commit_after>import flask_fs as fs
from zou.app import app
pictures = fs.Storage("pictures", overwrite=True)
movies = fs.Storage("movies", overwrite=True)
pictures.configure(app)
movies.configure(app)
def make_key(prefix, id):
return "%s-%s" % (prefix, id)
def add_picture(prefix, id, path):
key = make_key(prefix, id)
with open(path, 'rb') as fd:
return pictures.write(key, fd)
def get_picture(prefix, id):
key = make_key(prefix, id)
return pictures.read(key)
def open_picture(prefix, id):
key = make_key(prefix, id)
return pictures.open(key, 'rb')
def exists_picture(prefix, id):
key = make_key(prefix, id)
return pictures.exists(key)
def remove_picture(prefix, id):
key = make_key(prefix, id)
pictures.delete(key)
def add_movie(prefix, id, content):
key = make_key(prefix, id)
return movies.write(key, content)
def get_movie(prefix, id):
key = make_key(prefix, id)
return movies.read(key)
def open_movie(prefix, id):
key = make_key(prefix, id)
return movies.open(key, 'rb')
def exists_movie(prefix, id):
key = make_key(prefix, id)
return movies.exists(key)
def remove_movie(prefix, id):
key = make_key(prefix, id)
movies.delete(key)
|
|
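A hypothetical caller of the storage helpers above might use them as follows; the prefix, id, and file path are invented for illustration:
from zou.app.stores import file_store
# Store a thumbnail, check it exists, then read it back.
file_store.add_picture("thumbnails", "1234-abcd", "/tmp/thumbnail.png")
if file_store.exists_picture("thumbnails", "1234-abcd"):
    data = file_store.get_picture("thumbnails", "1234-abcd")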
32c5a681c7dd498204d38d5d1152aa7f67e09069
|
taiga/feedback/admin.py
|
taiga/feedback/admin.py
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from . import models
class FeedbackEntryAdmin(admin.ModelAdmin):
list_display = ['created_date', 'full_name', 'email' ]
list_display_links = list_display
list_filter = ['created_date',]
date_hierarchy = "created_date"
ordering = ("-created_date", "id")
search_fields = ("full_name", "email", "id")
admin.site.register(models.FeedbackEntry, FeedbackEntryAdmin)
|
Add feedback entries to the Admin panel
|
Add feedback entries to the Admin panel
|
Python
|
agpl-3.0
|
19kestier/taiga-back,gauravjns/taiga-back,Zaneh-/bearded-tribble-back,jeffdwyatt/taiga-back,crr0004/taiga-back,CoolCloud/taiga-back,astagi/taiga-back,frt-arch/taiga-back,joshisa/taiga-back,coopsource/taiga-back,rajiteh/taiga-back,astronaut1712/taiga-back,coopsource/taiga-back,astronaut1712/taiga-back,gauravjns/taiga-back,astagi/taiga-back,WALR/taiga-back,taigaio/taiga-back,dayatz/taiga-back,dycodedev/taiga-back,Rademade/taiga-back,EvgeneOskin/taiga-back,obimod/taiga-back,frt-arch/taiga-back,joshisa/taiga-back,EvgeneOskin/taiga-back,Tigerwhit4/taiga-back,dayatz/taiga-back,gam-phon/taiga-back,rajiteh/taiga-back,CMLL/taiga-back,taigaio/taiga-back,crr0004/taiga-back,gam-phon/taiga-back,forging2012/taiga-back,Rademade/taiga-back,seanchen/taiga-back,Zaneh-/bearded-tribble-back,CMLL/taiga-back,frt-arch/taiga-back,gauravjns/taiga-back,obimod/taiga-back,crr0004/taiga-back,CMLL/taiga-back,obimod/taiga-back,astronaut1712/taiga-back,bdang2012/taiga-back-casting,astagi/taiga-back,CoolCloud/taiga-back,taigaio/taiga-back,bdang2012/taiga-back-casting,astagi/taiga-back,WALR/taiga-back,rajiteh/taiga-back,coopsource/taiga-back,jeffdwyatt/taiga-back,CMLL/taiga-back,rajiteh/taiga-back,astronaut1712/taiga-back,dycodedev/taiga-back,Tigerwhit4/taiga-back,CoolCloud/taiga-back,obimod/taiga-back,crr0004/taiga-back,Rademade/taiga-back,Rademade/taiga-back,Rademade/taiga-back,jeffdwyatt/taiga-back,joshisa/taiga-back,WALR/taiga-back,EvgeneOskin/taiga-back,bdang2012/taiga-back-casting,gam-phon/taiga-back,19kestier/taiga-back,forging2012/taiga-back,Zaneh-/bearded-tribble-back,coopsource/taiga-back,EvgeneOskin/taiga-back,joshisa/taiga-back,19kestier/taiga-back,seanchen/taiga-back,jeffdwyatt/taiga-back,WALR/taiga-back,CoolCloud/taiga-back,bdang2012/taiga-back-casting,forging2012/taiga-back,dycodedev/taiga-back,forging2012/taiga-back,dycodedev/taiga-back,xdevelsistemas/taiga-back-community,Tigerwhit4/taiga-back,dayatz/taiga-back,seanchen/taiga-back,Tigerwhit4/taiga-back,xdevelsistemas/taiga-back-community,gauravjns/taiga-back,gam-phon/taiga-back,seanchen/taiga-back,xdevelsistemas/taiga-back-community
|
Add feedback entries to the Admin panel
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from . import models
class FeedbackEntryAdmin(admin.ModelAdmin):
list_display = ['created_date', 'full_name', 'email' ]
list_display_links = list_display
list_filter = ['created_date',]
date_hierarchy = "created_date"
ordering = ("-created_date", "id")
search_fields = ("full_name", "email", "id")
admin.site.register(models.FeedbackEntry, FeedbackEntryAdmin)
|
<commit_before><commit_msg>Add feedback entries to the Admin panel<commit_after>
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from . import models
class FeedbackEntryAdmin(admin.ModelAdmin):
list_display = ['created_date', 'full_name', 'email' ]
list_display_links = list_display
list_filter = ['created_date',]
date_hierarchy = "created_date"
ordering = ("-created_date", "id")
search_fields = ("full_name", "email", "id")
admin.site.register(models.FeedbackEntry, FeedbackEntryAdmin)
|
Add feedback entries to the Admin panel# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from . import models
class FeedbackEntryAdmin(admin.ModelAdmin):
list_display = ['created_date', 'full_name', 'email' ]
list_display_links = list_display
list_filter = ['created_date',]
date_hierarchy = "created_date"
ordering = ("-created_date", "id")
search_fields = ("full_name", "email", "id")
admin.site.register(models.FeedbackEntry, FeedbackEntryAdmin)
|
<commit_before><commit_msg>Add feedback entries to the Admin panel<commit_after># Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from . import models
class FeedbackEntryAdmin(admin.ModelAdmin):
list_display = ['created_date', 'full_name', 'email' ]
list_display_links = list_display
list_filter = ['created_date',]
date_hierarchy = "created_date"
ordering = ("-created_date", "id")
search_fields = ("full_name", "email", "id")
admin.site.register(models.FeedbackEntry, FeedbackEntryAdmin)
|
|
c6ded12845f25e305789840e1687bfee83e82be5
|
tests/test_standings.py
|
tests/test_standings.py
|
#!/usr/bin/env python
import pytest
from datetime import datetime
from mlbgame import standings
date = datetime(2017, 5, 15, 19, 4, 59, 367187)
s = standings.Standings(date)
def test_standings_url():
standings_url = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?season=2017&' \
'schedule_game_date.game_date=%272017/05/15%27&sit_code=%27h0%27&league_id=103&' \
'league_id=104&all_star_sw=%27N%27&version=2'
assert s.standings_url == standings_url
def test_historical_standings_url():
date = datetime(2016, 5, 15)
s = standings.Standings(date)
standings_url = 'http://mlb.mlb.com/lookup/json/named.historical_standings_schedule_date.bam?season=2016&' \
'game_date=%272016/05/15%27&sit_code=%27h0%27&league_id=103&league_id=104&' \
'all_star_sw=%27N%27&version=48'
assert s.standings_url == standings_url
def test_divisions_is_list():
assert type(s.divisions) is list
|
Add a few simple pytest tests
|
Add a few simple pytest tests
- These tests should eventually provide unit tests for all classes
|
Python
|
mit
|
zachpanz88/mlbgame,panzarino/mlbgame
|
Add a few simple pytest tests
- These tests should eventually provide unit tests for all classes
|
#!/usr/bin/env python
import pytest
from datetime import datetime
from mlbgame import standings
date = datetime(2017, 5, 15, 19, 4, 59, 367187)
s = standings.Standings(date)
def test_standings_url():
standings_url = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?season=2017&' \
'schedule_game_date.game_date=%272017/05/15%27&sit_code=%27h0%27&league_id=103&' \
'league_id=104&all_star_sw=%27N%27&version=2'
assert s.standings_url == standings_url
def test_historical_standings_url():
date = datetime(2016, 5, 15)
s = standings.Standings(date)
standings_url = 'http://mlb.mlb.com/lookup/json/named.historical_standings_schedule_date.bam?season=2016&' \
'game_date=%272016/05/15%27&sit_code=%27h0%27&league_id=103&league_id=104&' \
'all_star_sw=%27N%27&version=48'
assert s.standings_url == standings_url
def test_divisions_is_list():
assert type(s.divisions) is list
|
<commit_before><commit_msg>Add a few simple pytest tests
- These tests should eventually provide unit tests for all classes<commit_after>
|
#!/usr/bin/env python
import pytest
from datetime import datetime
from mlbgame import standings
date = datetime(2017, 5, 15, 19, 4, 59, 367187)
s = standings.Standings(date)
def test_standings_url():
standings_url = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?season=2017&' \
'schedule_game_date.game_date=%272017/05/15%27&sit_code=%27h0%27&league_id=103&' \
'league_id=104&all_star_sw=%27N%27&version=2'
assert s.standings_url == standings_url
def test_historical_standings_url():
date = datetime(2016, 5, 15)
s = standings.Standings(date)
standings_url = 'http://mlb.mlb.com/lookup/json/named.historical_standings_schedule_date.bam?season=2016&' \
'game_date=%272016/05/15%27&sit_code=%27h0%27&league_id=103&league_id=104&' \
'all_star_sw=%27N%27&version=48'
assert s.standings_url == standings_url
def test_divisions_is_list():
assert type(s.divisions) is list
|
Add a few simple pytest tests
- These tests should eventually provide unit tests for all classes#!/usr/bin/env python
import pytest
from datetime import datetime
from mlbgame import standings
date = datetime(2017, 5, 15, 19, 4, 59, 367187)
s = standings.Standings(date)
def test_standings_url():
standings_url = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?season=2017&' \
'schedule_game_date.game_date=%272017/05/15%27&sit_code=%27h0%27&league_id=103&' \
'league_id=104&all_star_sw=%27N%27&version=2'
assert s.standings_url == standings_url
def test_historical_standings_url():
date = datetime(2016, 5, 15)
s = standings.Standings(date)
standings_url = 'http://mlb.mlb.com/lookup/json/named.historical_standings_schedule_date.bam?season=2016&' \
'game_date=%272016/05/15%27&sit_code=%27h0%27&league_id=103&league_id=104&' \
'all_star_sw=%27N%27&version=48'
assert s.standings_url == standings_url
def test_divisions_is_list():
assert type(s.divisions) is list
|
<commit_before><commit_msg>Add a few simple pytest tests
- These tests should eventually provide unit tests for all classes<commit_after>#!/usr/bin/env python
import pytest
from datetime import datetime
from mlbgame import standings
date = datetime(2017, 5, 15, 19, 4, 59, 367187)
s = standings.Standings(date)
def test_standings_url():
standings_url = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?season=2017&' \
'schedule_game_date.game_date=%272017/05/15%27&sit_code=%27h0%27&league_id=103&' \
'league_id=104&all_star_sw=%27N%27&version=2'
assert s.standings_url == standings_url
def test_historical_standings_url():
date = datetime(2016, 5, 15)
s = standings.Standings(date)
standings_url = 'http://mlb.mlb.com/lookup/json/named.historical_standings_schedule_date.bam?season=2016&' \
'game_date=%272016/05/15%27&sit_code=%27h0%27&league_id=103&league_id=104&' \
'all_star_sw=%27N%27&version=48'
assert s.standings_url == standings_url
def test_divisions_is_list():
assert type(s.divisions) is list
|
|
d637cbe9c904fb0f0b67fbc10f66db299d153f4e
|
tests/functional/test_docs.py
|
tests/functional/test_docs.py
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from boto3.docs import docs_for
class TestDocs(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_resource_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Service Resource', docs_str)
self.assertIn('A resource representing Amazon Simple Storage Service',
docs_str)
def test_client_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('s3.Client', docs_str)
self.assertIn(
'A low-level client representing Amazon Simple Storage Service',
docs_str)
def test_waiter_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Waiter', docs_str)
self.assertIn('bucket_exists', docs_str)
|
Add basic smoke tests for doc generation
|
Add basic smoke tests for doc generation
|
Python
|
apache-2.0
|
boto/boto3
|
Add basic smoke tests for doc generation
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from boto3.docs import docs_for
class TestDocs(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_resource_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Service Resource', docs_str)
self.assertIn('A resource representing Amazon Simple Storage Service',
docs_str)
def test_client_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('s3.Client', docs_str)
self.assertIn(
'A low-level client representing Amazon Simple Storage Service',
docs_str)
def test_waiter_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Waiter', docs_str)
self.assertIn('bucket_exists', docs_str)
|
<commit_before><commit_msg>Add basic smoke tests for doc generation<commit_after>
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from boto3.docs import docs_for
class TestDocs(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_resource_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Service Resource', docs_str)
self.assertIn('A resource representing Amazon Simple Storage Service',
docs_str)
def test_client_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('s3.Client', docs_str)
self.assertIn(
'A low-level client representing Amazon Simple Storage Service',
docs_str)
def test_waiter_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Waiter', docs_str)
self.assertIn('bucket_exists', docs_str)
|
Add basic smoke tests for doc generation
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from boto3.docs import docs_for
class TestDocs(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_resource_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Service Resource', docs_str)
self.assertIn('A resource representing Amazon Simple Storage Service',
docs_str)
def test_client_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('s3.Client', docs_str)
self.assertIn(
'A low-level client representing Amazon Simple Storage Service',
docs_str)
def test_waiter_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Waiter', docs_str)
self.assertIn('bucket_exists', docs_str)
|
<commit_before><commit_msg>Add basic smoke tests for doc generation<commit_after># Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.session
from boto3.docs import docs_for
class TestDocs(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
def test_resource_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Service Resource', docs_str)
self.assertIn('A resource representing Amazon Simple Storage Service',
docs_str)
def test_client_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('s3.Client', docs_str)
self.assertIn(
'A low-level client representing Amazon Simple Storage Service',
docs_str)
def test_waiter_docs_generated(self):
docs_str = docs_for('s3', self.session)
self.assertIn('Waiter', docs_str)
self.assertIn('bucket_exists', docs_str)
|
|
86d8f0fd48ccb577a8300362ea9d181e63d2fa5d
|
tests/unit/core/test_issue.py
|
tests/unit/core/test_issue.py
|
# -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import bandit
from bandit.core import issue
class IssueTests(testtools.TestCase):
def test_issue_create(self):
new_issue = _get_issue_instance()
self.assertIsInstance(new_issue, issue.Issue)
def test_issue_str(self):
test_issue = _get_issue_instance()
self.assertEqual(
("Issue: 'Test issue' from bandit_plugin: Severity: MEDIUM "
"Confidence: MEDIUM at code.py:1"),
str(test_issue)
)
def test_issue_as_dict(self):
test_issue = _get_issue_instance()
test_issue_dict = test_issue.as_dict(with_code=False)
self.assertIsInstance(test_issue_dict, dict)
for attr in [
'filename', 'test_name', 'issue_severity', 'issue_confidence',
'issue_text', 'line_number', 'line_range'
]:
self.assertIn(attr, test_issue_dict)
def test_issue_filter(self):
test_issue = _get_issue_instance()
result = test_issue.filter(bandit.HIGH, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.MEDIUM, bandit.MEDIUM)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.LOW)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.HIGH, bandit.LOW)
self.assertFalse(result)
def _get_issue_instance():
new_issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, 'Test issue')
new_issue.fname = 'code.py'
new_issue.test = 'bandit_plugin'
new_issue.lineno = 1
return new_issue
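Taken together, the filter assertions describe a two-threshold rule: the MEDIUM/MEDIUM issue passes only when it meets or exceeds both the requested severity and the requested confidence. A minimal sketch of that rule (hypothetical helper, not bandit's implementation):
# Hypothetical restatement of the behaviour the assertions above expect.
LEVELS = ['LOW', 'MEDIUM', 'HIGH']
def passes(issue_sev, issue_conf, min_sev, min_conf):
    return (LEVELS.index(issue_sev) >= LEVELS.index(min_sev) and
            LEVELS.index(issue_conf) >= LEVELS.index(min_conf))
print(passes('MEDIUM', 'MEDIUM', 'LOW', 'HIGH'))  # False, as in the test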
|
Add unit tests for bandit.core.issue
|
Add unit tests for bandit.core.issue
Brings coverage for bandit.core.issue to 100%.
Change-Id: I67dfbb64cb276d1c16b28e4bbc6b50c8254bd3f1
|
Python
|
apache-2.0
|
stackforge/bandit,chair6/bandit,pombredanne/bandit,pombredanne/bandit,stackforge/bandit
|
Add unit tests for bandit.core.issue
Brings coverage for bandit.core.issue to 100%.
Change-Id: I67dfbb64cb276d1c16b28e4bbc6b50c8254bd3f1
|
# -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import bandit
from bandit.core import issue
class IssueTests(testtools.TestCase):
def test_issue_create(self):
new_issue = _get_issue_instance()
self.assertIsInstance(new_issue, issue.Issue)
def test_issue_str(self):
test_issue = _get_issue_instance()
self.assertEqual(
("Issue: 'Test issue' from bandit_plugin: Severity: MEDIUM "
"Confidence: MEDIUM at code.py:1"),
str(test_issue)
)
def test_issue_as_dict(self):
test_issue = _get_issue_instance()
test_issue_dict = test_issue.as_dict(with_code=False)
self.assertIsInstance(test_issue_dict, dict)
for attr in [
'filename', 'test_name', 'issue_severity', 'issue_confidence',
'issue_text', 'line_number', 'line_range'
]:
self.assertIn(attr, test_issue_dict)
def test_issue_filter(self):
test_issue = _get_issue_instance()
result = test_issue.filter(bandit.HIGH, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.MEDIUM, bandit.MEDIUM)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.LOW)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.HIGH, bandit.LOW)
self.assertFalse(result)
def _get_issue_instance():
new_issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, 'Test issue')
new_issue.fname = 'code.py'
new_issue.test = 'bandit_plugin'
new_issue.lineno = 1
return new_issue
|
<commit_before><commit_msg>Add unit tests for bandit.core.issue
Brings coverage for bandit.core.issue to 100%.
Change-Id: I67dfbb64cb276d1c16b28e4bbc6b50c8254bd3f1<commit_after>
|
# -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import bandit
from bandit.core import issue
class IssueTests(testtools.TestCase):
def test_issue_create(self):
new_issue = _get_issue_instance()
self.assertIsInstance(new_issue, issue.Issue)
def test_issue_str(self):
test_issue = _get_issue_instance()
self.assertEqual(
("Issue: 'Test issue' from bandit_plugin: Severity: MEDIUM "
"Confidence: MEDIUM at code.py:1"),
str(test_issue)
)
def test_issue_as_dict(self):
test_issue = _get_issue_instance()
test_issue_dict = test_issue.as_dict(with_code=False)
self.assertIsInstance(test_issue_dict, dict)
for attr in [
'filename', 'test_name', 'issue_severity', 'issue_confidence',
'issue_text', 'line_number', 'line_range'
]:
self.assertIn(attr, test_issue_dict)
def test_issue_filter(self):
test_issue = _get_issue_instance()
result = test_issue.filter(bandit.HIGH, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.MEDIUM, bandit.MEDIUM)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.LOW)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.HIGH, bandit.LOW)
self.assertFalse(result)
def _get_issue_instance():
new_issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, 'Test issue')
new_issue.fname = 'code.py'
new_issue.test = 'bandit_plugin'
new_issue.lineno = 1
return new_issue
|
Add unit tests for bandit.core.issue
Brings coverage for bandit.core.issue to 100%.
Change-Id: I67dfbb64cb276d1c16b28e4bbc6b50c8254bd3f1
# -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import bandit
from bandit.core import issue
class IssueTests(testtools.TestCase):
def test_issue_create(self):
new_issue = _get_issue_instance()
self.assertIsInstance(new_issue, issue.Issue)
def test_issue_str(self):
test_issue = _get_issue_instance()
self.assertEqual(
("Issue: 'Test issue' from bandit_plugin: Severity: MEDIUM "
"Confidence: MEDIUM at code.py:1"),
str(test_issue)
)
def test_issue_as_dict(self):
test_issue = _get_issue_instance()
test_issue_dict = test_issue.as_dict(with_code=False)
self.assertIsInstance(test_issue_dict, dict)
for attr in [
'filename', 'test_name', 'issue_severity', 'issue_confidence',
'issue_text', 'line_number', 'line_range'
]:
self.assertIn(attr, test_issue_dict)
def test_issue_filter(self):
test_issue = _get_issue_instance()
result = test_issue.filter(bandit.HIGH, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.MEDIUM, bandit.MEDIUM)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.LOW)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.HIGH, bandit.LOW)
self.assertFalse(result)
def _get_issue_instance():
new_issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, 'Test issue')
new_issue.fname = 'code.py'
new_issue.test = 'bandit_plugin'
new_issue.lineno = 1
return new_issue
|
<commit_before><commit_msg>Add unit tests for bandit.core.issue
Brings coverage for bandit.core.issue to 100%.
Change-Id: I67dfbb64cb276d1c16b28e4bbc6b50c8254bd3f1<commit_after># -*- coding:utf-8 -*-
#
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import bandit
from bandit.core import issue
class IssueTests(testtools.TestCase):
def test_issue_create(self):
new_issue = _get_issue_instance()
self.assertIsInstance(new_issue, issue.Issue)
def test_issue_str(self):
test_issue = _get_issue_instance()
self.assertEqual(
("Issue: 'Test issue' from bandit_plugin: Severity: MEDIUM "
"Confidence: MEDIUM at code.py:1"),
str(test_issue)
)
def test_issue_as_dict(self):
test_issue = _get_issue_instance()
test_issue_dict = test_issue.as_dict(with_code=False)
self.assertIsInstance(test_issue_dict, dict)
for attr in [
'filename', 'test_name', 'issue_severity', 'issue_confidence',
'issue_text', 'line_number', 'line_range'
]:
self.assertIn(attr, test_issue_dict)
def test_issue_filter(self):
test_issue = _get_issue_instance()
result = test_issue.filter(bandit.HIGH, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.MEDIUM, bandit.MEDIUM)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.LOW)
self.assertTrue(result)
result = test_issue.filter(bandit.LOW, bandit.HIGH)
self.assertFalse(result)
result = test_issue.filter(bandit.HIGH, bandit.LOW)
self.assertFalse(result)
def _get_issue_instance():
new_issue = issue.Issue(bandit.MEDIUM, bandit.MEDIUM, 'Test issue')
new_issue.fname = 'code.py'
new_issue.test = 'bandit_plugin'
new_issue.lineno = 1
return new_issue
|
|
3ce048f8c0346c30173b52a691bd18ece1cbc13d
|
scripts/stock_price/tough_question_tfp.py
|
scripts/stock_price/tough_question_tfp.py
|
#!/usr/bin/python3
# coding: utf-8
'''
Implementation of the article below with TensorFlow Probability
'Bayesian Methods for Hackers'
https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter2_MorePyMC/Ch2_MorePyMC_PyMC3.ipynb
Based on an example of TensorFlow Probability
https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/edward2
https://www.hellocybernetics.tech/entry/2018/11/09/231817
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
## from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
N = 1000
X = 300
N_RESULTS = 2000
N_BURNIN = 1000
## Explanatory variable(s)
true_prob = tf.random_uniform([], minval=0.0, maxval=1.0)
## Observed data
observations = tf.random.shuffle(tf.concat([tf.ones(X, dtype=tf.int32), tf.zeros(N-X, dtype=tf.int32)], 0))
def target_log_prob_fn(true_prob):
log_prob_parts = [
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 1)) + tfd.Bernoulli(probs=true_prob).log_prob(observations),
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 0)) + tfd.Bernoulli(probs=0.5).log_prob(observations)
]
sum_log_prob = tf.reduce_sum(tf.reduce_logsumexp(log_prob_parts, 0))
return sum_log_prob
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.01,
num_leapfrog_steps=5)
states, kernels_results = tfp.mcmc.sample_chain(
num_results=N_RESULTS,
current_state=[true_prob],
kernel=hmc_kernel,
num_burnin_steps=N_BURNIN)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
states_, results_ = sess.run([states, kernels_results])
plt.hist(states_[0], bins=50)
plt.show()
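Because target_log_prob_fn mixes the true-probability Bernoulli with a fair coin at equal weight, each observation is 1 with probability 0.5 * true_prob + 0.25, so X = 300 of N = 1000 suggests the chain should concentrate near 0.1. A back-of-the-envelope check (plain Python 3, not part of the recorded commit):
# Sanity check of where the sampled true_prob histogram should peak.
N, X = 1000, 300
print(2 * (X / N - 0.25))  # 0.1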
|
Add a TensorFlow Probability sample
|
Add a TensorFlow Probability sample
|
Python
|
mit
|
zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend,zettsu-t/cPlusPlusFriend
|
Add a TensorFlow Probability sample
|
#!/usr/bin/python3
# coding: utf-8
'''
Implementation of the article below with TensorFlow Probability
'Bayesian Methods for Hackers'
https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter2_MorePyMC/Ch2_MorePyMC_PyMC3.ipynb
Based on an example of TensorFlow Probability
https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/edward2
https://www.hellocybernetics.tech/entry/2018/11/09/231817
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
## from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
N = 1000
X = 300
N_RESULTS = 2000
N_BURNIN = 1000
## Explanatory variable(s)
true_prob = tf.random_uniform([], minval=0.0, maxval=1.0)
## Observed data
observations = tf.random.shuffle(tf.concat([tf.ones(X, dtype=tf.int32), tf.zeros(N-X, dtype=tf.int32)], 0))
def target_log_prob_fn(true_prob):
log_prob_parts = [
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 1)) + tfd.Bernoulli(probs=true_prob).log_prob(observations),
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 0)) + tfd.Bernoulli(probs=0.5).log_prob(observations)
]
sum_log_prob = tf.reduce_sum(tf.reduce_logsumexp(log_prob_parts, 0))
return sum_log_prob
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.01,
num_leapfrog_steps=5)
states, kernels_results = tfp.mcmc.sample_chain(
num_results=N_RESULTS,
current_state=[true_prob],
kernel=hmc_kernel,
num_burnin_steps=N_BURNIN)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
states_, results_ = sess.run([states, kernels_results])
plt.hist(states_[0], bins=50)
plt.show()
|
<commit_before><commit_msg>Add a TensorFlow Probability sample<commit_after>
|
#!/usr/bin/python3
# coding: utf-8
'''
Implementation of the article below with TensorFlow Probability
'Bayesian Methods for Hackers'
https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter2_MorePyMC/Ch2_MorePyMC_PyMC3.ipynb
Based on an example of TensorFlow Probability
https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/edward2
https://www.hellocybernetics.tech/entry/2018/11/09/231817
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
## from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
N = 1000
X = 300
N_RESULTS = 2000
N_BURNIN = 1000
## Explanatory variable(s)
true_prob = tf.random_uniform([], minval=0.0, maxval=1.0)
## Observed data
observations = tf.random.shuffle(tf.concat([tf.ones(X, dtype=tf.int32), tf.zeros(N-X, dtype=tf.int32)], 0))
def target_log_prob_fn(true_prob):
log_prob_parts = [
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 1)) + tfd.Bernoulli(probs=true_prob).log_prob(observations),
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 0)) + tfd.Bernoulli(probs=0.5).log_prob(observations)
]
sum_log_prob = tf.reduce_sum(tf.reduce_logsumexp(log_prob_parts, 0))
return sum_log_prob
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.01,
num_leapfrog_steps=5)
states, kernels_results = tfp.mcmc.sample_chain(
num_results=N_RESULTS,
current_state=[true_prob],
kernel=hmc_kernel,
num_burnin_steps=N_BURNIN)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
states_, results_ = sess.run([states, kernels_results])
plt.hist(states_[0], bins=50)
plt.show()
|
Add a TensorFlow Probability sample
#!/usr/bin/python3
# coding: utf-8
'''
Implementation of the article below with TensorFlow Probability
'Bayesian Methods for Hackers'
https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter2_MorePyMC/Ch2_MorePyMC_PyMC3.ipynb
Based on an example of TensorFlow Probability
https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/edward2
https://www.hellocybernetics.tech/entry/2018/11/09/231817
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
## from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
N = 1000
X = 300
N_RESULTS = 2000
N_BURNIN = 1000
## Explanatory variable(s)
true_prob = tf.random_uniform([], minval=0.0, maxval=1.0)
## Observed data
observations = tf.random.shuffle(tf.concat([tf.ones(X, dtype=tf.int32), tf.zeros(N-X, dtype=tf.int32)], 0))
def target_log_prob_fn(true_prob):
log_prob_parts = [
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 1)) + tfd.Bernoulli(probs=true_prob).log_prob(observations),
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 0)) + tfd.Bernoulli(probs=0.5).log_prob(observations)
]
sum_log_prob = tf.reduce_sum(tf.reduce_logsumexp(log_prob_parts, 0))
return sum_log_prob
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.01,
num_leapfrog_steps=5)
states, kernels_results = tfp.mcmc.sample_chain(
num_results=N_RESULTS,
current_state=[true_prob],
kernel=hmc_kernel,
num_burnin_steps=N_BURNIN)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
states_, results_ = sess.run([states, kernels_results])
plt.hist(states_[0], bins=50)
plt.show()
|
<commit_before><commit_msg>Add a TensorFlow Probability sample<commit_after>#!/usr/bin/python3
# coding: utf-8
'''
Implementation of the article below with TensorFlow Probability
'Bayesian Methods for Hackers'
https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter2_MorePyMC/Ch2_MorePyMC_PyMC3.ipynb
Based on an example of TensorFlow Probability
https://github.com/tensorflow/probability/tree/master/tensorflow_probability/python/edward2
https://www.hellocybernetics.tech/entry/2018/11/09/231817
'''
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
## from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
N = 1000
X = 300
N_RESULTS = 2000
N_BURNIN = 1000
## Explanatory variable(s)
true_prob = tf.random_uniform([], minval=0.0, maxval=1.0)
## Observed data
observations = tf.random.shuffle(tf.concat([tf.ones(X, dtype=tf.int32), tf.zeros(N-X, dtype=tf.int32)], 0))
def target_log_prob_fn(true_prob):
log_prob_parts = [
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 1)) + tfd.Bernoulli(probs=true_prob).log_prob(observations),
tfd.Bernoulli(probs=0.5).log_prob(tf.fill([N], 0)) + tfd.Bernoulli(probs=0.5).log_prob(observations)
]
sum_log_prob = tf.reduce_sum(tf.reduce_logsumexp(log_prob_parts, 0))
return sum_log_prob
hmc_kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.01,
num_leapfrog_steps=5)
states, kernels_results = tfp.mcmc.sample_chain(
num_results=N_RESULTS,
current_state=[true_prob],
kernel=hmc_kernel,
num_burnin_steps=N_BURNIN)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
states_, results_ = sess.run([states, kernels_results])
plt.hist(states_[0], bins=50)
plt.show()
|
|
2e821ab48542c89ac41ebc17036bddc164506a22
|
combine_data/cartesianProductOfIDs.py
|
combine_data/cartesianProductOfIDs.py
|
import argparse
import itertools
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate the cartesian product of two ID files')
parser.add_argument('--idFileA',required=True,type=str,help='First file of IDs')
parser.add_argument('--idFileB',required=True,type=str,help='Second file of IDS')
parser.add_argument('--outFile',required=True,type=str,help='Output file')
args = parser.parse_args()
with open(args.idFileA) as f:
idsA = [ int(line.strip()) for line in f ]
with open(args.idFileB) as f:
idsB = [ int(line.strip()) for line in f ]
idsA = sorted(list(set(idsA)))
idsB = sorted(list(set(idsB)))
with open(args.outFile,'w') as outF:
for a,b in itertools.product(idsA,idsB):
outF.write("%d\t%d\n" % (min(a,b),max(a,b)))
print "Processing complete."
|
Backup of some unused code
|
Backup of some unused code
|
Python
|
mit
|
jakelever/knowledgediscovery,jakelever/knowledgediscovery,jakelever/knowledgediscovery,jakelever/knowledgediscovery
|
Backup of some unused code
|
import argparse
import itertools
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate the cartesian product of two ID files')
parser.add_argument('--idFileA',required=True,type=str,help='First file of IDs')
parser.add_argument('--idFileB',required=True,type=str,help='Second file of IDS')
parser.add_argument('--outFile',required=True,type=str,help='Output file')
args = parser.parse_args()
with open(args.idFileA) as f:
idsA = [ int(line.strip()) for line in f ]
with open(args.idFileB) as f:
idsB = [ int(line.strip()) for line in f ]
idsA = sorted(list(set(idsA)))
idsB = sorted(list(set(idsB)))
with open(args.outFile,'w') as outF:
for a,b in itertools.product(idsA,idsB):
outF.write("%d\t%d\n" % (min(a,b),max(a,b)))
print "Processing complete."
|
<commit_before><commit_msg>Backup of some unused code<commit_after>
|
import argparse
import itertools
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate the cartesian product of two ID files')
parser.add_argument('--idFileA',required=True,type=str,help='First file of IDs')
parser.add_argument('--idFileB',required=True,type=str,help='Second file of IDS')
parser.add_argument('--outFile',required=True,type=str,help='Output file')
args = parser.parse_args()
with open(args.idFileA) as f:
idsA = [ int(line.strip()) for line in f ]
with open(args.idFileB) as f:
idsB = [ int(line.strip()) for line in f ]
idsA = sorted(list(set(idsA)))
idsB = sorted(list(set(idsB)))
with open(args.outFile,'w') as outF:
for a,b in itertools.product(idsA,idsB):
outF.write("%d\t%d\n" % (min(a,b),max(a,b)))
print "Processing complete."
|
Backup of some unused code
import argparse
import itertools
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate the cartesian product of two ID files')
parser.add_argument('--idFileA',required=True,type=str,help='First file of IDs')
parser.add_argument('--idFileB',required=True,type=str,help='Second file of IDS')
parser.add_argument('--outFile',required=True,type=str,help='Output file')
args = parser.parse_args()
with open(args.idFileA) as f:
idsA = [ int(line.strip()) for line in f ]
with open(args.idFileB) as f:
idsB = [ int(line.strip()) for line in f ]
idsA = sorted(list(set(idsA)))
idsB = sorted(list(set(idsB)))
with open(args.outFile,'w') as outF:
for a,b in itertools.product(idsA,idsB):
outF.write("%d\t%d\n" % (min(a,b),max(a,b)))
print "Processing complete."
|
<commit_before><commit_msg>Backup of some unused code<commit_after>import argparse
import itertools
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate the cartesian product of two ID files')
parser.add_argument('--idFileA',required=True,type=str,help='First file of IDs')
parser.add_argument('--idFileB',required=True,type=str,help='Second file of IDS')
parser.add_argument('--outFile',required=True,type=str,help='Output file')
args = parser.parse_args()
with open(args.idFileA) as f:
idsA = [ int(line.strip()) for line in f ]
with open(args.idFileB) as f:
idsB = [ int(line.strip()) for line in f ]
idsA = sorted(list(set(idsA)))
idsB = sorted(list(set(idsB)))
with open(args.outFile,'w') as outF:
for a,b in itertools.product(idsA,idsB):
outF.write("%d\t%d\n" % (min(a,b),max(a,b)))
print "Processing complete."
|
|
a1820a0e5f9bd891b20f70ab68dfd4bb385047a0
|
utils/multiclassification.py
|
utils/multiclassification.py
|
from __future__ import division
import numpy as np
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import _fit_binary
from sklearn.externals.joblib import Parallel, delayed
from unbalanced_dataset import SMOTE
def _fit_ovo_binary(estimator, X, y, i, j, sampling=None):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
X_values = X[ind[cond]]
y_values = y_binary
if sampling == 'SMOTE':
print 'SMOTE'
ratio = 1
smote = SMOTE(ratio=ratio)
X_values, y_values = smote.fit_transform(X_values, y_values)
return _fit_binary(estimator, X_values, y_values, classes=[i, j])
class CustomOneVsOneClassifier(OneVsOneClassifier):
def __init__(self, estimator, n_jobs=1, sampling=None):
self.estimator = estimator
self.n_jobs = n_jobs
self.sampling = sampling
def predict_proba(self, X):
return super(CustomOneVsOneClassifier, self).decision_function(X)
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y,
self.classes_[i], self.classes_[j], sampling=self.sampling)
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
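A hedged usage sketch (the estimator and the data names below are placeholders, not taken from the commit): the class drops into scikit-learn like the stock OneVsOneClassifier, with per-pair SMOTE resampling enabled through the sampling argument.
from sklearn.svm import LinearSVC
clf = CustomOneVsOneClassifier(LinearSVC(), n_jobs=2, sampling='SMOTE')
clf.fit(X_train, y_train)           # X_train, y_train: placeholder training arrays
scores = clf.predict_proba(X_test)  # pairwise decision values, one column per class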
|
Add utils to allow multiclass classification.
|
Add utils to allow multiclass classification.
|
Python
|
mit
|
davidgasquez/kaggle-airbnb
|
Add utils to allow multiclass classification.
|
from __future__ import division
import numpy as np
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import _fit_binary
from sklearn.externals.joblib import Parallel, delayed
from unbalanced_dataset import SMOTE
def _fit_ovo_binary(estimator, X, y, i, j, sampling=None):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
X_values = X[ind[cond]]
y_values = y_binary
if sampling == 'SMOTE':
print 'SMOTE'
ratio = 1
smote = SMOTE(ratio=ratio)
X_values, y_values = smote.fit_transform(X_values, y_values)
return _fit_binary(estimator, X_values, y_values, classes=[i, j])
class CustomOneVsOneClassifier(OneVsOneClassifier):
def __init__(self, estimator, n_jobs=1, sampling=None):
self.estimator = estimator
self.n_jobs = n_jobs
self.sampling = sampling
def predict_proba(self, X):
return super(CustomOneVsOneClassifier, self).decision_function(X)
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y,
self.classes_[i], self.classes_[j], sampling=self.sampling)
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
|
<commit_before><commit_msg>Add utils to allow multiclass classification.<commit_after>
|
from __future__ import division
import numpy as np
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import _fit_binary
from sklearn.externals.joblib import Parallel, delayed
from unbalanced_dataset import SMOTE
def _fit_ovo_binary(estimator, X, y, i, j, sampling=None):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
X_values = X[ind[cond]]
y_values = y_binary
if sampling == 'SMOTE':
print 'SMOTE'
ratio = 1
smote = SMOTE(ratio=ratio)
X_values, y_values = smote.fit_transform(X_values, y_values)
return _fit_binary(estimator, X_values, y_values, classes=[i, j])
class CustomOneVsOneClassifier(OneVsOneClassifier):
def __init__(self, estimator, n_jobs=1, sampling=None):
self.estimator = estimator
self.n_jobs = n_jobs
self.sampling = sampling
def predict_proba(self, X):
return super(CustomOneVsOneClassifier, self).decision_function(X)
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y,
self.classes_[i], self.classes_[j], sampling=self.sampling)
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
|
Add utils to allow multiclass classification.
from __future__ import division
import numpy as np
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import _fit_binary
from sklearn.externals.joblib import Parallel, delayed
from unbalanced_dataset import SMOTE
def _fit_ovo_binary(estimator, X, y, i, j, sampling=None):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
X_values = X[ind[cond]]
y_values = y_binary
if sampling == 'SMOTE':
print 'SMOTE'
ratio = 1
smote = SMOTE(ratio=ratio)
X_values, y_values = smote.fit_transform(X_values, y_values)
return _fit_binary(estimator, X_values, y_values, classes=[i, j])
class CustomOneVsOneClassifier(OneVsOneClassifier):
def __init__(self, estimator, n_jobs=1, sampling=None):
self.estimator = estimator
self.n_jobs = n_jobs
self.sampling = sampling
def predict_proba(self, X):
return super(CustomOneVsOneClassifier, self).decision_function(X)
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y,
self.classes_[i], self.classes_[j], sampling=self.sampling)
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
|
<commit_before><commit_msg>Add utils to allow multiclass classification.<commit_after>from __future__ import division
import numpy as np
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import _fit_binary
from sklearn.externals.joblib import Parallel, delayed
from unbalanced_dataset import SMOTE
def _fit_ovo_binary(estimator, X, y, i, j, sampling=None):
"""Fit a single binary estimator (one-vs-one)."""
cond = np.logical_or(y == i, y == j)
y = y[cond]
y_binary = np.empty(y.shape, np.int)
y_binary[y == i] = 0
y_binary[y == j] = 1
ind = np.arange(X.shape[0])
X_values = X[ind[cond]]
y_values = y_binary
if sampling == 'SMOTE':
print 'SMOTE'
ratio = 1
smote = SMOTE(ratio=ratio)
X_values, y_values = smote.fit_transform(X_values, y_values)
return _fit_binary(estimator, X_values, y_values, classes=[i, j])
class CustomOneVsOneClassifier(OneVsOneClassifier):
def __init__(self, estimator, n_jobs=1, sampling=None):
self.estimator = estimator
self.n_jobs = n_jobs
self.sampling = sampling
def predict_proba(self, X):
return super(CustomOneVsOneClassifier, self).decision_function(X)
def fit(self, X, y):
"""Fit underlying estimators.
Parameters
----------
X : (sparse) array-like, shape = [n_samples, n_features]
Data.
y : array-like, shape = [n_samples]
Multi-class targets.
Returns
-------
self
"""
y = np.asarray(y)
self.classes_ = np.unique(y)
n_classes = self.classes_.shape[0]
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_ovo_binary)(
self.estimator, X, y,
self.classes_[i], self.classes_[j], sampling=self.sampling)
for i in range(n_classes) for j in range(i + 1, n_classes))
return self
|
|
c9b3bd8309d3d1448823787160021a8688e8f3c1
|
vv_h5_setup.py
|
vv_h5_setup.py
|
import tables
vv_desc = dict(
obsid=tables.IntCol(pos=0),
revision=tables.IntCol(pos=1),
most_recent=tables.IntCol(pos=2),
slot=tables.IntCol(pos=3),
type=tables.StringCol(10,pos=4),
n_pts=tables.IntCol(pos=5),
rad_off=tables.FloatCol(pos=6),
frac_dy_big=tables.FloatCol(pos=7),
frac_dz_big=tables.FloatCol(pos=8),
frac_mag_big=tables.FloatCol(pos=9),
mean_y =tables.FloatCol(pos=10),
mean_z =tables.FloatCol(pos=11),
dy_mean=tables.FloatCol(pos=12),
dy_med =tables.FloatCol(pos=13),
dy_rms =tables.FloatCol(pos=14),
dz_mean=tables.FloatCol(pos=15),
dz_med =tables.FloatCol(pos=16),
dz_rms =tables.FloatCol(pos=17),
mag_mean=tables.FloatCol(pos=18),
mag_med =tables.FloatCol(pos=19),
mag_rms =tables.FloatCol(pos=20),
)
h5f = tables.openFile('/data/aca/archive/vv/vv.h5', 'a')
tbl = h5f.createTable('/', 'vv', vv_desc)
tbl.cols.obsid.createIndex()
h5f.close()
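Rows would then be appended through the normal PyTables row interface; the sketch below uses made-up field values and the same legacy openFile API as the script above:
h5f = tables.openFile('/data/aca/archive/vv/vv.h5', 'a')
tbl = h5f.root.vv
row = tbl.row
row['obsid'], row['revision'], row['slot'] = 1, 1, 0  # placeholder values
row.append()
tbl.flush()
h5f.close()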
|
Add python to make vv h5 file
|
Add python to make vv h5 file
|
Python
|
bsd-3-clause
|
sot/mica,sot/mica
|
Add python to make vv h5 file
|
import tables
vv_desc = dict(
obsid=tables.IntCol(pos=0),
revision=tables.IntCol(pos=1),
most_recent=tables.IntCol(pos=2),
slot=tables.IntCol(pos=3),
type=tables.StringCol(10,pos=4),
n_pts=tables.IntCol(pos=5),
rad_off=tables.FloatCol(pos=6),
frac_dy_big=tables.FloatCol(pos=7),
frac_dz_big=tables.FloatCol(pos=8),
frac_mag_big=tables.FloatCol(pos=9),
mean_y =tables.FloatCol(pos=10),
mean_z =tables.FloatCol(pos=11),
dy_mean=tables.FloatCol(pos=12),
dy_med =tables.FloatCol(pos=13),
dy_rms =tables.FloatCol(pos=14),
dz_mean=tables.FloatCol(pos=15),
dz_med =tables.FloatCol(pos=16),
dz_rms =tables.FloatCol(pos=17),
mag_mean=tables.FloatCol(pos=18),
mag_med =tables.FloatCol(pos=19),
mag_rms =tables.FloatCol(pos=20),
)
h5f = tables.openFile('/data/aca/archive/vv/vv.h5', 'a')
tbl = h5f.createTable('/', 'vv', vv_desc)
tbl.cols.obsid.createIndex()
h5f.close()
|
<commit_before><commit_msg>Add python to make vv h5 file<commit_after>
|
import tables
vv_desc = dict(
obsid=tables.IntCol(pos=0),
revision=tables.IntCol(pos=1),
most_recent=tables.IntCol(pos=2),
slot=tables.IntCol(pos=3),
type=tables.StringCol(10,pos=4),
n_pts=tables.IntCol(pos=5),
rad_off=tables.FloatCol(pos=6),
frac_dy_big=tables.FloatCol(pos=7),
frac_dz_big=tables.FloatCol(pos=8),
frac_mag_big=tables.FloatCol(pos=9),
mean_y =tables.FloatCol(pos=10),
mean_z =tables.FloatCol(pos=11),
dy_mean=tables.FloatCol(pos=12),
dy_med =tables.FloatCol(pos=13),
dy_rms =tables.FloatCol(pos=14),
dz_mean=tables.FloatCol(pos=15),
dz_med =tables.FloatCol(pos=16),
dz_rms =tables.FloatCol(pos=17),
mag_mean=tables.FloatCol(pos=18),
mag_med =tables.FloatCol(pos=19),
mag_rms =tables.FloatCol(pos=20),
)
h5f = tables.openFile('/data/aca/archive/vv/vv.h5', 'a')
tbl = h5f.createTable('/', 'vv', vv_desc)
tbl.cols.obsid.createIndex()
h5f.close()
|
Add python to make vv h5 file
import tables
vv_desc = dict(
obsid=tables.IntCol(pos=0),
revision=tables.IntCol(pos=1),
most_recent=tables.IntCol(pos=2),
slot=tables.IntCol(pos=3),
type=tables.StringCol(10,pos=4),
n_pts=tables.IntCol(pos=5),
rad_off=tables.FloatCol(pos=6),
frac_dy_big=tables.FloatCol(pos=7),
frac_dz_big=tables.FloatCol(pos=8),
frac_mag_big=tables.FloatCol(pos=9),
mean_y =tables.FloatCol(pos=10),
mean_z =tables.FloatCol(pos=11),
dy_mean=tables.FloatCol(pos=12),
dy_med =tables.FloatCol(pos=13),
dy_rms =tables.FloatCol(pos=14),
dz_mean=tables.FloatCol(pos=15),
dz_med =tables.FloatCol(pos=16),
dz_rms =tables.FloatCol(pos=17),
mag_mean=tables.FloatCol(pos=18),
mag_med =tables.FloatCol(pos=19),
mag_rms =tables.FloatCol(pos=20),
)
h5f = tables.openFile('/data/aca/archive/vv/vv.h5', 'a')
tbl = h5f.createTable('/', 'vv', vv_desc)
tbl.cols.obsid.createIndex()
h5f.close()
|
<commit_before><commit_msg>Add python to make vv h5 file<commit_after>import tables
vv_desc = dict(
obsid=tables.IntCol(pos=0),
revision=tables.IntCol(pos=1),
most_recent=tables.IntCol(pos=2),
slot=tables.IntCol(pos=3),
type=tables.StringCol(10,pos=4),
n_pts=tables.IntCol(pos=5),
rad_off=tables.FloatCol(pos=6),
frac_dy_big=tables.FloatCol(pos=7),
frac_dz_big=tables.FloatCol(pos=8),
frac_mag_big=tables.FloatCol(pos=9),
mean_y =tables.FloatCol(pos=10),
mean_z =tables.FloatCol(pos=11),
dy_mean=tables.FloatCol(pos=12),
dy_med =tables.FloatCol(pos=13),
dy_rms =tables.FloatCol(pos=14),
dz_mean=tables.FloatCol(pos=15),
dz_med =tables.FloatCol(pos=16),
dz_rms =tables.FloatCol(pos=17),
mag_mean=tables.FloatCol(pos=18),
mag_med =tables.FloatCol(pos=19),
mag_rms =tables.FloatCol(pos=20),
)
h5f = tables.openFile('/data/aca/archive/vv/vv.h5', 'a')
tbl = h5f.createTable('/', 'vv', vv_desc)
tbl.cols.obsid.createIndex()
h5f.close()
|
|
d48035b06b952b9ac4d95897d08de50d5977bf9f
|
tests/basics/ordereddict1.py
|
tests/basics/ordereddict1.py
|
try:
from collections import OrderedDict
except ImportError:
try:
from _collections import OrderedDict
except ImportError:
print("SKIP")
import sys
sys.exit()
d = OrderedDict([(10, 20), ("b", 100), (1, 2)])
print(list(d.keys()))
print(list(d.values()))
del d["b"]
print(list(d.keys()))
print(list(d.values()))
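For reference, the output this test should produce under standard insertion-order semantics (the expected-output file is not part of this record):
# [10, 'b', 1]
# [20, 100, 2]
# [10, 1]
# [20, 2]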
|
Add basic test for OrderedDict.
|
tests: Add basic test for OrderedDict.
Mostly to have coverage of newly added code in map.c.
|
Python
|
mit
|
selste/micropython,mgyenik/micropython,swegener/micropython,dhylands/micropython,feilongfl/micropython,neilh10/micropython,jmarcelino/pycom-micropython,paul-xxx/micropython,tdautc19841202/micropython,blmorris/micropython,ceramos/micropython,Timmenem/micropython,noahchense/micropython,tdautc19841202/micropython,emfcamp/micropython,chrisdearman/micropython,toolmacher/micropython,ahotam/micropython,kerneltask/micropython,mianos/micropython,xyb/micropython,toolmacher/micropython,noahwilliamsson/micropython,xyb/micropython,noahchense/micropython,ernesto-g/micropython,cwyark/micropython,ganshun666/micropython,xuxiaoxin/micropython,paul-xxx/micropython,ryannathans/micropython,danicampora/micropython,selste/micropython,AriZuu/micropython,cnoviello/micropython,puuu/micropython,xyb/micropython,martinribelotta/micropython,pfalcon/micropython,supergis/micropython,cloudformdesign/micropython,micropython/micropython-esp32,utopiaprince/micropython,tuc-osg/micropython,ruffy91/micropython,MrSurly/micropython,kerneltask/micropython,vriera/micropython,cnoviello/micropython,rubencabrera/micropython,stonegithubs/micropython,cnoviello/micropython,martinribelotta/micropython,redbear/micropython,supergis/micropython,lowRISC/micropython,Timmenem/micropython,heisewangluo/micropython,slzatz/micropython,rubencabrera/micropython,suda/micropython,noahchense/micropython,xhat/micropython,tobbad/micropython,tralamazza/micropython,tdautc19841202/micropython,mgyenik/micropython,MrSurly/micropython-esp32,ericsnowcurrently/micropython,jimkmc/micropython,adafruit/micropython,ahotam/micropython,lowRISC/micropython,bvernoux/micropython,selste/micropython,toolmacher/micropython,supergis/micropython,SHA2017-badge/micropython-esp32,pfalcon/micropython,adafruit/circuitpython,matthewelse/micropython,xuxiaoxin/micropython,ruffy91/micropython,mianos/micropython,mpalomer/micropython,ganshun666/micropython,henriknelson/micropython,deshipu/micropython,ericsnowcurrently/micropython,ChuckM/micropython,xuxiaoxin/micropython,trezor/micropython,infinnovation/micropython,torwag/micropython,AriZuu/micropython,misterdanb/micropython,ganshun666/micropython,ryannathans/micropython,selste/micropython,lbattraw/micropython,puuu/micropython,jmarcelino/pycom-micropython,heisewangluo/micropython,selste/micropython,adafruit/circuitpython,noahchense/micropython,toolmacher/micropython,adafruit/circuitpython,skybird6672/micropython,blazewicz/micropython,drrk/micropython,hosaka/micropython,alex-march/micropython,hosaka/micropython,mianos/micropython,PappaPeppar/micropython,infinnovation/micropython,tuc-osg/micropython,dxxb/micropython,dxxb/micropython,adamkh/micropython,ceramos/micropython,deshipu/micropython,vitiral/micropython,skybird6672/micropython,PappaPeppar/micropython,ernesto-g/micropython,ceramos/micropython,micropython/micropython-esp32,xyb/micropython,HenrikSolver/micropython,praemdonck/micropython,alex-march/micropython,neilh10/micropython,Peetz0r/micropython-esp32,dxxb/micropython,adamkh/micropython,paul-xxx/micropython,puuu/micropython,kerneltask/micropython,redbear/micropython,utopiaprince/micropython,rubencabrera/micropython,pramasoul/micropython,mhoffma/micropython,xyb/micropython,adamkh/micropython,turbinenreiter/micropython,infinnovation/micropython,alex-march/micropython,neilh10/micropython,adafruit/micropython,praemdonck/micropython,infinnovation/micropython,dinau/micropython,cwyark/micropython,adafruit/circuitpython,Peetz0r/micropython-esp32,MrSurly/micropython-esp32,galenhz/micropython,drrk/micropython,dmazzella/micropython,mgyenik/micro
python,matthewelse/micropython,firstval/micropython,HenrikSolver/micropython,blmorris/micropython,mpalomer/micropython,ChuckM/micropython,noahwilliamsson/micropython,EcmaXp/micropython,danicampora/micropython,torwag/micropython,oopy/micropython,kostyll/micropython,orionrobots/micropython,warner83/micropython,misterdanb/micropython,mgyenik/micropython,feilongfl/micropython,dhylands/micropython,cloudformdesign/micropython,blazewicz/micropython,matthewelse/micropython,omtinez/micropython,ganshun666/micropython,mhoffma/micropython,turbinenreiter/micropython,suda/micropython,dhylands/micropython,MrSurly/micropython,alex-robbins/micropython,jimkmc/micropython,xuxiaoxin/micropython,Timmenem/micropython,jmarcelino/pycom-micropython,kostyll/micropython,adafruit/circuitpython,skybird6672/micropython,ahotam/micropython,dmazzella/micropython,cloudformdesign/micropython,omtinez/micropython,cwyark/micropython,cloudformdesign/micropython,jimkmc/micropython,alex-robbins/micropython,adafruit/micropython,lbattraw/micropython,pramasoul/micropython,suda/micropython,TDAbboud/micropython,HenrikSolver/micropython,mpalomer/micropython,mhoffma/micropython,heisewangluo/micropython,tobbad/micropython,pfalcon/micropython,emfcamp/micropython,omtinez/micropython,jimkmc/micropython,matthewelse/micropython,blazewicz/micropython,MrSurly/micropython-esp32,noahchense/micropython,stonegithubs/micropython,dinau/micropython,praemdonck/micropython,AriZuu/micropython,ericsnowcurrently/micropython,misterdanb/micropython,EcmaXp/micropython,xhat/micropython,mpalomer/micropython,oopy/micropython,vriera/micropython,heisewangluo/micropython,redbear/micropython,cnoviello/micropython,tdautc19841202/micropython,kerneltask/micropython,trezor/micropython,hiway/micropython,deshipu/micropython,tuc-osg/micropython,lowRISC/micropython,ericsnowcurrently/micropython,paul-xxx/micropython,orionrobots/micropython,orionrobots/micropython,henriknelson/micropython,vriera/micropython,dxxb/micropython,pramasoul/micropython,pfalcon/micropython,cnoviello/micropython,ganshun666/micropython,feilongfl/micropython,dxxb/micropython,ahotam/micropython,paul-xxx/micropython,henriknelson/micropython,warner83/micropython,hosaka/micropython,vitiral/micropython,jmarcelino/pycom-micropython,TDAbboud/micropython,drrk/micropython,chrisdearman/micropython,feilongfl/micropython,MrSurly/micropython-esp32,vriera/micropython,stonegithubs/micropython,torwag/micropython,cwyark/micropython,hosaka/micropython,bvernoux/micropython,swegener/micropython,supergis/micropython,Timmenem/micropython,mhoffma/micropython,noahwilliamsson/micropython,firstval/micropython,ahotam/micropython,mpalomer/micropython,dmazzella/micropython,ernesto-g/micropython,warner83/micropython,vitiral/micropython,alex-robbins/micropython,kerneltask/micropython,turbinenreiter/micropython,blazewicz/micropython,ryannathans/micropython,blmorris/micropython,ceramos/micropython,jlillest/micropython,adamkh/micropython,trezor/micropython,tuc-osg/micropython,toolmacher/micropython,firstval/micropython,misterdanb/micropython,tuc-osg/micropython,supergis/micropython,SHA2017-badge/micropython-esp32,alex-march/micropython,alex-robbins/micropython,tobbad/micropython,emfcamp/micropython,pozetroninc/micropython,tralamazza/micropython,hiway/micropython,jmarcelino/pycom-micropython,kostyll/micropython,SHA2017-badge/micropython-esp32,redbear/micropython,lbattraw/micropython,ryannathans/micropython,lowRISC/micropython,chrisdearman/micropython,infinnovation/micropython,pramasoul/micropython,oopy/micropython,stonegithubs/micropython
,galenhz/micropython,galenhz/micropython,xhat/micropython,vriera/micropython,slzatz/micropython,slzatz/micropython,MrSurly/micropython,warner83/micropython,adamkh/micropython,feilongfl/micropython,micropython/micropython-esp32,tralamazza/micropython,martinribelotta/micropython,TDAbboud/micropython,henriknelson/micropython,PappaPeppar/micropython,adafruit/circuitpython,ChuckM/micropython,hiway/micropython,swegener/micropython,turbinenreiter/micropython,dinau/micropython,suda/micropython,neilh10/micropython,skybird6672/micropython,ruffy91/micropython,tdautc19841202/micropython,drrk/micropython,danicampora/micropython,xhat/micropython,utopiaprince/micropython,omtinez/micropython,slzatz/micropython,lbattraw/micropython,chrisdearman/micropython,pozetroninc/micropython,jlillest/micropython,warner83/micropython,HenrikSolver/micropython,AriZuu/micropython,micropython/micropython-esp32,EcmaXp/micropython,ernesto-g/micropython,martinribelotta/micropython,PappaPeppar/micropython,alex-robbins/micropython,vitiral/micropython,skybird6672/micropython,pozetroninc/micropython,jlillest/micropython,lowRISC/micropython,deshipu/micropython,jlillest/micropython,puuu/micropython,matthewelse/micropython,stonegithubs/micropython,xhat/micropython,pozetroninc/micropython,dinau/micropython,chrisdearman/micropython,AriZuu/micropython,mgyenik/micropython,ChuckM/micropython,EcmaXp/micropython,tralamazza/micropython,galenhz/micropython,tobbad/micropython,alex-march/micropython,lbattraw/micropython,cwyark/micropython,rubencabrera/micropython,bvernoux/micropython,hiway/micropython,turbinenreiter/micropython,Peetz0r/micropython-esp32,ruffy91/micropython,MrSurly/micropython,jimkmc/micropython,noahwilliamsson/micropython,praemdonck/micropython,ChuckM/micropython,oopy/micropython,orionrobots/micropython,MrSurly/micropython,xuxiaoxin/micropython,danicampora/micropython,puuu/micropython,SHA2017-badge/micropython-esp32,deshipu/micropython,MrSurly/micropython-esp32,pfalcon/micropython,ruffy91/micropython,torwag/micropython,cloudformdesign/micropython,Timmenem/micropython,vitiral/micropython,noahwilliamsson/micropython,mianos/micropython,Peetz0r/micropython-esp32,kostyll/micropython,SHA2017-badge/micropython-esp32,swegener/micropython,hosaka/micropython,oopy/micropython,dmazzella/micropython,utopiaprince/micropython,utopiaprince/micropython,martinribelotta/micropython,drrk/micropython,torwag/micropython,tobbad/micropython,suda/micropython,orionrobots/micropython,pramasoul/micropython,matthewelse/micropython,TDAbboud/micropython,omtinez/micropython,emfcamp/micropython,blmorris/micropython,misterdanb/micropython,dinau/micropython,ernesto-g/micropython,blmorris/micropython,kostyll/micropython,praemdonck/micropython,neilh10/micropython,HenrikSolver/micropython,jlillest/micropython,ceramos/micropython,emfcamp/micropython,EcmaXp/micropython,micropython/micropython-esp32,TDAbboud/micropython,adafruit/micropython,firstval/micropython,swegener/micropython,trezor/micropython,ryannathans/micropython,hiway/micropython,redbear/micropython,ericsnowcurrently/micropython,mhoffma/micropython,pozetroninc/micropython,firstval/micropython,blazewicz/micropython,trezor/micropython,bvernoux/micropython,PappaPeppar/micropython,slzatz/micropython,mianos/micropython,adafruit/micropython,rubencabrera/micropython,henriknelson/micropython,dhylands/micropython,galenhz/micropython,heisewangluo/micropython,bvernoux/micropython,danicampora/micropython,dhylands/micropython,Peetz0r/micropython-esp32
|
tests: Add basic test for OrderedDict.
Mostly to have coverage of newly added code in map.c.
|
try:
from collections import OrderedDict
except ImportError:
try:
from _collections import OrderedDict
except ImportError:
print("SKIP")
import sys
sys.exit()
d = OrderedDict([(10, 20), ("b", 100), (1, 2)])
print(list(d.keys()))
print(list(d.values()))
del d["b"]
print(list(d.keys()))
print(list(d.values()))
|
<commit_before><commit_msg>tests: Add basic test for OrderedDict.
Mostly to have coverage of newly added code in map.c.<commit_after>
|
try:
from collections import OrderedDict
except ImportError:
try:
from _collections import OrderedDict
except ImportError:
print("SKIP")
import sys
sys.exit()
d = OrderedDict([(10, 20), ("b", 100), (1, 2)])
print(list(d.keys()))
print(list(d.values()))
del d["b"]
print(list(d.keys()))
print(list(d.values()))
|
tests: Add basic test for OrderedDict.
Mostly to have coverage of newly added code in map.c.
try:
from collections import OrderedDict
except ImportError:
try:
from _collections import OrderedDict
except ImportError:
print("SKIP")
import sys
sys.exit()
d = OrderedDict([(10, 20), ("b", 100), (1, 2)])
print(list(d.keys()))
print(list(d.values()))
del d["b"]
print(list(d.keys()))
print(list(d.values()))
|
<commit_before><commit_msg>tests: Add basic test for OrderedDict.
Mostly to have coverage of newly added code in map.c.<commit_after>try:
from collections import OrderedDict
except ImportError:
try:
from _collections import OrderedDict
except ImportError:
print("SKIP")
import sys
sys.exit()
d = OrderedDict([(10, 20), ("b", 100), (1, 2)])
print(list(d.keys()))
print(list(d.values()))
del d["b"]
print(list(d.keys()))
print(list(d.values()))
|
|
645efb8ffcc3c9a3e41db2619430ffcb7a6d570f
|
src/ggrc/migrations/versions/20160314140338_4fd36860d196_add_finished_date_to_request_and_.py
|
src/ggrc/migrations/versions/20160314140338_4fd36860d196_add_finished_date_to_request_and_.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
add finished date to request and assessment
Create Date: 2016-03-14 14:03:38.026877
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4fd36860d196'
down_revision = '39aec99639d5'
def upgrade_table(table):
"""Add columns finished_date and verified_date and populate them."""
op.add_column(table,
sa.Column('finished_date', sa.DateTime(), nullable=True))
op.add_column(table,
sa.Column('verified_date', sa.DateTime(), nullable=True))
op.execute("""
UPDATE {}
SET finished_date = updated_at
WHERE status in ("Finished", "Verified", "Final")
""".format(table))
op.execute("""
UPDATE {}
SET verified_date = updated_at, status = "Final"
WHERE status = "Verified"
""".format(table))
def upgrade():
upgrade_table('requests')
upgrade_table('assessments')
def downgrade():
"""Remove verified_date and finished_date columns."""
op.drop_column('assessments', 'verified_date')
op.drop_column('assessments', 'finished_date')
op.drop_column('requests', 'verified_date')
op.drop_column('requests', 'finished_date')
|
Migrate Req/Ass to have verified/finished date
|
Migrate Req/Ass to have verified/finished date
|
Python
|
apache-2.0
|
andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core
|
Migrate Req/Ass to have verified/finished date
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
add finished date to request and assessment
Create Date: 2016-03-14 14:03:38.026877
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4fd36860d196'
down_revision = '39aec99639d5'
def upgrade_table(table):
"""Add columns finished_date and verified_date and populate them."""
op.add_column(table,
sa.Column('finished_date', sa.DateTime(), nullable=True))
op.add_column(table,
sa.Column('verified_date', sa.DateTime(), nullable=True))
op.execute("""
UPDATE {}
SET finished_date = updated_at
WHERE status in ("Finished", "Verified", "Final")
""".format(table))
op.execute("""
UPDATE {}
SET verified_date = updated_at, status = "Final"
WHERE status = "Verified"
""".format(table))
def upgrade():
upgrade_table('requests')
upgrade_table('assessments')
def downgrade():
"""Remove verified_date and finished_date columns."""
op.drop_column('assessments', 'verified_date')
op.drop_column('assessments', 'finished_date')
op.drop_column('requests', 'verified_date')
op.drop_column('requests', 'finished_date')
|
<commit_before><commit_msg>Migrate Req/Ass to have verified/finished date<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
add finished date to request and assessment
Create Date: 2016-03-14 14:03:38.026877
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4fd36860d196'
down_revision = '39aec99639d5'
def upgrade_table(table):
"""Add columns finished_date and verified_date and populate them."""
op.add_column(table,
sa.Column('finished_date', sa.DateTime(), nullable=True))
op.add_column(table,
sa.Column('verified_date', sa.DateTime(), nullable=True))
op.execute("""
UPDATE {}
SET finished_date = updated_at
WHERE status in ("Finished", "Verified", "Final")
""".format(table))
op.execute("""
UPDATE {}
SET verified_date = updated_at, status = "Final"
WHERE status = "Verified"
""".format(table))
def upgrade():
upgrade_table('requests')
upgrade_table('assessments')
def downgrade():
"""Remove verified_date and finished_date columns."""
op.drop_column('assessments', 'verified_date')
op.drop_column('assessments', 'finished_date')
op.drop_column('requests', 'verified_date')
op.drop_column('requests', 'finished_date')
|
Migrate Req/Ass to have verified/finished date# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
add finished date to request and assessment
Create Date: 2016-03-14 14:03:38.026877
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4fd36860d196'
down_revision = '39aec99639d5'
def upgrade_table(table):
"""Add columns finished_date and verified_date and populate them."""
op.add_column(table,
sa.Column('finished_date', sa.DateTime(), nullable=True))
op.add_column(table,
sa.Column('verified_date', sa.DateTime(), nullable=True))
op.execute("""
UPDATE {}
SET finished_date = updated_at
WHERE status in ("Finished", "Verified", "Final")
""".format(table))
op.execute("""
UPDATE {}
SET verified_date = updated_at, status = "Final"
WHERE status = "Verified"
""".format(table))
def upgrade():
upgrade_table('requests')
upgrade_table('assessments')
def downgrade():
"""Remove verified_date and finished_date columns."""
op.drop_column('assessments', 'verified_date')
op.drop_column('assessments', 'finished_date')
op.drop_column('requests', 'verified_date')
op.drop_column('requests', 'finished_date')
|
<commit_before><commit_msg>Migrate Req/Ass to have verified/finished date<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: andraz@reciprocitylabs.com
# Maintained By: andraz@reciprocitylabs.com
"""
add finished date to request and assessment
Create Date: 2016-03-14 14:03:38.026877
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '4fd36860d196'
down_revision = '39aec99639d5'
def upgrade_table(table):
"""Add columns finished_date and verified_date and populate them."""
op.add_column(table,
sa.Column('finished_date', sa.DateTime(), nullable=True))
op.add_column(table,
sa.Column('verified_date', sa.DateTime(), nullable=True))
op.execute("""
UPDATE {}
SET finished_date = updated_at
WHERE status in ("Finished", "Verified", "Final")
""".format(table))
op.execute("""
UPDATE {}
SET verified_date = updated_at, status = "Final"
WHERE status = "Verified"
""".format(table))
def upgrade():
upgrade_table('requests')
upgrade_table('assessments')
def downgrade():
"""Remove verified_date and finished_date columns."""
op.drop_column('assessments', 'verified_date')
op.drop_column('assessments', 'finished_date')
op.drop_column('requests', 'verified_date')
op.drop_column('requests', 'finished_date')
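As a rough usage sketch (not part of the change itself, and assuming a standard Alembic setup with an alembic.ini next to the migration scripts), this revision could be applied and rolled back programmatically like so:

from alembic import command
from alembic.config import Config

cfg = Config('alembic.ini')             # path to the project's Alembic config (assumed)
command.upgrade(cfg, '4fd36860d196')    # runs upgrade(): adds finished_date/verified_date and backfills them
command.downgrade(cfg, '39aec99639d5')  # runs downgrade(): drops the columns again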
|
|
37baa669ed1e00fabddd33478fa75f4047075ce3
|
cs473vision/ObjectDetector.py
|
cs473vision/ObjectDetector.py
|
'''
Created on Feb 28, 2014
@author: Vance Zuo
'''
import numpy
import cv2
class ObjectDetector(object):
'''
classdocs
'''
def __init__(self, params):
'''
Constructor
'''
self.bg_img = None
self.fg_img = None
return
def load_image(self, bg_path, fg_path):
self.bg_img = cv2.imread(bg_path)
self.fg_img = cv2.imread(fg_path)
return True
def subtract_background(self):
# Take simple difference
        naive = cv2.absdiff(self.fg_img, self.bg_img)
cv2.imwrite("naive.png", naive)
# MOG Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG.png", fg_mask)
# MOG2 Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG2()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG2.png", fg_mask)
return
|
Create Python object detection script.
|
Create Python object detection script.
|
Python
|
mit
|
vancezuo/cs473-vision
|
Create Python object detection script.
|
'''
Created on Feb 28, 2014
@author: Vance Zuo
'''
import numpy
import cv2
class ObjectDetector(object):
'''
classdocs
'''
def __init__(self, params):
'''
Constructor
'''
self.bg_img = None
self.fg_img = None
return
def load_image(self, bg_path, fg_path):
self.bg_img = cv2.imread(bg_path)
self.fg_img = cv2.imread(fg_path)
return True
def subtract_background(self):
# Take simple difference
        naive = cv2.absdiff(self.fg_img, self.bg_img)
cv2.imwrite("naive.png", naive)
# MOG Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG.png", fg_mask)
# MOG2 Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG2()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG2.png", fg_mask)
return
|
<commit_before><commit_msg>Create Python object detection script.<commit_after>
|
'''
Created on Feb 28, 2014
@author: Vance Zuo
'''
import numpy
import cv2
class ObjectDetector(object):
'''
classdocs
'''
def __init__(self, params):
'''
Constructor
'''
self.bg_img = None
self.fg_img = None
return
def load_image(self, bg_path, fg_path):
self.bg_img = cv2.imread(bg_path)
self.fg_img = cv2.imread(fg_path)
return True
def subtract_background(self):
# Take simple difference
        naive = cv2.absdiff(self.fg_img, self.bg_img)
cv2.imwrite("naive.png", naive)
# MOG Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG.png", fg_mask)
# MOG2 Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG2()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG2.png", fg_mask)
return
|
Create Python object detection script.'''
Created on Feb 28, 2014
@author: Vance Zuo
'''
import numpy
import cv2
class ObjectDetector(object):
'''
classdocs
'''
def __init__(self, params):
'''
Constructor
'''
self.bg_img = None
self.fg_img = None
return
def load_image(self, bg_path, fg_path):
self.bg_img = cv2.imread(bg_path)
self.fg_img = cv2.imread(fg_path)
return True
def subtract_background(self):
# Take simple difference
        naive = cv2.absdiff(self.fg_img, self.bg_img)
cv2.imwrite("naive.png", naive)
# MOG Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG.png", fg_mask)
# MOG2 Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG2()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG2.png", fg_mask)
return
|
<commit_before><commit_msg>Create Python object detection script.<commit_after>'''
Created on Feb 28, 2014
@author: Vance Zuo
'''
import numpy
import cv2
class ObjectDetector(object):
'''
classdocs
'''
def __init__(self, params):
'''
Constructor
'''
self.bg_img = None
self.fg_img = None
return
def load_image(self, bg_path, fg_path):
self.bg_img = cv2.imread(bg_path)
self.fg_img = cv2.imread(fg_path)
return True
def subtract_background(self):
# Take simple difference
        naive = cv2.absdiff(self.fg_img, self.bg_img)
cv2.imwrite("naive.png", naive)
# MOG Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG.png", fg_mask)
# MOG2 Subtraction
bg_subtractor = cv2.BackgroundSubtractorMOG2()
bg_mask = bg_subtractor.apply(self.bg_img)
fg_mask = bg_subtractor.apply(self.fg_img)
cv2.imwrite("MOG2.png", fg_mask)
return
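A minimal usage sketch of the class above (the image paths are placeholders, and params is passed as None because the constructor ignores it):

if __name__ == '__main__':
    detector = ObjectDetector(None)
    detector.load_image('background.png', 'foreground.png')  # hypothetical image paths
    detector.subtract_background()  # writes naive.png, MOG.png and MOG2.png to the working directory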
|
|
94d40dfcf574d61df7def99a43d5b9fa0c75e244
|
py/queue-reconstruction-by-height.py
|
py/queue-reconstruction-by-height.py
|
from collections import defaultdict
class Solution(object):
def insert(self, now, p, front):
lsize = 0 if now.left is None else now.left.val[1]
if front <= lsize:
if now.left is None:
now.left = TreeNode((p, 1))
else:
self.insert(now.left, p, front)
else:
if now.right is None:
now.right = TreeNode((p, 1))
else:
self.insert(now.right, p, front - lsize - 1)
now.val = (now.val[0], now.val[1] + 1)
def inOrder(self, cur):
if cur:
for x in self.inOrder(cur.left):
yield x
yield cur.val[0]
for x in self.inOrder(cur.right):
yield x
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
if not people:
return people
people.sort(key=lambda x:(-x[0], x[1]))
root = TreeNode((people[0], 1))
for p in people[1:]:
self.insert(root, p, p[1])
return list(self.inOrder(root))
|
Add py solution for 406. Queue Reconstruction by Height
|
Add py solution for 406. Queue Reconstruction by Height
406. Queue Reconstruction by Height: https://leetcode.com/problems/queue-reconstruction-by-height/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 406. Queue Reconstruction by Height
406. Queue Reconstruction by Height: https://leetcode.com/problems/queue-reconstruction-by-height/
|
from collections import defaultdict
class Solution(object):
def insert(self, now, p, front):
lsize = 0 if now.left is None else now.left.val[1]
if front <= lsize:
if now.left is None:
now.left = TreeNode((p, 1))
else:
self.insert(now.left, p, front)
else:
if now.right is None:
now.right = TreeNode((p, 1))
else:
self.insert(now.right, p, front - lsize - 1)
now.val = (now.val[0], now.val[1] + 1)
def inOrder(self, cur):
if cur:
for x in self.inOrder(cur.left):
yield x
yield cur.val[0]
for x in self.inOrder(cur.right):
yield x
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
if not people:
return people
people.sort(key=lambda x:(-x[0], x[1]))
root = TreeNode((people[0], 1))
for p in people[1:]:
self.insert(root, p, p[1])
return list(self.inOrder(root))
|
<commit_before><commit_msg>Add py solution for 406. Queue Reconstruction by Height
406. Queue Reconstruction by Height: https://leetcode.com/problems/queue-reconstruction-by-height/<commit_after>
|
from collections import defaultdict
class Solution(object):
def insert(self, now, p, front):
lsize = 0 if now.left is None else now.left.val[1]
if front <= lsize:
if now.left is None:
now.left = TreeNode((p, 1))
else:
self.insert(now.left, p, front)
else:
if now.right is None:
now.right = TreeNode((p, 1))
else:
self.insert(now.right, p, front - lsize - 1)
now.val = (now.val[0], now.val[1] + 1)
def inOrder(self, cur):
if cur:
for x in self.inOrder(cur.left):
yield x
yield cur.val[0]
for x in self.inOrder(cur.right):
yield x
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
if not people:
return people
people.sort(key=lambda x:(-x[0], x[1]))
root = TreeNode((people[0], 1))
for p in people[1:]:
self.insert(root, p, p[1])
return list(self.inOrder(root))
|
Add py solution for 406. Queue Reconstruction by Height
406. Queue Reconstruction by Height: https://leetcode.com/problems/queue-reconstruction-by-height/from collections import defaultdict
class Solution(object):
def insert(self, now, p, front):
lsize = 0 if now.left is None else now.left.val[1]
if front <= lsize:
if now.left is None:
now.left = TreeNode((p, 1))
else:
self.insert(now.left, p, front)
else:
if now.right is None:
now.right = TreeNode((p, 1))
else:
self.insert(now.right, p, front - lsize - 1)
now.val = (now.val[0], now.val[1] + 1)
def inOrder(self, cur):
if cur:
for x in self.inOrder(cur.left):
yield x
yield cur.val[0]
for x in self.inOrder(cur.right):
yield x
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
if not people:
return people
people.sort(key=lambda x:(-x[0], x[1]))
root = TreeNode((people[0], 1))
for p in people[1:]:
self.insert(root, p, p[1])
return list(self.inOrder(root))
|
<commit_before><commit_msg>Add py solution for 406. Queue Reconstruction by Height
406. Queue Reconstruction by Height: https://leetcode.com/problems/queue-reconstruction-by-height/<commit_after>from collections import defaultdict
class Solution(object):
def insert(self, now, p, front):
lsize = 0 if now.left is None else now.left.val[1]
if front <= lsize:
if now.left is None:
now.left = TreeNode((p, 1))
else:
self.insert(now.left, p, front)
else:
if now.right is None:
now.right = TreeNode((p, 1))
else:
self.insert(now.right, p, front - lsize - 1)
now.val = (now.val[0], now.val[1] + 1)
def inOrder(self, cur):
if cur:
for x in self.inOrder(cur.left):
yield x
yield cur.val[0]
for x in self.inOrder(cur.right):
yield x
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
if not people:
return people
people.sort(key=lambda x:(-x[0], x[1]))
root = TreeNode((people[0], 1))
for p in people[1:]:
self.insert(root, p, p[1])
return list(self.inOrder(root))
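Note that the snippet relies on a TreeNode class that is not defined in the file; based on how it is used, a minimal order-statistics node would look roughly like this:

class TreeNode(object):
    def __init__(self, val):
        self.val = val        # a (person, subtree_size) tuple
        self.left = None
        self.right = None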
|
|
76ce9117ed92a743734cd5ba7e209617a7664ad1
|
benchmarks/bench_gala.py
|
benchmarks/bench_gala.py
|
import os
from gala import imio, features, agglo, classify
rundir = os.path.dirname(__file__)
dd = os.path.abspath(os.path.join(rundir, '../tests/example-data'))
em3d = features.default.paper_em()
def setup_trdata():
wstr = imio.read_h5_stack(os.path.join(dd, 'train-ws.lzf.h5'))
prtr = imio.read_h5_stack(os.path.join(dd, 'train-p1.lzf.h5'))
gttr = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
return wstr, prtr, gttr
def setup_tsdata():
wsts = imio.read_h5_stack(os.path.join(dd, 'test-ws.lzf.h5'))
prts = imio.read_h5_stack(os.path.join(dd, 'test-p1.lzf.h5'))
gtts = imio.read_h5_stack(os.path.join(dd, 'test-gt.lzf.h5'))
return wsts, prts, gtts
def setup_trgraph():
ws, pr, ts = setup_trdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_tsgraph():
ws, pr, ts = setup_tsdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_trexamples():
gt = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
g = setup_trgraph()
(X, y, w, e), _ = g.learn_agglomerate(gt, em3d, min_num_epochs=5)
y = y[:, 0]
return X, y
def setup_classifier():
X, y = setup_trexamples()
rf = classify.DefaultRandomForest()
rf.fit(X, y)
return rf
def setup_policy():
rf = classify.DefaultRandomForest()
cl = agglo.classifier_probability(em3d, rf)
return cl
def setup_tsgraph_queue():
g = setup_tsgraph()
cl = setup_policy()
g.merge_priority_function = cl
g.rebuild_merge_queue()
return g
|
Add partial benchmarking file for gala
|
Add partial benchmarking file for gala
|
Python
|
bsd-3-clause
|
janelia-flyem/gala,jni/gala
|
Add partial benchmarking file for gala
|
import os
from gala import imio, features, agglo, classify
rundir = os.path.dirname(__file__)
dd = os.path.abspath(os.path.join(rundir, '../tests/example-data'))
em3d = features.default.paper_em()
def setup_trdata():
wstr = imio.read_h5_stack(os.path.join(dd, 'train-ws.lzf.h5'))
prtr = imio.read_h5_stack(os.path.join(dd, 'train-p1.lzf.h5'))
gttr = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
return wstr, prtr, gttr
def setup_tsdata():
wsts = imio.read_h5_stack(os.path.join(dd, 'test-ws.lzf.h5'))
prts = imio.read_h5_stack(os.path.join(dd, 'test-p1.lzf.h5'))
gtts = imio.read_h5_stack(os.path.join(dd, 'test-gt.lzf.h5'))
return wsts, prts, gtts
def setup_trgraph():
ws, pr, ts = setup_trdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_tsgraph():
ws, pr, ts = setup_tsdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_trexamples():
gt = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
g = setup_trgraph()
(X, y, w, e), _ = g.learn_agglomerate(gt, em3d, min_num_epochs=5)
y = y[:, 0]
return X, y
def setup_classifier():
X, y = setup_trexamples()
rf = classify.DefaultRandomForest()
rf.fit(X, y)
return rf
def setup_policy():
rf = classify.DefaultRandomForest()
cl = agglo.classifier_probability(em3d, rf)
return cl
def setup_tsgraph_queue():
g = setup_tsgraph()
cl = setup_policy()
g.merge_priority_function = cl
g.rebuild_merge_queue()
return g
|
<commit_before><commit_msg>Add partial benchmarking file for gala<commit_after>
|
import os
from gala import imio, features, agglo, classify
rundir = os.path.dirname(__file__)
dd = os.path.abspath(os.path.join(rundir, '../tests/example-data'))
em3d = features.default.paper_em()
def setup_trdata():
wstr = imio.read_h5_stack(os.path.join(dd, 'train-ws.lzf.h5'))
prtr = imio.read_h5_stack(os.path.join(dd, 'train-p1.lzf.h5'))
gttr = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
return wstr, prtr, gttr
def setup_tsdata():
wsts = imio.read_h5_stack(os.path.join(dd, 'test-ws.lzf.h5'))
prts = imio.read_h5_stack(os.path.join(dd, 'test-p1.lzf.h5'))
gtts = imio.read_h5_stack(os.path.join(dd, 'test-gt.lzf.h5'))
return wsts, prts, gtts
def setup_trgraph():
ws, pr, ts = setup_trdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_tsgraph():
ws, pr, ts = setup_tsdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_trexamples():
gt = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
g = setup_trgraph()
(X, y, w, e), _ = g.learn_agglomerate(gt, em3d, min_num_epochs=5)
y = y[:, 0]
return X, y
def setup_classifier():
X, y = setup_trexamples()
rf = classify.DefaultRandomForest()
rf.fit(X, y)
return rf
def setup_policy():
rf = classify.DefaultRandomForest()
cl = agglo.classifier_probability(em3d, rf)
return cl
def setup_tsgraph_queue():
g = setup_tsgraph()
cl = setup_policy()
g.merge_priority_function = cl
g.rebuild_merge_queue()
return g
|
Add partial benchmarking file for galaimport os
from gala import imio, features, agglo, classify
rundir = os.path.dirname(__file__)
dd = os.path.abspath(os.path.join(rundir, '../tests/example-data'))
em3d = features.default.paper_em()
def setup_trdata():
wstr = imio.read_h5_stack(os.path.join(dd, 'train-ws.lzf.h5'))
prtr = imio.read_h5_stack(os.path.join(dd, 'train-p1.lzf.h5'))
gttr = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
return wstr, prtr, gttr
def setup_tsdata():
wsts = imio.read_h5_stack(os.path.join(dd, 'test-ws.lzf.h5'))
prts = imio.read_h5_stack(os.path.join(dd, 'test-p1.lzf.h5'))
gtts = imio.read_h5_stack(os.path.join(dd, 'test-gt.lzf.h5'))
return wsts, prts, gtts
def setup_trgraph():
ws, pr, ts = setup_trdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_tsgraph():
ws, pr, ts = setup_tsdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_trexamples():
gt = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
g = setup_trgraph()
(X, y, w, e), _ = g.learn_agglomerate(gt, em3d, min_num_epochs=5)
y = y[:, 0]
return X, y
def setup_classifier():
X, y = setup_trexamples()
rf = classify.DefaultRandomForest()
rf.fit(X, y)
return rf
def setup_policy():
rf = classify.DefaultRandomForest()
cl = agglo.classifier_probability(em3d, rf)
return cl
def setup_tsgraph_queue():
g = setup_tsgraph()
cl = setup_policy()
g.merge_priority_function = cl
g.rebuild_merge_queue()
return g
|
<commit_before><commit_msg>Add partial benchmarking file for gala<commit_after>import os
from gala import imio, features, agglo, classify
rundir = os.path.dirname(__file__)
dd = os.path.abspath(os.path.join(rundir, '../tests/example-data'))
em3d = features.default.paper_em()
def setup_trdata():
wstr = imio.read_h5_stack(os.path.join(dd, 'train-ws.lzf.h5'))
prtr = imio.read_h5_stack(os.path.join(dd, 'train-p1.lzf.h5'))
gttr = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
return wstr, prtr, gttr
def setup_tsdata():
wsts = imio.read_h5_stack(os.path.join(dd, 'test-ws.lzf.h5'))
prts = imio.read_h5_stack(os.path.join(dd, 'test-p1.lzf.h5'))
gtts = imio.read_h5_stack(os.path.join(dd, 'test-gt.lzf.h5'))
return wsts, prts, gtts
def setup_trgraph():
ws, pr, ts = setup_trdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_tsgraph():
ws, pr, ts = setup_tsdata()
g = agglo.Rag(ws, pr, feature_manager=em3d)
return g
def setup_trexamples():
gt = imio.read_h5_stack(os.path.join(dd, 'train-gt.lzf.h5'))
g = setup_trgraph()
(X, y, w, e), _ = g.learn_agglomerate(gt, em3d, min_num_epochs=5)
y = y[:, 0]
return X, y
def setup_classifier():
X, y = setup_trexamples()
rf = classify.DefaultRandomForest()
rf.fit(X, y)
return rf
def setup_policy():
rf = classify.DefaultRandomForest()
cl = agglo.classifier_probability(em3d, rf)
return cl
def setup_tsgraph_queue():
g = setup_tsgraph()
cl = setup_policy()
g.merge_priority_function = cl
g.rebuild_merge_queue()
return g
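The file only defines setup helpers and contains no timing harness of its own; a rough manual timing sketch (illustrative only, not part of the benchmark file) might look like:

if __name__ == '__main__':
    import time
    start = time.time()
    graph = setup_tsgraph_queue()   # builds the test RAG and its merge queue
    print('setup_tsgraph_queue took %.1f s' % (time.time() - start))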
|
|
197fb6ec004c0bf47ec7e2fd25b75564a3ecf6c4
|
test/audit_logs/test_audit_log.py
|
test/audit_logs/test_audit_log.py
|
import datetime
import pytest
from girder import auditLogger
@pytest.fixture
def recordModel():
from girder.plugins.audit_logs import Record
yield Record()
@pytest.fixture
def resetLog():
yield auditLogger
for handler in auditLogger.handlers:
auditLogger.removeHandler(handler)
@pytest.mark.plugin('audit_logs')
def testAnonymousRestRequestLogging(server, recordModel, resetLog):
assert list(recordModel.find()) == []
server.request('/user/me')
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['ip'] == '127.0.0.1'
assert record['type'] == 'rest.request'
    assert record['userId'] is None
assert isinstance(record['when'], datetime.datetime)
assert record['details']['method'] == 'GET'
assert record['details']['status'] == 200
assert record['details']['route'] == ['user', 'me']
assert record['details']['params'] == {}
@pytest.mark.plugin('audit_logs')
def testFailedRestRequestLogging(server, recordModel, resetLog):
server.request('/folder', method='POST', params={
'name': 'Foo',
'parentId': 'foo'
})
records = recordModel.find()
assert records.count() == 1
details = records[0]['details']
assert details['method'] == 'POST'
assert details['status'] == 401
assert details['route'] == ['folder']
assert details['params'] == {
'name': 'Foo',
'parentId': 'foo'
}
@pytest.mark.plugin('audit_logs')
def testAuthenticatedRestRequestLogging(server, recordModel, resetLog, admin):
server.request('/user/me', user=admin)
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['userId'] == admin['_id']
|
Add tests for logging of rest requests
|
Add tests for logging of rest requests
|
Python
|
apache-2.0
|
RafaelPalomar/girder,manthey/girder,manthey/girder,Kitware/girder,girder/girder,kotfic/girder,jbeezley/girder,manthey/girder,Kitware/girder,data-exp-lab/girder,jbeezley/girder,data-exp-lab/girder,Kitware/girder,RafaelPalomar/girder,kotfic/girder,girder/girder,RafaelPalomar/girder,kotfic/girder,manthey/girder,RafaelPalomar/girder,data-exp-lab/girder,girder/girder,RafaelPalomar/girder,data-exp-lab/girder,girder/girder,kotfic/girder,data-exp-lab/girder,kotfic/girder,Kitware/girder,jbeezley/girder,jbeezley/girder
|
Add tests for logging of rest requests
|
import datetime
import pytest
from girder import auditLogger
@pytest.fixture
def recordModel():
from girder.plugins.audit_logs import Record
yield Record()
@pytest.fixture
def resetLog():
yield auditLogger
for handler in auditLogger.handlers:
auditLogger.removeHandler(handler)
@pytest.mark.plugin('audit_logs')
def testAnonymousRestRequestLogging(server, recordModel, resetLog):
assert list(recordModel.find()) == []
server.request('/user/me')
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['ip'] == '127.0.0.1'
assert record['type'] == 'rest.request'
    assert record['userId'] is None
assert isinstance(record['when'], datetime.datetime)
assert record['details']['method'] == 'GET'
assert record['details']['status'] == 200
assert record['details']['route'] == ['user', 'me']
assert record['details']['params'] == {}
@pytest.mark.plugin('audit_logs')
def testFailedRestRequestLogging(server, recordModel, resetLog):
server.request('/folder', method='POST', params={
'name': 'Foo',
'parentId': 'foo'
})
records = recordModel.find()
assert records.count() == 1
details = records[0]['details']
assert details['method'] == 'POST'
assert details['status'] == 401
assert details['route'] == ['folder']
assert details['params'] == {
'name': 'Foo',
'parentId': 'foo'
}
@pytest.mark.plugin('audit_logs')
def testAuthenticatedRestRequestLogging(server, recordModel, resetLog, admin):
server.request('/user/me', user=admin)
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['userId'] == admin['_id']
|
<commit_before><commit_msg>Add tests for logging of rest requests<commit_after>
|
import datetime
import pytest
from girder import auditLogger
@pytest.fixture
def recordModel():
from girder.plugins.audit_logs import Record
yield Record()
@pytest.fixture
def resetLog():
yield auditLogger
for handler in auditLogger.handlers:
auditLogger.removeHandler(handler)
@pytest.mark.plugin('audit_logs')
def testAnonymousRestRequestLogging(server, recordModel, resetLog):
assert list(recordModel.find()) == []
server.request('/user/me')
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['ip'] == '127.0.0.1'
assert record['type'] == 'rest.request'
    assert record['userId'] is None
assert isinstance(record['when'], datetime.datetime)
assert record['details']['method'] == 'GET'
assert record['details']['status'] == 200
assert record['details']['route'] == ['user', 'me']
assert record['details']['params'] == {}
@pytest.mark.plugin('audit_logs')
def testFailedRestRequestLogging(server, recordModel, resetLog):
server.request('/folder', method='POST', params={
'name': 'Foo',
'parentId': 'foo'
})
records = recordModel.find()
assert records.count() == 1
details = records[0]['details']
assert details['method'] == 'POST'
assert details['status'] == 401
assert details['route'] == ['folder']
assert details['params'] == {
'name': 'Foo',
'parentId': 'foo'
}
@pytest.mark.plugin('audit_logs')
def testAuthenticatedRestRequestLogging(server, recordModel, resetLog, admin):
server.request('/user/me', user=admin)
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['userId'] == admin['_id']
|
Add tests for logging of rest requestsimport datetime
import pytest
from girder import auditLogger
@pytest.fixture
def recordModel():
from girder.plugins.audit_logs import Record
yield Record()
@pytest.fixture
def resetLog():
yield auditLogger
for handler in auditLogger.handlers:
auditLogger.removeHandler(handler)
@pytest.mark.plugin('audit_logs')
def testAnonymousRestRequestLogging(server, recordModel, resetLog):
assert list(recordModel.find()) == []
server.request('/user/me')
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['ip'] == '127.0.0.1'
assert record['type'] == 'rest.request'
    assert record['userId'] is None
assert isinstance(record['when'], datetime.datetime)
assert record['details']['method'] == 'GET'
assert record['details']['status'] == 200
assert record['details']['route'] == ['user', 'me']
assert record['details']['params'] == {}
@pytest.mark.plugin('audit_logs')
def testFailedRestRequestLogging(server, recordModel, resetLog):
server.request('/folder', method='POST', params={
'name': 'Foo',
'parentId': 'foo'
})
records = recordModel.find()
assert records.count() == 1
details = records[0]['details']
assert details['method'] == 'POST'
assert details['status'] == 401
assert details['route'] == ['folder']
assert details['params'] == {
'name': 'Foo',
'parentId': 'foo'
}
@pytest.mark.plugin('audit_logs')
def testAuthenticatedRestRequestLogging(server, recordModel, resetLog, admin):
server.request('/user/me', user=admin)
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['userId'] == admin['_id']
|
<commit_before><commit_msg>Add tests for logging of rest requests<commit_after>import datetime
import pytest
from girder import auditLogger
@pytest.fixture
def recordModel():
from girder.plugins.audit_logs import Record
yield Record()
@pytest.fixture
def resetLog():
yield auditLogger
for handler in auditLogger.handlers:
auditLogger.removeHandler(handler)
@pytest.mark.plugin('audit_logs')
def testAnonymousRestRequestLogging(server, recordModel, resetLog):
assert list(recordModel.find()) == []
server.request('/user/me')
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['ip'] == '127.0.0.1'
assert record['type'] == 'rest.request'
    assert record['userId'] is None
assert isinstance(record['when'], datetime.datetime)
assert record['details']['method'] == 'GET'
assert record['details']['status'] == 200
assert record['details']['route'] == ['user', 'me']
assert record['details']['params'] == {}
@pytest.mark.plugin('audit_logs')
def testFailedRestRequestLogging(server, recordModel, resetLog):
server.request('/folder', method='POST', params={
'name': 'Foo',
'parentId': 'foo'
})
records = recordModel.find()
assert records.count() == 1
details = records[0]['details']
assert details['method'] == 'POST'
assert details['status'] == 401
assert details['route'] == ['folder']
assert details['params'] == {
'name': 'Foo',
'parentId': 'foo'
}
@pytest.mark.plugin('audit_logs')
def testAuthenticatedRestRequestLogging(server, recordModel, resetLog, admin):
server.request('/user/me', user=admin)
records = recordModel.find()
assert records.count() == 1
record = records[0]
assert record['userId'] == admin['_id']
|
|
3ef1e39d476a8b3e41ff0b06dcd6f700c083682d
|
data_controller/abc.py
|
data_controller/abc.py
|
from typing import Dict, Optional
from data_controller.enums import Medium, Site
from utils.helpers import await_func
class DataController:
"""
An ABC for all classes that deals with database read write.
"""
__slots__ = ()
def get_identifier(self, query: str,
medium: Medium) -> Optional[Dict[Site, str]]:
"""
Get the identifier of a given search query.
:param query: the search query.
:param medium: the medium type.
:return: A dict of all identifiers for this search query for all sites,
None if nothing is found.
"""
raise NotImplementedError
def set_identifier(self, name: str, medium: Medium,
site: Site, identifier: str):
"""
Set the identifier for a given name.
:param name: the name.
:param medium: the medium type.
:param site: the site.
:param identifier: the identifier.
"""
raise NotImplementedError
def get_mal_title(self, id_: str, medium: Medium) -> Optional[str]:
"""
Get a MAL title by its id.
        :param id_: the MAL id.
:param medium: the medium type.
:return: The MAL title if it's found.
"""
raise NotImplementedError
def set_mal_title(self, id_: str, medium: Medium, title: str):
"""
Set the MAL title for a given id.
:param id_: the MAL id.
:param medium: The medium type.
:param title: The MAL title for the given id.
"""
raise NotImplementedError
def medium_data_by_id(self, id_: str, medium: Medium,
site: Site) -> Optional[dict]:
"""
Get data by id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:return: the data for that id if found.
"""
raise NotImplementedError
def set_medium_data(self, id_: str, medium: Medium, site: Site, data: dict):
"""
Set the data for a given id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:param data: the data for the id.
"""
raise NotImplementedError
async def get_medium_data(self, query: str,
medium: Medium, loop=None) -> Optional[dict]:
"""
Get the cached data for the given search query.
:param query: the search query.
:param medium: the medium type.
:param loop: the asyncio event loop, optional. If None is provided,
will use the default event loop.
        :return: the cached data, for all sites that have the data.
"""
id_dict = await await_func(
self.get_identifier, loop, query, medium
)
if not id_dict:
return
return {site: data for site, data in {
site: await await_func(self.medium_data_by_id, loop,
id_, medium, site)
for site, id_ in id_dict.items()}.items() if data}
|
Add an ABC for all sub classes of `DataController`
|
Add an ABC for all sub classes of `DataController`
|
Python
|
mit
|
MaT1g3R/Roboragi
|
Add an ABC for all sub classes of `DataController`
|
from typing import Dict, Optional
from data_controller.enums import Medium, Site
from utils.helpers import await_func
class DataController:
"""
An ABC for all classes that deals with database read write.
"""
__slots__ = ()
def get_identifier(self, query: str,
medium: Medium) -> Optional[Dict[Site, str]]:
"""
Get the identifier of a given search query.
:param query: the search query.
:param medium: the medium type.
:return: A dict of all identifiers for this search query for all sites,
None if nothing is found.
"""
raise NotImplementedError
def set_identifier(self, name: str, medium: Medium,
site: Site, identifier: str):
"""
Set the identifier for a given name.
:param name: the name.
:param medium: the medium type.
:param site: the site.
:param identifier: the identifier.
"""
raise NotImplementedError
def get_mal_title(self, id_: str, medium: Medium) -> Optional[str]:
"""
Get a MAL title by its id.
        :param id_: the MAL id.
:param medium: the medium type.
:return: The MAL title if it's found.
"""
raise NotImplementedError
def set_mal_title(self, id_: str, medium: Medium, title: str):
"""
Set the MAL title for a given id.
:param id_: the MAL id.
:param medium: The medium type.
:param title: The MAL title for the given id.
"""
raise NotImplementedError
def medium_data_by_id(self, id_: str, medium: Medium,
site: Site) -> Optional[dict]:
"""
Get data by id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:return: the data for that id if found.
"""
raise NotImplementedError
def set_medium_data(self, id_: str, medium: Medium, site: Site, data: dict):
"""
Set the data for a given id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:param data: the data for the id.
"""
raise NotImplementedError
async def get_medium_data(self, query: str,
medium: Medium, loop=None) -> Optional[dict]:
"""
Get the cached data for the given search query.
:param query: the search query.
:param medium: the medium type.
:param loop: the asyncio event loop, optional. If None is provided,
will use the default event loop.
        :return: the cached data, for all sites that have the data.
"""
id_dict = await await_func(
self.get_identifier, loop, query, medium
)
if not id_dict:
return
return {site: data for site, data in {
site: await await_func(self.medium_data_by_id, loop,
id_, medium, site)
for site, id_ in id_dict.items()}.items() if data}
|
<commit_before><commit_msg>Add an ABC for all sub classes of `DataController`<commit_after>
|
from typing import Dict, Optional
from data_controller.enums import Medium, Site
from utils.helpers import await_func
class DataController:
"""
An ABC for all classes that deals with database read write.
"""
__slots__ = ()
def get_identifier(self, query: str,
medium: Medium) -> Optional[Dict[Site, str]]:
"""
Get the identifier of a given search query.
:param query: the search query.
:param medium: the medium type.
:return: A dict of all identifiers for this search query for all sites,
None if nothing is found.
"""
raise NotImplementedError
def set_identifier(self, name: str, medium: Medium,
site: Site, identifier: str):
"""
Set the identifier for a given name.
:param name: the name.
:param medium: the medium type.
:param site: the site.
:param identifier: the identifier.
"""
raise NotImplementedError
def get_mal_title(self, id_: str, medium: Medium) -> Optional[str]:
"""
Get a MAL title by its id.
        :param id_: the MAL id.
:param medium: the medium type.
:return: The MAL title if it's found.
"""
raise NotImplementedError
def set_mal_title(self, id_: str, medium: Medium, title: str):
"""
Set the MAL title for a given id.
:param id_: the MAL id.
:param medium: The medium type.
:param title: The MAL title for the given id.
"""
raise NotImplementedError
def medium_data_by_id(self, id_: str, medium: Medium,
site: Site) -> Optional[dict]:
"""
Get data by id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:return: the data for that id if found.
"""
raise NotImplementedError
def set_medium_data(self, id_: str, medium: Medium, site: Site, data: dict):
"""
Set the data for a given id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:param data: the data for the id.
"""
raise NotImplementedError
async def get_medium_data(self, query: str,
medium: Medium, loop=None) -> Optional[dict]:
"""
Get the cached data for the given search query.
:param query: the search query.
:param medium: the medium type.
:param loop: the asyncio event loop, optional. If None is provided,
will use the default event loop.
        :return: the cached data, for all sites that have the data.
"""
id_dict = await await_func(
self.get_identifier, loop, query, medium
)
if not id_dict:
return
return {site: data for site, data in {
site: await await_func(self.medium_data_by_id, loop,
id_, medium, site)
for site, id_ in id_dict.items()}.items() if data}
|
Add an ABC for all sub classes of `DataController`from typing import Dict, Optional
from data_controller.enums import Medium, Site
from utils.helpers import await_func
class DataController:
"""
An ABC for all classes that deals with database read write.
"""
__slots__ = ()
def get_identifier(self, query: str,
medium: Medium) -> Optional[Dict[Site, str]]:
"""
Get the identifier of a given search query.
:param query: the search query.
:param medium: the medium type.
:return: A dict of all identifiers for this search query for all sites,
None if nothing is found.
"""
raise NotImplementedError
def set_identifier(self, name: str, medium: Medium,
site: Site, identifier: str):
"""
Set the identifier for a given name.
:param name: the name.
:param medium: the medium type.
:param site: the site.
:param identifier: the identifier.
"""
raise NotImplementedError
def get_mal_title(self, id_: str, medium: Medium) -> Optional[str]:
"""
Get a MAL title by its id.
        :param id_: the MAL id.
:param medium: the medium type.
:return: The MAL title if it's found.
"""
raise NotImplementedError
def set_mal_title(self, id_: str, medium: Medium, title: str):
"""
Set the MAL title for a given id.
:param id_: the MAL id.
:param medium: The medium type.
:param title: The MAL title for the given id.
"""
raise NotImplementedError
def medium_data_by_id(self, id_: str, medium: Medium,
site: Site) -> Optional[dict]:
"""
Get data by id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:return: the data for that id if found.
"""
raise NotImplementedError
def set_medium_data(self, id_: str, medium: Medium, site: Site, data: dict):
"""
Set the data for a given id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:param data: the data for the id.
"""
raise NotImplementedError
async def get_medium_data(self, query: str,
medium: Medium, loop=None) -> Optional[dict]:
"""
Get the cached data for the given search query.
:param query: the search query.
:param medium: the medium type.
:param loop: the asyncio event loop, optional. If None is provided,
will use the default event loop.
        :return: the cached data, for all sites that have the data.
"""
id_dict = await await_func(
self.get_identifier, loop, query, medium
)
if not id_dict:
return
return {site: data for site, data in {
site: await await_func(self.medium_data_by_id, loop,
id_, medium, site)
for site, id_ in id_dict.items()}.items() if data}
|
<commit_before><commit_msg>Add an ABC for all sub classes of `DataController`<commit_after>from typing import Dict, Optional
from data_controller.enums import Medium, Site
from utils.helpers import await_func
class DataController:
"""
An ABC for all classes that deals with database read write.
"""
__slots__ = ()
def get_identifier(self, query: str,
medium: Medium) -> Optional[Dict[Site, str]]:
"""
Get the identifier of a given search query.
:param query: the search query.
:param medium: the medium type.
:return: A dict of all identifiers for this search query for all sites,
None if nothing is found.
"""
raise NotImplementedError
def set_identifier(self, name: str, medium: Medium,
site: Site, identifier: str):
"""
Set the identifier for a given name.
:param name: the name.
:param medium: the medium type.
:param site: the site.
:param identifier: the identifier.
"""
raise NotImplementedError
def get_mal_title(self, id_: str, medium: Medium) -> Optional[str]:
"""
Get a MAL title by its id.
        :param id_: the MAL id.
:param medium: the medium type.
:return: The MAL title if it's found.
"""
raise NotImplementedError
def set_mal_title(self, id_: str, medium: Medium, title: str):
"""
Set the MAL title for a given id.
:param id_: the MAL id.
:param medium: The medium type.
:param title: The MAL title for the given id.
"""
raise NotImplementedError
def medium_data_by_id(self, id_: str, medium: Medium,
site: Site) -> Optional[dict]:
"""
Get data by id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:return: the data for that id if found.
"""
raise NotImplementedError
def set_medium_data(self, id_: str, medium: Medium, site: Site, data: dict):
"""
Set the data for a given id.
:param id_: the id.
:param medium: the medium type.
:param site: the site.
:param data: the data for the id.
"""
raise NotImplementedError
async def get_medium_data(self, query: str,
medium: Medium, loop=None) -> Optional[dict]:
"""
Get the cached data for the given search query.
:param query: the search query.
:param medium: the medium type.
:param loop: the asyncio event loop, optional. If None is provided,
will use the default event loop.
        :return: the cached data, for all sites that have the data.
"""
id_dict = await await_func(
self.get_identifier, loop, query, medium
)
if not id_dict:
return
return {site: data for site, data in {
site: await await_func(self.medium_data_by_id, loop,
id_, medium, site)
for site, id_ in id_dict.items()}.items() if data}
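A minimal in-memory subclass sketch, purely illustrative (real implementations back these methods with a database), covering the methods that get_medium_data relies on:

class DictDataController(DataController):
    def __init__(self):
        self._ids = {}    # (query, medium) -> {Site: identifier}
        self._data = {}   # (identifier, medium, site) -> data dict

    def get_identifier(self, query, medium):
        return self._ids.get((query, medium))

    def set_identifier(self, name, medium, site, identifier):
        self._ids.setdefault((name, medium), {})[site] = identifier

    def medium_data_by_id(self, id_, medium, site):
        return self._data.get((id_, medium, site))

    def set_medium_data(self, id_, medium, site, data):
        self._data[(id_, medium, site)] = data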
|
|
785a5767ee3482fddee37327b4bf3edeed94ff46
|
db/shootout_attempt.py
|
db/shootout_attempt.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.player import Player
from db.team import Team
class ShootoutAttempt(Base, SpecificEvent):
__tablename__ = 'shootout_attempts'
__autoload__ = True
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "goalie_team_id", "goalie_id",
"attempt_type", "shot_type", "miss_type", "distance", "on_goal",
"scored"
]
def __init__(self, event_id, data_dict):
self.shootout_attempt_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
if attr in ['scored', 'on_goal']:
setattr(self, attr, False)
else:
setattr(self, attr, None)
def __str__(self):
player = Player.find_by_id(self.player_id)
goalie = Player.find_by_id(self.goalie_id)
plr_team = Team.find_by_id(self.team_id)
goalie_team = Team.find_by_id(self.goalie_team_id)
if self.attempt_type == 'GOAL':
return "Shootout Goal: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
elif self.attempt_type == 'MISS':
return "Shootout Miss: %s (%s) %s, %d ft., %s vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
self.miss_type, goalie.name, goalie_team.abbr)
elif self.attempt_type == 'SHOT':
return "Shootout Shot: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
|
Add shootout attempt item definition
|
Add shootout attempt item definition
|
Python
|
mit
|
leaffan/pynhldb
|
Add shootout attempt item definition
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.player import Player
from db.team import Team
class ShootoutAttempt(Base, SpecificEvent):
__tablename__ = 'shootout_attempts'
__autoload__ = True
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "goalie_team_id", "goalie_id",
"attempt_type", "shot_type", "miss_type", "distance", "on_goal",
"scored"
]
def __init__(self, event_id, data_dict):
self.shootout_attempt_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
if attr in ['scored', 'on_goal']:
setattr(self, attr, False)
else:
setattr(self, attr, None)
def __str__(self):
player = Player.find_by_id(self.player_id)
goalie = Player.find_by_id(self.goalie_id)
plr_team = Team.find_by_id(self.team_id)
goalie_team = Team.find_by_id(self.goalie_team_id)
if self.attempt_type == 'GOAL':
return "Shootout Goal: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
elif self.attempt_type == 'MISS':
return "Shootout Miss: %s (%s) %s, %d ft., %s vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
self.miss_type, goalie.name, goalie_team.abbr)
elif self.attempt_type == 'SHOT':
return "Shootout Shot: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
|
<commit_before><commit_msg>Add shootout attempt item definition<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.player import Player
from db.team import Team
class ShootoutAttempt(Base, SpecificEvent):
__tablename__ = 'shootout_attempts'
__autoload__ = True
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "goalie_team_id", "goalie_id",
"attempt_type", "shot_type", "miss_type", "distance", "on_goal",
"scored"
]
def __init__(self, event_id, data_dict):
self.shootout_attempt_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
if attr in ['scored', 'on_goal']:
setattr(self, attr, False)
else:
setattr(self, attr, None)
def __str__(self):
player = Player.find_by_id(self.player_id)
goalie = Player.find_by_id(self.goalie_id)
plr_team = Team.find_by_id(self.team_id)
goalie_team = Team.find_by_id(self.goalie_team_id)
if self.attempt_type == 'GOAL':
return "Shootout Goal: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
elif self.attempt_type == 'MISS':
return "Shootout Miss: %s (%s) %s, %d ft., %s vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
self.miss_type, goalie.name, goalie_team.abbr)
elif self.attempt_type == 'SHOT':
return "Shootout Shot: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
|
Add shootout attempt item definition#!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.player import Player
from db.team import Team
class ShootoutAttempt(Base, SpecificEvent):
__tablename__ = 'shootout_attempts'
__autoload__ = True
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "goalie_team_id", "goalie_id",
"attempt_type", "shot_type", "miss_type", "distance", "on_goal",
"scored"
]
def __init__(self, event_id, data_dict):
self.shootout_attempt_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
if attr in ['scored', 'on_goal']:
setattr(self, attr, False)
else:
setattr(self, attr, None)
def __str__(self):
player = Player.find_by_id(self.player_id)
goalie = Player.find_by_id(self.goalie_id)
plr_team = Team.find_by_id(self.team_id)
goalie_team = Team.find_by_id(self.goalie_team_id)
if self.attempt_type == 'GOAL':
return "Shootout Goal: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
elif self.attempt_type == 'MISS':
return "Shootout Miss: %s (%s) %s, %d ft., %s vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
self.miss_type, goalie.name, goalie_team.abbr)
elif self.attempt_type == 'SHOT':
return "Shootout Shot: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
|
<commit_before><commit_msg>Add shootout attempt item definition<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import uuid
from db.common import Base
from db.specific_event import SpecificEvent
from db.player import Player
from db.team import Team
class ShootoutAttempt(Base, SpecificEvent):
__tablename__ = 'shootout_attempts'
__autoload__ = True
STANDARD_ATTRS = [
"team_id", "player_id", "zone", "goalie_team_id", "goalie_id",
"attempt_type", "shot_type", "miss_type", "distance", "on_goal",
"scored"
]
def __init__(self, event_id, data_dict):
self.shootout_attempt_id = uuid.uuid4().urn
self.event_id = event_id
for attr in self.STANDARD_ATTRS:
if attr in data_dict:
setattr(self, attr, data_dict[attr])
else:
if attr in ['scored', 'on_goal']:
setattr(self, attr, False)
else:
setattr(self, attr, None)
def __str__(self):
player = Player.find_by_id(self.player_id)
goalie = Player.find_by_id(self.goalie_id)
plr_team = Team.find_by_id(self.team_id)
goalie_team = Team.find_by_id(self.goalie_team_id)
if self.attempt_type == 'GOAL':
return "Shootout Goal: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
elif self.attempt_type == 'MISS':
return "Shootout Miss: %s (%s) %s, %d ft., %s vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
self.miss_type, goalie.name, goalie_team.abbr)
elif self.attempt_type == 'SHOT':
return "Shootout Shot: %s (%s) %s, %d ft. vs. %s (%s)" % (
player.name, plr_team.abbr, self.shot_type, self.distance,
goalie.name, goalie_team.abbr)
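An illustrative construction sketch (all field values are made up and the SQLAlchemy/database wiring the class normally needs is ignored here):

attempt = ShootoutAttempt(event_id=1, data_dict={
    'team_id': 1, 'player_id': 42, 'goalie_team_id': 2, 'goalie_id': 31,
    'attempt_type': 'GOAL', 'shot_type': 'Wrist', 'distance': 10, 'scored': True,
})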
|
|
125c75ea246c2d95f0addbb31b2d82dde588f21d
|
tests/test_kaggle_kernel_credentials.py
|
tests/test_kaggle_kernel_credentials.py
|
import unittest
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
class TestKaggleKernelCredentials(unittest.TestCase):
def test_default_target(self):
creds = KaggleKernelCredentials()
self.assertEqual(GcpTarget.BIGQUERY, creds.target)
|
Add a unit test for KaggleKernelCredentials.
|
Add a unit test for KaggleKernelCredentials.
|
Python
|
apache-2.0
|
Kaggle/docker-python,Kaggle/docker-python
|
Add a unit test for KaggleKernelCredentials.
|
import unittest
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
class TestKaggleKernelCredentials(unittest.TestCase):
def test_default_target(self):
creds = KaggleKernelCredentials()
self.assertEqual(GcpTarget.BIGQUERY, creds.target)
|
<commit_before><commit_msg>Add a unit test for KaggleKernelCredentials.<commit_after>
|
import unittest
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
class TestKaggleKernelCredentials(unittest.TestCase):
def test_default_target(self):
creds = KaggleKernelCredentials()
self.assertEqual(GcpTarget.BIGQUERY, creds.target)
|
Add a unit test for KaggleKernelCredentials.import unittest
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
class TestKaggleKernelCredentials(unittest.TestCase):
def test_default_target(self):
creds = KaggleKernelCredentials()
self.assertEqual(GcpTarget.BIGQUERY, creds.target)
|
<commit_before><commit_msg>Add a unit test for KaggleKernelCredentials.<commit_after>import unittest
from kaggle_secrets import GcpTarget
from kaggle_gcp import KaggleKernelCredentials
class TestKaggleKernelCredentials(unittest.TestCase):
def test_default_target(self):
creds = KaggleKernelCredentials()
self.assertEqual(GcpTarget.BIGQUERY, creds.target)
|
|
fbbaa3fc5b99eed88e039c232f129aaeab0a6f54
|
tests/test_table.py
|
tests/test_table.py
|
#!/usr/bin/env python3
import nose.tools as nose
from table import Table
def test_init_default():
"""should initialize table with required parameters and default values"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'left')
nose.assert_equal(table.title, None)
nose.assert_equal(table.header, [])
nose.assert_equal(table.rows, [])
def test_init_optional():
"""should initialize table with optional parameters if supplied"""
table = Table(num_cols=5, width=78, alignment='right', title='Cache')
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'right')
nose.assert_equal(table.title, 'Cache')
def test_get_separator():
"""should return the correct ASCII separator string"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.get_separator(), '-' * 78)
def test_str_title():
"""should correctly display title"""
table = Table(num_cols=5, width=12, title='Cache')
nose.assert_regexp_matches(
''.join(('Cache'.center(12), '\n', ('-' * 12))), str(table))
def test_str_no_title():
"""should not display title if not originally supplied"""
table = Table(num_cols=5, width=12)
nose.assert_equal(str(table).strip(), '')
class TestAlignment(object):
def _test_str_align(self, alignment, just):
table_width = 16
num_cols = 2
col_width = table_width // num_cols
table = Table(
num_cols=num_cols, width=table_width, alignment=alignment)
table.header = ['First', 'Last']
table.rows.append(['Bob', 'Smith'])
table.rows.append(['John', 'Earl'])
nose.assert_equal(str(table), '{}{}\n{}\n{}{}\n{}{}'.format(
just('First', col_width), just('Last', col_width),
'-' * table_width,
just('Bob', col_width), just('Smith', col_width),
just('John', col_width), just('Earl', col_width)))
def test_str_align_left(self):
"""should correctly display table when left-aligned"""
self._test_str_align(
alignment='left', just=str.ljust)
def test_str_align_center(self):
"""should correctly display table when center-aligned"""
self._test_str_align(
alignment='center', just=str.center)
def test_str_align_right(self):
"""should correctly display table when right-aligned"""
self._test_str_align(
alignment='right', just=str.rjust)
|
Bring table test coverage to 100%
|
Bring table test coverage to 100%
|
Python
|
mit
|
caleb531/cache-simulator
|
Bring table test coverage to 100%
|
#!/usr/bin/env python3
import nose.tools as nose
from table import Table
def test_init_default():
"""should initialize table with required parameters and default values"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'left')
nose.assert_equal(table.title, None)
nose.assert_equal(table.header, [])
nose.assert_equal(table.rows, [])
def test_init_optional():
"""should initialize table with optional parameters if supplied"""
table = Table(num_cols=5, width=78, alignment='right', title='Cache')
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'right')
nose.assert_equal(table.title, 'Cache')
def test_get_separator():
"""should return the correct ASCII separator string"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.get_separator(), '-' * 78)
def test_str_title():
"""should correctly display title"""
table = Table(num_cols=5, width=12, title='Cache')
nose.assert_regexp_matches(
''.join(('Cache'.center(12), '\n', ('-' * 12))), str(table))
def test_str_no_title():
"""should not display title if not originally supplied"""
table = Table(num_cols=5, width=12)
nose.assert_equal(str(table).strip(), '')
class TestAlignment(object):
def _test_str_align(self, alignment, just):
table_width = 16
num_cols = 2
col_width = table_width // num_cols
table = Table(
num_cols=num_cols, width=table_width, alignment=alignment)
table.header = ['First', 'Last']
table.rows.append(['Bob', 'Smith'])
table.rows.append(['John', 'Earl'])
nose.assert_equal(str(table), '{}{}\n{}\n{}{}\n{}{}'.format(
just('First', col_width), just('Last', col_width),
'-' * table_width,
just('Bob', col_width), just('Smith', col_width),
just('John', col_width), just('Earl', col_width)))
def test_str_align_left(self):
"""should correctly display table when left-aligned"""
self._test_str_align(
alignment='left', just=str.ljust)
def test_str_align_center(self):
"""should correctly display table when center-aligned"""
self._test_str_align(
alignment='center', just=str.center)
def test_str_align_right(self):
"""should correctly display table when right-aligned"""
self._test_str_align(
alignment='right', just=str.rjust)
|
<commit_before><commit_msg>Bring table test coverage to 100%<commit_after>
|
#!/usr/bin/env python3
import nose.tools as nose
from table import Table
def test_init_default():
"""should initialize table with required parameters and default values"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'left')
nose.assert_equal(table.title, None)
nose.assert_equal(table.header, [])
nose.assert_equal(table.rows, [])
def test_init_optional():
"""should initialize table with optional parameters if supplied"""
table = Table(num_cols=5, width=78, alignment='right', title='Cache')
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'right')
nose.assert_equal(table.title, 'Cache')
def test_get_separator():
"""should return the correct ASCII separator string"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.get_separator(), '-' * 78)
def test_str_title():
"""should correctly display title"""
table = Table(num_cols=5, width=12, title='Cache')
nose.assert_regexp_matches(
''.join(('Cache'.center(12), '\n', ('-' * 12))), str(table))
def test_str_no_title():
"""should not display title if not originally supplied"""
table = Table(num_cols=5, width=12)
nose.assert_equal(str(table).strip(), '')
class TestAlignment(object):
def _test_str_align(self, alignment, just):
table_width = 16
num_cols = 2
col_width = table_width // num_cols
table = Table(
num_cols=num_cols, width=table_width, alignment=alignment)
table.header = ['First', 'Last']
table.rows.append(['Bob', 'Smith'])
table.rows.append(['John', 'Earl'])
nose.assert_equal(str(table), '{}{}\n{}\n{}{}\n{}{}'.format(
just('First', col_width), just('Last', col_width),
'-' * table_width,
just('Bob', col_width), just('Smith', col_width),
just('John', col_width), just('Earl', col_width)))
def test_str_align_left(self):
"""should correctly display table when left-aligned"""
self._test_str_align(
alignment='left', just=str.ljust)
def test_str_align_center(self):
"""should correctly display table when center-aligned"""
self._test_str_align(
alignment='center', just=str.center)
def test_str_align_right(self):
"""should correctly display table when right-aligned"""
self._test_str_align(
alignment='right', just=str.rjust)
|
Bring table test coverage to 100%#!/usr/bin/env python3
import nose.tools as nose
from table import Table
def test_init_default():
"""should initialize table with required parameters and default values"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'left')
nose.assert_equal(table.title, None)
nose.assert_equal(table.header, [])
nose.assert_equal(table.rows, [])
def test_init_optional():
"""should initialize table with optional parameters if supplied"""
table = Table(num_cols=5, width=78, alignment='right', title='Cache')
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'right')
nose.assert_equal(table.title, 'Cache')
def test_get_separator():
"""should return the correct ASCII separator string"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.get_separator(), '-' * 78)
def test_str_title():
"""should correctly display title"""
table = Table(num_cols=5, width=12, title='Cache')
nose.assert_regexp_matches(
''.join(('Cache'.center(12), '\n', ('-' * 12))), str(table))
def test_str_no_title():
"""should not display title if not originally supplied"""
table = Table(num_cols=5, width=12)
nose.assert_equal(str(table).strip(), '')
class TestAlignment(object):
def _test_str_align(self, alignment, just):
table_width = 16
num_cols = 2
col_width = table_width // num_cols
table = Table(
num_cols=num_cols, width=table_width, alignment=alignment)
table.header = ['First', 'Last']
table.rows.append(['Bob', 'Smith'])
table.rows.append(['John', 'Earl'])
nose.assert_equal(str(table), '{}{}\n{}\n{}{}\n{}{}'.format(
just('First', col_width), just('Last', col_width),
'-' * table_width,
just('Bob', col_width), just('Smith', col_width),
just('John', col_width), just('Earl', col_width)))
def test_str_align_left(self):
"""should correctly display table when left-aligned"""
self._test_str_align(
alignment='left', just=str.ljust)
def test_str_align_center(self):
"""should correctly display table when center-aligned"""
self._test_str_align(
alignment='center', just=str.center)
def test_str_align_right(self):
"""should correctly display table when right-aligned"""
self._test_str_align(
alignment='right', just=str.rjust)
|
<commit_before><commit_msg>Bring table test coverage to 100%<commit_after>#!/usr/bin/env python3
import nose.tools as nose
from table import Table
def test_init_default():
"""should initialize table with required parameters and default values"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'left')
nose.assert_equal(table.title, None)
nose.assert_equal(table.header, [])
nose.assert_equal(table.rows, [])
def test_init_optional():
"""should initialize table with optional parameters if supplied"""
table = Table(num_cols=5, width=78, alignment='right', title='Cache')
nose.assert_equal(table.num_cols, 5)
nose.assert_equal(table.width, 78)
nose.assert_equal(table.alignment, 'right')
nose.assert_equal(table.title, 'Cache')
def test_get_separator():
"""should return the correct ASCII separator string"""
table = Table(num_cols=5, width=78)
nose.assert_equal(table.get_separator(), '-' * 78)
def test_str_title():
"""should correctly display title"""
table = Table(num_cols=5, width=12, title='Cache')
nose.assert_regexp_matches(
''.join(('Cache'.center(12), '\n', ('-' * 12))), str(table))
def test_str_no_title():
"""should not display title if not originally supplied"""
table = Table(num_cols=5, width=12)
nose.assert_equal(str(table).strip(), '')
class TestAlignment(object):
def _test_str_align(self, alignment, just):
table_width = 16
num_cols = 2
col_width = table_width // num_cols
table = Table(
num_cols=num_cols, width=table_width, alignment=alignment)
table.header = ['First', 'Last']
table.rows.append(['Bob', 'Smith'])
table.rows.append(['John', 'Earl'])
nose.assert_equal(str(table), '{}{}\n{}\n{}{}\n{}{}'.format(
just('First', col_width), just('Last', col_width),
'-' * table_width,
just('Bob', col_width), just('Smith', col_width),
just('John', col_width), just('Earl', col_width)))
def test_str_align_left(self):
"""should correctly display table when left-aligned"""
self._test_str_align(
alignment='left', just=str.ljust)
def test_str_align_center(self):
"""should correctly display table when center-aligned"""
self._test_str_align(
alignment='center', just=str.center)
def test_str_align_right(self):
"""should correctly display table when right-aligned"""
self._test_str_align(
alignment='right', just=str.rjust)
|
|
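Editor's note: the alignment tests above build their expected strings by hand from str.ljust, str.center and str.rjust. A small standalone sketch of the left-aligned case is below; it only reproduces the expected output and does not import the project's Table class.

# Sketch only: how the expected 2-column, 16-character-wide output is assembled.
table_width = 16
num_cols = 2
col_width = table_width // num_cols

header = ['First', 'Last']
rows = [['Bob', 'Smith'], ['John', 'Earl']]

lines = [''.join(cell.ljust(col_width) for cell in header), '-' * table_width]
lines += [''.join(cell.ljust(col_width) for cell in row) for row in rows]
print('\n'.join(lines))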
e56a9781f4e7e8042c29c9e54966659c87c5c05c
|
tests/test_views.py
|
tests/test_views.py
|
import pytest
from django.core.urlresolvers import reverse
def test_site_view(client):
response = client.get(reverse('site-home'))
assert response.status_code == 200
assert 'landings/home_site.html' in [template.name for template in response.templates]
|
Add a test for our more general views.
|
Add a test for our more general views.
|
Python
|
apache-2.0
|
hello-base/web,hello-base/web,hello-base/web,hello-base/web
|
Add a test for our more general views.
|
import pytest
from django.core.urlresolvers import reverse
def test_site_view(client):
response = client.get(reverse('site-home'))
assert response.status_code == 200
assert 'landings/home_site.html' in [template.name for template in response.templates]
|
<commit_before><commit_msg>Add a test for our more general views.<commit_after>
|
import pytest
from django.core.urlresolvers import reverse
def test_site_view(client):
response = client.get(reverse('site-home'))
assert response.status_code == 200
assert 'landings/home_site.html' in [template.name for template in response.templates]
|
Add a test for our more general views.import pytest
from django.core.urlresolvers import reverse
def test_site_view(client):
response = client.get(reverse('site-home'))
assert response.status_code == 200
assert 'landings/home_site.html' in [template.name for template in response.templates]
|
<commit_before><commit_msg>Add a test for our more general views.<commit_after>import pytest
from django.core.urlresolvers import reverse
def test_site_view(client):
response = client.get(reverse('site-home'))
assert response.status_code == 200
assert 'landings/home_site.html' in [template.name for template in response.templates]
|
|
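Editor's note: django.core.urlresolvers was removed in Django 2.0 (reverse now lives in django.urls), so the test as written only runs on older Django. A version-tolerant import sketch, should the suite ever need to run on both:

# Sketch only: fall back to the legacy location on old Django versions.
try:
    from django.urls import reverse               # Django >= 2.0
except ImportError:
    from django.core.urlresolvers import reverse  # older Django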
1043acdfe324e02bc2a8629ef8a47d6ae9befd7c
|
src/aiy/_drivers/_ecc608_pubkey.py
|
src/aiy/_drivers/_ecc608_pubkey.py
|
#!/usr/bin/env python3
import base64
import ctypes
import sys
CRYPTO_ADDRESS_DICT = {
'Vision Bonnet': 0x60,
'Voice Bonnet': 0x62,
}
class AtcaIfaceCfgLong(ctypes.Structure):
_fields_ = (
('iface_type', ctypes.c_ulong),
('devtype', ctypes.c_ulong),
('slave_address', ctypes.c_ubyte),
('bus', ctypes.c_ubyte),
('baud', ctypes.c_ulong)
)
def main():
try:
cryptolib = ctypes.cdll.LoadLibrary('libcryptoauth.so')
except Exception:
print('Unable to load crypto library, SW authentication required')
sys.exit()
try:
for name, addr in CRYPTO_ADDRESS_DICT.items():
cfg = AtcaIfaceCfgLong.in_dll(cryptolib, 'cfg_ateccx08a_i2c_default')
cfg.slave_address = addr << 1
cfg.bus = 1 # ARM I2C
cfg.devtype = 3 # ECC608
status = cryptolib.atcab_init(cryptolib.cfg_ateccx08a_i2c_default)
if status == 0:
# Found a valid crypto chip.
break
else:
cryptolib.atcab_release()
if status:
raise Exception
serial = ctypes.create_string_buffer(9)
status = cryptolib.atcab_read_serial_number(ctypes.byref(serial))
if status:
raise Exception
serial = ''.join('%02X' % x for x in serial.raw)
print('Serial Number: %s\n' % serial, file=sys.stderr)
pubkey = ctypes.create_string_buffer(64)
status = cryptolib.atcab_genkey_base(0, 0, None, ctypes.byref(pubkey))
if status:
raise Exception
public_key = bytearray.fromhex(
'3059301306072A8648CE3D020106082A8648CE3D03010703420004') + bytes(pubkey.raw)
public_key = '-----BEGIN PUBLIC KEY-----\n' + \
base64.b64encode(public_key).decode('ascii') + '\n-----END PUBLIC KEY-----'
print(public_key)
status = cryptolib.atcab_release()
if status:
raise Exception
except Exception:
print('Unable to communicate with crypto, SW authentication required')
if __name__ == '__main__':
main()
|
Add python script to get ECC608 Public Key
|
Add python script to get ECC608 Public Key
Change-Id: I0826503e9b8d1bb5de3f83b82d0083754a7e2b8f
|
Python
|
apache-2.0
|
google/aiyprojects-raspbian,google/aiyprojects-raspbian,google/aiyprojects-raspbian,google/aiyprojects-raspbian,google/aiyprojects-raspbian
|
Add python script to get ECC608 Public Key
Change-Id: I0826503e9b8d1bb5de3f83b82d0083754a7e2b8f
|
#!/usr/bin/env python3
import base64
import ctypes
import sys
CRYPTO_ADDRESS_DICT = {
'Vision Bonnet': 0x60,
'Voice Bonnet': 0x62,
}
class AtcaIfaceCfgLong(ctypes.Structure):
_fields_ = (
('iface_type', ctypes.c_ulong),
('devtype', ctypes.c_ulong),
('slave_address', ctypes.c_ubyte),
('bus', ctypes.c_ubyte),
('baud', ctypes.c_ulong)
)
def main():
try:
cryptolib = ctypes.cdll.LoadLibrary('libcryptoauth.so')
except Exception:
print('Unable to load crypto library, SW authentication required')
sys.exit()
try:
for name, addr in CRYPTO_ADDRESS_DICT.items():
cfg = AtcaIfaceCfgLong.in_dll(cryptolib, 'cfg_ateccx08a_i2c_default')
cfg.slave_address = addr << 1
cfg.bus = 1 # ARM I2C
cfg.devtype = 3 # ECC608
status = cryptolib.atcab_init(cryptolib.cfg_ateccx08a_i2c_default)
if status == 0:
# Found a valid crypto chip.
break
else:
cryptolib.atcab_release()
if status:
raise Exception
serial = ctypes.create_string_buffer(9)
status = cryptolib.atcab_read_serial_number(ctypes.byref(serial))
if status:
raise Exception
serial = ''.join('%02X' % x for x in serial.raw)
print('Serial Number: %s\n' % serial, file=sys.stderr)
pubkey = ctypes.create_string_buffer(64)
status = cryptolib.atcab_genkey_base(0, 0, None, ctypes.byref(pubkey))
if status:
raise Exception
public_key = bytearray.fromhex(
'3059301306072A8648CE3D020106082A8648CE3D03010703420004') + bytes(pubkey.raw)
public_key = '-----BEGIN PUBLIC KEY-----\n' + \
base64.b64encode(public_key).decode('ascii') + '\n-----END PUBLIC KEY-----'
print(public_key)
status = cryptolib.atcab_release()
if status:
raise Exception
except Exception:
print('Unable to communicate with crypto, SW authentication required')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add python script to get ECC608 Public Key
Change-Id: I0826503e9b8d1bb5de3f83b82d0083754a7e2b8f<commit_after>
|
#!/usr/bin/env python3
import base64
import ctypes
import sys
CRYPTO_ADDRESS_DICT = {
'Vision Bonnet': 0x60,
'Voice Bonnet': 0x62,
}
class AtcaIfaceCfgLong(ctypes.Structure):
_fields_ = (
('iface_type', ctypes.c_ulong),
('devtype', ctypes.c_ulong),
('slave_address', ctypes.c_ubyte),
('bus', ctypes.c_ubyte),
('baud', ctypes.c_ulong)
)
def main():
try:
cryptolib = ctypes.cdll.LoadLibrary('libcryptoauth.so')
except Exception:
print('Unable to load crypto library, SW authentication required')
sys.exit()
try:
for name, addr in CRYPTO_ADDRESS_DICT.items():
cfg = AtcaIfaceCfgLong.in_dll(cryptolib, 'cfg_ateccx08a_i2c_default')
cfg.slave_address = addr << 1
cfg.bus = 1 # ARM I2C
cfg.devtype = 3 # ECC608
status = cryptolib.atcab_init(cryptolib.cfg_ateccx08a_i2c_default)
if status == 0:
# Found a valid crypto chip.
break
else:
cryptolib.atcab_release()
if status:
raise Exception
serial = ctypes.create_string_buffer(9)
status = cryptolib.atcab_read_serial_number(ctypes.byref(serial))
if status:
raise Exception
serial = ''.join('%02X' % x for x in serial.raw)
print('Serial Number: %s\n' % serial, file=sys.stderr)
pubkey = ctypes.create_string_buffer(64)
status = cryptolib.atcab_genkey_base(0, 0, None, ctypes.byref(pubkey))
if status:
raise Exception
public_key = bytearray.fromhex(
'3059301306072A8648CE3D020106082A8648CE3D03010703420004') + bytes(pubkey.raw)
public_key = '-----BEGIN PUBLIC KEY-----\n' + \
base64.b64encode(public_key).decode('ascii') + '\n-----END PUBLIC KEY-----'
print(public_key)
status = cryptolib.atcab_release()
if status:
raise Exception
except Exception:
print('Unable to communicate with crypto, SW authentication required')
if __name__ == '__main__':
main()
|
Add python script to get ECC608 Public Key
Change-Id: I0826503e9b8d1bb5de3f83b82d0083754a7e2b8f#!/usr/bin/env python3
import base64
import ctypes
import sys
CRYPTO_ADDRESS_DICT = {
'Vision Bonnet': 0x60,
'Voice Bonnet': 0x62,
}
class AtcaIfaceCfgLong(ctypes.Structure):
_fields_ = (
('iface_type', ctypes.c_ulong),
('devtype', ctypes.c_ulong),
('slave_address', ctypes.c_ubyte),
('bus', ctypes.c_ubyte),
('baud', ctypes.c_ulong)
)
def main():
try:
cryptolib = ctypes.cdll.LoadLibrary('libcryptoauth.so')
except Exception:
print('Unable to load crypto library, SW authentication required')
sys.exit()
try:
for name, addr in CRYPTO_ADDRESS_DICT.items():
cfg = AtcaIfaceCfgLong.in_dll(cryptolib, 'cfg_ateccx08a_i2c_default')
cfg.slave_address = addr << 1
cfg.bus = 1 # ARM I2C
cfg.devtype = 3 # ECC608
status = cryptolib.atcab_init(cryptolib.cfg_ateccx08a_i2c_default)
if status == 0:
# Found a valid crypto chip.
break
else:
cryptolib.atcab_release()
if status:
raise Exception
serial = ctypes.create_string_buffer(9)
status = cryptolib.atcab_read_serial_number(ctypes.byref(serial))
if status:
raise Exception
serial = ''.join('%02X' % x for x in serial.raw)
print('Serial Number: %s\n' % serial, file=sys.stderr)
pubkey = ctypes.create_string_buffer(64)
status = cryptolib.atcab_genkey_base(0, 0, None, ctypes.byref(pubkey))
if status:
raise Exception
public_key = bytearray.fromhex(
'3059301306072A8648CE3D020106082A8648CE3D03010703420004') + bytes(pubkey.raw)
public_key = '-----BEGIN PUBLIC KEY-----\n' + \
base64.b64encode(public_key).decode('ascii') + '\n-----END PUBLIC KEY-----'
print(public_key)
status = cryptolib.atcab_release()
if status:
raise Exception
except Exception:
print('Unable to communicate with crypto, SW authentication required')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add python script to get ECC608 Public Key
Change-Id: I0826503e9b8d1bb5de3f83b82d0083754a7e2b8f<commit_after>#!/usr/bin/env python3
import base64
import ctypes
import sys
CRYPTO_ADDRESS_DICT = {
'Vision Bonnet': 0x60,
'Voice Bonnet': 0x62,
}
class AtcaIfaceCfgLong(ctypes.Structure):
_fields_ = (
('iface_type', ctypes.c_ulong),
('devtype', ctypes.c_ulong),
('slave_address', ctypes.c_ubyte),
('bus', ctypes.c_ubyte),
('baud', ctypes.c_ulong)
)
def main():
try:
cryptolib = ctypes.cdll.LoadLibrary('libcryptoauth.so')
except Exception:
print('Unable to load crypto library, SW authentication required')
sys.exit()
try:
for name, addr in CRYPTO_ADDRESS_DICT.items():
cfg = AtcaIfaceCfgLong.in_dll(cryptolib, 'cfg_ateccx08a_i2c_default')
cfg.slave_address = addr << 1
cfg.bus = 1 # ARM I2C
cfg.devtype = 3 # ECC608
status = cryptolib.atcab_init(cryptolib.cfg_ateccx08a_i2c_default)
if status == 0:
# Found a valid crypto chip.
break
else:
cryptolib.atcab_release()
if status:
raise Exception
serial = ctypes.create_string_buffer(9)
status = cryptolib.atcab_read_serial_number(ctypes.byref(serial))
if status:
raise Exception
serial = ''.join('%02X' % x for x in serial.raw)
print('Serial Number: %s\n' % serial, file=sys.stderr)
pubkey = ctypes.create_string_buffer(64)
status = cryptolib.atcab_genkey_base(0, 0, None, ctypes.byref(pubkey))
if status:
raise Exception
public_key = bytearray.fromhex(
'3059301306072A8648CE3D020106082A8648CE3D03010703420004') + bytes(pubkey.raw)
public_key = '-----BEGIN PUBLIC KEY-----\n' + \
base64.b64encode(public_key).decode('ascii') + '\n-----END PUBLIC KEY-----'
print(public_key)
status = cryptolib.atcab_release()
if status:
raise Exception
except Exception:
print('Unable to communicate with crypto, SW authentication required')
if __name__ == '__main__':
main()
|
|
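Editor's note: the script wraps the 64 raw public-key bytes in a fixed SubjectPublicKeyInfo DER prefix before base64-encoding them into PEM. The stdlib-only sketch below reverses that wrapping; it is an illustration of the format, not part of the original tool.

# Sketch only: recover the 64 raw key bytes from a PEM produced by the script above.
import base64

DER_PREFIX = bytes.fromhex(
    '3059301306072A8648CE3D020106082A8648CE3D03010703420004')

def raw_pubkey_from_pem(pem):
    body = ''.join(line for line in pem.splitlines() if 'PUBLIC KEY' not in line)
    der = base64.b64decode(body)
    assert der.startswith(DER_PREFIX), 'unexpected key header'
    return der[len(DER_PREFIX):]   # 64 bytes: X || Y curve coordinates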
d9e11e2c5f14cee0ead87ced9afe85bdd299ab35
|
extract_text.py
|
extract_text.py
|
import json
f=open('raw.json')
g=open('extracted1','a')
i=1
for s in f:
j=json.loads(s)
j=j['text']
h=json.dumps(j)
number=str(i) + ':' + ' '
g.write(h)
g.write('\n\n')
i=i+1
|
Add python script to extract
|
Add python script to extract
|
Python
|
mit
|
sukanyapatra/Disatweet
|
Add python script to extract
|
import json
f=open('raw.json')
g=open('extracted1','a')
i=1
for s in f:
j=json.loads(s)
j=j['text']
h=json.dumps(j)
number=str(i) + ':' + ' '
g.write(h)
g.write('\n\n')
i=i+1
|
<commit_before><commit_msg>Add python script to extract<commit_after>
|
import json
f=open('raw.json')
g=open('extracted1','a')
i=1
for s in f:
j=json.loads(s)
j=j['text']
h=json.dumps(j)
number=str(i) + ':' + ' '
g.write(h)
g.write('\n\n')
i=i+1
|
Add python script to extractimport json
f=open('raw.json')
g=open('extracted1','a')
i=1
for s in f:
j=json.loads(s)
j=j['text']
h=json.dumps(j)
number=str(i) + ':' + ' '
g.write(h)
g.write('\n\n')
i=i+1
|
<commit_before><commit_msg>Add python script to extract<commit_after>import json
f=open('raw.json')
g=open('extracted1','a')
i=1
for s in f:
j=json.loads(s)
j=j['text']
h=json.dumps(j)
number=str(i) + ':' + ' '
g.write(h)
g.write('\n\n')
i=i+1
|
|
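Editor's note: the script above leaves both files open and builds a line number ("number") that it never writes. A hedged sketch of the same extraction with context managers and the numbering actually applied is below; like the original, it assumes raw.json is newline-delimited JSON with a 'text' field.

# Sketch only: same extraction, with files closed automatically and the
# line number written in front of each tweet text.
import json

with open('raw.json') as src, open('extracted1', 'a') as dst:
    for i, line in enumerate(src, start=1):
        text = json.loads(line)['text']
        dst.write('%d: %s\n\n' % (i, json.dumps(text)))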
a70490e52bde05d2afc6ea59416a50e11119d060
|
raggregate/rg_migrations/versions/002_Add_metadata_to_Comment_to_allow_it_to_masquerade_as_epistle.py
|
raggregate/rg_migrations/versions/002_Add_metadata_to_Comment_to_allow_it_to_masquerade_as_epistle.py
|
from sqlalchemy import *
from migrate import *
from raggregate.guid_recipe import GUID
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
unreadc = Column('unread', Boolean, default=True)
in_reply_toc = Column('in_reply_to', GUID, nullable=True)
unreadc.create(comments)
in_reply_toc.create(comments)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
comments.c.unread.drop()
comments.c.in_reply_to.drop()
|
Add migration for Comment schema upgrade. ...
|
Add migration for Comment schema upgrade. ...
This allows Comment to masquerade as an epistle and is required to use
the last several commits. I forgot to push the migration out. :p
|
Python
|
apache-2.0
|
sjuxax/raggregate
|
Add migration for Comment schema upgrade. ...
This allows Comment to masquerade as an epistle and is required to use
the last several commits. I forgot to push the migration out. :p
|
from sqlalchemy import *
from migrate import *
from raggregate.guid_recipe import GUID
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
unreadc = Column('unread', Boolean, default=True)
in_reply_toc = Column('in_reply_to', GUID, nullable=True)
unreadc.create(comments)
in_reply_toc.create(comments)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
comments.c.unread.drop()
comments.c.in_reply_to.drop()
|
<commit_before><commit_msg>Add migration for Comment schema upgrade. ...
This allows Comment to masquerade as an epistle and is required to use
the last several commits. I forgot to push the migration out. :p<commit_after>
|
from sqlalchemy import *
from migrate import *
from raggregate.guid_recipe import GUID
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
unreadc = Column('unread', Boolean, default=True)
in_reply_toc = Column('in_reply_to', GUID, nullable=True)
unreadc.create(comments)
in_reply_toc.create(comments)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
comments.c.unread.drop()
comments.c.in_reply_to.drop()
|
Add migration for Comment schema upgrade. ...
This allows Comment to masquerade as an epistle and is required to use
the last several commits. I forgot to push the migration out. :pfrom sqlalchemy import *
from migrate import *
from raggregate.guid_recipe import GUID
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
unreadc = Column('unread', Boolean, default=True)
in_reply_toc = Column('in_reply_to', GUID, nullable=True)
unreadc.create(comments)
in_reply_toc.create(comments)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
comments.c.unread.drop()
comments.c.in_reply_to.drop()
|
<commit_before><commit_msg>Add migration for Comment schema upgrade. ...
This allows Comment to masquerade as an epistle and is required to use
the last several commits. I forgot to push the migration out. :p<commit_after>from sqlalchemy import *
from migrate import *
from raggregate.guid_recipe import GUID
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
unreadc = Column('unread', Boolean, default=True)
in_reply_toc = Column('in_reply_to', GUID, nullable=True)
unreadc.create(comments)
in_reply_toc.create(comments)
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
meta = MetaData(bind=migrate_engine)
comments = Table('comments', meta, autoload=True)
comments.c.unread.drop()
comments.c.in_reply_to.drop()
|
|
6d3f6951d846c50fcc1ff011f9129a4e1e3f7de1
|
testing/test_storm_bmi.py
|
testing/test_storm_bmi.py
|
#! /usr/bin/env python
#
# Tests for the BMI version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
input_file1 = os.path.join(data_dir, 'test1.in')
input_file2 = os.path.join(data_dir, 'test2.in')
build_dir = os.path.join(start_dir, 'build')
exe = './bmi/storm'
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** BMI tests')
os.mkdir(build_dir)
os.chdir(build_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
os.chdir(start_dir)
shutil.rmtree(build_dir)
# Tests ----------------------------------------------------------------
def test_configure():
'''
Test whether CMake executes successfully
'''
call(['cmake', '..'])
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_without_input_file():
'''
Check that storm runs without an input file
'''
r = call([exe])
assert_equal(r, 0)
def test_with_singlestep_input_file():
'''
Check that storm runs with a one-step input file
'''
r = call([exe, input_file1])
assert_equal(r, 0)
def test_with_multistep_input_file():
'''
Check that storm runs with a multi-step input file
'''
r = call([exe, input_file2])
assert_equal(r, 0)
|
Add unit tests for BMI version of storm
|
Add unit tests for BMI version of storm
|
Python
|
mit
|
mdpiper/storm,csdms-contrib/storm,mdpiper/storm,csdms-contrib/storm
|
Add unit tests for BMI version of storm
|
#! /usr/bin/env python
#
# Tests for the BMI version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
input_file1 = os.path.join(data_dir, 'test1.in')
input_file2 = os.path.join(data_dir, 'test2.in')
build_dir = os.path.join(start_dir, 'build')
exe = './bmi/storm'
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** BMI tests')
os.mkdir(build_dir)
os.chdir(build_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
os.chdir(start_dir)
shutil.rmtree(build_dir)
# Tests ----------------------------------------------------------------
def test_configure():
'''
Test whether CMake executes successfully
'''
call(['cmake', '..'])
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_without_input_file():
'''
Check that storm runs without an input file
'''
r = call([exe])
assert_equal(r, 0)
def test_with_singlestep_input_file():
'''
Check that storm runs with a one-step input file
'''
r = call([exe, input_file1])
assert_equal(r, 0)
def test_with_multistep_input_file():
'''
Check that storm runs with a multi-step input file
'''
r = call([exe, input_file2])
assert_equal(r, 0)
|
<commit_before><commit_msg>Add unit tests for BMI version of storm<commit_after>
|
#! /usr/bin/env python
#
# Tests for the BMI version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
input_file1 = os.path.join(data_dir, 'test1.in')
input_file2 = os.path.join(data_dir, 'test2.in')
build_dir = os.path.join(start_dir, 'build')
exe = './bmi/storm'
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** BMI tests')
os.mkdir(build_dir)
os.chdir(build_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
os.chdir(start_dir)
shutil.rmtree(build_dir)
# Tests ----------------------------------------------------------------
def test_configure():
'''
Test whether CMake executes successfully
'''
call(['cmake', '..'])
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_without_input_file():
'''
Check that storm runs without an input file
'''
r = call([exe])
assert_equal(r, 0)
def test_with_singlestep_input_file():
'''
Check that storm runs with a one-step input file
'''
r = call([exe, input_file1])
assert_equal(r, 0)
def test_with_multistep_input_file():
'''
Check that storm runs with a multi-step input file
'''
r = call([exe, input_file2])
assert_equal(r, 0)
|
Add unit tests for BMI version of storm#! /usr/bin/env python
#
# Tests for the BMI version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
input_file1 = os.path.join(data_dir, 'test1.in')
input_file2 = os.path.join(data_dir, 'test2.in')
build_dir = os.path.join(start_dir, 'build')
exe = './bmi/storm'
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** BMI tests')
os.mkdir(build_dir)
os.chdir(build_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
os.chdir(start_dir)
shutil.rmtree(build_dir)
# Tests ----------------------------------------------------------------
def test_configure():
'''
Test whether CMake executes successfully
'''
call(['cmake', '..'])
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_without_input_file():
'''
Check that storm runs without an input file
'''
r = call([exe])
assert_equal(r, 0)
def test_with_singlestep_input_file():
'''
Check that storm runs with a one-step input file
'''
r = call([exe, input_file1])
assert_equal(r, 0)
def test_with_multistep_input_file():
'''
Check that storm runs with a multi-step input file
'''
r = call([exe, input_file2])
assert_equal(r, 0)
|
<commit_before><commit_msg>Add unit tests for BMI version of storm<commit_after>#! /usr/bin/env python
#
# Tests for the BMI version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Global variables
start_dir = os.getcwd()
data_dir = os.path.join(start_dir, 'testing', 'data')
input_file1 = os.path.join(data_dir, 'test1.in')
input_file2 = os.path.join(data_dir, 'test2.in')
build_dir = os.path.join(start_dir, 'build')
exe = './bmi/storm'
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('*** BMI tests')
os.mkdir(build_dir)
os.chdir(build_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
os.chdir(start_dir)
shutil.rmtree(build_dir)
# Tests ----------------------------------------------------------------
def test_configure():
'''
Test whether CMake executes successfully
'''
call(['cmake', '..'])
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_without_input_file():
'''
Check that storm runs without an input file
'''
r = call([exe])
assert_equal(r, 0)
def test_with_singlestep_input_file():
'''
Check that storm runs with a one-step input file
'''
r = call([exe, input_file1])
assert_equal(r, 0)
def test_with_multistep_input_file():
'''
Check that storm runs with a multi-step input file
'''
r = call([exe, input_file2])
assert_equal(r, 0)
|
|
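Editor's note: test_configure and test_compile above invoke cmake and make without checking the return code, so a broken build only surfaces in the later run-time tests. A sketch of the same steps using subprocess.check_call, which raises on a non-zero exit, is below.

# Sketch only: fail the build tests immediately if cmake or make exits non-zero.
from subprocess import check_call

def test_configure():
    check_call(['cmake', '..'])

def test_compile():
    check_call(['make'])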
086371f56748da9fb68acc4aaa10094b6cf24fcb
|
tests/unit/returners/test_pgjsonb.py
|
tests/unit/returners/test_pgjsonb.py
|
# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
Revert "Remove pgjsonb returner unit tests"
|
Revert "Remove pgjsonb returner unit tests"
This reverts commit ab4a670ff22878d5115f408baf0304a0ba3ec994.
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Revert "Remove pgjsonb returner unit tests"
This reverts commit ab4a670ff22878d5115f408baf0304a0ba3ec994.
|
# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
<commit_before><commit_msg>Revert "Remove pgjsonb returner unit tests"
This reverts commit ab4a670ff22878d5115f408baf0304a0ba3ec994.<commit_after>
|
# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
Revert "Remove pgjsonb returner unit tests"
This reverts commit ab4a670ff22878d5115f408baf0304a0ba3ec994.# -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
<commit_before><commit_msg>Revert "Remove pgjsonb returner unit tests"
This reverts commit ab4a670ff22878d5115f408baf0304a0ba3ec994.<commit_after># -*- coding: utf-8 -*-
'''
tests.unit.returners.pgjsonb_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the PGJsonb returner (pgjsonb).
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
# Import Salt Testing libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
# Import Salt libs
import salt.returners.pgjsonb as pgjsonb
log = logging.getLogger(__name__)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class PGJsonbCleanOldJobsTestCase(TestCase, LoaderModuleMockMixin):
'''
Tests for the local_cache.clean_old_jobs function.
'''
def setup_loader_modules(self):
return {pgjsonb: {'__opts__': {'keep_jobs': 1, 'archive_jobs': 0}}}
def test_clean_old_jobs_purge(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
def test_clean_old_jobs_archive(self):
'''
Tests that the function returns None when no jid_root is found.
'''
connect_mock = MagicMock()
with patch.object(pgjsonb, '_get_serv', connect_mock):
with patch.dict(pgjsonb.__salt__, {'config.option': MagicMock()}):
with patch.dict(pgjsonb.__opts__, {'archive_jobs': 1}):
self.assertEqual(pgjsonb.clean_old_jobs(), None)
|
|
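Editor's note: the tests above lean on patch.object and patch.dict to isolate the returner from a real database. A minimal standalone illustration of that pattern, using a throwaway namespace instead of the salt module, is below.

# Sketch only: patch.object swaps an attribute for a MagicMock, patch.dict
# temporarily overrides dict entries and restores them on exit.
from types import SimpleNamespace
from unittest.mock import MagicMock, patch

mod = SimpleNamespace(_get_serv=lambda: None, opts={'archive_jobs': 0})

with patch.object(mod, '_get_serv', MagicMock()) as fake_serv:
    with patch.dict(mod.opts, {'archive_jobs': 1}):
        assert mod.opts['archive_jobs'] == 1
        mod._get_serv()
fake_serv.assert_called_once_with()
assert mod.opts['archive_jobs'] == 0  # patch.dict restored the original value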
722b1d55c771e628ba82bbd5b8f8f5de047112af
|
tests/hexdumper.py
|
tests/hexdumper.py
|
# This hack by: Raymond Hettinger
class hexdumper:
"""Given a byte array, turn it into a string. hex bytes to stdout."""
def __init__(self):
self.FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' \
for x in range(256)])
def dump(self, src, length=8):
result=[]
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(self.FILTER)
result.append("%04X %-*s %s\n" % \
(i, length*3, hexa, printable))
return ''.join(result)
|
Add a hex dump utility class.
|
Add a hex dump utility class.
|
Python
|
bsd-3-clause
|
gvnn3/PCS,gvnn3/PCS
|
Add a hex dump utility class.
|
# This hack by: Raymond Hettinger
class hexdumper:
"""Given a byte array, turn it into a string. hex bytes to stdout."""
def __init__(self):
self.FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' \
for x in range(256)])
def dump(self, src, length=8):
result=[]
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(self.FILTER)
result.append("%04X %-*s %s\n" % \
(i, length*3, hexa, printable))
return ''.join(result)
|
<commit_before><commit_msg>Add a hex dump utility class.<commit_after>
|
# This hack by: Raymond Hettinger
class hexdumper:
"""Given a byte array, turn it into a string. hex bytes to stdout."""
def __init__(self):
self.FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' \
for x in range(256)])
def dump(self, src, length=8):
result=[]
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(self.FILTER)
result.append("%04X %-*s %s\n" % \
(i, length*3, hexa, printable))
return ''.join(result)
|
Add a hex dump utility class.# This hack by: Raymond Hettinger
class hexdumper:
"""Given a byte array, turn it into a string. hex bytes to stdout."""
def __init__(self):
self.FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' \
for x in range(256)])
def dump(self, src, length=8):
result=[]
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(self.FILTER)
result.append("%04X %-*s %s\n" % \
(i, length*3, hexa, printable))
return ''.join(result)
|
<commit_before><commit_msg>Add a hex dump utility class.<commit_after># This hack by: Raymond Hettinger
class hexdumper:
"""Given a byte array, turn it into a string. hex bytes to stdout."""
def __init__(self):
self.FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' \
for x in range(256)])
def dump(self, src, length=8):
result=[]
for i in xrange(0, len(src), length):
s = src[i:i+length]
hexa = ' '.join(["%02X"%ord(x) for x in s])
printable = s.translate(self.FILTER)
result.append("%04X %-*s %s\n" % \
(i, length*3, hexa, printable))
return ''.join(result)
|
|
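Editor's note: the class above is Python 2 code (xrange, ord() applied to one-character string slices). A Python 3 sketch of the same dump format, with a small usage example, is below.

# Sketch only: Python 3 rewrite of the dump logic; iterating over bytes
# already yields integers, so ord() and the FILTER table are unnecessary.
def hexdump(src, length=8):
    result = []
    for i in range(0, len(src), length):
        chunk = src[i:i + length]
        hexa = ' '.join('%02X' % b for b in chunk)
        printable = ''.join(chr(b) if 32 <= b < 127 else '.' for b in chunk)
        result.append('%04X   %-*s   %s' % (i, length * 3, hexa, printable))
    return '\n'.join(result)

print(hexdump(b'PCS hex dump example'))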
8660c7fda8cc7290fadeed7a39f06218087d9401
|
tests/test_linter.py
|
tests/test_linter.py
|
import logging
import pytest
from mappyfile.validator import Validator
def validate(d):
v = Validator()
return v.validate(d)
def get_from_dict(d, keys):
for k in keys:
if isinstance(k, int):
d = d[0]
else:
d = d[k]
return d
def run_tests():
pytest.main(["tests/test_linter.py"])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# run_tests()
print("Done!")
|
Add draft test module for linter
|
Add draft test module for linter
|
Python
|
mit
|
geographika/mappyfile,geographika/mappyfile
|
Add draft test module for linter
|
import logging
import pytest
from mappyfile.validator import Validator
def validate(d):
v = Validator()
return v.validate(d)
def get_from_dict(d, keys):
for k in keys:
if isinstance(k, int):
d = d[0]
else:
d = d[k]
return d
def run_tests():
pytest.main(["tests/test_linter.py"])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# run_tests()
print("Done!")
|
<commit_before><commit_msg>Add draft test module for linter<commit_after>
|
import logging
import pytest
from mappyfile.validator import Validator
def validate(d):
v = Validator()
return v.validate(d)
def get_from_dict(d, keys):
for k in keys:
if isinstance(k, int):
d = d[0]
else:
d = d[k]
return d
def run_tests():
pytest.main(["tests/test_linter.py"])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# run_tests()
print("Done!")
|
Add draft test module for linterimport logging
import pytest
from mappyfile.validator import Validator
def validate(d):
v = Validator()
return v.validate(d)
def get_from_dict(d, keys):
for k in keys:
if isinstance(k, int):
d = d[0]
else:
d = d[k]
return d
def run_tests():
pytest.main(["tests/test_linter.py"])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# run_tests()
print("Done!")
|
<commit_before><commit_msg>Add draft test module for linter<commit_after>import logging
import pytest
from mappyfile.validator import Validator
def validate(d):
v = Validator()
return v.validate(d)
def get_from_dict(d, keys):
for k in keys:
if isinstance(k, int):
d = d[0]
else:
d = d[k]
return d
def run_tests():
pytest.main(["tests/test_linter.py"])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# run_tests()
print("Done!")
|
|
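Editor's note: in the draft above, get_from_dict takes element 0 whenever a key is an integer, ignoring the index value itself; it is unclear whether that is intentional for the validator's data layout. If positional lookup was the intent, a sketch would be:

# Sketch only: variant that honours the integer index instead of always
# taking the first element -- assuming positional lookup was intended.
def get_from_dict_indexed(d, keys):
    for k in keys:
        d = d[k]   # works for both dict keys and list indices
    return d

assert get_from_dict_indexed({'layers': ['first', 'second']}, ['layers', 1]) == 'second'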
ad7f9f785f9a4a4494127a9b2196e1fc64c9f3de
|
tests/test_report.py
|
tests/test_report.py
|
from django.test import TestCase
from deep_collector.core import RelatedObjectsCollector
from .factories import BaseModelFactory
class TestLogReportGeneration(TestCase):
def test_report_with_no_debug_mode(self):
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.collect(obj)
report = collector.get_report()
self.assertDictEqual(report, {
'excluded_fields': [],
'log': 'Set DEBUG to True if you what collector internal logs'
})
def test_report_with_debug_mode(self):
self.maxDiff = None
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.DEBUG = True
collector.collect(obj)
report = collector.get_report()
self.assertEqual(report['excluded_fields'], [])
# For now, just checking that the log report is not empty.
# Some work has to be done to test it more.
self.assertNotEqual(report['log'], [])
|
Add basic first tests for new report driven by "events"
|
Add basic first tests for new report driven by "events"
|
Python
|
bsd-3-clause
|
iwoca/django-deep-collector
|
Add basic first tests for new report driven by "events"
|
from django.test import TestCase
from deep_collector.core import RelatedObjectsCollector
from .factories import BaseModelFactory
class TestLogReportGeneration(TestCase):
def test_report_with_no_debug_mode(self):
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.collect(obj)
report = collector.get_report()
self.assertDictEqual(report, {
'excluded_fields': [],
'log': 'Set DEBUG to True if you what collector internal logs'
})
def test_report_with_debug_mode(self):
self.maxDiff = None
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.DEBUG = True
collector.collect(obj)
report = collector.get_report()
self.assertEqual(report['excluded_fields'], [])
# For now, just checking that the log report is not empty.
# Some work has to be done to test it more.
self.assertNotEqual(report['log'], [])
|
<commit_before><commit_msg>Add basic first tests for new report driven by "events"<commit_after>
|
from django.test import TestCase
from deep_collector.core import RelatedObjectsCollector
from .factories import BaseModelFactory
class TestLogReportGeneration(TestCase):
def test_report_with_no_debug_mode(self):
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.collect(obj)
report = collector.get_report()
self.assertDictEqual(report, {
'excluded_fields': [],
'log': 'Set DEBUG to True if you what collector internal logs'
})
def test_report_with_debug_mode(self):
self.maxDiff = None
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.DEBUG = True
collector.collect(obj)
report = collector.get_report()
self.assertEqual(report['excluded_fields'], [])
# For now, just checking that the log report is not empty.
# Some work has to be done to test it more.
self.assertNotEqual(report['log'], [])
|
Add basic first tests for new report driven by "events"from django.test import TestCase
from deep_collector.core import RelatedObjectsCollector
from .factories import BaseModelFactory
class TestLogReportGeneration(TestCase):
def test_report_with_no_debug_mode(self):
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.collect(obj)
report = collector.get_report()
self.assertDictEqual(report, {
'excluded_fields': [],
'log': 'Set DEBUG to True if you what collector internal logs'
})
def test_report_with_debug_mode(self):
self.maxDiff = None
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.DEBUG = True
collector.collect(obj)
report = collector.get_report()
self.assertEqual(report['excluded_fields'], [])
# For now, just checking that the log report is not empty.
# Some work has to be done to test it more.
self.assertNotEqual(report['log'], [])
|
<commit_before><commit_msg>Add basic first tests for new report driven by "events"<commit_after>from django.test import TestCase
from deep_collector.core import RelatedObjectsCollector
from .factories import BaseModelFactory
class TestLogReportGeneration(TestCase):
def test_report_with_no_debug_mode(self):
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.collect(obj)
report = collector.get_report()
self.assertDictEqual(report, {
'excluded_fields': [],
'log': 'Set DEBUG to True if you what collector internal logs'
})
def test_report_with_debug_mode(self):
self.maxDiff = None
obj = BaseModelFactory.create()
collector = RelatedObjectsCollector()
collector.DEBUG = True
collector.collect(obj)
report = collector.get_report()
self.assertEqual(report['excluded_fields'], [])
# For now, just checking that the log report is not empty.
# Some work has to be done to test it more.
self.assertNotEqual(report['log'], [])
|
|
4683fc67d5171d8bb0391ac45f587fbc3e3c97fc
|
install_dependencies.py
|
install_dependencies.py
|
import platform
import subprocess
"""
This is a standalone script that installs the required dependencies to run. It
*should* be platform independent, and should work regardless of what platform
you are running it on.
To install dependencies, download the DevAssist source and run this script by
running "python install_dependencies.py"
"""
# Identifying host platform
host_platform = platform.system()
def install_dependencies():
"""
Installs dependencies for DevAssist
"""
# Darwin = Mac OSX
if host_platform == "Darwin":
# Installing portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio...\n")
portaudio = subprocess.Popen(["brew install portaudio"], shell=True)
portaudio.communicate()
print("\nportaudio has been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
elif host_platform == "Linux":
# Installing dependencies for portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio & dependencies...\n")
portaudio = subprocess.Popen(["apt-get install portaudio19-dev python-all-dev python3-all-dev"], shell=True)
portaudio.communicate()
print("\nportaudio & dependencies have been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib' pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
if __name__ == "__main__":
install_dependencies()
|
Add dependency installer for linux and mac osx
|
Add dependency installer for linux and mac osx
|
Python
|
mit
|
GCI-2015-GPW/DevAssist
|
Add dependency installer for linux and mac osx
|
import platform
import subprocess
"""
This is a standalone script that installs the required dependencies to run. It
*should* be platform independent, and should work regardless of what platform
you are running it on.
To install dependencies, download the DevAssist source and run this script by
running "python install_dependencies.py"
"""
# Identifying host platform
host_platform = platform.system()
def install_dependencies():
"""
Installs dependencies for DevAssist
"""
# Darwin = Mac OSX
if host_platform == "Darwin":
# Installing portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio...\n")
portaudio = subprocess.Popen(["brew install portaudio"], shell=True)
portaudio.communicate()
print("\nportaudio has been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
elif host_platform == "Linux":
# Installing dependencies for portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio & dependencies...\n")
portaudio = subprocess.Popen(["apt-get install portaudio19-dev python-all-dev python3-all-dev"], shell=True)
portaudio.communicate()
print("\nportaudio & dependencies have been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib' pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
if __name__ == "__main__":
install_dependencies()
|
<commit_before><commit_msg>Add dependency installer for linux and mac osx<commit_after>
|
import platform
import subprocess
"""
This is a standalone script that installs the required dependencies to run. It
*should* be platform independent, and should work regardless of what platform
you are running it on.
To install dependencies, download the DevAssist source and run this script by
running "python install_dependencies.py"
"""
# Identifying host platform
host_platform = platform.system()
def install_dependencies():
"""
Installs dependencies for DevAssist
"""
# Darwin = Mac OSX
if host_platform == "Darwin":
# Installing portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio...\n")
portaudio = subprocess.Popen(["brew install portaudio"], shell=True)
portaudio.communicate()
print("\nportaudio has been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
elif host_platform == "Linux":
# Installing dependencies for portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio & dependencies...\n")
portaudio = subprocess.Popen(["apt-get install portaudio19-dev python-all-dev python3-all-dev"], shell=True)
portaudio.communicate()
print("\nportaudio & dependencies have been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib' pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
if __name__ == "__main__":
install_dependencies()
|
Add dependency installer for linux and mac osximport platform
import subprocess
"""
This is a standalone script that installs the required dependencies to run. It
*should* be platform independent, and should work regardless of what platform
you are running it on.
To install dependencies, download the DevAssist source and run this script by
running "python install_dependencies.py"
"""
# Identifying host platform
host_platform = platform.system()
def install_dependencies():
"""
Installs dependencies for DevAssist
"""
# Darwin = Mac OSX
if host_platform == "Darwin":
# Installing portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio...\n")
portaudio = subprocess.Popen(["brew install portaudio"], shell=True)
portaudio.communicate()
print("\nportaudio has been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
elif host_platform == "Linux":
# Installing dependencies for portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio & dependencies...\n")
portaudio = subprocess.Popen(["apt-get install portaudio19-dev python-all-dev python3-all-dev"], shell=True)
portaudio.communicate()
print("\nportaudio & dependencies have been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib' pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
if __name__ == "__main__":
install_dependencies()
|
<commit_before><commit_msg>Add dependency installer for linux and mac osx<commit_after>import platform
import subprocess
"""
This is a standalone script that installs the required dependencies to run. It
*should* be platform independent, and should work regardless of what platform
you are running it on.
To install dependencies, download the DevAssist source and run this script by
running "python install_dependencies.py"
"""
# Identifying host platform
host_platform = platform.system()
def install_dependencies():
"""
Installs dependencies for DevAssist
"""
# Darwin = Mac OSX
if host_platform == "Darwin":
# Installing portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio...\n")
portaudio = subprocess.Popen(["brew install portaudio"], shell=True)
portaudio.communicate()
print("\nportaudio has been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
elif host_platform == "Linux":
# Installing dependencies for portaudio
# @TODO: Rewrite to not use shell=True
print("Installing portaudio & dependencies...\n")
portaudio = subprocess.Popen(["apt-get install portaudio19-dev python-all-dev python3-all-dev"], shell=True)
portaudio.communicate()
print("\nportaudio & dependencies have been installed...")
# Installing pyaudio
# @TODO: Rewrite to not use shell=True
print("Installing pyaudio...\n")
pyaudio = subprocess.Popen(["pip install --global-option='build_ext' --global-option='-I/usr/local/include' --global-option='-L/usr/local/lib' pyaudio"], shell=True)
pyaudio.communicate()
print("\npyaudio has been installed...")
if __name__ == "__main__":
install_dependencies()
|
|
0fd7cdee45b54551bcfc901cece2e5cc9dec4555
|
test/test_setup.py
|
test/test_setup.py
|
import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'testsettings'
# run django setup if we are on a version of django that has it
if hasattr(django, 'setup'):
# setup doesn't like being run more than once
try:
django.setup()
except RuntimeError:
pass
|
Add new test setup required for py.test/django test setup
|
Add new test setup required for py.test/django test setup
|
Python
|
apache-2.0
|
emory-libraries/eulcommon,emory-libraries/eulcommon
|
Add new test setup required for py.test/django test setup
|
import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'testsettings'
# run django setup if we are on a version of django that has it
if hasattr(django, 'setup'):
# setup doesn't like being run more than once
try:
django.setup()
except RuntimeError:
pass
|
<commit_before><commit_msg>Add new test setup required for py.test/django test setup<commit_after>
|
import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'testsettings'
# run django setup if we are on a version of django that has it
if hasattr(django, 'setup'):
# setup doesn't like being run more than once
try:
django.setup()
except RuntimeError:
pass
|
Add new test setup required for py.test/django test setupimport os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'testsettings'
# run django setup if we are on a version of django that has it
if hasattr(django, 'setup'):
# setup doesn't like being run more than once
try:
django.setup()
except RuntimeError:
pass
|
<commit_before><commit_msg>Add new test setup required for py.test/django test setup<commit_after>import os
import django
os.environ['DJANGO_SETTINGS_MODULE'] = 'testsettings'
# run django setup if we are on a version of django that has it
if hasattr(django, 'setup'):
# setup doesn't like being run more than once
try:
django.setup()
except RuntimeError:
pass
|
|
dbfa14401c0b50eb1a3cac413652cb975ee9d41f
|
ocw-ui/backend/tests/test_directory_helpers.py
|
ocw-ui/backend/tests/test_directory_helpers.py
|
import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
|
Add valid directory cleaner helper test
|
Add valid directory cleaner helper test
git-svn-id: https://svn.apache.org/repos/asf/incubator/climate/trunk@1563517 13f79535-47bb-0310-9956-ffa450edef68
Former-commit-id: a23ba7556854cb30faaa0dfc19fdbcc6cb67382c
|
Python
|
apache-2.0
|
huikyole/climate,agoodm/climate,MJJoyce/climate,MBoustani/climate,agoodm/climate,MJJoyce/climate,kwhitehall/climate,MBoustani/climate,lewismc/climate,agoodm/climate,pwcberry/climate,MBoustani/climate,Omkar20895/climate,MJJoyce/climate,agoodm/climate,kwhitehall/climate,lewismc/climate,pwcberry/climate,huikyole/climate,riverma/climate,jarifibrahim/climate,apache/climate,Omkar20895/climate,jarifibrahim/climate,apache/climate,pwcberry/climate,huikyole/climate,jarifibrahim/climate,kwhitehall/climate,lewismc/climate,MBoustani/climate,kwhitehall/climate,agoodm/climate,riverma/climate,MJJoyce/climate,Omkar20895/climate,pwcberry/climate,riverma/climate,huikyole/climate,riverma/climate,pwcberry/climate,Omkar20895/climate,jarifibrahim/climate,lewismc/climate,huikyole/climate,Omkar20895/climate,apache/climate,MBoustani/climate,riverma/climate,apache/climate,MJJoyce/climate,apache/climate,jarifibrahim/climate,lewismc/climate
|
Add valid directory cleaner helper test
git-svn-id: https://svn.apache.org/repos/asf/incubator/climate/trunk@1563517 13f79535-47bb-0310-9956-ffa450edef68
Former-commit-id: a23ba7556854cb30faaa0dfc19fdbcc6cb67382c
|
import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
|
<commit_before><commit_msg>Add valid directory cleaner helper test
git-svn-id: https://svn.apache.org/repos/asf/incubator/climate/trunk@1563517 13f79535-47bb-0310-9956-ffa450edef68
Former-commit-id: a23ba7556854cb30faaa0dfc19fdbcc6cb67382c<commit_after>
|
import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
|
Add valid directory cleaner helper test
git-svn-id: https://svn.apache.org/repos/asf/incubator/climate/trunk@1563517 13f79535-47bb-0310-9956-ffa450edef68
Former-commit-id: a23ba7556854cb30faaa0dfc19fdbcc6cb67382cimport os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
|
<commit_before><commit_msg>Add valid directory cleaner helper test
git-svn-id: https://svn.apache.org/repos/asf/incubator/climate/trunk@1563517 13f79535-47bb-0310-9956-ffa450edef68
Former-commit-id: a23ba7556854cb30faaa0dfc19fdbcc6cb67382c<commit_after>import os
import unittest
from webtest import TestApp
from ..run_webservices import app
from ..directory_helpers import _get_clean_directory_path
test_app = TestApp(app)
class TestDirectoryPathCleaner(unittest.TestCase):
PATH_LEADER = '/tmp/foo'
VALID_CLEAN_DIR = '/tmp/foo/bar'
if not os.path.exists(PATH_LEADER): os.mkdir(PATH_LEADER)
if not os.path.exists(VALID_CLEAN_DIR): os.mkdir(VALID_CLEAN_DIR)
def test_valid_directory_path(self):
clean_path = _get_clean_directory_path(self.PATH_LEADER, '/bar')
self.assertEquals(clean_path, self.VALID_CLEAN_DIR)
|
|
f4d26567afc9185e0f9370eda43d30084437ade5
|
CodeFights/makeArrayConsecutive2.py
|
CodeFights/makeArrayConsecutive2.py
|
#!/usr/local/bin/python
# Code Fights Make Array Consecutive 2 Problem
def makeArrayConsecutive2(statues):
return (len(range(min(statues), max(statues) + 1)) - len(statues))
def main():
tests = [
[[6, 2, 3, 8], 3],
[[0, 3], 2],
[[5, 4, 6], 0],
[[6, 3], 2],
[[1], 0]
]
for t in tests:
res = makeArrayConsecutive2(t[0])
ans = t[1]
if ans == res:
print("PASSED: makeArrayConsecutive2({}) returned {}"
.format(t[0], res))
else:
print("FAILED: makeArrayConsecutive2({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights make array consecutive 2 problem
|
Solve Code Fights make array consecutive 2 problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights make array consecutive 2 problem
|
#!/usr/local/bin/python
# Code Fights Make Array Consecutive 2 Problem
def makeArrayConsecutive2(statues):
return (len(range(min(statues), max(statues) + 1)) - len(statues))
def main():
tests = [
[[6, 2, 3, 8], 3],
[[0, 3], 2],
[[5, 4, 6], 0],
[[6, 3], 2],
[[1], 0]
]
for t in tests:
res = makeArrayConsecutive2(t[0])
ans = t[1]
if ans == res:
print("PASSED: makeArrayConsecutive2({}) returned {}"
.format(t[0], res))
else:
print("FAILED: makeArrayConsecutive2({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights make array consecutive 2 problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Make Array Consecutive 2 Problem
def makeArrayConsecutive2(statues):
return (len(range(min(statues), max(statues) + 1)) - len(statues))
def main():
tests = [
[[6, 2, 3, 8], 3],
[[0, 3], 2],
[[5, 4, 6], 0],
[[6, 3], 2],
[[1], 0]
]
for t in tests:
res = makeArrayConsecutive2(t[0])
ans = t[1]
if ans == res:
print("PASSED: makeArrayConsecutive2({}) returned {}"
.format(t[0], res))
else:
print("FAILED: makeArrayConsecutive2({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights make array consecutive 2 problem#!/usr/local/bin/python
# Code Fights Make Array Consecutive 2 Problem
def makeArrayConsecutive2(statues):
return (len(range(min(statues), max(statues) + 1)) - len(statues))
def main():
tests = [
[[6, 2, 3, 8], 3],
[[0, 3], 2],
[[5, 4, 6], 0],
[[6, 3], 2],
[[1], 0]
]
for t in tests:
res = makeArrayConsecutive2(t[0])
ans = t[1]
if ans == res:
print("PASSED: makeArrayConsecutive2({}) returned {}"
.format(t[0], res))
else:
print("FAILED: makeArrayConsecutive2({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights make array consecutive 2 problem<commit_after>#!/usr/local/bin/python
# Code Fights Make Array Consecutive 2 Problem
def makeArrayConsecutive2(statues):
return (len(range(min(statues), max(statues) + 1)) - len(statues))
def main():
tests = [
[[6, 2, 3, 8], 3],
[[0, 3], 2],
[[5, 4, 6], 0],
[[6, 3], 2],
[[1], 0]
]
for t in tests:
res = makeArrayConsecutive2(t[0])
ans = t[1]
if ans == res:
print("PASSED: makeArrayConsecutive2({}) returned {}"
.format(t[0], res))
else:
print("FAILED: makeArrayConsecutive2({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
aafd823069176075b4810496ee98cea3203b5652
|
build_time/src/make_subset.py
|
build_time/src/make_subset.py
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import Options, load_font, Subsetter, save_font
def main(args):
"""Subset a font (useful for making small test fonts).
Arguments:
font-file
--hinting=(False|True), default is false
"""
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Input font file')
parser.add_argument('--text', default='',
help='Text to include in the subset')
parser.add_argument('--unicodes', default='',
help='Comma separated list of Unicode codepoints (hex) '
'to include in the subset; eg, "e7,0xe8,U+00e9"')
parser.add_argument('--glyphs', default='',
help='Comma separated list of glyph IDs (decimal) to '
'include in the subset; eg, "1,27"')
parser.add_argument('--hinting',default=False, action='store_true',
help='Enable hinting if specified, no hinting if not '
'present')
cmd_args = parser.parse_args(args)
options = Options()
# Definitely want the .notdef glyph and outlines.
options.notdef_glyph = True
options.notdef_outline = True
# Get the items to keep in the subset.
text = cmd_args.text
unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
unicodes = [ int(c,16) for c in unicodes_str.split(',') if c ]
glyphs = [ int(c) for c in cmd_args.glyphs.split(',') if c ]
fontfile = cmd_args.fontfile
options.hinting = cmd_args.hinting # False => no hinting
dir = os.path.dirname(fontfile)
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
output_file = dir + '/' + filename + '_subset' + extension
font = load_font(fontfile, options, lazy=False)
subsetter = Subsetter(options)
subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
subsetter.subset(font)
save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
|
Make a command to make subsets. Subsets are useful for testing during development.
|
Make a command to make subsets. Subsets are useful for testing during development.
|
Python
|
apache-2.0
|
googlei18n/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,moyogo/tachyfont,bstell/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,bstell/TachyFont,googlei18n/TachyFont,googlei18n/TachyFont,bstell/TachyFont,googlefonts/TachyFont,bstell/TachyFont,bstell/TachyFont,googlefonts/TachyFont,googlefonts/TachyFont
|
Make a command to make subsets. Subsets are useful for testing during development.
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import Options, load_font, Subsetter, save_font
def main(args):
"""Subset a font (useful for making small test fonts).
Arguments:
font-file
--hinting=(False|True), default is false
"""
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Input font file')
parser.add_argument('--text', default='',
help='Text to include in the subset')
parser.add_argument('--unicodes', default='',
help='Comma separated list of Unicode codepoints (hex) '
'to include in the subset; eg, "e7,0xe8,U+00e9"')
parser.add_argument('--glyphs', default='',
help='Comma separated list of glyph IDs (decimal) to '
'include in the subset; eg, "1,27"')
parser.add_argument('--hinting',default=False, action='store_true',
help='Enable hinting if specified, no hinting if not '
'present')
cmd_args = parser.parse_args(args)
options = Options()
# Definitely want the .notdef glyph and outlines.
options.notdef_glyph = True
options.notdef_outline = True
# Get the items to keep in the subset.
text = cmd_args.text
unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
unicodes = [ int(c,16) for c in unicodes_str.split(',') if c ]
glyphs = [ int(c) for c in cmd_args.glyphs.split(',') if c ]
fontfile = cmd_args.fontfile
options.hinting = cmd_args.hinting # False => no hinting
dir = os.path.dirname(fontfile)
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
output_file = dir + '/' + filename + '_subset' + extension
font = load_font(fontfile, options, lazy=False)
subsetter = Subsetter(options)
subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
subsetter.subset(font)
save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Make a command to make subsets. Subsets are useful for testing during development.<commit_after>
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import Options, load_font, Subsetter, save_font
def main(args):
"""Subset a font (useful for making small test fonts).
Arguments:
font-file
--hinting=(False|True), default is false
"""
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Input font file')
parser.add_argument('--text', default='',
help='Text to include in the subset')
parser.add_argument('--unicodes', default='',
help='Comma separated list of Unicode codepoints (hex) '
'to include in the subset; eg, "e7,0xe8,U+00e9"')
parser.add_argument('--glyphs', default='',
help='Comma separated list of glyph IDs (decimal) to '
'include in the subset; eg, "1,27"')
parser.add_argument('--hinting',default=False, action='store_true',
help='Enable hinting if specified, no hinting if not '
'present')
cmd_args = parser.parse_args(args)
options = Options()
# Definitely want the .notdef glyph and outlines.
options.notdef_glyph = True
options.notdef_outline = True
# Get the items to keep in the subset.
text = cmd_args.text
unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
unicodes = [ int(c,16) for c in unicodes_str.split(',') if c ]
glyphs = [ int(c) for c in cmd_args.glyphs.split(',') if c ]
fontfile = cmd_args.fontfile
options.hinting = cmd_args.hinting # False => no hinting
dir = os.path.dirname(fontfile)
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
output_file = dir + '/' + filename + '_subset' + extension
font = load_font(fontfile, options, lazy=False)
subsetter = Subsetter(options)
subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
subsetter.subset(font)
save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
|
Make a command to make subsets. Subsets are useful for testing during development."""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import Options, load_font, Subsetter, save_font
def main(args):
"""Subset a font (useful for making small test fonts).
Arguments:
font-file
--hinting=(False|True), default is false
"""
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Input font file')
parser.add_argument('--text', default='',
help='Text to include in the subset')
parser.add_argument('--unicodes', default='',
help='Comma separated list of Unicode codepoints (hex) '
'to include in the subset; eg, "e7,0xe8,U+00e9"')
parser.add_argument('--glyphs', default='',
help='Comma separated list of glyph IDs (decimal) to '
'include in the subset; eg, "1,27"')
parser.add_argument('--hinting',default=False, action='store_true',
help='Enable hinting if specified, no hinting if not '
'present')
cmd_args = parser.parse_args(args)
options = Options()
# Definitely want the .notdef glyph and outlines.
options.notdef_glyph = True
options.notdef_outline = True
# Get the items to keep in the subset.
text = cmd_args.text
unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
unicodes = [ int(c,16) for c in unicodes_str.split(',') if c ]
glyphs = [ int(c) for c in cmd_args.glyphs.split(',') if c ]
fontfile = cmd_args.fontfile
options.hinting = cmd_args.hinting # False => no hinting
dir = os.path.dirname(fontfile)
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
output_file = dir + '/' + filename + '_subset' + extension
font = load_font(fontfile, options, lazy=False)
subsetter = Subsetter(options)
subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
subsetter.subset(font)
save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Make a command to make subsets. Subsets are useful for testing during development.<commit_after>"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import sys
from fontTools.subset import Options, load_font, Subsetter, save_font
def main(args):
"""Subset a font (useful for making small test fonts).
Arguments:
font-file
--hinting=(False|True), default is false
"""
parser = argparse.ArgumentParser()
parser.add_argument('fontfile', help='Input font file')
parser.add_argument('--text', default='',
help='Text to include in the subset')
parser.add_argument('--unicodes', default='',
help='Comma separated list of Unicode codepoints (hex) '
'to include in the subset; eg, "e7,0xe8,U+00e9"')
parser.add_argument('--glyphs', default='',
help='Comma separated list of glyph IDs (decimal) to '
'include in the subset; eg, "1,27"')
parser.add_argument('--hinting',default=False, action='store_true',
help='Enable hinting if specified, no hinting if not '
'present')
cmd_args = parser.parse_args(args)
options = Options()
# Definitely want the .notdef glyph and outlines.
options.notdef_glyph = True
options.notdef_outline = True
# Get the items to keep in the subset.
text = cmd_args.text
unicodes_str = cmd_args.unicodes.lower().replace('0x', '').replace('u+', '')
unicodes = [ int(c,16) for c in unicodes_str.split(',') if c ]
glyphs = [ int(c) for c in cmd_args.glyphs.split(',') if c ]
fontfile = cmd_args.fontfile
options.hinting = cmd_args.hinting # False => no hinting
dir = os.path.dirname(fontfile)
basename = os.path.basename(fontfile)
filename, extension = os.path.splitext(basename)
output_file = dir + '/' + filename + '_subset' + extension
font = load_font(fontfile, options, lazy=False)
subsetter = Subsetter(options)
subsetter.populate(text=text, unicodes=unicodes, glyphs=glyphs)
subsetter.subset(font)
save_font(font, output_file, options)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
0091af78bd191e34ecb621b20e79d6dd3d32ebb6
|
tests/test_core.py
|
tests/test_core.py
|
#!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
from metasane.core import VocabularySet
class VocabularySetTests(TestCase):
def setUp(self):
"""Initialize data used in the tests."""
self.single_vocab = {'vocab_1': VOCAB_1.split('\n')}
self.multi_vocab = {
'vocab_1': VOCAB_1.split('\n'),
'vocab_2': VOCAB_2.split('\n')
}
self.multi_vocab_inst = VocabularySet(self.multi_vocab)
def test_init_empty(self):
"""Test constructing an instance with no vocabs."""
obs = VocabularySet({})
self.assertEqual(len(obs), 0)
def test_init_single(self):
"""Test constructing an instance with a single vocab."""
obs = VocabularySet(self.single_vocab)
self.assertEqual(len(obs), 1)
self.assertTrue('vocab_1' in obs)
def test_init_multi(self):
"""Test constructing an instance with multiple vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
def test_contains(self):
"""Test membership based on ID."""
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
self.assertFalse('vocab_3' in self.multi_vocab_inst)
def test_getitem(self):
"""Test retrieving vocab based on ID."""
obs = self.multi_vocab_inst['vocab_1']
self.assertEqual(obs, set(['foo', 'bar', 'baz']))
obs = self.multi_vocab_inst['vocab_2']
self.assertEqual(obs, set(['xyz', '123', 'abc']))
def test_getitem_nonexistent(self):
"""Test retrieving vocab based on nonexistent ID."""
with self.assertRaises(KeyError):
_ = self.multi_vocab_inst['vocab_3']
def test_len(self):
"""Test retrieving the number of vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
VOCAB_1 = """foo
\t \t
baR\t\t
\t\tBAZ
"""
VOCAB_2 = """abc
123
xyz"""
if __name__ == '__main__':
main()
|
Add unit tests for VocabularySet
|
Add unit tests for VocabularySet
|
Python
|
bsd-3-clause
|
clemente-lab/metasane
|
Add unit tests for VocabularySet
|
#!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
from metasane.core import VocabularySet
class VocabularySetTests(TestCase):
def setUp(self):
"""Initialize data used in the tests."""
self.single_vocab = {'vocab_1': VOCAB_1.split('\n')}
self.multi_vocab = {
'vocab_1': VOCAB_1.split('\n'),
'vocab_2': VOCAB_2.split('\n')
}
self.multi_vocab_inst = VocabularySet(self.multi_vocab)
def test_init_empty(self):
"""Test constructing an instance with no vocabs."""
obs = VocabularySet({})
self.assertEqual(len(obs), 0)
def test_init_single(self):
"""Test constructing an instance with a single vocab."""
obs = VocabularySet(self.single_vocab)
self.assertEqual(len(obs), 1)
self.assertTrue('vocab_1' in obs)
def test_init_multi(self):
"""Test constructing an instance with multiple vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
def test_contains(self):
"""Test membership based on ID."""
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
self.assertFalse('vocab_3' in self.multi_vocab_inst)
def test_getitem(self):
"""Test retrieving vocab based on ID."""
obs = self.multi_vocab_inst['vocab_1']
self.assertEqual(obs, set(['foo', 'bar', 'baz']))
obs = self.multi_vocab_inst['vocab_2']
self.assertEqual(obs, set(['xyz', '123', 'abc']))
def test_getitem_nonexistent(self):
"""Test retrieving vocab based on nonexistent ID."""
with self.assertRaises(KeyError):
_ = self.multi_vocab_inst['vocab_3']
def test_len(self):
"""Test retrieving the number of vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
VOCAB_1 = """foo
\t \t
baR\t\t
\t\tBAZ
"""
VOCAB_2 = """abc
123
xyz"""
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add unit tests for VocabularySet<commit_after>
|
#!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
from metasane.core import VocabularySet
class VocabularySetTests(TestCase):
def setUp(self):
"""Initialize data used in the tests."""
self.single_vocab = {'vocab_1': VOCAB_1.split('\n')}
self.multi_vocab = {
'vocab_1': VOCAB_1.split('\n'),
'vocab_2': VOCAB_2.split('\n')
}
self.multi_vocab_inst = VocabularySet(self.multi_vocab)
def test_init_empty(self):
"""Test constructing an instance with no vocabs."""
obs = VocabularySet({})
self.assertEqual(len(obs), 0)
def test_init_single(self):
"""Test constructing an instance with a single vocab."""
obs = VocabularySet(self.single_vocab)
self.assertEqual(len(obs), 1)
self.assertTrue('vocab_1' in obs)
def test_init_multi(self):
"""Test constructing an instance with multiple vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
def test_contains(self):
"""Test membership based on ID."""
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
self.assertFalse('vocab_3' in self.multi_vocab_inst)
def test_getitem(self):
"""Test retrieving vocab based on ID."""
obs = self.multi_vocab_inst['vocab_1']
self.assertEqual(obs, set(['foo', 'bar', 'baz']))
obs = self.multi_vocab_inst['vocab_2']
self.assertEqual(obs, set(['xyz', '123', 'abc']))
def test_getitem_nonexistent(self):
"""Test retrieving vocab based on nonexistent ID."""
with self.assertRaises(KeyError):
_ = self.multi_vocab_inst['vocab_3']
def test_len(self):
"""Test retrieving the number of vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
VOCAB_1 = """foo
\t \t
baR\t\t
\t\tBAZ
"""
VOCAB_2 = """abc
123
xyz"""
if __name__ == '__main__':
main()
|
Add unit tests for VocabularySet#!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
from metasane.core import VocabularySet
class VocabularySetTests(TestCase):
def setUp(self):
"""Initialize data used in the tests."""
self.single_vocab = {'vocab_1': VOCAB_1.split('\n')}
self.multi_vocab = {
'vocab_1': VOCAB_1.split('\n'),
'vocab_2': VOCAB_2.split('\n')
}
self.multi_vocab_inst = VocabularySet(self.multi_vocab)
def test_init_empty(self):
"""Test constructing an instance with no vocabs."""
obs = VocabularySet({})
self.assertEqual(len(obs), 0)
def test_init_single(self):
"""Test constructing an instance with a single vocab."""
obs = VocabularySet(self.single_vocab)
self.assertEqual(len(obs), 1)
self.assertTrue('vocab_1' in obs)
def test_init_multi(self):
"""Test constructing an instance with multiple vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
def test_contains(self):
"""Test membership based on ID."""
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
self.assertFalse('vocab_3' in self.multi_vocab_inst)
def test_getitem(self):
"""Test retrieving vocab based on ID."""
obs = self.multi_vocab_inst['vocab_1']
self.assertEqual(obs, set(['foo', 'bar', 'baz']))
obs = self.multi_vocab_inst['vocab_2']
self.assertEqual(obs, set(['xyz', '123', 'abc']))
def test_getitem_nonexistent(self):
"""Test retrieving vocab based on nonexistent ID."""
with self.assertRaises(KeyError):
_ = self.multi_vocab_inst['vocab_3']
def test_len(self):
"""Test retrieving the number of vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
VOCAB_1 = """foo
\t \t
baR\t\t
\t\tBAZ
"""
VOCAB_2 = """abc
123
xyz"""
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add unit tests for VocabularySet<commit_after>#!/usr/bin/env python
from __future__ import division
from unittest import TestCase, main
from metasane.core import VocabularySet
class VocabularySetTests(TestCase):
def setUp(self):
"""Initialize data used in the tests."""
self.single_vocab = {'vocab_1': VOCAB_1.split('\n')}
self.multi_vocab = {
'vocab_1': VOCAB_1.split('\n'),
'vocab_2': VOCAB_2.split('\n')
}
self.multi_vocab_inst = VocabularySet(self.multi_vocab)
def test_init_empty(self):
"""Test constructing an instance with no vocabs."""
obs = VocabularySet({})
self.assertEqual(len(obs), 0)
def test_init_single(self):
"""Test constructing an instance with a single vocab."""
obs = VocabularySet(self.single_vocab)
self.assertEqual(len(obs), 1)
self.assertTrue('vocab_1' in obs)
def test_init_multi(self):
"""Test constructing an instance with multiple vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
def test_contains(self):
"""Test membership based on ID."""
self.assertTrue('vocab_1' in self.multi_vocab_inst)
self.assertTrue('vocab_2' in self.multi_vocab_inst)
self.assertFalse('vocab_3' in self.multi_vocab_inst)
def test_getitem(self):
"""Test retrieving vocab based on ID."""
obs = self.multi_vocab_inst['vocab_1']
self.assertEqual(obs, set(['foo', 'bar', 'baz']))
obs = self.multi_vocab_inst['vocab_2']
self.assertEqual(obs, set(['xyz', '123', 'abc']))
def test_getitem_nonexistent(self):
"""Test retrieving vocab based on nonexistent ID."""
with self.assertRaises(KeyError):
_ = self.multi_vocab_inst['vocab_3']
def test_len(self):
"""Test retrieving the number of vocabs."""
self.assertEqual(len(self.multi_vocab_inst), 2)
VOCAB_1 = """foo
\t \t
baR\t\t
\t\tBAZ
"""
VOCAB_2 = """abc
123
xyz"""
if __name__ == '__main__':
main()
|
|
1f9240f0b954afa9f587f468872c3e1e215f2eaa
|
txircd/modules/cmode_s.py
|
txircd/modules/cmode_s.py
|
from txircd.modbase import Mode
class SecretMode(Mode):
def listOutput(self, command, data):
if command != "LIST":
return data
cdata = data["cdata"]
if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
data["cdata"] = {}
# other +s stuff is hiding in other modules.
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.mode_s = None
def spawn(self):
self.mode_s = SecretMode()
return {
"modes": {
"cns": self.mode_s
},
"actions": {
"commandextra": [self.mode_s.listOutput]
}
def cleanup(self):
self.ircd.removeMode("cns")
self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
|
Implement channel mode +s (or what's left of it)
|
Implement channel mode +s (or what's left of it)
|
Python
|
bsd-3-clause
|
Heufneutje/txircd,DesertBus/txircd,ElementalAlchemist/txircd
|
Implement channel mode +s (or what's left of it)
|
from txircd.modbase import Mode
class SecretMode(Mode):
def listOutput(self, command, data):
if command != "LIST":
return data
cdata = data["cdata"]
if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
data["cdata"] = {}
# other +s stuff is hiding in other modules.
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.mode_s = None
def spawn(self):
self.mode_s = SecretMode()
return {
"modes": {
"cns": self.mode_s
},
"actions": {
"commandextra": [self.mode_s.listOutput]
}
def cleanup(self):
self.ircd.removeMode("cns")
self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
|
<commit_before><commit_msg>Implement channel mode +s (or what's left of it)<commit_after>
|
from txircd.modbase import Mode
class SecretMode(Mode):
def listOutput(self, command, data):
if command != "LIST":
return data
cdata = data["cdata"]
if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
data["cdata"] = {}
# other +s stuff is hiding in other modules.
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.mode_s = None
def spawn(self):
self.mode_s = SecretMode()
return {
"modes": {
"cns": self.mode_s
},
"actions": {
"commandextra": [self.mode_s.listOutput]
}
def cleanup(self):
self.ircd.removeMode("cns")
self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
|
Implement channel mode +s (or what's left of it)from txircd.modbase import Mode
class SecretMode(Mode):
def listOutput(self, command, data):
if command != "LIST":
return data
cdata = data["cdata"]
if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
data["cdata"] = {}
# other +s stuff is hiding in other modules.
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.mode_s = None
def spawn(self):
self.mode_s = SecretMode()
return {
"modes": {
"cns": self.mode_s
},
"actions": {
"commandextra": [self.mode_s.listOutput]
}
def cleanup(self):
self.ircd.removeMode("cns")
self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
|
<commit_before><commit_msg>Implement channel mode +s (or what's left of it)<commit_after>from txircd.modbase import Mode
class SecretMode(Mode):
def listOutput(self, command, data):
if command != "LIST":
return data
cdata = data["cdata"]
if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
data["cdata"] = {}
# other +s stuff is hiding in other modules.
class Spawner(object):
def __init__(self, ircd):
self.ircd = ircd
self.mode_s = None
def spawn(self):
self.mode_s = SecretMode()
return {
"modes": {
"cns": self.mode_s
},
"actions": {
"commandextra": [self.mode_s.listOutput]
}
def cleanup(self):
self.ircd.removeMode("cns")
self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
|
|
d082eb41c2ccef7178d228896a7658fe52bcbdec
|
tests/UselessSymbolsRemove/__init__.py
|
tests/UselessSymbolsRemove/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
Create directory for useless symbols remove
|
Create directory for useless symbols remove
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Create directory for useless symbols remove
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Create directory for useless symbols remove<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
Create directory for useless symbols remove#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Create directory for useless symbols remove<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 14:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
|
fefb9a9fa5a7c6080bc52896e2d1517828b01a3d
|
migrations/versions/299e1d15a55f_populate_provincial_legislatures.py
|
migrations/versions/299e1d15a55f_populate_provincial_legislatures.py
|
"""populate-provincial-legislatures
Revision ID: 299e1d15a55f
Revises: 1f97f799a477
Create Date: 2018-08-20 16:17:28.919476
"""
# revision identifiers, used by Alembic.
revision = '299e1d15a55f'
down_revision = '1f97f799a477'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Ensure all provinces exist as Provincial Legislatures
"""
from pmg.models import House, db
from pmg.utils import get_provincial_legislatures
pls = [
{
'name': 'Eastern Cape Legislature',
'name_short': 'EC'
},
{
'name': 'Free State Legislature',
'name_short': 'FS'
},
{
'name': 'Gauteng Legislature',
'name_short': 'GT'
},
{
'name': 'KwaZulu-Natal Legislature',
'name_short': 'KZN'
},
{
'name': 'Limpopo Legislature',
'name_short': 'LIM'
},
{
'name': 'Mpumalanga Legislature',
'name_short': 'MP'
},
{
'name': 'Northern Cape Legislature',
'name_short': 'NC'
},
{
'name': 'North West Legislature',
'name_short': 'NW'
},
{
'name': 'Western Cape Parliament',
'name_short': 'WC'
}
]
existing_pls = House.query.filter(House.sphere=='provincial').all()
pl_codes = [p.name_short for p in existing_pls]
for pl in pls:
if pl['name_short'] not in pl_codes:
new_pl = House()
new_pl.name = pl['name']
new_pl.name_short = pl['name_short']
new_pl.sphere = 'provincial'
db.session.add(new_pl)
db.session.commit()
def downgrade():
pass
|
Add all PLs to db
|
Migration: Add all PLs to db
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Migration: Add all PLs to db
|
"""populate-provincial-legislatures
Revision ID: 299e1d15a55f
Revises: 1f97f799a477
Create Date: 2018-08-20 16:17:28.919476
"""
# revision identifiers, used by Alembic.
revision = '299e1d15a55f'
down_revision = '1f97f799a477'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Ensure all provinces exist as Provincial Legislatures
"""
from pmg.models import House, db
from pmg.utils import get_provincial_legislatures
pls = [
{
'name': 'Eastern Cape Legislature',
'name_short': 'EC'
},
{
'name': 'Free State Legislature',
'name_short': 'FS'
},
{
'name': 'Gauteng Legislature',
'name_short': 'GT'
},
{
'name': 'KwaZulu-Natal Legislature',
'name_short': 'KZN'
},
{
'name': 'Limpopo Legislature',
'name_short': 'LIM'
},
{
'name': 'Mpumalanga Legislature',
'name_short': 'MP'
},
{
'name': 'Northern Cape Legislature',
'name_short': 'NC'
},
{
'name': 'North West Legislature',
'name_short': 'NW'
},
{
'name': 'Western Cape Parliament',
'name_short': 'WC'
}
]
existing_pls = House.query.filter(House.sphere=='provincial').all()
pl_codes = [p.name_short for p in existing_pls]
for pl in pls:
if pl['name_short'] not in pl_codes:
new_pl = House()
new_pl.name = pl['name']
new_pl.name_short = pl['name_short']
new_pl.sphere = 'provincial'
db.session.add(new_pl)
db.session.commit()
def downgrade():
pass
|
<commit_before><commit_msg>Migration: Add all PLs to db<commit_after>
|
"""populate-provincial-legislatures
Revision ID: 299e1d15a55f
Revises: 1f97f799a477
Create Date: 2018-08-20 16:17:28.919476
"""
# revision identifiers, used by Alembic.
revision = '299e1d15a55f'
down_revision = '1f97f799a477'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Ensure all provinces exist as Provincial Legislatures
"""
from pmg.models import House, db
from pmg.utils import get_provincial_legislatures
pls = [
{
'name': 'Eastern Cape Legislature',
'name_short': 'EC'
},
{
'name': 'Free State Legislature',
'name_short': 'FS'
},
{
'name': 'Gauteng Legislature',
'name_short': 'GT'
},
{
'name': 'KwaZulu-Natal Legislature',
'name_short': 'KZN'
},
{
'name': 'Limpopo Legislature',
'name_short': 'LIM'
},
{
'name': 'Mpumalanga Legislature',
'name_short': 'MP'
},
{
'name': 'Northern Cape Legislature',
'name_short': 'NC'
},
{
'name': 'North West Legislature',
'name_short': 'NW'
},
{
'name': 'Western Cape Parliament',
'name_short': 'WC'
}
]
existing_pls = House.query.filter(House.sphere=='provincial').all()
pl_codes = [p.name_short for p in existing_pls]
for pl in pls:
if pl['name_short'] not in pl_codes:
new_pl = House()
new_pl.name = pl['name']
new_pl.name_short = pl['name_short']
new_pl.sphere = 'provincial'
db.session.add(new_pl)
db.session.commit()
def downgrade():
pass
|
Migration: Add all PLs to db"""populate-provincial-legislatures
Revision ID: 299e1d15a55f
Revises: 1f97f799a477
Create Date: 2018-08-20 16:17:28.919476
"""
# revision identifiers, used by Alembic.
revision = '299e1d15a55f'
down_revision = '1f97f799a477'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Ensure all provinces exist as Provincial Legislatures
"""
from pmg.models import House, db
from pmg.utils import get_provincial_legislatures
pls = [
{
'name': 'Eastern Cape Legislature',
'name_short': 'EC'
},
{
'name': 'Free State Legislature',
'name_short': 'FS'
},
{
'name': 'Gauteng Legislature',
'name_short': 'GT'
},
{
'name': 'KwaZulu-Natal Legislature',
'name_short': 'KZN'
},
{
'name': 'Limpopo Legislature',
'name_short': 'LIM'
},
{
'name': 'Mpumalanga Legislature',
'name_short': 'MP'
},
{
'name': 'Northern Cape Legislature',
'name_short': 'NC'
},
{
'name': 'North West Legislature',
'name_short': 'NW'
},
{
'name': 'Western Cape Parliament',
'name_short': 'WC'
}
]
existing_pls = House.query.filter(House.sphere=='provincial').all()
pl_codes = [p.name_short for p in existing_pls]
for pl in pls:
if pl['name_short'] not in pl_codes:
new_pl = House()
new_pl.name = pl['name']
new_pl.name_short = pl['name_short']
new_pl.sphere = 'provincial'
db.session.add(new_pl)
db.session.commit()
def downgrade():
pass
|
<commit_before><commit_msg>Migration: Add all PLs to db<commit_after>"""populate-provincial-legislatures
Revision ID: 299e1d15a55f
Revises: 1f97f799a477
Create Date: 2018-08-20 16:17:28.919476
"""
# revision identifiers, used by Alembic.
revision = '299e1d15a55f'
down_revision = '1f97f799a477'
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Ensure all provinces exist as Provincial Legislatures
"""
from pmg.models import House, db
from pmg.utils import get_provincial_legislatures
pls = [
{
'name': 'Eastern Cape Legislature',
'name_short': 'EC'
},
{
'name': 'Free State Legislature',
'name_short': 'FS'
},
{
'name': 'Gauteng Legislature',
'name_short': 'GT'
},
{
'name': 'KwaZulu-Natal Legislature',
'name_short': 'KZN'
},
{
'name': 'Limpopo Legislature',
'name_short': 'LIM'
},
{
'name': 'Mpumalanga Legislature',
'name_short': 'MP'
},
{
'name': 'Northern Cape Legislature',
'name_short': 'NC'
},
{
'name': 'North West Legislature',
'name_short': 'NW'
},
{
'name': 'Western Cape Parliament',
'name_short': 'WC'
}
]
existing_pls = House.query.filter(House.sphere=='provincial').all()
pl_codes = [p.name_short for p in existing_pls]
for pl in pls:
if pl['name_short'] not in pl_codes:
new_pl = House()
new_pl.name = pl['name']
new_pl.name_short = pl['name_short']
new_pl.sphere = 'provincial'
db.session.add(new_pl)
db.session.commit()
def downgrade():
pass
|
|
6c9760b328716d6b2e099698293c93cba9361932
|
checkserver/testchecks/check_error.py
|
checkserver/testchecks/check_error.py
|
#!/usr/bin/env python
# Copyright 2012 The greplin-nagios-utils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status
nagios config:
use regular-service
params $HOSTNAME$
"""
from greplin.nagios import parseArgs, Maximum, ResponseBuilder
def check(argv):
"""Runs the check."""
_ = parseArgs('check_fast.py', ('NAME', str), argv=argv) / 0 # Badness!
(ResponseBuilder().addRule('seven', Maximum(8, 11), 7)).finish()
if __name__ == '__main__':
import sys
check(sys.argv)
|
Add script for testing error reporting.
|
Add script for testing error reporting.
|
Python
|
apache-2.0
|
Cue/greplin-nagios-utils,Cue/greplin-nagios-utils
|
Add script for testing error reporting.
|
#!/usr/bin/env python
# Copyright 2012 The greplin-nagios-utils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status
nagios config:
use regular-service
params $HOSTNAME$
"""
from greplin.nagios import parseArgs, Maximum, ResponseBuilder
def check(argv):
"""Runs the check."""
_ = parseArgs('check_fast.py', ('NAME', str), argv=argv) / 0 # Badness!
(ResponseBuilder().addRule('seven', Maximum(8, 11), 7)).finish()
if __name__ == '__main__':
import sys
check(sys.argv)
|
<commit_before><commit_msg>Add script for testing error reporting.<commit_after>
|
#!/usr/bin/env python
# Copyright 2012 The greplin-nagios-utils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status
nagios config:
use regular-service
params $HOSTNAME$
"""
from greplin.nagios import parseArgs, Maximum, ResponseBuilder
def check(argv):
"""Runs the check."""
_ = parseArgs('check_fast.py', ('NAME', str), argv=argv) / 0 # Badness!
(ResponseBuilder().addRule('seven', Maximum(8, 11), 7)).finish()
if __name__ == '__main__':
import sys
check(sys.argv)
|
Add script for testing error reporting.#!/usr/bin/env python
# Copyright 2012 The greplin-nagios-utils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status
nagios config:
use regular-service
params $HOSTNAME$
"""
from greplin.nagios import parseArgs, Maximum, ResponseBuilder
def check(argv):
"""Runs the check."""
_ = parseArgs('check_fast.py', ('NAME', str), argv=argv) / 0 # Badness!
(ResponseBuilder().addRule('seven', Maximum(8, 11), 7)).finish()
if __name__ == '__main__':
import sys
check(sys.argv)
|
<commit_before><commit_msg>Add script for testing error reporting.<commit_after>#!/usr/bin/env python
# Copyright 2012 The greplin-nagios-utils Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status
nagios config:
use regular-service
params $HOSTNAME$
"""
from greplin.nagios import parseArgs, Maximum, ResponseBuilder
def check(argv):
"""Runs the check."""
_ = parseArgs('check_fast.py', ('NAME', str), argv=argv) / 0 # Badness!
(ResponseBuilder().addRule('seven', Maximum(8, 11), 7)).finish()
if __name__ == '__main__':
import sys
check(sys.argv)
|
|
ac7c5f51e270e48d3be9363a7c65b4b2f019c90c
|
contrib_bots/bots/xkcd/test_xkcd.py
|
contrib_bots/bots/xkcd/test_xkcd.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import mock
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestXkcdBot(BotTestCase):
bot_name = "xkcd"
@mock.patch('logging.exception')
def test_bot(self, mock_logging_exception):
help_txt = "xkcd bot supports these commands:"
err_txt = "xkcd bot only supports these commands:"
commands = '''
* `@xkcd help` to show this help message.
* `@xkcd latest` to fetch the latest comic strip from xkcd.
* `@xkcd random` to fetch a random comic strip from xkcd.
* `@xkcd <comic id>` to fetch a comic strip based on `<comic id>` e.g `@xkcd 1234`.'''
invalid_id_txt = "Sorry, there is likely no xkcd comic strip with id: #"
expected = {
"": err_txt+commands,
"help": help_txt+commands,
"x": err_txt+commands,
"0": invalid_id_txt + "0",
"1": ("#1: **Barrel - Part 1**\n[Don't we all.]"
"(https://imgs.xkcd.com/comics/barrel_cropped_(1).jpg)"),
"1800": ("#1800: **Chess Notation**\n"
"[I've decided to score all my conversations "
"using chess win-loss notation. (??)]"
"(https://imgs.xkcd.com/comics/chess_notation.png)"),
"999999999": invalid_id_txt + "999999999",
}
for m, r in expected.items():
self.assert_bot_output(
{'content': m, 'type': "private", 'sender_email': "foo"}, r)
self.assert_bot_output(
{'content': m, 'type': "stream", 'sender_email': "foo"}, r)
|
Add tests for xkcd bot.
|
contrib_bots: Add tests for xkcd bot.
|
Python
|
apache-2.0
|
shubhamdhama/zulip,shubhamdhama/zulip,verma-varsha/zulip,punchagan/zulip,vabs22/zulip,shubhamdhama/zulip,vaidap/zulip,brockwhittaker/zulip,rishig/zulip,punchagan/zulip,synicalsyntax/zulip,hackerkid/zulip,brainwane/zulip,jrowan/zulip,amanharitsh123/zulip,punchagan/zulip,brainwane/zulip,rishig/zulip,synicalsyntax/zulip,rishig/zulip,rishig/zulip,showell/zulip,showell/zulip,eeshangarg/zulip,shubhamdhama/zulip,eeshangarg/zulip,dhcrzf/zulip,jrowan/zulip,Galexrt/zulip,andersk/zulip,brainwane/zulip,timabbott/zulip,amanharitsh123/zulip,showell/zulip,kou/zulip,hackerkid/zulip,andersk/zulip,kou/zulip,rht/zulip,tommyip/zulip,brockwhittaker/zulip,hackerkid/zulip,punchagan/zulip,Galexrt/zulip,rht/zulip,showell/zulip,rht/zulip,kou/zulip,shubhamdhama/zulip,eeshangarg/zulip,hackerkid/zulip,rishig/zulip,dhcrzf/zulip,andersk/zulip,timabbott/zulip,jrowan/zulip,rishig/zulip,synicalsyntax/zulip,tommyip/zulip,dhcrzf/zulip,mahim97/zulip,vaidap/zulip,punchagan/zulip,jackrzhang/zulip,amanharitsh123/zulip,eeshangarg/zulip,tommyip/zulip,timabbott/zulip,jrowan/zulip,rishig/zulip,verma-varsha/zulip,Galexrt/zulip,zulip/zulip,hackerkid/zulip,rht/zulip,vaidap/zulip,amanharitsh123/zulip,eeshangarg/zulip,vabs22/zulip,zulip/zulip,showell/zulip,jackrzhang/zulip,Galexrt/zulip,vabs22/zulip,jackrzhang/zulip,timabbott/zulip,synicalsyntax/zulip,jackrzhang/zulip,verma-varsha/zulip,showell/zulip,brainwane/zulip,rht/zulip,dhcrzf/zulip,synicalsyntax/zulip,mahim97/zulip,andersk/zulip,synicalsyntax/zulip,kou/zulip,jackrzhang/zulip,zulip/zulip,vabs22/zulip,rht/zulip,Galexrt/zulip,shubhamdhama/zulip,amanharitsh123/zulip,eeshangarg/zulip,zulip/zulip,jrowan/zulip,brainwane/zulip,verma-varsha/zulip,mahim97/zulip,timabbott/zulip,mahim97/zulip,verma-varsha/zulip,hackerkid/zulip,verma-varsha/zulip,punchagan/zulip,zulip/zulip,timabbott/zulip,eeshangarg/zulip,synicalsyntax/zulip,dhcrzf/zulip,kou/zulip,brainwane/zulip,dhcrzf/zulip,tommyip/zulip,dhcrzf/zulip,mahim97/zulip,tommyip/zulip,rht/zulip,brockwhittaker/zulip,hackerkid/zulip,vaidap/zulip,andersk/zulip,brockwhittaker/zulip,vabs22/zulip,shubhamdhama/zulip,mahim97/zulip,brainwane/zulip,brockwhittaker/zulip,andersk/zulip,vaidap/zulip,timabbott/zulip,Galexrt/zulip,showell/zulip,jrowan/zulip,andersk/zulip,zulip/zulip,kou/zulip,amanharitsh123/zulip,jackrzhang/zulip,tommyip/zulip,kou/zulip,tommyip/zulip,brockwhittaker/zulip,vaidap/zulip,Galexrt/zulip,jackrzhang/zulip,punchagan/zulip,zulip/zulip,vabs22/zulip
|
contrib_bots: Add tests for xkcd bot.
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import mock
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestXkcdBot(BotTestCase):
bot_name = "xkcd"
@mock.patch('logging.exception')
def test_bot(self, mock_logging_exception):
help_txt = "xkcd bot supports these commands:"
err_txt = "xkcd bot only supports these commands:"
commands = '''
* `@xkcd help` to show this help message.
* `@xkcd latest` to fetch the latest comic strip from xkcd.
* `@xkcd random` to fetch a random comic strip from xkcd.
* `@xkcd <comic id>` to fetch a comic strip based on `<comic id>` e.g `@xkcd 1234`.'''
invalid_id_txt = "Sorry, there is likely no xkcd comic strip with id: #"
expected = {
"": err_txt+commands,
"help": help_txt+commands,
"x": err_txt+commands,
"0": invalid_id_txt + "0",
"1": ("#1: **Barrel - Part 1**\n[Don't we all.]"
"(https://imgs.xkcd.com/comics/barrel_cropped_(1).jpg)"),
"1800": ("#1800: **Chess Notation**\n"
"[I've decided to score all my conversations "
"using chess win-loss notation. (??)]"
"(https://imgs.xkcd.com/comics/chess_notation.png)"),
"999999999": invalid_id_txt + "999999999",
}
for m, r in expected.items():
self.assert_bot_output(
{'content': m, 'type': "private", 'sender_email': "foo"}, r)
self.assert_bot_output(
{'content': m, 'type': "stream", 'sender_email': "foo"}, r)
|
<commit_before><commit_msg>contrib_bots: Add tests for xkcd bot.<commit_after>
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import mock
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestXkcdBot(BotTestCase):
bot_name = "xkcd"
@mock.patch('logging.exception')
def test_bot(self, mock_logging_exception):
help_txt = "xkcd bot supports these commands:"
err_txt = "xkcd bot only supports these commands:"
commands = '''
* `@xkcd help` to show this help message.
* `@xkcd latest` to fetch the latest comic strip from xkcd.
* `@xkcd random` to fetch a random comic strip from xkcd.
* `@xkcd <comic id>` to fetch a comic strip based on `<comic id>` e.g `@xkcd 1234`.'''
invalid_id_txt = "Sorry, there is likely no xkcd comic strip with id: #"
expected = {
"": err_txt+commands,
"help": help_txt+commands,
"x": err_txt+commands,
"0": invalid_id_txt + "0",
"1": ("#1: **Barrel - Part 1**\n[Don't we all.]"
"(https://imgs.xkcd.com/comics/barrel_cropped_(1).jpg)"),
"1800": ("#1800: **Chess Notation**\n"
"[I've decided to score all my conversations "
"using chess win-loss notation. (??)]"
"(https://imgs.xkcd.com/comics/chess_notation.png)"),
"999999999": invalid_id_txt + "999999999",
}
for m, r in expected.items():
self.assert_bot_output(
{'content': m, 'type': "private", 'sender_email': "foo"}, r)
self.assert_bot_output(
{'content': m, 'type': "stream", 'sender_email': "foo"}, r)
|
contrib_bots: Add tests for xkcd bot.#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import mock
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestXkcdBot(BotTestCase):
bot_name = "xkcd"
@mock.patch('logging.exception')
def test_bot(self, mock_logging_exception):
help_txt = "xkcd bot supports these commands:"
err_txt = "xkcd bot only supports these commands:"
commands = '''
* `@xkcd help` to show this help message.
* `@xkcd latest` to fetch the latest comic strip from xkcd.
* `@xkcd random` to fetch a random comic strip from xkcd.
* `@xkcd <comic id>` to fetch a comic strip based on `<comic id>` e.g `@xkcd 1234`.'''
invalid_id_txt = "Sorry, there is likely no xkcd comic strip with id: #"
expected = {
"": err_txt+commands,
"help": help_txt+commands,
"x": err_txt+commands,
"0": invalid_id_txt + "0",
"1": ("#1: **Barrel - Part 1**\n[Don't we all.]"
"(https://imgs.xkcd.com/comics/barrel_cropped_(1).jpg)"),
"1800": ("#1800: **Chess Notation**\n"
"[I've decided to score all my conversations "
"using chess win-loss notation. (??)]"
"(https://imgs.xkcd.com/comics/chess_notation.png)"),
"999999999": invalid_id_txt + "999999999",
}
for m, r in expected.items():
self.assert_bot_output(
{'content': m, 'type': "private", 'sender_email': "foo"}, r)
self.assert_bot_output(
{'content': m, 'type': "stream", 'sender_email': "foo"}, r)
|
<commit_before><commit_msg>contrib_bots: Add tests for xkcd bot.<commit_after>#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import mock
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestXkcdBot(BotTestCase):
bot_name = "xkcd"
@mock.patch('logging.exception')
def test_bot(self, mock_logging_exception):
help_txt = "xkcd bot supports these commands:"
err_txt = "xkcd bot only supports these commands:"
commands = '''
* `@xkcd help` to show this help message.
* `@xkcd latest` to fetch the latest comic strip from xkcd.
* `@xkcd random` to fetch a random comic strip from xkcd.
* `@xkcd <comic id>` to fetch a comic strip based on `<comic id>` e.g `@xkcd 1234`.'''
invalid_id_txt = "Sorry, there is likely no xkcd comic strip with id: #"
expected = {
"": err_txt+commands,
"help": help_txt+commands,
"x": err_txt+commands,
"0": invalid_id_txt + "0",
"1": ("#1: **Barrel - Part 1**\n[Don't we all.]"
"(https://imgs.xkcd.com/comics/barrel_cropped_(1).jpg)"),
"1800": ("#1800: **Chess Notation**\n"
"[I've decided to score all my conversations "
"using chess win-loss notation. (??)]"
"(https://imgs.xkcd.com/comics/chess_notation.png)"),
"999999999": invalid_id_txt + "999999999",
}
for m, r in expected.items():
self.assert_bot_output(
{'content': m, 'type': "private", 'sender_email': "foo"}, r)
self.assert_bot_output(
{'content': m, 'type': "stream", 'sender_email': "foo"}, r)
|
|
280aa4c8db7b5580b73ab6980f10d21a6ef2d761
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/PyGameOutput.py
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/PyGameOutput.py
|
import numpy
import Numeric
import pygame
import Axon
import time
class PyGameOutput(Axon.ThreadedComponent.threadedcomponent):
bufferSize = 1024
sampleRate = 44100
def __init__(self, **argd):
super(PyGameOutput, self).__init__(**argd)
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
def main(self):
while 1:
if not pygame.mixer.get_init():
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
else:
if self.dataReady("inbox"):
numpyArray = self.recv("inbox")
# Scale to 16 bit int
numpyArray *= 2**15-1
numpyArray = numpyArray.astype("int16")
numericArray = Numeric.asarray(numpyArray)
sound = pygame.sndarray.make_sound(numericArray)
sound.play()
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Audio.SineSynth import SineOsc
Pipeline(SineOsc(), PyGameOutput()).run()
|
Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes.
|
Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes.
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes.
|
import numpy
import Numeric
import pygame
import Axon
import time
class PyGameOutput(Axon.ThreadedComponent.threadedcomponent):
bufferSize = 1024
sampleRate = 44100
def __init__(self, **argd):
super(PyGameOutput, self).__init__(**argd)
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
def main(self):
while 1:
if not pygame.mixer.get_init():
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
else:
if self.dataReady("inbox"):
numpyArray = self.recv("inbox")
# Scale to 16 bit int
numpyArray *= 2**15-1
numpyArray = numpyArray.astype("int16")
numericArray = Numeric.asarray(numpyArray)
sound = pygame.sndarray.make_sound(numericArray)
sound.play()
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Audio.SineSynth import SineOsc
Pipeline(SineOsc(), PyGameOutput()).run()
|
<commit_before><commit_msg>Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes.<commit_after>
|
import numpy
import Numeric
import pygame
import Axon
import time
class PyGameOutput(Axon.ThreadedComponent.threadedcomponent):
bufferSize = 1024
sampleRate = 44100
def __init__(self, **argd):
super(PyGameOutput, self).__init__(**argd)
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
def main(self):
while 1:
if not pygame.mixer.get_init():
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
else:
if self.dataReady("inbox"):
numpyArray = self.recv("inbox")
# Scale to 16 bit int
numpyArray *= 2**15-1
numpyArray = numpyArray.astype("int16")
numericArray = Numeric.asarray(numpyArray)
sound = pygame.sndarray.make_sound(numericArray)
sound.play()
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Audio.SineSynth import SineOsc
Pipeline(SineOsc(), PyGameOutput()).run()
|
Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes.import numpy
import Numeric
import pygame
import Axon
import time
class PyGameOutput(Axon.ThreadedComponent.threadedcomponent):
bufferSize = 1024
sampleRate = 44100
def __init__(self, **argd):
super(PyGameOutput, self).__init__(**argd)
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
def main(self):
while 1:
if not pygame.mixer.get_init():
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
else:
if self.dataReady("inbox"):
numpyArray = self.recv("inbox")
# Scale to 16 bit int
numpyArray *= 2**15-1
numpyArray = numpyArray.astype("int16")
numericArray = Numeric.asarray(numpyArray)
sound = pygame.sndarray.make_sound(numericArray)
sound.play()
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Audio.SineSynth import SineOsc
Pipeline(SineOsc(), PyGameOutput()).run()
|
<commit_before><commit_msg>Add an audio output using the pygame mixer. This abuses pygame to a fair extent, but works reasonably with large-ish buffer sizes.<commit_after>import numpy
import Numeric
import pygame
import Axon
import time
class PyGameOutput(Axon.ThreadedComponent.threadedcomponent):
bufferSize = 1024
sampleRate = 44100
def __init__(self, **argd):
super(PyGameOutput, self).__init__(**argd)
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
def main(self):
while 1:
if not pygame.mixer.get_init():
pygame.mixer.init(self.sampleRate, -16, 1, self.bufferSize)
else:
if self.dataReady("inbox"):
numpyArray = self.recv("inbox")
# Scale to 16 bit int
numpyArray *= 2**15-1
numpyArray = numpyArray.astype("int16")
numericArray = Numeric.asarray(numpyArray)
sound = pygame.sndarray.make_sound(numericArray)
sound.play()
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Audio.SineSynth import SineOsc
Pipeline(SineOsc(), PyGameOutput()).run()
|
|
4433cadaa39dd84b922329c84a7e791d81cac7c6
|
nettests/simpletest.py
|
nettests/simpletest.py
|
from ooni import nettest
class SimpleTest(nettest.TestCase):
inputs = range(1,100)
optParameters = [['asset', 'a', None, 'Asset file'],
['controlserver', 'c', 'google.com', 'Specify the control server'],
['resume', 'r', 0, 'Resume at this index'],
['other', 'o', None, 'Other arguments']]
def test_foo(self, *arg, **kw):
print "Running %s with %s" % ("test_foo", self.input)
self.report['test_foo'] = 'Antani'
self.report['shared'] = "sblinda"
self.assertEqual(1,1)
def test_f4oo(self, *arg, **kw):
print "Running %s with %s" % ("test_f4oo", self.input)
self.report['test_f4oo'] = 'Antani'
self.report['shared'] = "sblinda2"
self.assertEqual(1,1)
|
Add a very simple test that *must* always pass. * Useful for testing the newstyle API
|
Add a very simple test that *must* always pass.
* Useful for testing the newstyle API
|
Python
|
bsd-2-clause
|
Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,hackerberry/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,hackerberry/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe
|
Add a very simple test that *must* always pass.
* Useful for testing the newstyle API
|
from ooni import nettest
class SimpleTest(nettest.TestCase):
inputs = range(1,100)
optParameters = [['asset', 'a', None, 'Asset file'],
['controlserver', 'c', 'google.com', 'Specify the control server'],
['resume', 'r', 0, 'Resume at this index'],
['other', 'o', None, 'Other arguments']]
def test_foo(self, *arg, **kw):
print "Running %s with %s" % ("test_foo", self.input)
self.report['test_foo'] = 'Antani'
self.report['shared'] = "sblinda"
self.assertEqual(1,1)
def test_f4oo(self, *arg, **kw):
print "Running %s with %s" % ("test_f4oo", self.input)
self.report['test_f4oo'] = 'Antani'
self.report['shared'] = "sblinda2"
self.assertEqual(1,1)
|
<commit_before><commit_msg>Add a very simple test that *must* always pass.
* Useful for testing the newstyle API<commit_after>
|
from ooni import nettest
class SimpleTest(nettest.TestCase):
inputs = range(1,100)
optParameters = [['asset', 'a', None, 'Asset file'],
['controlserver', 'c', 'google.com', 'Specify the control server'],
['resume', 'r', 0, 'Resume at this index'],
['other', 'o', None, 'Other arguments']]
def test_foo(self, *arg, **kw):
print "Running %s with %s" % ("test_foo", self.input)
self.report['test_foo'] = 'Antani'
self.report['shared'] = "sblinda"
self.assertEqual(1,1)
def test_f4oo(self, *arg, **kw):
print "Running %s with %s" % ("test_f4oo", self.input)
self.report['test_f4oo'] = 'Antani'
self.report['shared'] = "sblinda2"
self.assertEqual(1,1)
|
Add a very simple test that *must* always pass.
* Useful for testing the newstyle APIfrom ooni import nettest
class SimpleTest(nettest.TestCase):
inputs = range(1,100)
optParameters = [['asset', 'a', None, 'Asset file'],
['controlserver', 'c', 'google.com', 'Specify the control server'],
['resume', 'r', 0, 'Resume at this index'],
['other', 'o', None, 'Other arguments']]
def test_foo(self, *arg, **kw):
print "Running %s with %s" % ("test_foo", self.input)
self.report['test_foo'] = 'Antani'
self.report['shared'] = "sblinda"
self.assertEqual(1,1)
def test_f4oo(self, *arg, **kw):
print "Running %s with %s" % ("test_f4oo", self.input)
self.report['test_f4oo'] = 'Antani'
self.report['shared'] = "sblinda2"
self.assertEqual(1,1)
|
<commit_before><commit_msg>Add a very simple test that *must* always pass.
* Useful for testing the newstyle API<commit_after>from ooni import nettest
class SimpleTest(nettest.TestCase):
inputs = range(1,100)
optParameters = [['asset', 'a', None, 'Asset file'],
['controlserver', 'c', 'google.com', 'Specify the control server'],
['resume', 'r', 0, 'Resume at this index'],
['other', 'o', None, 'Other arguments']]
def test_foo(self, *arg, **kw):
print "Running %s with %s" % ("test_foo", self.input)
self.report['test_foo'] = 'Antani'
self.report['shared'] = "sblinda"
self.assertEqual(1,1)
def test_f4oo(self, *arg, **kw):
print "Running %s with %s" % ("test_f4oo", self.input)
self.report['test_f4oo'] = 'Antani'
self.report['shared'] = "sblinda2"
self.assertEqual(1,1)
|
|
8ccf3d937d25ec93d1ce22d60735ffbcaf776fe3
|
analysis/plot-target-distance.py
|
analysis/plot-target-distance.py
|
import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block*/*circuit*.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
with plots.plot() as ax:
for i, trial in enumerate(experiment.Experiment(root).trials_matching(pattern)):
for t in range(11):
s = trial.movement_to(t).distance_to_target().interpolate().reset_index(drop=True)
ax.plot(s.index, s.values, color=lmj.plot.COLOR11[t])
if __name__ == '__main__':
climate.call(main)
|
Add a script for plotting distance to target.
|
Add a script for plotting distance to target.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add a script for plotting distance to target.
|
import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block*/*circuit*.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
with plots.plot() as ax:
for i, trial in enumerate(experiment.Experiment(root).trials_matching(pattern)):
for t in range(11):
s = trial.movement_to(t).distance_to_target().interpolate().reset_index(drop=True)
ax.plot(s.index, s.values, color=lmj.plot.COLOR11[t])
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add a script for plotting distance to target.<commit_after>
|
import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block*/*circuit*.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
with plots.plot() as ax:
for i, trial in enumerate(experiment.Experiment(root).trials_matching(pattern)):
for t in range(11):
s = trial.movement_to(t).distance_to_target().interpolate().reset_index(drop=True)
ax.plot(s.index, s.values, color=lmj.plot.COLOR11[t])
if __name__ == '__main__':
climate.call(main)
|
Add a script for plotting distance to target.import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block*/*circuit*.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
with plots.plot() as ax:
for i, trial in enumerate(experiment.Experiment(root).trials_matching(pattern)):
for t in range(11):
s = trial.movement_to(t).distance_to_target().interpolate().reset_index(drop=True)
ax.plot(s.index, s.values, color=lmj.plot.COLOR11[t])
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add a script for plotting distance to target.<commit_after>import climate
import itertools
import lmj.plot
import numpy as np
import source as experiment
import plots
@climate.annotate(
root='plot data from this experiment subjects',
pattern=('plot data from files matching this pattern', 'option'),
markers=('plot data for these mocap markers', 'option'),
target_num=('plot data for this target', 'option', None, int),
approach_sec=('plot variance for N sec prior to target acquisition', 'option', None, float),
)
def main(root, pattern='*/*block*/*circuit*.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', target_num=5, approach_sec=2):
with plots.plot() as ax:
for i, trial in enumerate(experiment.Experiment(root).trials_matching(pattern)):
for t in range(11):
s = trial.movement_to(t).distance_to_target().interpolate().reset_index(drop=True)
ax.plot(s.index, s.values, color=lmj.plot.COLOR11[t])
if __name__ == '__main__':
climate.call(main)
|
|
74550ef0c76a941c473c8d024ccc0a0403631c49
|
wqflask/tests/integration/test_markdown_routes.py
|
wqflask/tests/integration/test_markdown_routes.py
|
"Integration tests for markdown routes"
import unittest
from bs4 import BeautifulSoup
from wqflask import app
class TestGenMenu(unittest.TestCase):
"""Tests for glossary"""
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_glossary_page(self):
"""Test that the glossary page is rendered properly"""
response = self.app.get('/glossary', follow_redirects=True)
pass
|
Add basic structure for "/glossary" routes test
|
Add basic structure for "/glossary" routes test
|
Python
|
agpl-3.0
|
zsloan/genenetwork2,zsloan/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2
|
Add basic structure for "/glossary" routes test
|
"Integration tests for markdown routes"
import unittest
from bs4 import BeautifulSoup
from wqflask import app
class TestGenMenu(unittest.TestCase):
"""Tests for glossary"""
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_glossary_page(self):
"""Test that the glossary page is rendered properly"""
response = self.app.get('/glossary', follow_redirects=True)
pass
|
<commit_before><commit_msg>Add basic structure for "/glossary" routes test<commit_after>
|
"Integration tests for markdown routes"
import unittest
from bs4 import BeautifulSoup
from wqflask import app
class TestGenMenu(unittest.TestCase):
"""Tests for glossary"""
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_glossary_page(self):
"""Test that the glossary page is rendered properly"""
response = self.app.get('/glossary', follow_redirects=True)
pass
|
Add basic structure for "/glossary" routes test"Integration tests for markdown routes"
import unittest
from bs4 import BeautifulSoup
from wqflask import app
class TestGenMenu(unittest.TestCase):
"""Tests for glossary"""
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_glossary_page(self):
"""Test that the glossary page is rendered properly"""
response = self.app.get('/glossary', follow_redirects=True)
pass
|
<commit_before><commit_msg>Add basic structure for "/glossary" routes test<commit_after>"Integration tests for markdown routes"
import unittest
from bs4 import BeautifulSoup
from wqflask import app
class TestGenMenu(unittest.TestCase):
"""Tests for glossary"""
def setUp(self):
self.app = app.test_client()
def tearDown(self):
pass
def test_glossary_page(self):
"""Test that the glossary page is rendered properly"""
response = self.app.get('/glossary', follow_redirects=True)
pass
|