| commit (stringlengths 40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f51c52b612e09c2ed6738bf234dbf4da9986b332
|
moduleManager.py
|
moduleManager.py
|
from glob import glob
import importlib


class ModuleManager:
    def __init__(self):
        self.availableNodes = {}

        # Import all builtin modules first
        modpaths = glob("modules/*.py")
        for modpath in modpaths:
            newmod = importlib.import_module("modules." + modpath[8:-3])
            for nodeName in newmod.__nodes__:
                nodeClass = getattr(newmod, nodeName)
                self.availableNodes[nodeClass.nodeName] = nodeClass

        # Then import all modules from home config folder
        # TODO: Implement

        # Then import all modules from project folder
        # TODO: Implement
|
Move module manager to own module
|
Move module manager to own module
|
Python
|
mit
|
DrLuke/gpnshader
|
|
a772e7cbc6597585408daab1cb2b00c1d397aa3c
|
CodeFights/efficientComparison.py
|
CodeFights/efficientComparison.py
|
#!/usr/local/bin/python
# Code Fights Efficient Comparison Problem

import time


def main():
    x, y, L, R = 9, 9, 1, 10000
    print("Procedure 1")
    t1 = time.clock()
    procedure1(x, y, L, R)
    print(time.clock() - t1)
    print("Procedure 2")
    t2 = time.clock()
    procedure2(x, y, L, R)
    print(time.clock() - t2)
    print("Procedure 3")
    t3 = time.clock()
    procedure3(x, y, L, R)
    print(time.clock() - t3)


def procedure1(x, y, L, R):
    return L < x**y <= R


def procedure2(x, y, L, R):
    return x**y > L and x**y <= R


def procedure3(x, y, L, R):
    return x**y in range(L + 1, R + 1)


if __name__ == '__main__':
    main()
|
Solve Code Fights efficient comparison problem
|
Solve Code Fights efficient comparison problem
|
Python
|
mit
|
HKuz/Test_Code
|
|
0d176a318fcc3a1206919935d3a257d0606fb49b
|
tools/validate_cli_serial.py
|
tools/validate_cli_serial.py
|
#!/bin/python2
# Copyright (C) 2021 OpenMotics BV
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import sys
from serial.serialposix import Serial


def watch(port):
    try:
        previous_output = None
        serial = Serial(port, 115200, timeout=0.5)
        _read(serial)  # Clear buffer
        count = 0
        while True:
            count += 1
            serial.write('output list\r\n')
            output = _read(serial).strip()
            if output != previous_output:
                _print_diff(previous_output if previous_output is not None else output,
                            output)
                previous_output = output
                sys.stdout.write('Count: {0:04d}'.format(count))
            sys.stdout.write('\rCount: {0:04d}'.format(count))
            sys.stdout.flush()
    except KeyboardInterrupt:
        print('Exit')


def _print_diff(a_string, b_string):
    output = ''
    color_started = False
    for i in range(max(len(a_string), len(b_string))):
        a = a_string[i] if i < len(a_string) else '?'
        b = b_string[i] if i < len(b_string) else '?'
        if a != b:
            if color_started is False:
                output += '\033[101m'
                color_started = True
        else:
            if color_started is True:
                output += '\033[0m'
                color_started = False
        output += b
    output += '\033[0m'
    sys.stdout.write('\n\n{0}\n\n'.format(output))
    sys.stdout.flush()


def _read(serial):
    buffer = ''
    new_data = serial.read(1)
    while len(new_data) > 0:
        buffer += new_data
        if buffer.endswith('OK'):
            return buffer
        new_data = serial.read(1)
    return buffer


if __name__ == "__main__":
    if len(sys.argv) != 2:
        print('Validates correct communication with the Brain using CLI (error list)')
        print('Usage: ./validate_cli_serial.py <port>')
        print('Port is typically /dev/ttyO2')
        sys.exit(1)
    watch(sys.argv[1])
|
Add script to validate CLI communications
|
Add script to validate CLI communications
|
Python
|
agpl-3.0
|
openmotics/gateway,openmotics/gateway
|
|
cf2a110cc9f71fa7555d212de86e5c67d6095ae3
|
CodeFights/calkinWilfSequence.py
|
CodeFights/calkinWilfSequence.py
|
#!/usr/local/bin/python
# Code Fights Calkin Wilf Problem


def calkinWilfSequence(number):
    def fractions():
        cur = (1, 1)
        while True:
            yield list(cur)
            cur = (cur[1], (2 * int(cur[0] / cur[1]) + 1) * cur[1] -
                   cur[0])
    gen = fractions()
    res = 0
    while next(gen) != number:
        res += 1
    return res


def main():
    tests = [
        [[1, 3], 3],
        [[1, 1], 0],
        [[3, 1], 6],
        [[14, 3], 110],
        [[7, 13], 129]
    ]
    for t in tests:
        res = calkinWilfSequence(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: calkinWilfSequence({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: calkinWilfSequence({}) returned {}, answer: {}"
                  .format(t[0], res, ans))


if __name__ == '__main__':
    main()
|
Solve Code Fights calkin wilf sequence problem
|
Solve Code Fights calkin wilf sequence problem
|
Python
|
mit
|
HKuz/Test_Code
|
|
4042faa044dc051f38f473c1199b253ddd7b0b7a
|
dataset_collection.py
|
dataset_collection.py
|
from app import db
from app import Text
from sklearn.externals import joblib
import gc
#Train Data Set
training_collection = Text.query.filter_by(data_set = "train").all()
training_targets = []
training_text_collection = []
gc.disable()
for text in training_collection:
    training_targets.append(text.period_start_year)
    training_text_collection.append(text.text_content)
gc.enable()
save_training_text_collection = open("training_text_collection.pkl", "wb")
joblib.dump(training_text_collection, save_training_text_collection)
save_training_text_collection.close()
save_training_targets = open("training_targets.pkl", "wb")
joblib.dump(training_targets, save_training_targets)
save_training_targets.close()
#Test Data Set
testing_collection = Text.query.filter_by(data_set = "test").all()
testing_targets = []
testing_text_collection = []
gc.disable()
for text in testing_collection:
    testing_targets.append(text.period_start_year)
    testing_text_collection.append(text.text_content)
gc.enable()
save_testing_text_collection = open("testing_text_collection.pkl", "wb")
joblib.dump(testing_text_collection, save_testing_text_collection)
save_testing_text_collection.close()
save_testing_targets = open("testing_targets.pkl", "wb")
joblib.dump(testing_targets, save_testing_targets)
save_testing_targets.close()
|
Create training and testing text and target pickle files
|
Create training and testing text and target pickle files
|
Python
|
mit
|
npentella/CuriousCorpus,npentella/CuriousCorpus,npentella/CuriousCorpus
|
|
0c22dfc65c4d6188c2d1fa127d357945914aa100
|
biolib/src/test/cafparser_test.py
|
biolib/src/test/cafparser_test.py
|
'''
Created on 2009 mar 11
@author: peio
'''
import unittest
from biolib.cafparser import CafFile
class Test(unittest.TestCase):
    ''' It tests '''

    def setUp(self):
        self._file2test = '/home/peio/eucalyptus_out.caf'

    def test_caf_parser(self):
        ''' It tests if we can create and caf file giving a file name'''
        # caf_file_name = '../doc/Caf_example_file.caf'
        caf_file = CafFile(self._file2test)
        assert caf_file

    def test_reads(self):
        ''' we check if we can take the reads from the caf file'''
        # caf_file_name = '../doc/Caf_example_file.caf'
        caf_file = CafFile(self._file2test)
        for read in caf_file.reads():
            read_object = caf_file.read_record2read(read['name'])
            assert read_object

    def test_contigs(self):
        ''' It checks if the contig method returns contigs'''
        # caf_file_name = '../doc/Caf_example_file.caf'
        caf_file = CafFile(self._file2test)
        for contig in caf_file.contigs():
            contig_object = caf_file.contig_record2contig(contig['name'])
            print contig_object


if __name__ == "__main__":
    unittest.main()
|
Add three test to check the module
|
Add three test to check the module
|
Python
|
agpl-3.0
|
JoseBlanca/franklin,JoseBlanca/franklin
|
|
5d1ca10b9e33e8e37e08de5233a8fb143c99936b
|
spikes_to_mat.py
|
spikes_to_mat.py
|
import click
import numpy as np
from scipy import stats
import scipy.io as sio
from scipy.special import expit

from spikes_activity_generator import generate_spikes, spike_and_slab


@click.command()
@click.option('--num_neurons', type=click.INT,
              default=10,
              help='number of neurons in the network')
@click.option('--time_steps', type=click.INT,
              default=100,
              help='Number of time stamps. Length of recording')
@click.option('--likelihood_function', type=click.STRING,
              default='probit',
              help='Should be either probit or logistic')
@click.option('--sparsity', type=click.FLOAT,
              default=0.3,
              help='Set sparsity of connectivity, aka ro parameter.')
def main(num_neurons, time_steps, likelihood_function, sparsity):
    # Get the spiking activity
    N = num_neurons
    T = time_steps
    J = spike_and_slab(sparsity, N)
    S0 = -np.ones(N)

    if likelihood_function == 'probit':
        energy_function = stats.norm.cdf
    elif likelihood_function == 'logistic':
        energy_function = expit
    else:
        raise ValueError('Unknown likelihood function')

    S = generate_spikes(N, T, S0, J, energy_function)

    file_name = 'spikes_connectivity_N_' + str(N) + '_T_' + str(T) + '_ro_' + str(sparsity).replace(".", "") \
                + "_" + likelihood_function + '.mat'
    sio.savemat(file_name, {'S': S, 'J': J})


if __name__ == "__main__":
    main()
|
Add file to save activity and connectivity matrices to a matlab file
|
Add file to save activity and connectivity matrices to a matlab file
|
Python
|
mit
|
noashin/kinetic_ising_model_neurons
|
|
d4e7571b1d361a9d24650a74fffbc1980c2bbc70
|
blaze/compute/air/frontend/ckernel_impls.py
|
blaze/compute/air/frontend/ckernel_impls.py
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
try:
overload = function.best_match('ckernel', argtypes)
except datashape.CoercionError:
return op
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
# Default overload is CKERNEL, so no need to look it up again
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
Remove redundant 'ckernel' overload match
|
Remove redundant 'ckernel' overload match
|
Python
|
bsd-3-clause
|
jdmcbr/blaze,alexmojaki/blaze,FrancescAlted/blaze,aterrel/blaze,xlhtc007/blaze,mwiebe/blaze,cowlicks/blaze,ChinaQuants/blaze,mwiebe/blaze,mwiebe/blaze,LiaoPan/blaze,xlhtc007/blaze,dwillmer/blaze,FrancescAlted/blaze,scls19fr/blaze,cpcloud/blaze,scls19fr/blaze,caseyclements/blaze,mrocklin/blaze,FrancescAlted/blaze,ContinuumIO/blaze,FrancescAlted/blaze,LiaoPan/blaze,dwillmer/blaze,nkhuyu/blaze,jdmcbr/blaze,cowlicks/blaze,alexmojaki/blaze,aterrel/blaze,jcrist/blaze,maxalbert/blaze,mrocklin/blaze,caseyclements/blaze,ContinuumIO/blaze,nkhuyu/blaze,mwiebe/blaze,ChinaQuants/blaze,aterrel/blaze,jcrist/blaze,cpcloud/blaze,maxalbert/blaze
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
try:
overload = function.best_match('ckernel', argtypes)
except datashape.CoercionError:
return op
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
Remove redundant 'ckernel' overload match
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
# Default overload is CKERNEL, so no need to look it up again
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
<commit_before>"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
try:
overload = function.best_match('ckernel', argtypes)
except datashape.CoercionError:
return op
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
<commit_msg>Remove redundant 'ckernel' overload match<commit_after>
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
# Default overload is CKERNEL, so no need to look it up again
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
try:
overload = function.best_match('ckernel', argtypes)
except datashape.CoercionError:
return op
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
Remove redundant 'ckernel' overload match"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
# Default overload is CKERNEL, so no need to look it up again
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
<commit_before>"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
try:
overload = function.best_match('ckernel', argtypes)
except datashape.CoercionError:
return op
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
<commit_msg>Remove redundant 'ckernel' overload match<commit_after>"""
Lift ckernels to their appropriate rank so they always consume the full array
arguments.
"""
from __future__ import absolute_import, division, print_function
import datashape
from pykit.ir import transform, Op
#------------------------------------------------------------------------
# Run
#------------------------------------------------------------------------
def run(func, env):
strategies = env['strategies']
transform(CKernelImplementations(strategies), func)
#------------------------------------------------------------------------
# Extract CKernel Implementations
#------------------------------------------------------------------------
class CKernelImplementations(object):
"""
For kernels that are implemented via ckernels, this
grabs the ckernel_deferred and turns it into a ckernel
op.
"""
def __init__(self, strategies):
self.strategies = strategies
def op_kernel(self, op):
if self.strategies[op] != 'ckernel':
return
function = op.metadata['kernel']
overload = op.metadata['overload']
# Default overload is CKERNEL, so no need to look it up again
func = overload.func
polysig = overload.sig
monosig = overload.resolved_sig
argtypes = datashape.coretypes.Tuple(monosig.argtypes)
impl = overload.func
assert monosig == overload.resolved_sig, (monosig,
overload.resolved_sig)
new_op = Op('ckernel', op.type, [impl, op.args[1:]], op.result)
new_op.add_metadata({'rank': 0,
'parallel': True})
return new_op
|
f623775309c75cd0742b03df4ff4759efee4470d
|
Code/Python/Kamaelia/Test/Internet/test_MulticastTransceiverSystem.py
|
Code/Python/Kamaelia/Test/Internet/test_MulticastTransceiverSystem.py
|
#!/usr/bin/python
#
# Basic acceptance test harness for the Multicast_sender and receiver
# components.
#
import socket
import Axon
def tests():
from Axon.Scheduler import scheduler
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from Kamaelia.Util.Chargen import Chargen
from Kamaelia.Internet.Multicast_sender import Multicast_sender
from Kamaelia.Internet.Multicast_receiver import Multicast_receiver
from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver
class testComponent(Axon.Component.component):
def main(self):
chargen= Chargen()
sender = Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600)
receiver = Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0)
display = consoleEchoer()
self.link((chargen,"outbox"), (sender,"inbox"))
self.link((receiver,"outbox"), (display,"inbox"))
self.addChildren(chargen, sender, receiver, display)
yield Axon.Ipc.newComponent(*(self.children))
while 1:
self.pause()
yield 1
harness = testComponent()
harness.activate()
scheduler.run.runThreads(slowmo=0.1)
if __name__=="__main__":
tests()
|
Test harness for the multicast transceiver.
|
Test harness for the multicast transceiver.
Michael.
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Test harness for the multicast transceiver.
Michael.
|
#!/usr/bin/python
#
# Basic acceptance test harness for the Multicast_sender and receiver
# components.
#
import socket
import Axon
def tests():
from Axon.Scheduler import scheduler
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from Kamaelia.Util.Chargen import Chargen
from Kamaelia.Internet.Multicast_sender import Multicast_sender
from Kamaelia.Internet.Multicast_receiver import Multicast_receiver
from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver
class testComponent(Axon.Component.component):
def main(self):
chargen= Chargen()
sender = Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600)
receiver = Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0)
display = consoleEchoer()
self.link((chargen,"outbox"), (sender,"inbox"))
self.link((receiver,"outbox"), (display,"inbox"))
self.addChildren(chargen, sender, receiver, display)
yield Axon.Ipc.newComponent(*(self.children))
while 1:
self.pause()
yield 1
harness = testComponent()
harness.activate()
scheduler.run.runThreads(slowmo=0.1)
if __name__=="__main__":
tests()
|
<commit_before><commit_msg>Test harness for the multicast transceiver.
Michael.<commit_after>
|
#!/usr/bin/python
#
# Basic acceptance test harness for the Multicast_sender and receiver
# components.
#
import socket
import Axon
def tests():
from Axon.Scheduler import scheduler
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from Kamaelia.Util.Chargen import Chargen
from Kamaelia.Internet.Multicast_sender import Multicast_sender
from Kamaelia.Internet.Multicast_receiver import Multicast_receiver
from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver
class testComponent(Axon.Component.component):
def main(self):
chargen= Chargen()
sender = Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600)
receiver = Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0)
display = consoleEchoer()
self.link((chargen,"outbox"), (sender,"inbox"))
self.link((receiver,"outbox"), (display,"inbox"))
self.addChildren(chargen, sender, receiver, display)
yield Axon.Ipc.newComponent(*(self.children))
while 1:
self.pause()
yield 1
harness = testComponent()
harness.activate()
scheduler.run.runThreads(slowmo=0.1)
if __name__=="__main__":
tests()
|
Test harness for the multicast transceiver.
Michael.#!/usr/bin/python
#
# Basic acceptance test harness for the Multicast_sender and receiver
# components.
#
import socket
import Axon
def tests():
from Axon.Scheduler import scheduler
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from Kamaelia.Util.Chargen import Chargen
from Kamaelia.Internet.Multicast_sender import Multicast_sender
from Kamaelia.Internet.Multicast_receiver import Multicast_receiver
from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver
class testComponent(Axon.Component.component):
def main(self):
chargen= Chargen()
sender = Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600)
receiver = Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0)
display = consoleEchoer()
self.link((chargen,"outbox"), (sender,"inbox"))
self.link((receiver,"outbox"), (display,"inbox"))
self.addChildren(chargen, sender, receiver, display)
yield Axon.Ipc.newComponent(*(self.children))
while 1:
self.pause()
yield 1
harness = testComponent()
harness.activate()
scheduler.run.runThreads(slowmo=0.1)
if __name__=="__main__":
tests()
|
<commit_before><commit_msg>Test harness for the multicast transceiver.
Michael.<commit_after>#!/usr/bin/python
#
# Basic acceptance test harness for the Multicast_sender and receiver
# components.
#
import socket
import Axon
def tests():
from Axon.Scheduler import scheduler
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from Kamaelia.Util.Chargen import Chargen
from Kamaelia.Internet.Multicast_sender import Multicast_sender
from Kamaelia.Internet.Multicast_receiver import Multicast_receiver
from Kamaelia.Internet.Multicast_transceiver import Multicast_transceiver
class testComponent(Axon.Component.component):
def main(self):
chargen= Chargen()
sender = Multicast_transceiver("0.0.0.0", 0, "224.168.2.9", 1600)
receiver = Multicast_transceiver("0.0.0.0", 1600, "224.168.2.9", 0)
display = consoleEchoer()
self.link((chargen,"outbox"), (sender,"inbox"))
self.link((receiver,"outbox"), (display,"inbox"))
self.addChildren(chargen, sender, receiver, display)
yield Axon.Ipc.newComponent(*(self.children))
while 1:
self.pause()
yield 1
harness = testComponent()
harness.activate()
scheduler.run.runThreads(slowmo=0.1)
if __name__=="__main__":
tests()
|
|
74103c1af330221cfa668eb2496ab99b49775e7c
|
storyboard/db/migration/alembic_migrations/versions/040_create_accesstoken_index.py
|
storyboard/db/migration/alembic_migrations/versions/040_create_accesstoken_index.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This migration creates a new index on accesstokens table
for the access_token column.
Revision ID: 040
Revises: 039
Create Date: 2015-02-17 12:00:00
"""
# revision identifiers, used by Alembic.
revision = '040'
down_revision = '039'
from alembic import op
def upgrade(active_plugins=None, options=None):
op.create_index('accesstokens_access_token_idx',
'accesstokens', ['access_token'])
def downgrade(active_plugins=None, options=None):
op.drop_index('accesstokens_access_token_idx')
|
Add an index on accesstokens table for access_token column
|
Add an index on accesstokens table for access_token column
Performance improvement tweak for retrieving and validating
access_tokens.
Change-Id: I96a81902d607cc3a3bbb20e71df5f87ff544406e
Story: 2000165
|
Python
|
apache-2.0
|
ColdrickSotK/storyboard,ColdrickSotK/storyboard,ColdrickSotK/storyboard
|
Add an index on accesstokens table for access_token column
Performance improvement tweak for retrieving and validating
access_tokens.
Change-Id: I96a81902d607cc3a3bbb20e71df5f87ff544406e
Story: 2000165
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This migration creates a new index on accesstokens table
for the access_token column.
Revision ID: 040
Revises: 039
Create Date: 2015-02-17 12:00:00
"""
# revision identifiers, used by Alembic.
revision = '040'
down_revision = '039'
from alembic import op
def upgrade(active_plugins=None, options=None):
op.create_index('accesstokens_access_token_idx',
'accesstokens', ['access_token'])
def downgrade(active_plugins=None, options=None):
op.drop_index('accesstokens_access_token_idx')
|
<commit_before><commit_msg>Add an index on accesstokens table for access_token column
Performance improvement tweak for retrieving and validating
access_tokens.
Change-Id: I96a81902d607cc3a3bbb20e71df5f87ff544406e
Story: 2000165<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This migration creates a new index on accesstokens table
for the access_token column.
Revision ID: 040
Revises: 039
Create Date: 2015-02-17 12:00:00
"""
# revision identifiers, used by Alembic.
revision = '040'
down_revision = '039'
from alembic import op
def upgrade(active_plugins=None, options=None):
op.create_index('accesstokens_access_token_idx',
'accesstokens', ['access_token'])
def downgrade(active_plugins=None, options=None):
op.drop_index('accesstokens_access_token_idx')
|
Add an index on accesstokens table for access_token column
Performance improvement tweak for retrieving and validating
access_tokens.
Change-Id: I96a81902d607cc3a3bbb20e71df5f87ff544406e
Story: 2000165# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This migration creates a new index on accesstokens table
for the access_token column.
Revision ID: 040
Revises: 039
Create Date: 2015-02-17 12:00:00
"""
# revision identifiers, used by Alembic.
revision = '040'
down_revision = '039'
from alembic import op
def upgrade(active_plugins=None, options=None):
op.create_index('accesstokens_access_token_idx',
'accesstokens', ['access_token'])
def downgrade(active_plugins=None, options=None):
op.drop_index('accesstokens_access_token_idx')
|
<commit_before><commit_msg>Add an index on accesstokens table for access_token column
Performance improvement tweak for retrieving and validating
access_tokens.
Change-Id: I96a81902d607cc3a3bbb20e71df5f87ff544406e
Story: 2000165<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""This migration creates a new index on accesstokens table
for the access_token column.
Revision ID: 040
Revises: 039
Create Date: 2015-02-17 12:00:00
"""
# revision identifiers, used by Alembic.
revision = '040'
down_revision = '039'
from alembic import op
def upgrade(active_plugins=None, options=None):
op.create_index('accesstokens_access_token_idx',
'accesstokens', ['access_token'])
def downgrade(active_plugins=None, options=None):
op.drop_index('accesstokens_access_token_idx')
|
|
ac8e94368b86d406811016233a4c0f0b47cf87e9
|
IMAP/move_imap.py
|
IMAP/move_imap.py
|
import imaplib, getpass, re
pattern_uid = re.compile('\d+ \(UID (?P<uid>\d+)\)')
def connect(email):
imap = imaplib.IMAP4_SSL("imap.gmail.com")
password = getpass.getpass("Enter your password: ")
imap.login(email, password)
return imap
def disconnect(imap):
imap.logout()
def parse_uid(data):
match = pattern_uid.match(data)
return match.group('uid')
if __name__ == '__main__':
imap = connect('example@gmail.com')
imap.select(mailbox='inbox', readonly=False)
resp, items = imap.search(None, 'All')
email_ids = items[0].split()
latest_email_id = email_ids[-1] # Assuming that you are moving the latest email.
resp, data = imap.fetch(latest_email_id, "(UID)")
msg_uid = parse_uid(data[0])
result = imap.uid('COPY', msg_uid, 'Bozze')
#if result[0] == 'OK':
# mov, data = imap.uid('STORE', msg_uid , '+FLAGS', '(\Deleted)')
imap.expunge()
disconnect(imap)
|
Add script to move mail in imap folder
|
Add script to move mail in imap folder
|
Python
|
agpl-3.0
|
Micronaet/micronaet-script,Micronaet/micronaet-script
|
Add script to move mail in imap folder
|
import imaplib, getpass, re
pattern_uid = re.compile('\d+ \(UID (?P<uid>\d+)\)')
def connect(email):
imap = imaplib.IMAP4_SSL("imap.gmail.com")
password = getpass.getpass("Enter your password: ")
imap.login(email, password)
return imap
def disconnect(imap):
imap.logout()
def parse_uid(data):
match = pattern_uid.match(data)
return match.group('uid')
if __name__ == '__main__':
imap = connect('example@gmail.com')
imap.select(mailbox='inbox', readonly=False)
resp, items = imap.search(None, 'All')
email_ids = items[0].split()
latest_email_id = email_ids[-1] # Assuming that you are moving the latest email.
resp, data = imap.fetch(latest_email_id, "(UID)")
msg_uid = parse_uid(data[0])
result = imap.uid('COPY', msg_uid, 'Bozze')
#if result[0] == 'OK':
# mov, data = imap.uid('STORE', msg_uid , '+FLAGS', '(\Deleted)')
imap.expunge()
disconnect(imap)
|
<commit_before><commit_msg>Add script to move mail in imap folder<commit_after>
|
import imaplib, getpass, re
pattern_uid = re.compile('\d+ \(UID (?P<uid>\d+)\)')
def connect(email):
imap = imaplib.IMAP4_SSL("imap.gmail.com")
password = getpass.getpass("Enter your password: ")
imap.login(email, password)
return imap
def disconnect(imap):
imap.logout()
def parse_uid(data):
match = pattern_uid.match(data)
return match.group('uid')
if __name__ == '__main__':
imap = connect('example@gmail.com')
imap.select(mailbox='inbox', readonly=False)
resp, items = imap.search(None, 'All')
email_ids = items[0].split()
latest_email_id = email_ids[-1] # Assuming that you are moving the latest email.
resp, data = imap.fetch(latest_email_id, "(UID)")
msg_uid = parse_uid(data[0])
result = imap.uid('COPY', msg_uid, 'Bozze')
#if result[0] == 'OK':
# mov, data = imap.uid('STORE', msg_uid , '+FLAGS', '(\Deleted)')
imap.expunge()
disconnect(imap)
|
Add script to move mail in imap folderimport imaplib, getpass, re
pattern_uid = re.compile('\d+ \(UID (?P<uid>\d+)\)')
def connect(email):
imap = imaplib.IMAP4_SSL("imap.gmail.com")
password = getpass.getpass("Enter your password: ")
imap.login(email, password)
return imap
def disconnect(imap):
imap.logout()
def parse_uid(data):
match = pattern_uid.match(data)
return match.group('uid')
if __name__ == '__main__':
imap = connect('example@gmail.com')
imap.select(mailbox='inbox', readonly=False)
resp, items = imap.search(None, 'All')
email_ids = items[0].split()
latest_email_id = email_ids[-1] # Assuming that you are moving the latest email.
resp, data = imap.fetch(latest_email_id, "(UID)")
msg_uid = parse_uid(data[0])
result = imap.uid('COPY', msg_uid, 'Bozze')
#if result[0] == 'OK':
# mov, data = imap.uid('STORE', msg_uid , '+FLAGS', '(\Deleted)')
imap.expunge()
disconnect(imap)
|
<commit_before><commit_msg>Add script to move mail in imap folder<commit_after>import imaplib, getpass, re
pattern_uid = re.compile('\d+ \(UID (?P<uid>\d+)\)')
def connect(email):
imap = imaplib.IMAP4_SSL("imap.gmail.com")
password = getpass.getpass("Enter your password: ")
imap.login(email, password)
return imap
def disconnect(imap):
imap.logout()
def parse_uid(data):
match = pattern_uid.match(data)
return match.group('uid')
if __name__ == '__main__':
imap = connect('example@gmail.com')
imap.select(mailbox='inbox', readonly=False)
resp, items = imap.search(None, 'All')
email_ids = items[0].split()
latest_email_id = email_ids[-1] # Assuming that you are moving the latest email.
resp, data = imap.fetch(latest_email_id, "(UID)")
msg_uid = parse_uid(data[0])
result = imap.uid('COPY', msg_uid, 'Bozze')
#if result[0] == 'OK':
# mov, data = imap.uid('STORE', msg_uid , '+FLAGS', '(\Deleted)')
imap.expunge()
disconnect(imap)
|
|
db29615f7de3fb809e9fd78f43b6d3a61452623d
|
14B-088/HI/imaging/deproj_cube.py
|
14B-088/HI/imaging/deproj_cube.py
|
'''
Create a deprojected cube in M33's frame
'''
from spectral_cube import SpectralCube
from astropy.io import fits
import numpy as np
import os
import astropy.units as u
from radio_beam import Beam
from cube_analysis.cube_deproject import deproject_cube
from paths import (fourteenB_wGBT_HI_file_dict, allfigs_path,
fourteenB_HI_data_wGBT_path, data_path)
from galaxy_params import gal_feath as gal
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.com_beam.fits"))
deproject_cube(cube, gal, num_cores=6, chunk=100,
save_name=fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits",
no_check=True))
hdu = fits.open(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits"),
mode='update')
# Update the beam in the header
hdr = cube.header
hdr.update(Beam(major=cube.beam.major / np.cos(gal.inclination),
minor=cube.beam.major,
pa=gal.position_angle + 90 * u.deg).to_header_keywords())
hdu.flush()
hdu.close()
|
Add script to make a deprojected cube
|
Add script to make a deprojected cube
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Add script to make a deprojected cube
|
'''
Create a deprojected cube in M33's frame
'''
from spectral_cube import SpectralCube
from astropy.io import fits
import numpy as np
import os
import astropy.units as u
from radio_beam import Beam
from cube_analysis.cube_deproject import deproject_cube
from paths import (fourteenB_wGBT_HI_file_dict, allfigs_path,
fourteenB_HI_data_wGBT_path, data_path)
from galaxy_params import gal_feath as gal
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.com_beam.fits"))
deproject_cube(cube, gal, num_cores=6, chunk=100,
save_name=fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits",
no_check=True))
hdu = fits.open(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits"),
mode='update')
# Update the beam in the header
hdr = cube.header
hdr.update(Beam(major=cube.beam.major / np.cos(gal.inclination),
minor=cube.beam.major,
pa=gal.position_angle + 90 * u.deg).to_header_keywords())
hdu.flush()
hdu.close()
|
<commit_before><commit_msg>Add script to make a deprojected cube<commit_after>
|
'''
Create a deprojected cube in M33's frame
'''
from spectral_cube import SpectralCube
from astropy.io import fits
import numpy as np
import os
import astropy.units as u
from radio_beam import Beam
from cube_analysis.cube_deproject import deproject_cube
from paths import (fourteenB_wGBT_HI_file_dict, allfigs_path,
fourteenB_HI_data_wGBT_path, data_path)
from galaxy_params import gal_feath as gal
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.com_beam.fits"))
deproject_cube(cube, gal, num_cores=6, chunk=100,
save_name=fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits",
no_check=True))
hdu = fits.open(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits"),
mode='update')
# Update the beam in the header
hdr = cube.header
hdr.update(Beam(major=cube.beam.major / np.cos(gal.inclination),
minor=cube.beam.major,
pa=gal.position_angle + 90 * u.deg).to_header_keywords())
hdu.flush()
hdu.close()
|
Add script to make a deprojected cube
'''
Create a deprojected cube in M33's frame
'''
from spectral_cube import SpectralCube
from astropy.io import fits
import numpy as np
import os
import astropy.units as u
from radio_beam import Beam
from cube_analysis.cube_deproject import deproject_cube
from paths import (fourteenB_wGBT_HI_file_dict, allfigs_path,
fourteenB_HI_data_wGBT_path, data_path)
from galaxy_params import gal_feath as gal
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.com_beam.fits"))
deproject_cube(cube, gal, num_cores=6, chunk=100,
save_name=fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits",
no_check=True))
hdu = fits.open(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits"),
mode='update')
# Update the beam in the header
hdr = cube.header
hdr.update(Beam(major=cube.beam.major / np.cos(gal.inclination),
minor=cube.beam.major,
pa=gal.position_angle + 90 * u.deg).to_header_keywords())
hdu.flush()
hdu.close()
|
<commit_before><commit_msg>Add script to make a deprojected cube<commit_after>
'''
Create a deprojected cube in M33's frame
'''
from spectral_cube import SpectralCube
from astropy.io import fits
import numpy as np
import os
import astropy.units as u
from radio_beam import Beam
from cube_analysis.cube_deproject import deproject_cube
from paths import (fourteenB_wGBT_HI_file_dict, allfigs_path,
fourteenB_HI_data_wGBT_path, data_path)
from galaxy_params import gal_feath as gal
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.com_beam.fits"))
deproject_cube(cube, gal, num_cores=6, chunk=100,
save_name=fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits",
no_check=True))
hdu = fits.open(fourteenB_HI_data_wGBT_path("M33_14B-088_HI.clean.image.GBT_feathered.pbcov_gt_0.5_masked.deproject.fits"),
mode='update')
# Update the beam in the header
hdr = cube.header
hdr.update(Beam(major=cube.beam.major / np.cos(gal.inclination),
minor=cube.beam.major,
pa=gal.position_angle + 90 * u.deg).to_header_keywords())
hdu.flush()
hdu.close()
|
|
d55c23575bd247affcb200e3d835fe74fcf1fd54
|
web/web/settings/arnes.py
|
web/web/settings/arnes.py
|
from common import *
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['tomo.arnes.si']
WSGI_APPLICATION = 'web.wsgi.dev.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tomo',
'USER': 'tomo',
'PASSWORD': 'tomo',
'HOST': 'db',
'PORT': '',
}
}
STATIC_ROOT = '/home/tomo/projekt-tomo/web/static'
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
AD_DNS_NAME = 'warpout.fmf.uni-lj.si'
AD_LDAP_PORT = 389
AD_SEARCH_DN = 'ou=uporabniki,dc=std,dc=fmf,dc=uni-lj,dc=si'
AD_NT4_DOMAIN = 'std'
AD_SEARCH_FIELDS = ['mail', 'givenName', 'sn', 'sAMAccountName']
AD_LDAP_URL = 'ldap://%s:%s' % (AD_DNS_NAME, AD_LDAP_PORT)
AUTHENTICATION_BACKENDS = (
'utils.auth.ActiveDirectoryBackend',
'django.contrib.auth.backends.ModelBackend',
)
|
Add settings for ARNES Tomo instance
|
Add settings for ARNES Tomo instance
|
Python
|
agpl-3.0
|
matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo
|
Add settings for ARNES Tomo instance
|
from common import *
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['tomo.arnes.si']
WSGI_APPLICATION = 'web.wsgi.dev.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tomo',
'USER': 'tomo',
'PASSWORD': 'tomo',
'HOST': 'db',
'PORT': '',
}
}
STATIC_ROOT = '/home/tomo/projekt-tomo/web/static'
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
AD_DNS_NAME = 'warpout.fmf.uni-lj.si'
AD_LDAP_PORT = 389
AD_SEARCH_DN = 'ou=uporabniki,dc=std,dc=fmf,dc=uni-lj,dc=si'
AD_NT4_DOMAIN = 'std'
AD_SEARCH_FIELDS = ['mail', 'givenName', 'sn', 'sAMAccountName']
AD_LDAP_URL = 'ldap://%s:%s' % (AD_DNS_NAME, AD_LDAP_PORT)
AUTHENTICATION_BACKENDS = (
'utils.auth.ActiveDirectoryBackend',
'django.contrib.auth.backends.ModelBackend',
)
|
<commit_before><commit_msg>Add settings for ARNES Tomo instance<commit_after>
|
from common import *
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['tomo.arnes.si']
WSGI_APPLICATION = 'web.wsgi.dev.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tomo',
'USER': 'tomo',
'PASSWORD': 'tomo',
'HOST': 'db',
'PORT': '',
}
}
STATIC_ROOT = '/home/tomo/projekt-tomo/web/static'
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
AD_DNS_NAME = 'warpout.fmf.uni-lj.si'
AD_LDAP_PORT = 389
AD_SEARCH_DN = 'ou=uporabniki,dc=std,dc=fmf,dc=uni-lj,dc=si'
AD_NT4_DOMAIN = 'std'
AD_SEARCH_FIELDS = ['mail', 'givenName', 'sn', 'sAMAccountName']
AD_LDAP_URL = 'ldap://%s:%s' % (AD_DNS_NAME, AD_LDAP_PORT)
AUTHENTICATION_BACKENDS = (
'utils.auth.ActiveDirectoryBackend',
'django.contrib.auth.backends.ModelBackend',
)
|
Add settings for ARNES Tomo instancefrom common import *
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['tomo.arnes.si']
WSGI_APPLICATION = 'web.wsgi.dev.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tomo',
'USER': 'tomo',
'PASSWORD': 'tomo',
'HOST': 'db',
'PORT': '',
}
}
STATIC_ROOT = '/home/tomo/projekt-tomo/web/static'
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
AD_DNS_NAME = 'warpout.fmf.uni-lj.si'
AD_LDAP_PORT = 389
AD_SEARCH_DN = 'ou=uporabniki,dc=std,dc=fmf,dc=uni-lj,dc=si'
AD_NT4_DOMAIN = 'std'
AD_SEARCH_FIELDS = ['mail', 'givenName', 'sn', 'sAMAccountName']
AD_LDAP_URL = 'ldap://%s:%s' % (AD_DNS_NAME, AD_LDAP_PORT)
AUTHENTICATION_BACKENDS = (
'utils.auth.ActiveDirectoryBackend',
'django.contrib.auth.backends.ModelBackend',
)
|
<commit_before><commit_msg>Add settings for ARNES Tomo instance<commit_after>from common import *
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['tomo.arnes.si']
WSGI_APPLICATION = 'web.wsgi.dev.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tomo',
'USER': 'tomo',
'PASSWORD': 'tomo',
'HOST': 'db',
'PORT': '',
}
}
STATIC_ROOT = '/home/tomo/projekt-tomo/web/static'
STATIC_URL = '/static/'
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/'
AD_DNS_NAME = 'warpout.fmf.uni-lj.si'
AD_LDAP_PORT = 389
AD_SEARCH_DN = 'ou=uporabniki,dc=std,dc=fmf,dc=uni-lj,dc=si'
AD_NT4_DOMAIN = 'std'
AD_SEARCH_FIELDS = ['mail', 'givenName', 'sn', 'sAMAccountName']
AD_LDAP_URL = 'ldap://%s:%s' % (AD_DNS_NAME, AD_LDAP_PORT)
AUTHENTICATION_BACKENDS = (
'utils.auth.ActiveDirectoryBackend',
'django.contrib.auth.backends.ModelBackend',
)
|
|
c46d628449651fde613fb4f7c1829f7770d2e353
|
django-server/feel/core/db/load_fixtures.py
|
django-server/feel/core/db/load_fixtures.py
|
import subprocess
from django.conf import settings
MY_APPS = settings.MY_APPS
COMMAND_FORMAT = "python manage.py loaddata core/fixtures/{app}.json"
def load_fixtures():
for app in MY_APPS:
command = COMMAND_FORMAT.format(app=app)
print(command)
subprocess.check_output(command, shell=True)
if __name__ == '__main__':
load_fixtures()
|
Add script to load fixtures into tables.
|
Fixtures: Add script to load fixtures into tables.
|
Python
|
mit
|
pixyj/feel,pixyj/feel,pixyj/feel,pixyj/feel,pixyj/feel
|
Fixtures: Add script to load fixtures into tables.
|
import subprocess
from django.conf import settings
MY_APPS = settings.MY_APPS
COMMAND_FORMAT = "python manage.py loaddata core/fixtures/{app}.json"
def load_fixtures():
for app in MY_APPS:
command = COMMAND_FORMAT.format(app=app)
print(command)
subprocess.check_output(command, shell=True)
if __name__ == '__main__':
load_fixtures()
|
<commit_before><commit_msg>Fixtures: Add script to load fixtures into tables.<commit_after>
|
import subprocess
from django.conf import settings
MY_APPS = settings.MY_APPS
COMMAND_FORMAT = "python manage.py loaddata core/fixtures/{app}.json"
def load_fixtures():
for app in MY_APPS:
command = COMMAND_FORMAT.format(app=app)
print(command)
subprocess.check_output(command, shell=True)
if __name__ == '__main__':
load_fixtures()
|
Fixtures: Add script to load fixtures into tables.import subprocess
from django.conf import settings
MY_APPS = settings.MY_APPS
COMMAND_FORMAT = "python manage.py loaddata core/fixtures/{app}.json"
def load_fixtures():
for app in MY_APPS:
command = COMMAND_FORMAT.format(app=app)
print(command)
subprocess.check_output(command, shell=True)
if __name__ == '__main__':
load_fixtures()
|
<commit_before><commit_msg>Fixtures: Add script to load fixtures into tables.<commit_after>import subprocess
from django.conf import settings
MY_APPS = settings.MY_APPS
COMMAND_FORMAT = "python manage.py loaddata core/fixtures/{app}.json"
def load_fixtures():
for app in MY_APPS:
command = COMMAND_FORMAT.format(app=app)
print(command)
subprocess.check_output(command, shell=True)
if __name__ == '__main__':
load_fixtures()
|
|
30ac63d485d548241e586b2698a10123b2a3cad9
|
DataStructuresAndAlgorithmsInPython/Chapter01.PythonPrimer/PreviewOfAPythonProgram.py
|
DataStructuresAndAlgorithmsInPython/Chapter01.PythonPrimer/PreviewOfAPythonProgram.py
|
print("\nTest quotations.")
print("Welcome to the GPA calculator.");
print("""Welcome to the GPA calculator.
Please enter all your letter grades, one per line.
Enter a blank line to designate the end.""");
print("\nPairs.");
pairs = [('ga','Irish'), ('de','German')];
print(pairs[0]);
print("\nThe modulo operator.");
print("27 // 4 =");
print(27 // 4);
print("\nInteger division.");
print("27 % 4 =");
print(27 % 4);
|
Add some python test codes.
|
Add some python test codes.
|
Python
|
mit
|
iandmyhand/python-utils
|
Add some python test codes.
|
print("\nTest quotations.")
print("Welcome to the GPA calculator.");
print("""Welcome to the GPA calculator.
Please enter all your letter grades, one per line.
Enter a blank line to designate the end.""");
print("\nPairs.");
pairs = [('ga','Irish'), ('de','German')];
print(pairs[0]);
print("\nThe modulo operator.");
print("27 // 4 =");
print(27 // 4);
print("\nInteger division.");
print("27 % 4 =");
print(27 % 4);
|
<commit_before><commit_msg>Add some python test codes.<commit_after>
|
print("\nTest quotations.")
print("Welcome to the GPA calculator.");
print("""Welcome to the GPA calculator.
Please enter all your letter grades, one per line.
Enter a blank line to designate the end.""");
print("\nPairs.");
pairs = [('ga','Irish'), ('de','German')];
print(pairs[0]);
print("\nThe modulo operator.");
print("27 // 4 =");
print(27 // 4);
print("\nInteger division.");
print("27 % 4 =");
print(27 % 4);
|
Add some python test codes.print("\nTest quotations.")
print("Welcome to the GPA calculator.");
print("""Welcome to the GPA calculator.
Please enter all your letter grades, one per line.
Enter a blank line to designate the end.""");
print("\nPairs.");
pairs = [('ga','Irish'), ('de','German')];
print(pairs[0]);
print("\nThe modulo operator.");
print("27 // 4 =");
print(27 // 4);
print("\nInteger division.");
print("27 % 4 =");
print(27 % 4);
|
<commit_before><commit_msg>Add some python test codes.<commit_after>print("\nTest quotations.")
print("Welcome to the GPA calculator.");
print("""Welcome to the GPA calculator.
Please enter all your letter grades, one per line.
Enter a blank line to designate the end.""");
print("\nPairs.");
pairs = [('ga','Irish'), ('de','German')];
print(pairs[0]);
print("\nThe modulo operator.");
print("27 // 4 =");
print(27 // 4);
print("\nInteger division.");
print("27 % 4 =");
print(27 % 4);
|
|
5619a15402099b1209db9ed7f71e1e55548ddebe
|
run-thnvm-se.py
|
run-thnvm-se.py
|
#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 [-h] [-c COMMAND] [-o OPTIONS]"
exit -1
fi
GEM5ROOT=~/Projects/Sexain-MemController/gem5-stable
ARCH=X86 #X86_MESI_CMP_directory # in ./build_opts
CPU_TYPE=atomic # timing, detailed
NUM_CPUS=1
MEM_TYPE=simple_mem
MEM_SIZE=2GB
L1D_SIZE=32kB
L1D_ASSOC=8
L1I_SIZE=32kB
L1I_ASSOC=8
L2_SIZE=256kB
L2_ASSOC=8
NUM_L2CACHES=$NUM_CPUS
L3_SIZE=$((3*NUM_CPUS))MB
L3_ASSOC=30
OPTIONS="--caches --l2cache"
OPTIONS+=" --cpu-type=$CPU_TYPE"
OPTIONS+=" --num-cpus=$NUM_CPUS"
OPTIONS+=" --mem-type=$MEM_TYPE"
OPTIONS+=" --mem-size=$MEM_SIZE"
OPTIONS+=" --l1d_size=$L1D_SIZE"
OPTIONS+=" --l1d_assoc=$L1D_ASSOC"
OPTIONS+=" --l1i_size=$L1I_SIZE"
OPTIONS+=" --l1i_assoc=$L1I_ASSOC"
OPTIONS+=" --l2_size=$L2_SIZE"
OPTIONS+=" --l2_assoc=$L2_ASSOC"
OPTIONS+=" --num-l2caches=$NUM_L2CACHES"
OPTIONS+=" --l3_size=$L3_SIZE"
OPTIONS+=" --l3_assoc=$L3_ASSOC"
$GEM5ROOT/build/$ARCH/gem5.opt $GEM5ROOT/configs/thnvm-se.py $OPTIONS $@
|
Add basic run script with config for syscall emulation.
|
[gem5] Add basic run script with config for syscall emulation.
|
Python
|
apache-2.0
|
basicthinker/Sexain-MemController,basicthinker/Sexain-MemController,basicthinker/Sexain-MemController,basicthinker/Sexain-MemController,basicthinker/Sexain-MemController,basicthinker/Sexain-MemController,basicthinker/Sexain-MemController,basicthinker/Sexain-MemController
|
[gem5] Add basic run script with config for syscall emulation.
|
#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 [-h] [-c COMMAND] [-o OPTIONS]"
exit -1
fi
GEM5ROOT=~/Projects/Sexain-MemController/gem5-stable
ARCH=X86 #X86_MESI_CMP_directory # in ./build_opts
CPU_TYPE=atomic # timing, detailed
NUM_CPUS=1
MEM_TYPE=simple_mem
MEM_SIZE=2GB
L1D_SIZE=32kB
L1D_ASSOC=8
L1I_SIZE=32kB
L1I_ASSOC=8
L2_SIZE=256kB
L2_ASSOC=8
NUM_L2CACHES=$NUM_CPUS
L3_SIZE=$((3*NUM_CPUS))MB
L3_ASSOC=30
OPTIONS="--caches --l2cache"
OPTIONS+=" --cpu-type=$CPU_TYPE"
OPTIONS+=" --num-cpus=$NUM_CPUS"
OPTIONS+=" --mem-type=$MEM_TYPE"
OPTIONS+=" --mem-size=$MEM_SIZE"
OPTIONS+=" --l1d_size=$L1D_SIZE"
OPTIONS+=" --l1d_assoc=$L1D_ASSOC"
OPTIONS+=" --l1i_size=$L1I_SIZE"
OPTIONS+=" --l1i_assoc=$L1I_ASSOC"
OPTIONS+=" --l2_size=$L2_SIZE"
OPTIONS+=" --l2_assoc=$L2_ASSOC"
OPTIONS+=" --num-l2caches=$NUM_L2CACHES"
OPTIONS+=" --l3_size=$L3_SIZE"
OPTIONS+=" --l3_assoc=$L3_ASSOC"
$GEM5ROOT/build/$ARCH/gem5.opt $GEM5ROOT/configs/thnvm-se.py $OPTIONS $@
|
<commit_before><commit_msg>[gem5] Add basic run script with config for syscall emulation.<commit_after>
|
#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 [-h] [-c COMMAND] [-o OPTIONS]"
exit -1
fi
GEM5ROOT=~/Projects/Sexain-MemController/gem5-stable
ARCH=X86 #X86_MESI_CMP_directory # in ./build_opts
CPU_TYPE=atomic # timing, detailed
NUM_CPUS=1
MEM_TYPE=simple_mem
MEM_SIZE=2GB
L1D_SIZE=32kB
L1D_ASSOC=8
L1I_SIZE=32kB
L1I_ASSOC=8
L2_SIZE=256kB
L2_ASSOC=8
NUM_L2CACHES=$NUM_CPUS
L3_SIZE=$((3*NUM_CPUS))MB
L3_ASSOC=30
OPTIONS="--caches --l2cache"
OPTIONS+=" --cpu-type=$CPU_TYPE"
OPTIONS+=" --num-cpus=$NUM_CPUS"
OPTIONS+=" --mem-type=$MEM_TYPE"
OPTIONS+=" --mem-size=$MEM_SIZE"
OPTIONS+=" --l1d_size=$L1D_SIZE"
OPTIONS+=" --l1d_assoc=$L1D_ASSOC"
OPTIONS+=" --l1i_size=$L1I_SIZE"
OPTIONS+=" --l1i_assoc=$L1I_ASSOC"
OPTIONS+=" --l2_size=$L2_SIZE"
OPTIONS+=" --l2_assoc=$L2_ASSOC"
OPTIONS+=" --num-l2caches=$NUM_L2CACHES"
OPTIONS+=" --l3_size=$L3_SIZE"
OPTIONS+=" --l3_assoc=$L3_ASSOC"
$GEM5ROOT/build/$ARCH/gem5.opt $GEM5ROOT/configs/thnvm-se.py $OPTIONS $@
|
[gem5] Add basic run script with config for syscall emulation.#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 [-h] [-c COMMAND] [-o OPTIONS]"
exit -1
fi
GEM5ROOT=~/Projects/Sexain-MemController/gem5-stable
ARCH=X86 #X86_MESI_CMP_directory # in ./build_opts
CPU_TYPE=atomic # timing, detailed
NUM_CPUS=1
MEM_TYPE=simple_mem
MEM_SIZE=2GB
L1D_SIZE=32kB
L1D_ASSOC=8
L1I_SIZE=32kB
L1I_ASSOC=8
L2_SIZE=256kB
L2_ASSOC=8
NUM_L2CACHES=$NUM_CPUS
L3_SIZE=$((3*NUM_CPUS))MB
L3_ASSOC=30
OPTIONS="--caches --l2cache"
OPTIONS+=" --cpu-type=$CPU_TYPE"
OPTIONS+=" --num-cpus=$NUM_CPUS"
OPTIONS+=" --mem-type=$MEM_TYPE"
OPTIONS+=" --mem-size=$MEM_SIZE"
OPTIONS+=" --l1d_size=$L1D_SIZE"
OPTIONS+=" --l1d_assoc=$L1D_ASSOC"
OPTIONS+=" --l1i_size=$L1I_SIZE"
OPTIONS+=" --l1i_assoc=$L1I_ASSOC"
OPTIONS+=" --l2_size=$L2_SIZE"
OPTIONS+=" --l2_assoc=$L2_ASSOC"
OPTIONS+=" --num-l2caches=$NUM_L2CACHES"
OPTIONS+=" --l3_size=$L3_SIZE"
OPTIONS+=" --l3_assoc=$L3_ASSOC"
$GEM5ROOT/build/$ARCH/gem5.opt $GEM5ROOT/configs/thnvm-se.py $OPTIONS $@
|
<commit_before><commit_msg>[gem5] Add basic run script with config for syscall emulation.<commit_after>#!/bin/bash
if [ $# -lt 1 ]; then
echo "Usage: $0 [-h] [-c COMMAND] [-o OPTIONS]"
exit -1
fi
GEM5ROOT=~/Projects/Sexain-MemController/gem5-stable
ARCH=X86 #X86_MESI_CMP_directory # in ./build_opts
CPU_TYPE=atomic # timing, detailed
NUM_CPUS=1
MEM_TYPE=simple_mem
MEM_SIZE=2GB
L1D_SIZE=32kB
L1D_ASSOC=8
L1I_SIZE=32kB
L1I_ASSOC=8
L2_SIZE=256kB
L2_ASSOC=8
NUM_L2CACHES=$NUM_CPUS
L3_SIZE=$((3*NUM_CPUS))MB
L3_ASSOC=30
OPTIONS="--caches --l2cache"
OPTIONS+=" --cpu-type=$CPU_TYPE"
OPTIONS+=" --num-cpus=$NUM_CPUS"
OPTIONS+=" --mem-type=$MEM_TYPE"
OPTIONS+=" --mem-size=$MEM_SIZE"
OPTIONS+=" --l1d_size=$L1D_SIZE"
OPTIONS+=" --l1d_assoc=$L1D_ASSOC"
OPTIONS+=" --l1i_size=$L1I_SIZE"
OPTIONS+=" --l1i_assoc=$L1I_ASSOC"
OPTIONS+=" --l2_size=$L2_SIZE"
OPTIONS+=" --l2_assoc=$L2_ASSOC"
OPTIONS+=" --num-l2caches=$NUM_L2CACHES"
OPTIONS+=" --l3_size=$L3_SIZE"
OPTIONS+=" --l3_assoc=$L3_ASSOC"
$GEM5ROOT/build/$ARCH/gem5.opt $GEM5ROOT/configs/thnvm-se.py $OPTIONS $@
|
|
2dfab0f34bc96f1547382139e6a83bea3a3d202a
|
error_messenger.py
|
error_messenger.py
|
#!/usr/bin/env python3
# This file provides exactly one method: send_error_message
# If the setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME is not set to None,
# an error message should be sent to the recipient via DM
# import twythonaccess for sending DMs
import twythonaccess
# import setup
import setup
# The main function
def send_error_message(message, place):
dm_name = setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME
if dm_name == None:
return
text = "Error in " + place + ": " + str(message)
if len(text) < 10000:
twythonaccess.authorize(twitter_app = twythonaccess.TwitterApp.error_messenger).send_direct_message(screen_name = dm_name, text = text)
|
Add error messenger via DM
|
Add error messenger via DM
|
Python
|
mit
|
ArVID220u/LoveAgainstHate
|
Add error messenger via DM
|
#!/usr/bin/env python3
# This file provides exactly one method: send_error_message
# If the setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME is not set to None,
# an error message should be sent to the recipient via DM
# import twythonaccess for sending DMs
import twythonaccess
# import setup
import setup
# The main function
def send_error_message(message, place):
dm_name = setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME
if dm_name == None:
return
text = "Error in " + place + ": " + str(message)
if len(text) < 10000:
twythonaccess.authorize(twitter_app = twythonaccess.TwitterApp.error_messenger).send_direct_message(screen_name = dm_name, text = text)
|
<commit_before><commit_msg>Add error messenger via DM<commit_after>
|
#!/usr/bin/env python3
# This file provides exactly one method: send_error_message
# If the setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME is not set to None,
# an error message should be sent to the recipient via DM
# import twythonaccess for sending DMs
import twythonaccess
# import setup
import setup
# The main function
def send_error_message(message, place):
dm_name = setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME
if dm_name == None:
return
text = "Error in " + place + ": " + str(message)
if len(text) < 10000:
twythonaccess.authorize(twitter_app = twythonaccess.TwitterApp.error_messenger).send_direct_message(screen_name = dm_name, text = text)
|
Add error messenger via DM#!/usr/bin/env python3
# This file provides exactly one method: send_error_message
# If the setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME is not set to None,
# an error message should be sent to the recipient via DM
# import twythonaccess for sending DMs
import twythonaccess
# import setup
import setup
# The main function
def send_error_message(message, place):
dm_name = setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME
if dm_name == None:
return
text = "Error in " + place + ": " + str(message)
if len(text) < 10000:
twythonaccess.authorize(twitter_app = twythonaccess.TwitterApp.error_messenger).send_direct_message(screen_name = dm_name, text = text)
|
<commit_before><commit_msg>Add error messenger via DM<commit_after>#!/usr/bin/env python3
# This file provides exactly one method: send_error_message
# If the setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME is not set to None,
# an error message should be sent to the recipient via DM
# import twythonaccess for sending DMs
import twythonaccess
# import setup
import setup
# The main function
def send_error_message(message, place):
dm_name = setup.ERROR_MESSAGE_RECIPIENT_SCREEN_NAME
if dm_name == None:
return
text = "Error in " + place + ": " + str(message)
if len(text) < 10000:
twythonaccess.authorize(twitter_app = twythonaccess.TwitterApp.error_messenger).send_direct_message(screen_name = dm_name, text = text)
|
|
5e577befa191561dcdd2025842266f4ec9ef46f3
|
examples/to_csv.py
|
examples/to_csv.py
|
"""
This file is an example for running the conversion script
"""
from datetime import datetime, timedelta
import sys
sys.path.append('.')
sys.path.append('../')
from convert import Convert # NOQA
convert = Convert()
convert.CSV_FILE_LOCATION = 'examples/BostonCruiseTerminalSchedule.csv'
convert.SAVE_LOCATION = 'examples/arrive.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
|
Add script to convert from ical to csv
|
Add script to convert from ical to csv
|
Python
|
mit
|
albertyw/csv-to-ical
|
Add script to convert from ical to csv
|
"""
This file is an example for running the conversion script
"""
from datetime import datetime, timedelta
import sys
sys.path.append('.')
sys.path.append('../')
from convert import Convert # NOQA
convert = Convert()
convert.CSV_FILE_LOCATION = 'examples/BostonCruiseTerminalSchedule.csv'
convert.SAVE_LOCATION = 'examples/arrive.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
|
<commit_before><commit_msg>Add script to convert from ical to csv<commit_after>
|
"""
This file is an example for running the conversion script
"""
from datetime import datetime, timedelta
import sys
sys.path.append('.')
sys.path.append('../')
from convert import Convert # NOQA
convert = Convert()
convert.CSV_FILE_LOCATION = 'examples/BostonCruiseTerminalSchedule.csv'
convert.SAVE_LOCATION = 'examples/arrive.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
|
Add script to convert from ical to csv"""
This file is an example for running the conversion script
"""
from datetime import datetime, timedelta
import sys
sys.path.append('.')
sys.path.append('../')
from convert import Convert # NOQA
convert = Convert()
convert.CSV_FILE_LOCATION = 'examples/BostonCruiseTerminalSchedule.csv'
convert.SAVE_LOCATION = 'examples/arrive.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
|
<commit_before><commit_msg>Add script to convert from ical to csv<commit_after>"""
This file is an example for running the conversion script
"""
from datetime import datetime, timedelta
import sys
sys.path.append('.')
sys.path.append('../')
from convert import Convert # NOQA
convert = Convert()
convert.CSV_FILE_LOCATION = 'examples/BostonCruiseTerminalSchedule.csv'
convert.SAVE_LOCATION = 'examples/arrive.ics'
convert.read_ical(convert.SAVE_LOCATION)
convert.make_csv()
convert.save_csv(convert.CSV_FILE_LOCATION)
|
|
00d6f99cf1f94babb237bff00364497ec30f475c
|
examples/hwapi/hwconfig_console.py
|
examples/hwapi/hwconfig_console.py
|
# This is hwconfig for "emulation" for cases when there's no real hardware.
# It just prints information to console.
class LEDClass:
def __init__(self, id):
self.id = id
def value(self, v):
print("LED(%d):" % self.id, v)
LED = LEDClass(1)
LED2 = LEDClass(12)
|
Add hwconfig for console tracing of LED operations.
|
examples/hwapi: Add hwconfig for console tracing of LED operations.
|
Python
|
mit
|
puuu/micropython,MrSurly/micropython,oopy/micropython,deshipu/micropython,pozetroninc/micropython,pfalcon/micropython,oopy/micropython,ryannathans/micropython,kerneltask/micropython,TDAbboud/micropython,MrSurly/micropython-esp32,trezor/micropython,deshipu/micropython,dmazzella/micropython,chrisdearman/micropython,pozetroninc/micropython,cwyark/micropython,deshipu/micropython,infinnovation/micropython,torwag/micropython,tobbad/micropython,chrisdearman/micropython,AriZuu/micropython,ryannathans/micropython,infinnovation/micropython,ryannathans/micropython,selste/micropython,tobbad/micropython,alex-robbins/micropython,tobbad/micropython,MrSurly/micropython,MrSurly/micropython,dmazzella/micropython,kerneltask/micropython,adafruit/circuitpython,deshipu/micropython,tralamazza/micropython,PappaPeppar/micropython,adafruit/circuitpython,Timmenem/micropython,MrSurly/micropython-esp32,pramasoul/micropython,MrSurly/micropython,swegener/micropython,MrSurly/micropython-esp32,Timmenem/micropython,lowRISC/micropython,adafruit/circuitpython,torwag/micropython,infinnovation/micropython,cwyark/micropython,pozetroninc/micropython,AriZuu/micropython,swegener/micropython,swegener/micropython,Peetz0r/micropython-esp32,trezor/micropython,pramasoul/micropython,SHA2017-badge/micropython-esp32,micropython/micropython-esp32,bvernoux/micropython,selste/micropython,PappaPeppar/micropython,PappaPeppar/micropython,SHA2017-badge/micropython-esp32,micropython/micropython-esp32,pramasoul/micropython,tralamazza/micropython,hiway/micropython,toolmacher/micropython,HenrikSolver/micropython,infinnovation/micropython,HenrikSolver/micropython,bvernoux/micropython,blazewicz/micropython,bvernoux/micropython,AriZuu/micropython,torwag/micropython,oopy/micropython,adafruit/micropython,adafruit/micropython,oopy/micropython,cwyark/micropython,pfalcon/micropython,MrSurly/micropython-esp32,tralamazza/micropython,kerneltask/micropython,mhoffma/micropython,ryannathans/micropython,swegener/micropython,hiway/micropython,tuc-osg/micropython,Peetz0r/micropython-esp32,TDAbboud/micropython,selste/micropython,adafruit/circuitpython,blazewicz/micropython,HenrikSolver/micropython,oopy/micropython,alex-robbins/micropython,alex-robbins/micropython,alex-robbins/micropython,blazewicz/micropython,PappaPeppar/micropython,HenrikSolver/micropython,hiway/micropython,tobbad/micropython,blazewicz/micropython,alex-robbins/micropython,micropython/micropython-esp32,SHA2017-badge/micropython-esp32,pramasoul/micropython,toolmacher/micropython,SHA2017-badge/micropython-esp32,deshipu/micropython,blazewicz/micropython,henriknelson/micropython,Timmenem/micropython,dmazzella/micropython,adafruit/micropython,torwag/micropython,trezor/micropython,torwag/micropython,TDAbboud/micropython,mhoffma/micropython,puuu/micropython,micropython/micropython-esp32,henriknelson/micropython,mhoffma/micropython,Timmenem/micropython,ryannathans/micropython,Peetz0r/micropython-esp32,adafruit/circuitpython,Peetz0r/micropython-esp32,lowRISC/micropython,AriZuu/micropython,tuc-osg/micropython,tobbad/micropython,toolmacher/micropython,selste/micropython,selste/micropython,puuu/micropython,pramasoul/micropython,pozetroninc/micropython,puuu/micropython,dmazzella/micropython,pfalcon/micropython,adafruit/circuitpython,toolmacher/micropython,trezor/micropython,HenrikSolver/micropython,pfalcon/micropython,puuu/micropython,cwyark/micropython,bvernoux/micropython,bvernoux/micropython,trezor/micropython,lowRISC/micropython,pozetroninc/micropython,tuc-osg/micropython,kerneltask/micropython,MrSurly/micropy
thon,lowRISC/micropython,TDAbboud/micropython,Timmenem/micropython,hiway/micropython,kerneltask/micropython,adafruit/micropython,hiway/micropython,henriknelson/micropython,mhoffma/micropython,swegener/micropython,toolmacher/micropython,MrSurly/micropython-esp32,AriZuu/micropython,chrisdearman/micropython,lowRISC/micropython,mhoffma/micropython,adafruit/micropython,chrisdearman/micropython,tuc-osg/micropython,micropython/micropython-esp32,SHA2017-badge/micropython-esp32,henriknelson/micropython,tuc-osg/micropython,infinnovation/micropython,chrisdearman/micropython,tralamazza/micropython,Peetz0r/micropython-esp32,PappaPeppar/micropython,pfalcon/micropython,cwyark/micropython,henriknelson/micropython,TDAbboud/micropython
|
examples/hwapi: Add hwconfig for console tracing of LED operations.
|
# This is hwconfig for "emulation" for cases when there's no real hardware.
# It just prints information to console.
class LEDClass:
def __init__(self, id):
self.id = id
def value(self, v):
print("LED(%d):" % self.id, v)
LED = LEDClass(1)
LED2 = LEDClass(12)
|
<commit_before><commit_msg>examples/hwapi: Add hwconfig for console tracing of LED operations.<commit_after>
|
# This is hwconfig for "emulation" for cases when there's no real hardware.
# It just prints information to console.
class LEDClass:
def __init__(self, id):
self.id = id
def value(self, v):
print("LED(%d):" % self.id, v)
LED = LEDClass(1)
LED2 = LEDClass(12)
|
examples/hwapi: Add hwconfig for console tracing of LED operations.# This is hwconfig for "emulation" for cases when there's no real hardware.
# It just prints information to console.
class LEDClass:
def __init__(self, id):
self.id = id
def value(self, v):
print("LED(%d):" % self.id, v)
LED = LEDClass(1)
LED2 = LEDClass(12)
|
<commit_before><commit_msg>examples/hwapi: Add hwconfig for console tracing of LED operations.<commit_after># This is hwconfig for "emulation" for cases when there's no real hardware.
# It just prints information to console.
class LEDClass:
def __init__(self, id):
self.id = id
def value(self, v):
print("LED(%d):" % self.id, v)
LED = LEDClass(1)
LED2 = LEDClass(12)
|
|
8b1b0418d559d0765b30da5e1e431bc7ec6441c1
|
examples/sns/create_and_publish.py
|
examples/sns/create_and_publish.py
|
import os
import sys
from tornado.ioloop import IOLoop
from tornado.gen import coroutine
from asyncaws import SNS
ioloop = IOLoop.current()
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
sns = SNS(aws_key_id, aws_key_secret, "eu-west-1")
@coroutine
def create_and_publish():
"""Create an SQS queue and send a message"""
topic_arn = yield sns.create_topic("test-topic")
yield sns.publish("Hello, World!", "Some subject", topic_arn)
sys.exit(0)
if __name__ == '__main__':
ioloop.add_callback(create_and_publish)
ioloop.start()
|
Add SNS example file with case: create a topic and publish message to it
|
Add SNS example file with case: create a topic and publish message to it
|
Python
|
mit
|
MA3STR0/AsyncAWS
|
Add SNS example file with case: create a topic and publish message to it
|
import os
import sys
from tornado.ioloop import IOLoop
from tornado.gen import coroutine
from asyncaws import SNS
ioloop = IOLoop.current()
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
sns = SNS(aws_key_id, aws_key_secret, "eu-west-1")
@coroutine
def create_and_publish():
"""Create an SQS queue and send a message"""
topic_arn = yield sns.create_topic("test-topic")
yield sns.publish("Hello, World!", "Some subject", topic_arn)
sys.exit(0)
if __name__ == '__main__':
ioloop.add_callback(create_and_publish)
ioloop.start()
|
<commit_before><commit_msg>Add SNS example file with case: create a topic and publish message to it<commit_after>
|
import os
import sys
from tornado.ioloop import IOLoop
from tornado.gen import coroutine
from asyncaws import SNS
ioloop = IOLoop.current()
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
sns = SNS(aws_key_id, aws_key_secret, "eu-west-1")
@coroutine
def create_and_publish():
"""Create an SQS queue and send a message"""
topic_arn = yield sns.create_topic("test-topic")
yield sns.publish("Hello, World!", "Some subject", topic_arn)
sys.exit(0)
if __name__ == '__main__':
ioloop.add_callback(create_and_publish)
ioloop.start()
|
Add SNS example file with case: create a topic and publish message to itimport os
import sys
from tornado.ioloop import IOLoop
from tornado.gen import coroutine
from asyncaws import SNS
ioloop = IOLoop.current()
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
sns = SNS(aws_key_id, aws_key_secret, "eu-west-1")
@coroutine
def create_and_publish():
"""Create an SQS queue and send a message"""
topic_arn = yield sns.create_topic("test-topic")
yield sns.publish("Hello, World!", "Some subject", topic_arn)
sys.exit(0)
if __name__ == '__main__':
ioloop.add_callback(create_and_publish)
ioloop.start()
|
<commit_before><commit_msg>Add SNS example file with case: create a topic and publish message to it<commit_after>import os
import sys
from tornado.ioloop import IOLoop
from tornado.gen import coroutine
from asyncaws import SNS
ioloop = IOLoop.current()
aws_key_id = os.environ['AWS_ACCESS_KEY_ID']
aws_key_secret = os.environ['AWS_SECRET_ACCESS_KEY']
sns = SNS(aws_key_id, aws_key_secret, "eu-west-1")
@coroutine
def create_and_publish():
"""Create an SQS queue and send a message"""
topic_arn = yield sns.create_topic("test-topic")
yield sns.publish("Hello, World!", "Some subject", topic_arn)
sys.exit(0)
if __name__ == '__main__':
ioloop.add_callback(create_and_publish)
ioloop.start()
|
|
da4b904714cb77b862633c76085ecabf20d3edd6
|
filer/test_utils/extended_app/migrations/0002_auto_20160702_0839.py
|
filer/test_utils/extended_app/migrations/0002_auto_20160702_0839.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extended_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='extimage',
name='file_ptr',
field=models.OneToOneField(serialize=False, primary_key=True, related_name='extended_app_extimage_file', to='filer.File'),
),
migrations.AlterField(
model_name='extimage',
name='subject_location',
field=models.CharField(blank=True, default='', max_length=64, verbose_name='subject location'),
),
]
|
Add missing migration for extended_app
|
Add missing migration for extended_app
|
Python
|
bsd-3-clause
|
webu/django-filer,divio/django-filer,jakob-o/django-filer,skirsdeda/django-filer,webu/django-filer,skirsdeda/django-filer,webu/django-filer,stefanfoulis/django-filer,stefanfoulis/django-filer,divio/django-filer,skirsdeda/django-filer,webu/django-filer,jakob-o/django-filer,stefanfoulis/django-filer,stefanfoulis/django-filer,divio/django-filer,jakob-o/django-filer,jakob-o/django-filer,divio/django-filer,skirsdeda/django-filer,jakob-o/django-filer,skirsdeda/django-filer,stefanfoulis/django-filer
|
Add missing migration for extended_app
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extended_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='extimage',
name='file_ptr',
field=models.OneToOneField(serialize=False, primary_key=True, related_name='extended_app_extimage_file', to='filer.File'),
),
migrations.AlterField(
model_name='extimage',
name='subject_location',
field=models.CharField(blank=True, default='', max_length=64, verbose_name='subject location'),
),
]
|
<commit_before><commit_msg>Add missing migration for extended_app<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extended_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='extimage',
name='file_ptr',
field=models.OneToOneField(serialize=False, primary_key=True, related_name='extended_app_extimage_file', to='filer.File'),
),
migrations.AlterField(
model_name='extimage',
name='subject_location',
field=models.CharField(blank=True, default='', max_length=64, verbose_name='subject location'),
),
]
|
Add missing migration for extended_app# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extended_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='extimage',
name='file_ptr',
field=models.OneToOneField(serialize=False, primary_key=True, related_name='extended_app_extimage_file', to='filer.File'),
),
migrations.AlterField(
model_name='extimage',
name='subject_location',
field=models.CharField(blank=True, default='', max_length=64, verbose_name='subject location'),
),
]
|
<commit_before><commit_msg>Add missing migration for extended_app<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extended_app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='extimage',
name='file_ptr',
field=models.OneToOneField(serialize=False, primary_key=True, related_name='extended_app_extimage_file', to='filer.File'),
),
migrations.AlterField(
model_name='extimage',
name='subject_location',
field=models.CharField(blank=True, default='', max_length=64, verbose_name='subject location'),
),
]
|
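Missing migrations such as the django-filer one above are usually caught automatically by running makemigrations in check mode. A minimal sketch of such a guard follows; the app label comes from the record, while wiring it into a test runner with configured settings is an assumption:

# Sketch: fail fast when model changes have no corresponding migration.
# Assumes Django settings are already configured (e.g. inside a test runner).
from django.core.management import call_command

def assert_no_missing_migrations():
    # --check makes the command exit non-zero (SystemExit) when migrations
    # are missing; --dry-run avoids writing any migration files.
    call_command("makemigrations", "extended_app", check=True, dry_run=True)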
|
1e7cde33af4161d89bfce32a91c03d8f7ad7a7af
|
ielex/lexicon/migrations/0130_copy_hindi_transliteration_to_urdu.py
|
ielex/lexicon/migrations/0130_copy_hindi_transliteration_to_urdu.py
|
# -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
Language = apps.get_model("lexicon", "Language")
hindi = Language.objects.get(ascii_name='Hindi')
urdu = Language.objects.get(ascii_name='Urdu')
Lexeme = apps.get_model("lexicon", "Lexeme")
hindiMeaningLexemeMap = dict(
Lexeme.objects.filter(
language=hindi).values_list(
'meaning_id', 'transliteration'))
for lexeme in Lexeme.objects.filter(language=urdu).all():
if lexeme.meaning_id in hindiMeaningLexemeMap:
lexeme.transliteration = hindiMeaningLexemeMap[lexeme.meaning_id]
lexeme.save()
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0129_link_author_user')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
Update Urdu transliteration from Hindi
|
Update Urdu transliteration from Hindi
- Adds 0130_copy_hindi_transliteration_to_urdu.py which was requested
by @cormacanderson
|
Python
|
bsd-2-clause
|
lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public
|
Update Urdu transliteration from Hindi
- Adds 0130_copy_hindi_transliteration_to_urdu.py which was requested
by @cormacanderson
|
# -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
Language = apps.get_model("lexicon", "Language")
hindi = Language.objects.get(ascii_name='Hindi')
urdu = Language.objects.get(ascii_name='Urdu')
Lexeme = apps.get_model("lexicon", "Lexeme")
hindiMeaningLexemeMap = dict(
Lexeme.objects.filter(
language=hindi).values_list(
'meaning_id', 'transliteration'))
for lexeme in Lexeme.objects.filter(language=urdu).all():
if lexeme.meaning_id in hindiMeaningLexemeMap:
lexeme.transliteration = hindiMeaningLexemeMap[lexeme.meaning_id]
lexeme.save()
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0129_link_author_user')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
<commit_before><commit_msg>Update Urdu transliteration from Hindi
- Adds 0130_copy_hindi_transliteration_to_urdu.py which was requested
by @cormacanderson<commit_after>
|
# -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
Language = apps.get_model("lexicon", "Language")
hindi = Language.objects.get(ascii_name='Hindi')
urdu = Language.objects.get(ascii_name='Urdu')
Lexeme = apps.get_model("lexicon", "Lexeme")
hindiMeaningLexemeMap = dict(
Lexeme.objects.filter(
language=hindi).values_list(
'meaning_id', 'transliteration'))
for lexeme in Lexeme.objects.filter(language=urdu).all():
if lexeme.meaning_id in hindiMeaningLexemeMap:
lexeme.transliteration = hindiMeaningLexemeMap[lexeme.meaning_id]
lexeme.save()
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0129_link_author_user')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
Update Urdu transliteration from Hindi
- Adds 0130_copy_hindi_transliteration_to_urdu.py which was requested
by @cormacanderson# -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
Language = apps.get_model("lexicon", "Language")
hindi = Language.objects.get(ascii_name='Hindi')
urdu = Language.objects.get(ascii_name='Urdu')
Lexeme = apps.get_model("lexicon", "Lexeme")
hindiMeaningLexemeMap = dict(
Lexeme.objects.filter(
language=hindi).values_list(
'meaning_id', 'transliteration'))
for lexeme in Lexeme.objects.filter(language=urdu).all():
if lexeme.meaning_id in hindiMeaningLexemeMap:
lexeme.transliteration = hindiMeaningLexemeMap[lexeme.meaning_id]
lexeme.save()
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0129_link_author_user')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
<commit_before><commit_msg>Update Urdu transliteration from Hindi
- Adds 0130_copy_hindi_transliteration_to_urdu.py which was requested
by @cormacanderson<commit_after># -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
Language = apps.get_model("lexicon", "Language")
hindi = Language.objects.get(ascii_name='Hindi')
urdu = Language.objects.get(ascii_name='Urdu')
Lexeme = apps.get_model("lexicon", "Lexeme")
hindiMeaningLexemeMap = dict(
Lexeme.objects.filter(
language=hindi).values_list(
'meaning_id', 'transliteration'))
for lexeme in Lexeme.objects.filter(language=urdu).all():
if lexeme.meaning_id in hindiMeaningLexemeMap:
lexeme.transliteration = hindiMeaningLexemeMap[lexeme.meaning_id]
lexeme.save()
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0129_link_author_user')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
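The forward function in the CoBL migration above saves each Urdu lexeme individually. On newer Django versions the same copy could be batched; a rough sketch under the assumption that QuerySet.bulk_update (Django 2.2+) is available:

# Sketch only: batched variant of the Hindi -> Urdu transliteration copy.
# Assumes Django >= 2.2 for QuerySet.bulk_update; not part of the original commit.
def forwards_func_bulk(apps, schema_editor):
    Language = apps.get_model("lexicon", "Language")
    Lexeme = apps.get_model("lexicon", "Lexeme")
    hindi = Language.objects.get(ascii_name='Hindi')
    urdu = Language.objects.get(ascii_name='Urdu')
    hindi_map = dict(
        Lexeme.objects.filter(language=hindi).values_list(
            'meaning_id', 'transliteration'))
    to_update = []
    for lexeme in Lexeme.objects.filter(language=urdu):
        if lexeme.meaning_id in hindi_map:
            lexeme.transliteration = hindi_map[lexeme.meaning_id]
            to_update.append(lexeme)
    Lexeme.objects.bulk_update(to_update, ['transliteration'])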
|
88f14a5b72637bed435405a01e66931df6534e52
|
goatools/obo_tasks.py
|
goatools/obo_tasks.py
|
"""Tasks for GOTerms in obo dag."""
def get_all_parents(go_objs):
"""Return a set containing all GO Term parents of multiple GOTerm objects."""
go_parents = set()
for go_obj in go_objs:
go_parents |= go_obj.get_all_parents()
return go_parents
|
Add file containing small, common obo tasks.
|
Add file containing small, common obo tasks.
|
Python
|
bsd-2-clause
|
lileiting/goatools,tanghaibao/goatools,tanghaibao/goatools,lileiting/goatools
|
Add file containing small, common obo tasks.
|
"""Tasks for GOTerms in obo dag."""
def get_all_parents(go_objs):
"""Return a set containing all GO Term parents of multiple GOTerm objects."""
go_parents = set()
for go_obj in go_objs:
go_parents |= go_obj.get_all_parents()
return go_parents
|
<commit_before><commit_msg>Add file containing small, common obo tasks.<commit_after>
|
"""Tasks for GOTerms in obo dag."""
def get_all_parents(go_objs):
"""Return a set containing all GO Term parents of multiple GOTerm objects."""
go_parents = set()
for go_obj in go_objs:
go_parents |= go_obj.get_all_parents()
return go_parents
|
Add file containing small, common obo tasks."""Tasks for GOTerms in obo dag."""
def get_all_parents(go_objs):
"""Return a set containing all GO Term parents of multiple GOTerm objects."""
go_parents = set()
for go_obj in go_objs:
go_parents |= go_obj.get_all_parents()
return go_parents
|
<commit_before><commit_msg>Add file containing small, common obo tasks.<commit_after>"""Tasks for GOTerms in obo dag."""
def get_all_parents(go_objs):
"""Return a set containing all GO Term parents of multiple GOTerm objects."""
go_parents = set()
for go_obj in go_objs:
go_parents |= go_obj.get_all_parents()
return go_parents
|
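A short usage sketch for the get_all_parents helper above; the OBO file path and the GO identifiers are placeholders, and GODag is assumed to come from goatools.obo_parser:

# Sketch: collect every ancestor of a handful of GO terms.
from goatools.obo_parser import GODag
from goatools.obo_tasks import get_all_parents

dag = GODag("go-basic.obo")                      # path to an OBO file is an assumption
terms = [dag["GO:0006915"], dag["GO:0007049"]]   # placeholder GO IDs
ancestors = get_all_parents(terms)
print(len(ancestors), "distinct parent terms")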
|
63a2475d674b611cc5e8f57218272f0aac8d13a4
|
fuel_plugin/ostf_adapter/logger.py
|
fuel_plugin/ostf_adapter/logger.py
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(message)s')
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'%(asctime)s %(levelname)s (%(module)s) %(message)s',
"%Y-%m-%d %H:%M:%S")
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
|
Change logging format. To be compatible with nailgun web ui
|
Change logging format. To be compatible with nailgun web ui
Change-Id: I2e8bfe32bbb1b8f48e5ab0a418ab9592cc00adc3
|
Python
|
apache-2.0
|
stackforge/fuel-ostf,mcloudv/fuel-ostf,mcloudv/fuel-ostf,eayunstack/fuel-ostf,stackforge/fuel-ostf,eayunstack/fuel-ostf
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(message)s')
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
Change logging format. To be compatible with nailgun web ui
Change-Id: I2e8bfe32bbb1b8f48e5ab0a418ab9592cc00adc3
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'%(asctime)s %(levelname)s (%(module)s) %(message)s',
"%Y-%m-%d %H:%M:%S")
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
|
<commit_before># Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(message)s')
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
<commit_msg>Change logging format. To be compatible with nailgun web ui
Change-Id: I2e8bfe32bbb1b8f48e5ab0a418ab9592cc00adc3<commit_after>
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'%(asctime)s %(levelname)s (%(module)s) %(message)s',
"%Y-%m-%d %H:%M:%S")
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(message)s')
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
Change logging format. To be compatible with nailgun web ui
Change-Id: I2e8bfe32bbb1b8f48e5ab0a418ab9592cc00adc3# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'%(asctime)s %(levelname)s (%(module)s) %(message)s',
"%Y-%m-%d %H:%M:%S")
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
|
<commit_before># Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)-8s %(message)s')
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
<commit_msg>Change logging format. To be compatible with nailgun web ui
Change-Id: I2e8bfe32bbb1b8f48e5ab0a418ab9592cc00adc3<commit_after># Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import logging.handlers
def setup(log_file=None):
formatter = logging.Formatter(
'%(asctime)s %(levelname)s (%(module)s) %(message)s',
"%Y-%m-%d %H:%M:%S")
log = logging.getLogger(None)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)
log.addHandler(stream_handler)
if log_file:
log_file = os.path.abspath(log_file)
file_handler = logging.handlers.WatchedFileHandler(log_file)
file_handler.setLevel(logging.DEBUG)
mode = int('0644', 8)
os.chmod(log_file, mode)
file_handler.setFormatter(formatter)
log.addHandler(file_handler)
log.setLevel(logging.INFO)
|
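The only functional change in the fuel-ostf logger record above is the Formatter pattern and the explicit date format. A tiny standalone sketch with plain stdlib logging shows the shape of a record under the new pattern (the caller's module name appears in parentheses, and the timestamp loses its brackets):

# Sketch: demonstrate the new log line layout with plain stdlib logging.
import logging

formatter = logging.Formatter(
    '%(asctime)s %(levelname)s (%(module)s) %(message)s',
    "%Y-%m-%d %H:%M:%S")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
log = logging.getLogger("demo")
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info("adapter started")
# Emits something like: 2013-09-17 12:00:00 INFO (<caller module>) adapter started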
28729a6d9c1944aa888d91d47fa4a57a631d4ca1
|
scikits/statsmodels/tools/tests/test_data.py
|
scikits/statsmodels/tools/tests/test_data.py
|
import pandas
import numpy as np
from scikits.statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames, [0,2,3,4,5,6,7,8,9])
|
Add test for missing data in DataFrame
|
TST: Add test for missing data in DataFrame
|
Python
|
bsd-3-clause
|
josef-pkt/statsmodels,Averroes/statsmodels,astocko/statsmodels,gef756/statsmodels,astocko/statsmodels,rgommers/statsmodels,huongttlan/statsmodels,ChadFulton/statsmodels,wkfwkf/statsmodels,bavardage/statsmodels,wzbozon/statsmodels,saketkc/statsmodels,hainm/statsmodels,gef756/statsmodels,gef756/statsmodels,yl565/statsmodels,kiyoto/statsmodels,saketkc/statsmodels,hainm/statsmodels,josef-pkt/statsmodels,wzbozon/statsmodels,adammenges/statsmodels,statsmodels/statsmodels,wwf5067/statsmodels,wwf5067/statsmodels,waynenilsen/statsmodels,jstoxrocky/statsmodels,bavardage/statsmodels,waynenilsen/statsmodels,bsipocz/statsmodels,phobson/statsmodels,bert9bert/statsmodels,bsipocz/statsmodels,wzbozon/statsmodels,DonBeo/statsmodels,jseabold/statsmodels,gef756/statsmodels,YihaoLu/statsmodels,musically-ut/statsmodels,bashtage/statsmodels,bashtage/statsmodels,josef-pkt/statsmodels,bert9bert/statsmodels,jseabold/statsmodels,wwf5067/statsmodels,nvoron23/statsmodels,DonBeo/statsmodels,alekz112/statsmodels,YihaoLu/statsmodels,pprett/statsmodels,wkfwkf/statsmodels,nguyentu1602/statsmodels,edhuckle/statsmodels,nvoron23/statsmodels,wzbozon/statsmodels,Averroes/statsmodels,pprett/statsmodels,bsipocz/statsmodels,adammenges/statsmodels,statsmodels/statsmodels,cbmoore/statsmodels,cbmoore/statsmodels,YihaoLu/statsmodels,hlin117/statsmodels,wdurhamh/statsmodels,saketkc/statsmodels,hlin117/statsmodels,YihaoLu/statsmodels,yl565/statsmodels,adammenges/statsmodels,statsmodels/statsmodels,rgommers/statsmodels,jstoxrocky/statsmodels,hainm/statsmodels,bashtage/statsmodels,edhuckle/statsmodels,bert9bert/statsmodels,DonBeo/statsmodels,astocko/statsmodels,adammenges/statsmodels,saketkc/statsmodels,wdurhamh/statsmodels,ChadFulton/statsmodels,kiyoto/statsmodels,saketkc/statsmodels,edhuckle/statsmodels,wdurhamh/statsmodels,jseabold/statsmodels,Averroes/statsmodels,cbmoore/statsmodels,huongttlan/statsmodels,detrout/debian-statsmodels,jstoxrocky/statsmodels,wdurhamh/statsmodels,josef-pkt/statsmodels,wkfwkf/statsmodels,edhuckle/statsmodels,bzero/statsmodels,kiyoto/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,yarikoptic/pystatsmodels,rgommers/statsmodels,musically-ut/statsmodels,wdurhamh/statsmodels,nvoron23/statsmodels,bzero/statsmodels,gef756/statsmodels,nguyentu1602/statsmodels,rgommers/statsmodels,nguyentu1602/statsmodels,phobson/statsmodels,ChadFulton/statsmodels,rgommers/statsmodels,yl565/statsmodels,bavardage/statsmodels,phobson/statsmodels,yarikoptic/pystatsmodels,statsmodels/statsmodels,wkfwkf/statsmodels,phobson/statsmodels,wkfwkf/statsmodels,bzero/statsmodels,ChadFulton/statsmodels,nguyentu1602/statsmodels,jseabold/statsmodels,phobson/statsmodels,hlin117/statsmodels,detrout/debian-statsmodels,pprett/statsmodels,bsipocz/statsmodels,bavardage/statsmodels,jseabold/statsmodels,nvoron23/statsmodels,huongttlan/statsmodels,josef-pkt/statsmodels,yl565/statsmodels,waynenilsen/statsmodels,wwf5067/statsmodels,nvoron23/statsmodels,YihaoLu/statsmodels,jstoxrocky/statsmodels,bavardage/statsmodels,detrout/debian-statsmodels,pprett/statsmodels,DonBeo/statsmodels,alekz112/statsmodels,kiyoto/statsmodels,bzero/statsmodels,bert9bert/statsmodels,edhuckle/statsmodels,detrout/debian-statsmodels,hlin117/statsmodels,ChadFulton/statsmodels,statsmodels/statsmodels,alekz112/statsmodels,cbmoore/statsmodels,musically-ut/statsmodels,yl565/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,Averroes/statsmodels,bzero/statsmodels,ChadFulton/statsmodels,kiyoto/statsmodels,hainm/statsmodels,DonBeo/statsmodels,waynenilsen/statsmodels,alekz112/statsmo
dels,astocko/statsmodels,musically-ut/statsmodels,cbmoore/statsmodels,bashtage/statsmodels,bert9bert/statsmodels,wzbozon/statsmodels,yarikoptic/pystatsmodels,huongttlan/statsmodels
|
TST: Add test for missing data in DataFrame
|
import pandas
import numpy as np
from scikits.statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames, [0,2,3,4,5,6,7,8,9])
|
<commit_before><commit_msg>TST: Add test for missing data in DataFrame<commit_after>
|
import pandas
import numpy as np
from scikits.statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames, [0,2,3,4,5,6,7,8,9])
|
TST: Add test for missing data in DataFrameimport pandas
import numpy as np
from scikits.statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames, [0,2,3,4,5,6,7,8,9])
|
<commit_before><commit_msg>TST: Add test for missing data in DataFrame<commit_after>import pandas
import numpy as np
from scikits.statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames, [0,2,3,4,5,6,7,8,9])
|
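The statsmodels test above encodes the expectation that interpret_data drops the row containing NaN (row index 1). The same expectation can be stated with pandas alone; a small sketch, independent of statsmodels:

# Sketch: the surviving row labels after dropping the NaN row match the test's expectation.
import numpy as np
import pandas

X = np.random.random((10, 5))
X[1, 2] = np.nan
df = pandas.DataFrame(X)
surviving = df.dropna().index.tolist()
assert surviving == [0, 2, 3, 4, 5, 6, 7, 8, 9]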
|
36cd44ad23db1cb0707e5ec3b1fff8680708fb70
|
scripts/examples/02-Board-Control/usb_vcp.py
|
scripts/examples/02-Board-Control/usb_vcp.py
|
# USB VCP example.
# This example shows how to use the USB VCP class to send an image to PC on demand.
#
# WARNING:
# This script should NOT be run from the IDE or command line, it should be saved as main.py
# Note the following commented script shows how to receive the image from the host side.
#
# #!/usr/bin/env python2.7
# import sys, serial, struct
# port = '/dev/ttyACM0'
# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
# sp.write("snap")
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]
# img = sp.read(size)
# sp.close()
#
# with open("img.jpg", "w") as f:
# f.write(img)
import sensor, image, time, ustruct
from pyb import USB_VCP
usb = USB_VCP()
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000) # Wait for settings take effect.
while(True):
cmd = usb.recv(4, timeout=5000)
if (cmd == b'snap'):
img = sensor.snapshot().compress()
usb.send(ustruct.pack("<L", img.size()))
usb.send(img)
|
Add USB VCP example script.
|
Add USB VCP example script.
|
Python
|
mit
|
iabdalkader/openmv,iabdalkader/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv,iabdalkader/openmv
|
Add USB VCP example script.
|
# USB VCP example.
# This example shows how to use the USB VCP class to send an image to PC on demand.
#
# WARNING:
# This script should NOT be run from the IDE or command line, it should be saved as main.py
# Note the following commented script shows how to receive the image from the host side.
#
# #!/usr/bin/env python2.7
# import sys, serial, struct
# port = '/dev/ttyACM0'
# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
# sp.write("snap")
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]
# img = sp.read(size)
# sp.close()
#
# with open("img.jpg", "w") as f:
# f.write(img)
import sensor, image, time, ustruct
from pyb import USB_VCP
usb = USB_VCP()
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000) # Wait for settings take effect.
while(True):
cmd = usb.recv(4, timeout=5000)
if (cmd == b'snap'):
img = sensor.snapshot().compress()
usb.send(ustruct.pack("<L", img.size()))
usb.send(img)
|
<commit_before><commit_msg>Add USB VCP example script.<commit_after>
|
# USB VCP example.
# This example shows how to use the USB VCP class to send an image to PC on demand.
#
# WARNING:
# This script should NOT be run from the IDE or command line, it should be saved as main.py
# Note the following commented script shows how to receive the image from the host side.
#
# #!/usr/bin/env python2.7
# import sys, serial, struct
# port = '/dev/ttyACM0'
# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
# sp.write("snap")
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]
# img = sp.read(size)
# sp.close()
#
# with open("img.jpg", "w") as f:
# f.write(img)
import sensor, image, time, ustruct
from pyb import USB_VCP
usb = USB_VCP()
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000) # Wait for settings take effect.
while(True):
cmd = usb.recv(4, timeout=5000)
if (cmd == b'snap'):
img = sensor.snapshot().compress()
usb.send(ustruct.pack("<L", img.size()))
usb.send(img)
|
Add USB VCP example script.# USB VCP example.
# This example shows how to use the USB VCP class to send an image to PC on demand.
#
# WARNING:
# This script should NOT be run from the IDE or command line, it should be saved as main.py
# Note the following commented script shows how to receive the image from the host side.
#
# #!/usr/bin/env python2.7
# import sys, serial, struct
# port = '/dev/ttyACM0'
# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
# sp.write("snap")
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]
# img = sp.read(size)
# sp.close()
#
# with open("img.jpg", "w") as f:
# f.write(img)
import sensor, image, time, ustruct
from pyb import USB_VCP
usb = USB_VCP()
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000) # Wait for settings take effect.
while(True):
cmd = usb.recv(4, timeout=5000)
if (cmd == b'snap'):
img = sensor.snapshot().compress()
usb.send(ustruct.pack("<L", img.size()))
usb.send(img)
|
<commit_before><commit_msg>Add USB VCP example script.<commit_after># USB VCP example.
# This example shows how to use the USB VCP class to send an image to PC on demand.
#
# WARNING:
# This script should NOT be run from the IDE or command line, it should be saved as main.py
# Note the following commented script shows how to receive the image from the host side.
#
# #!/usr/bin/env python2.7
# import sys, serial, struct
# port = '/dev/ttyACM0'
# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,
# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)
# sp.write("snap")
# sp.flush()
# size = struct.unpack('<L', sp.read(4))[0]
# img = sp.read(size)
# sp.close()
#
# with open("img.jpg", "w") as f:
# f.write(img)
import sensor, image, time, ustruct
from pyb import USB_VCP
usb = USB_VCP()
sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 2000) # Wait for settings take effect.
while(True):
cmd = usb.recv(4, timeout=5000)
if (cmd == b'snap'):
img = sensor.snapshot().compress()
usb.send(ustruct.pack("<L", img.size()))
usb.send(img)
|
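The commented host-side receiver inside the OpenMV script above targets Python 2.7. A Python 3 sketch of the same receiver using pyserial follows; the device path is an assumption and will differ per machine:

# Sketch: Python 3 host-side receiver for the 'snap' protocol above.
import struct
import serial  # pyserial

PORT = "/dev/ttyACM0"  # assumption; adjust for your system

with serial.Serial(PORT, baudrate=115200, timeout=None, dsrdtr=True) as sp:
    sp.write(b"snap")
    sp.flush()
    size = struct.unpack("<L", sp.read(4))[0]
    img = sp.read(size)

with open("img.jpg", "wb") as f:
    f.write(img)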
|
0e0d41e875236c421cd1016449f56c4fa6717c2e
|
examples/shp_lines_to_polygons.py
|
examples/shp_lines_to_polygons.py
|
#!/usr/bin/env python
from __future__ import print_function
from stompy.spatial import join_features
from optparse import OptionParser
try:
from osgeo import ogr
except ImportError:
import ogr
# # How to use this:
# ### Load shapefile
# ods = ogr.Open("/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped.shp")
# output = "/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped-merged.shp"
# orig_layer = ods.GetLayer(0)
# ## process it
# process_layer(orig_layer,output)
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options] input.shp output.shp")
parser.add_option("-p", "--poly",
help="create polygons from closed linestrings",
action="store_true",
dest='create_polygons',default=False)
parser.add_option("-a", "--arc", dest="close_arc", default=False,
action="store_true",
help="close the largest open linestring with a circular arc")
parser.add_option("-t","--tolerance", dest="tolerance", type="float", default=0.0,
metavar="DISTANCE",
help="Tolerance for joining two endpoints, in geographic units")
parser.add_option("-m","--multiple", dest="single_feature", default=True,
action="store_false",metavar="SINGLE_FEATURE")
(options, args) = parser.parse_args()
input_shp,output_shp = args
ods = ogr.Open(input_shp)
layer = ods.GetLayer(0)
join_features.process_layer(layer,output_shp,
create_polygons=options.create_polygons,close_arc=options.close_arc,
tolerance=options.tolerance,single_feature=options.single_feature)
|
Add CLI for joining lines to polygons
|
Add CLI for joining lines to polygons
|
Python
|
mit
|
rustychris/stompy,rustychris/stompy
|
Add CLI for joining lines to polygons
|
#!/usr/bin/env python
from __future__ import print_function
from stompy.spatial import join_features
from optparse import OptionParser
try:
from osgeo import ogr
except ImportError:
import ogr
# # How to use this:
# ### Load shapefile
# ods = ogr.Open("/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped.shp")
# output = "/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped-merged.shp"
# orig_layer = ods.GetLayer(0)
# ## process it
# process_layer(orig_layer,output)
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options] input.shp output.shp")
parser.add_option("-p", "--poly",
help="create polygons from closed linestrings",
action="store_true",
dest='create_polygons',default=False)
parser.add_option("-a", "--arc", dest="close_arc", default=False,
action="store_true",
help="close the largest open linestring with a circular arc")
parser.add_option("-t","--tolerance", dest="tolerance", type="float", default=0.0,
metavar="DISTANCE",
help="Tolerance for joining two endpoints, in geographic units")
parser.add_option("-m","--multiple", dest="single_feature", default=True,
action="store_false",metavar="SINGLE_FEATURE")
(options, args) = parser.parse_args()
input_shp,output_shp = args
ods = ogr.Open(input_shp)
layer = ods.GetLayer(0)
join_features.process_layer(layer,output_shp,
create_polygons=options.create_polygons,close_arc=options.close_arc,
tolerance=options.tolerance,single_feature=options.single_feature)
|
<commit_before><commit_msg>Add CLI for joining lines to polygons<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
from stompy.spatial import join_features
from optparse import OptionParser
try:
from osgeo import ogr
except ImportError:
import ogr
# # How to use this:
# ### Load shapefile
# ods = ogr.Open("/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped.shp")
# output = "/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped-merged.shp"
# orig_layer = ods.GetLayer(0)
# ## process it
# process_layer(orig_layer,output)
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options] input.shp output.shp")
parser.add_option("-p", "--poly",
help="create polygons from closed linestrings",
action="store_true",
dest='create_polygons',default=False)
parser.add_option("-a", "--arc", dest="close_arc", default=False,
action="store_true",
help="close the largest open linestring with a circular arc")
parser.add_option("-t","--tolerance", dest="tolerance", type="float", default=0.0,
metavar="DISTANCE",
help="Tolerance for joining two endpoints, in geographic units")
parser.add_option("-m","--multiple", dest="single_feature", default=True,
action="store_false",metavar="SINGLE_FEATURE")
(options, args) = parser.parse_args()
input_shp,output_shp = args
ods = ogr.Open(input_shp)
layer = ods.GetLayer(0)
join_features.process_layer(layer,output_shp,
create_polygons=options.create_polygons,close_arc=options.close_arc,
tolerance=options.tolerance,single_feature=options.single_feature)
|
Add CLI for joining lines to polygons#!/usr/bin/env python
from __future__ import print_function
from stompy.spatial import join_features
from optparse import OptionParser
try:
from osgeo import ogr
except ImportError:
import ogr
# # How to use this:
# ### Load shapefile
# ods = ogr.Open("/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped.shp")
# output = "/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped-merged.shp"
# orig_layer = ods.GetLayer(0)
# ## process it
# process_layer(orig_layer,output)
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options] input.shp output.shp")
parser.add_option("-p", "--poly",
help="create polygons from closed linestrings",
action="store_true",
dest='create_polygons',default=False)
parser.add_option("-a", "--arc", dest="close_arc", default=False,
action="store_true",
help="close the largest open linestring with a circular arc")
parser.add_option("-t","--tolerance", dest="tolerance", type="float", default=0.0,
metavar="DISTANCE",
help="Tolerance for joining two endpoints, in geographic units")
parser.add_option("-m","--multiple", dest="single_feature", default=True,
action="store_false",metavar="SINGLE_FEATURE")
(options, args) = parser.parse_args()
input_shp,output_shp = args
ods = ogr.Open(input_shp)
layer = ods.GetLayer(0)
join_features.process_layer(layer,output_shp,
create_polygons=options.create_polygons,close_arc=options.close_arc,
tolerance=options.tolerance,single_feature=options.single_feature)
|
<commit_before><commit_msg>Add CLI for joining lines to polygons<commit_after>#!/usr/bin/env python
from __future__ import print_function
from stompy.spatial import join_features
from optparse import OptionParser
try:
from osgeo import ogr
except ImportError:
import ogr
# # How to use this:
# ### Load shapefile
# ods = ogr.Open("/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped.shp")
# output = "/home/rusty/classes/research/spatialdata/us/ca/suntans/shoreline/noaa-medres/pacific_medium_shoreline-cropped-merged.shp"
# orig_layer = ods.GetLayer(0)
# ## process it
# process_layer(orig_layer,output)
if __name__ == '__main__':
parser = OptionParser(usage="usage: %prog [options] input.shp output.shp")
parser.add_option("-p", "--poly",
help="create polygons from closed linestrings",
action="store_true",
dest='create_polygons',default=False)
parser.add_option("-a", "--arc", dest="close_arc", default=False,
action="store_true",
help="close the largest open linestring with a circular arc")
parser.add_option("-t","--tolerance", dest="tolerance", type="float", default=0.0,
metavar="DISTANCE",
help="Tolerance for joining two endpoints, in geographic units")
parser.add_option("-m","--multiple", dest="single_feature", default=True,
action="store_false",metavar="SINGLE_FEATURE")
(options, args) = parser.parse_args()
input_shp,output_shp = args
ods = ogr.Open(input_shp)
layer = ods.GetLayer(0)
join_features.process_layer(layer,output_shp,
create_polygons=options.create_polygons,close_arc=options.close_arc,
tolerance=options.tolerance,single_feature=options.single_feature)
|
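The same processing done by the stompy CLI above can be driven programmatically; a rough sketch in which the input and output shapefile paths are placeholders:

# Sketch: call join_features.process_layer directly instead of via the CLI wrapper.
from stompy.spatial import join_features
from osgeo import ogr

ods = ogr.Open("shoreline_lines.shp")            # placeholder input path
layer = ods.GetLayer(0)
join_features.process_layer(
    layer, "shoreline_polygons.shp",             # placeholder output path
    create_polygons=True, close_arc=False,
    tolerance=0.0, single_feature=True)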
|
35e23c4298283413ed9862125d31d5fc3e0a960c
|
src/program/lwaftr/tests/subcommands/generate_binding_table_test.py
|
src/program/lwaftr/tests/subcommands/generate_binding_table_test.py
|
"""
Test uses "snabb lwaftr generate-binding-table" subcommand. Does not
need NICs as it doesn't use any network functionality. The command is
just to produce a binding table config result.
"""
from subprocess import Popen, PIPE
from test_env import SNABB_CMD, BaseTestCase
class TestGenerateBindingTable(BaseTestCase):
generation_args = (str(SNABB_CMD), "lwaftr", "generate-binding-table")
def test_binding_table_generation(self):
"""
This runs the generate-binding-table subcommand and verifies that
it gets the number of softwires it expects back.
Usage can be found in the README however, it's:
<ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift>
"""
# Build the generate-binding-table command.
cmd = list(self.generation_args)
num = 10
cmd.extend(
("193.5.1.100", str(num), "fc00::100", "fc00:1:2:3:4:5:0:7e", "1")
)
# Execute the command.
generation_proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
# Wait until it's finished.
generation_proc.wait()
# Check the status code is okay.
self.assertEqual(generation_proc.returncode, 0)
# Finally get the stdout value which should be the config.
config = generation_proc.stdout.readlines()
# Iterate over the lines and count the number of softwire definitions.
count = sum([1 for line in config if b"softwire {" in line])
self.assertEqual(count, num)
|
Add test for lwaftr's generate-binding-table subcommand
|
Add test for lwaftr's generate-binding-table subcommand
This adds a test which runs the generate-binding-table command and
verifies that it exits with a 0 status code and produces the expected
output for the command line parameters.
It doesn't check the contents of the block but it does check certain
expected things which should verify some problems if they occur.
|
Python
|
apache-2.0
|
snabbco/snabb,alexandergall/snabbswitch,eugeneia/snabbswitch,alexandergall/snabbswitch,snabbco/snabb,SnabbCo/snabbswitch,Igalia/snabb,Igalia/snabb,dpino/snabb,dpino/snabb,eugeneia/snabb,eugeneia/snabbswitch,dpino/snabb,dpino/snabb,eugeneia/snabb,eugeneia/snabbswitch,Igalia/snabbswitch,dpino/snabbswitch,snabbco/snabb,Igalia/snabb,eugeneia/snabbswitch,Igalia/snabbswitch,dpino/snabbswitch,Igalia/snabb,snabbco/snabb,dpino/snabb,alexandergall/snabbswitch,eugeneia/snabb,snabbco/snabb,eugeneia/snabb,dpino/snabbswitch,dpino/snabb,eugeneia/snabb,alexandergall/snabbswitch,Igalia/snabb,alexandergall/snabbswitch,SnabbCo/snabbswitch,alexandergall/snabbswitch,SnabbCo/snabbswitch,Igalia/snabbswitch,Igalia/snabbswitch,eugeneia/snabb,alexandergall/snabbswitch,snabbco/snabb,dpino/snabbswitch,Igalia/snabb,Igalia/snabbswitch,eugeneia/snabb,alexandergall/snabbswitch,snabbco/snabb,dpino/snabb,Igalia/snabb,SnabbCo/snabbswitch,snabbco/snabb,eugeneia/snabb,Igalia/snabb
|
Add test for lwaftr's generate-binding-table subcommand
This adds a test which runs the generate-binding-table command and
verifies that it exits with a 0 status code and produces the expected
output for the command line parameters.
It doesn't check the contents of the block but it does check certain
expected things which should verify some problems if they occur.
|
"""
Test uses "snabb lwaftr generate-binding-table" subcommand. Does not
need NICs as it doesn't use any network functionality. The command is
just to produce a binding table config result.
"""
from subprocess import Popen, PIPE
from test_env import SNABB_CMD, BaseTestCase
class TestGenerateBindingTable(BaseTestCase):
generation_args = (str(SNABB_CMD), "lwaftr", "generate-binding-table")
def test_binding_table_generation(self):
"""
This runs the generate-binding-table subcommand and verifies that
it gets the number of softwires it expects back.
Usage can be found in the README however, it's:
<ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift>
"""
# Build the generate-binding-table command.
cmd = list(self.generation_args)
num = 10
cmd.extend(
("193.5.1.100", str(num), "fc00::100", "fc00:1:2:3:4:5:0:7e", "1")
)
# Execute the command.
generation_proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
# Wait until it's finished.
generation_proc.wait()
# Check the status code is okay.
self.assertEqual(generation_proc.returncode, 0)
# Finally get the stdout value which should be the config.
config = generation_proc.stdout.readlines()
# Iterate over the lines and count the number of softwire definitions.
count = sum([1 for line in config if b"softwire {" in line])
self.assertEqual(count, num)
|
<commit_before><commit_msg>Add test for lwaftr's generate-binding-table subcommand
This adds a test which runs the generate-binding-table command and
verifies that it exits with a 0 status code and produces the expected
output for the command line parameters.
It doesn't check the contents of the block but it does check certain
expected things which should verify some problems if they occur.<commit_after>
|
"""
Test uses "snabb lwaftr generate-binding-table" subcommand. Does not
need NICs as it doesn't use any network functionality. The command is
just to produce a binding table config result.
"""
from subprocess import Popen, PIPE
from test_env import SNABB_CMD, BaseTestCase
class TestGenerateBindingTable(BaseTestCase):
generation_args = (str(SNABB_CMD), "lwaftr", "generate-binding-table")
def test_binding_table_generation(self):
"""
This runs the generate-binding-table subcommand and verifies that
it gets the number of softwires it expects back.
Usage can be found in the README however, it's:
<ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift>
"""
# Build the generate-binding-table command.
cmd = list(self.generation_args)
num = 10
cmd.extend(
("193.5.1.100", str(num), "fc00::100", "fc00:1:2:3:4:5:0:7e", "1")
)
# Execute the command.
generation_proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
# Wait until it's finished.
generation_proc.wait()
# Check the status code is okay.
self.assertEqual(generation_proc.returncode, 0)
# Finally get the stdout value which should be the config.
config = generation_proc.stdout.readlines()
# Iterate over the lines and count the number of softwire definitions.
count = sum([1 for line in config if b"softwire {" in line])
self.assertEqual(count, num)
|
Add test for lwaftr's generate-binding-table subcommand
This adds a test which runs the generate-binding-table command and
verifies that it exits with a 0 status code and produces the expected
output for the command line parameters.
It doesn't check the contents of the block but it does check certain
expected things which should verify some problems if they occur."""
Test uses "snabb lwaftr generate-binding-table" subcommand. Does not
need NICs as it doesn't use any network functionality. The command is
just to produce a binding table config result.
"""
from subprocess import Popen, PIPE
from test_env import SNABB_CMD, BaseTestCase
class TestGenerateBindingTable(BaseTestCase):
generation_args = (str(SNABB_CMD), "lwaftr", "generate-binding-table")
def test_binding_table_generation(self):
"""
This runs the generate-binding-table subcommand and verifies that
it gets the number of softwires it expects back.
Usage can be found in the README however, it's:
<ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift>
"""
# Build the generate-binding-table command.
cmd = list(self.generation_args)
num = 10
cmd.extend(
("193.5.1.100", str(num), "fc00::100", "fc00:1:2:3:4:5:0:7e", "1")
)
# Execute the command.
generation_proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
# Wait until it's finished.
generation_proc.wait()
# Check the status code is okay.
self.assertEqual(generation_proc.returncode, 0)
# Finally get the stdout value which should be the config.
config = generation_proc.stdout.readlines()
# Iterate over the lines and count the number of softwire definitions.
count = sum([1 for line in config if b"softwire {" in line])
self.assertEqual(count, num)
|
<commit_before><commit_msg>Add test for lwaftr's generate-binding-table subcommand
This adds a test which runs the generate-binding-table command and
verifies that it exits with a 0 status code and produces the expected
output for the command line parameters.
It doesn't check the contents of the block but it does check certain
expected things which should verify some problems if they occur.<commit_after>"""
Test uses "snabb lwaftr generate-binding-table" subcommand. Does not
need NICs as it doesn't use any network functionality. The command is
just to produce a binding table config result.
"""
from subprocess import Popen, PIPE
from test_env import SNABB_CMD, BaseTestCase
class TestGenerateBindingTable(BaseTestCase):
generation_args = (str(SNABB_CMD), "lwaftr", "generate-binding-table")
def test_binding_table_generation(self):
"""
This runs the generate-binding-table subcommand and verifies that
it gets the number of softwires it expects back.
Usage can be found in the README however, it's:
<ipv4> <num_ipv4s> <br_address> <b4> <psid_len> <shift>
"""
# Build the generate-binding-table command.
cmd = list(self.generation_args)
num = 10
cmd.extend(
("193.5.1.100", str(num), "fc00::100", "fc00:1:2:3:4:5:0:7e", "1")
)
# Execute the command.
generation_proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
# Wait until it's finished.
generation_proc.wait()
# Check the status code is okay.
self.assertEqual(generation_proc.returncode, 0)
# Finally get the stdout value which should be the config.
config = generation_proc.stdout.readlines()
# Iterate over the lines and count the number of softwire definitions.
count = sum([1 for line in config if b"softwire {" in line])
self.assertEqual(count, num)
|
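The snabb test above drives the subcommand with Popen and counts softwire blocks in stdout. A compact sketch of the same check using subprocess.run (Python 3.7+); the path to the snabb executable is an assumption:

# Sketch: equivalent check using subprocess.run instead of Popen/wait.
import subprocess

cmd = ["./snabb", "lwaftr", "generate-binding-table",   # executable path is an assumption
       "193.5.1.100", "10", "fc00::100", "fc00:1:2:3:4:5:0:7e", "1"]
result = subprocess.run(cmd, capture_output=True)
assert result.returncode == 0
softwires = sum(1 for line in result.stdout.splitlines() if b"softwire {" in line)
assert softwires == 10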
|
2a94934ffff2f2984ba569ea2f4b195a6c550550
|
derrida/books/migrations/0003_add_foreignkey_reference_canvas_intervention.py
|
derrida/books/migrations/0003_add_foreignkey_reference_canvas_intervention.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-03 19:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interventions', '0001_initial'),
('djiffy', '0002_view_permissions'),
('books', '0002_connect_book_references_canvases_manifests'),
]
operations = [
migrations.AddField(
model_name='reference',
name='canvases',
field=models.ManyToManyField(blank=True, help_text="Scanned images from Derrida's Library | ", to='djiffy.Canvas'),
),
migrations.AddField(
model_name='reference',
name='interventions',
field=models.ManyToManyField(blank=True, to='interventions.Intervention'),
),
]
|
Add migration to fix http 500 errors
|
Add migration to fix http 500 errors
|
Python
|
apache-2.0
|
Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django
|
Add migration to fix http 500 errors
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-03 19:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interventions', '0001_initial'),
('djiffy', '0002_view_permissions'),
('books', '0002_connect_book_references_canvases_manifests'),
]
operations = [
migrations.AddField(
model_name='reference',
name='canvases',
field=models.ManyToManyField(blank=True, help_text="Scanned images from Derrida's Library | ", to='djiffy.Canvas'),
),
migrations.AddField(
model_name='reference',
name='interventions',
field=models.ManyToManyField(blank=True, to='interventions.Intervention'),
),
]
|
<commit_before><commit_msg>Add migration to fix http 500 errors<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-03 19:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interventions', '0001_initial'),
('djiffy', '0002_view_permissions'),
('books', '0002_connect_book_references_canvases_manifests'),
]
operations = [
migrations.AddField(
model_name='reference',
name='canvases',
field=models.ManyToManyField(blank=True, help_text="Scanned images from Derrida's Library | ", to='djiffy.Canvas'),
),
migrations.AddField(
model_name='reference',
name='interventions',
field=models.ManyToManyField(blank=True, to='interventions.Intervention'),
),
]
|
Add migration to fix http 500 errors# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-03 19:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interventions', '0001_initial'),
('djiffy', '0002_view_permissions'),
('books', '0002_connect_book_references_canvases_manifests'),
]
operations = [
migrations.AddField(
model_name='reference',
name='canvases',
field=models.ManyToManyField(blank=True, help_text="Scanned images from Derrida's Library | ", to='djiffy.Canvas'),
),
migrations.AddField(
model_name='reference',
name='interventions',
field=models.ManyToManyField(blank=True, to='interventions.Intervention'),
),
]
|
<commit_before><commit_msg>Add migration to fix http 500 errors<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-03 19:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('interventions', '0001_initial'),
('djiffy', '0002_view_permissions'),
('books', '0002_connect_book_references_canvases_manifests'),
]
operations = [
migrations.AddField(
model_name='reference',
name='canvases',
field=models.ManyToManyField(blank=True, help_text="Scanned images from Derrida's Library | ", to='djiffy.Canvas'),
),
migrations.AddField(
model_name='reference',
name='interventions',
field=models.ManyToManyField(blank=True, to='interventions.Intervention'),
),
]
|
|
817222e0263a653dd5bda51a237b3c51a8dc2487
|
rnacentral/portal/models/ensembl_compara.py
|
rnacentral/portal/models/ensembl_compara.py
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from caching.base import CachingMixin, CachingManager
from django.db import models
class EnsemblCompara(CachingMixin, models.Model):
id = models.IntegerField(primary_key=True)
ensembl_transcript_id = models.TextField()
urs_taxid = models.ForeignKey(models.RnaPrecomputed, to_field='id')
homology_id = models.IntegerField()
objects = CachingManager()
class Meta:
db_table = 'ensembl_compara'
|
Add Ensembl Compara django model
|
Add Ensembl Compara django model
|
Python
|
apache-2.0
|
RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode
|
Add Ensembl Compara django model
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from caching.base import CachingMixin, CachingManager
from django.db import models
class EnsemblCompara(CachingMixin, models.Model):
id = models.IntegerField(primary_key=True)
ensembl_transcript_id = models.TextField()
urs_taxid = models.ForeignKey(models.RnaPrecomputed, to_field='id')
homology_id = models.IntegerField()
objects = CachingManager()
class Meta:
db_table = 'ensembl_compara'
|
<commit_before><commit_msg>Add Ensembl Compara django model<commit_after>
|
"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from caching.base import CachingMixin, CachingManager
from django.db import models
class EnsemblCompara(CachingMixin, models.Model):
id = models.IntegerField(primary_key=True)
ensembl_transcript_id = models.TextField()
urs_taxid = models.ForeignKey(models.RnaPrecomputed, to_field='id')
homology_id = models.IntegerField()
objects = CachingManager()
class Meta:
db_table = 'ensembl_compara'
|
Add Ensembl Compara django model"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from caching.base import CachingMixin, CachingManager
from django.db import models
class EnsemblCompara(CachingMixin, models.Model):
id = models.IntegerField(primary_key=True)
ensembl_transcript_id = models.TextField()
urs_taxid = models.ForeignKey(models.RnaPrecomputed, to_field='id')
homology_id = models.IntegerField()
objects = CachingManager()
class Meta:
db_table = 'ensembl_compara'
|
<commit_before><commit_msg>Add Ensembl Compara django model<commit_after>"""
Copyright [2009-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from caching.base import CachingMixin, CachingManager
from django.db import models
class EnsemblCompara(CachingMixin, models.Model):
id = models.IntegerField(primary_key=True)
ensembl_transcript_id = models.TextField()
urs_taxid = models.ForeignKey(models.RnaPrecomputed, to_field='id')
homology_id = models.IntegerField()
objects = CachingManager()
class Meta:
db_table = 'ensembl_compara'
|
|
b35e205cc683e8a87e5591791d697608e2b0c616
|
boosh/test_ssh.py
|
boosh/test_ssh.py
|
from boosh.ssh import Instance
def test_cache_dump():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
assert src_instance.as_cache_line() == cache_line
def test_cache_load():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
dst_instance = Instance.from_cache_line(cache_line)
assert src_instance.__dict__ == dst_instance.__dict__
|
Add Instance cache dump/load tests
|
Add Instance cache dump/load tests
|
Python
|
mit
|
betaworks/boosh
|
Add Instance cache dump/load tests
|
from boosh.ssh import Instance
def test_cache_dump():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
assert src_instance.as_cache_line() == cache_line
def test_cache_load():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
dst_instance = Instance.from_cache_line(cache_line)
assert src_instance.__dict__ == dst_instance.__dict__
|
<commit_before><commit_msg>Add Instance cache dump/load tests<commit_after>
|
from boosh.ssh import Instance
def test_cache_dump():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
assert src_instance.as_cache_line() == cache_line
def test_cache_load():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
dst_instance = Instance.from_cache_line(cache_line)
assert src_instance.__dict__ == dst_instance.__dict__
|
Add Instance cache dump/load testsfrom boosh.ssh import Instance
def test_cache_dump():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
assert src_instance.as_cache_line() == cache_line
def test_cache_load():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
dst_instance = Instance.from_cache_line(cache_line)
assert src_instance.__dict__ == dst_instance.__dict__
|
<commit_before><commit_msg>Add Instance cache dump/load tests<commit_after>from boosh.ssh import Instance
def test_cache_dump():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
assert src_instance.as_cache_line() == cache_line
def test_cache_load():
src_instance = Instance(
id='i-10ca9425',
profile_name='testing',
region='us-west-1',
private_ip_address='127.0.0.1',
public_ip_address='10.0.0.1',
vpc_id='vpc-bbe848de',
subnet_id='subnet-b5bc10ec',
)
cache_line = ('i-10ca9425 {"private_ip_address":"127.0.0.1",'
'"profile_name":"testing",'
'"public_ip_address":"10.0.0.1",'
'"region":"us-west-1",'
'"subnet_id":"subnet-b5bc10ec",'
'"vpc_id":"vpc-bbe848de"}')
dst_instance = Instance.from_cache_line(cache_line)
assert src_instance.__dict__ == dst_instance.__dict__
|
|
b113e2ee4c97cbfb4300b22daa3209e8b8580ed3
|
clusterpy/tests/test_clustering.py
|
clusterpy/tests/test_clustering.py
|
"""
Testing clustering algorithms in Clusterpy
** All the following tests take considerable time to complete **
"""
from unittest import TestCase
class TestClusteringAlgorithms(TestCase):
def setUp(self):
pass
def test_arisel(self):
assert False
def tearDown(self):
pass
|
Add test structure for clustering algorithms
|
Add test structure for clustering algorithms
|
Python
|
bsd-3-clause
|
clusterpy/clusterpy,clusterpy/clusterpy
|
Add test structure for clustering algorithms
|
"""
Testing clustering algorithms in Clusterpy
** All the following tests take considerable time to complete **
"""
from unittest import TestCase
class TestClusteringAlgorithms(TestCase):
def setUp(self):
pass
def test_arisel(self):
assert False
def tearDown(self):
pass
|
<commit_before><commit_msg>Add test structure for clustering algorithms<commit_after>
|
"""
Testing clustering algorithms in Clusterpy
** All the following tests take considerable time to complete **
"""
from unittest import TestCase
class TestClusteringAlgorithms(TestCase):
def setUp(self):
pass
def test_arisel(self):
assert False
def tearDown(self):
pass
|
Add test structure for clustering algorithms"""
Testing clustering algorithms in Clusterpy
** All the following tests take considerable time to complete **
"""
from unittest import TestCase
class TestClusteringAlgorithms(TestCase):
def setUp(self):
pass
def test_arisel(self):
assert False
def tearDown(self):
pass
|
<commit_before><commit_msg>Add test structure for clustering algorithms<commit_after>"""
Testing clustering algorithms in Clusterpy
** All the following tests take considerable time to complete **
"""
from unittest import TestCase
class TestClusteringAlgorithms(TestCase):
def setUp(self):
pass
def test_arisel(self):
assert False
def tearDown(self):
pass
|
|
ad9a9a9c192beedd388bc8d3ff639d04630bd1ae
|
cnxarchive/sql/migrations/20160624172846_add_post_publication_trigger.py
|
cnxarchive/sql/migrations/20160624172846_add_post_publication_trigger.py
|
# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION post_publication() RETURNS trigger AS $$
BEGIN
NOTIFY post_publication;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER post_publication_trigger
AFTER INSERT OR UPDATE ON modules FOR EACH ROW
WHEN (NEW.stateid = 5)
EXECUTE PROCEDURE post_publication();""")
def down(cursor):
cursor.execute("""\
DROP TRIGGER post_publication_trigger ON modules;
DROP FUNCTION post_publication();""")
|
Add migration to add post publication trigger
|
Add migration to add post publication trigger
|
Python
|
agpl-3.0
|
Connexions/cnx-archive,Connexions/cnx-archive
|
Add migration to add post publication trigger
|
# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION post_publication() RETURNS trigger AS $$
BEGIN
NOTIFY post_publication;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER post_publication_trigger
AFTER INSERT OR UPDATE ON modules FOR EACH ROW
WHEN (NEW.stateid = 5)
EXECUTE PROCEDURE post_publication();""")
def down(cursor):
cursor.execute("""\
DROP TRIGGER post_publication_trigger ON modules;
DROP FUNCTION post_publication();""")
|
<commit_before><commit_msg>Add migration to add post publication trigger<commit_after>
|
# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION post_publication() RETURNS trigger AS $$
BEGIN
NOTIFY post_publication;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER post_publication_trigger
AFTER INSERT OR UPDATE ON modules FOR EACH ROW
WHEN (NEW.stateid = 5)
EXECUTE PROCEDURE post_publication();""")
def down(cursor):
cursor.execute("""\
DROP TRIGGER post_publication_trigger ON modules;
DROP FUNCTION post_publication();""")
|
Add migration to add post publication trigger# -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION post_publication() RETURNS trigger AS $$
BEGIN
NOTIFY post_publication;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER post_publication_trigger
AFTER INSERT OR UPDATE ON modules FOR EACH ROW
WHEN (NEW.stateid = 5)
EXECUTE PROCEDURE post_publication();""")
def down(cursor):
cursor.execute("""\
DROP TRIGGER post_publication_trigger ON modules;
DROP FUNCTION post_publication();""")
|
<commit_before><commit_msg>Add migration to add post publication trigger<commit_after># -*- coding: utf-8 -*-
def up(cursor):
cursor.execute("""\
CREATE OR REPLACE FUNCTION post_publication() RETURNS trigger AS $$
BEGIN
NOTIFY post_publication;
RETURN NEW;
END;
$$ LANGUAGE 'plpgsql';
CREATE TRIGGER post_publication_trigger
AFTER INSERT OR UPDATE ON modules FOR EACH ROW
WHEN (NEW.stateid = 5)
EXECUTE PROCEDURE post_publication();""")
def down(cursor):
cursor.execute("""\
DROP TRIGGER post_publication_trigger ON modules;
DROP FUNCTION post_publication();""")
|
|
8f11be5014deae4ec882a43774cddaabfc6033b1
|
examples/launch_cloud_harness.py
|
examples/launch_cloud_harness.py
|
import json
import os
# from osgeo import gdal
from gbdxtools import Interface
from task_template import TaskTemplate, Task, InputPort, OutputPort
gbdx = Interface()
# data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003" # WV02 Image over San Francisco
# aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=True, enable_pansharpen=True)
class RasterMetaApp(TaskTemplate):
task = Task("RasterMetaTask")
task.input_raster = InputPort(value="/Users/michaelconnor/demo_image")
task.output_meta = OutputPort(value="/Users/michaelconnor")
def invoke(self):
images = self.task.input_raster.list_files(extensions=[".tif", ".TIF"])
# Magic Starts here
for img in images:
header = "META FOR %s\n\n" % os.path.basename(img)
# gtif = gdal.Open(img)
self.task.output_meta.write('metadata.txt', header)
# self.task.output_meta.write('metadata.txt', json.dumps(gtif.GetMetadata(), indent=2))
ch_task = gbdx.Task(RasterMetaApp)
workflow = gbdx.Workflow([ch_task])
# workflow = gbdx.Workflow([aoptask, ch_task])
workflow.savedata(ch_task.outputs.output_meta, location='CH_OUT')
# workflow.savedata(aoptask.outputs.data, location='AOP_OUT')
workflow.execute()
|
Add example file for running cloud-harness tasks.
|
Add example file for running cloud-harness tasks.
|
Python
|
mit
|
michaelconnor00/gbdxtools,michaelconnor00/gbdxtools
|
Add example file for running cloud-harness tasks.
|
import json
import os
# from osgeo import gdal
from gbdxtools import Interface
from task_template import TaskTemplate, Task, InputPort, OutputPort
gbdx = Interface()
# data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003" # WV02 Image over San Francisco
# aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=True, enable_pansharpen=True)
class RasterMetaApp(TaskTemplate):
task = Task("RasterMetaTask")
task.input_raster = InputPort(value="/Users/michaelconnor/demo_image")
task.output_meta = OutputPort(value="/Users/michaelconnor")
def invoke(self):
images = self.task.input_raster.list_files(extensions=[".tif", ".TIF"])
# Magic Starts here
for img in images:
header = "META FOR %s\n\n" % os.path.basename(img)
# gtif = gdal.Open(img)
self.task.output_meta.write('metadata.txt', header)
# self.task.output_meta.write('metadata.txt', json.dumps(gtif.GetMetadata(), indent=2))
ch_task = gbdx.Task(RasterMetaApp)
workflow = gbdx.Workflow([ch_task])
# workflow = gbdx.Workflow([aoptask, ch_task])
workflow.savedata(ch_task.outputs.output_meta, location='CH_OUT')
# workflow.savedata(aoptask.outputs.data, location='AOP_OUT')
workflow.execute()
|
<commit_before><commit_msg>Add example file for running cloud-harness tasks.<commit_after>
|
import json
import os
# from osgeo import gdal
from gbdxtools import Interface
from task_template import TaskTemplate, Task, InputPort, OutputPort
gbdx = Interface()
# data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003" # WV02 Image over San Francisco
# aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=True, enable_pansharpen=True)
class RasterMetaApp(TaskTemplate):
task = Task("RasterMetaTask")
task.input_raster = InputPort(value="/Users/michaelconnor/demo_image")
task.output_meta = OutputPort(value="/Users/michaelconnor")
def invoke(self):
images = self.task.input_raster.list_files(extensions=[".tif", ".TIF"])
# Magic Starts here
for img in images:
header = "META FOR %s\n\n" % os.path.basename(img)
# gtif = gdal.Open(img)
self.task.output_meta.write('metadata.txt', header)
# self.task.output_meta.write('metadata.txt', json.dumps(gtif.GetMetadata(), indent=2))
ch_task = gbdx.Task(RasterMetaApp)
workflow = gbdx.Workflow([ch_task])
# workflow = gbdx.Workflow([aoptask, ch_task])
workflow.savedata(ch_task.outputs.output_meta, location='CH_OUT')
# workflow.savedata(aoptask.outputs.data, location='AOP_OUT')
workflow.execute()
|
Add example file for running cloud-harness tasks.import json
import os
# from osgeo import gdal
from gbdxtools import Interface
from task_template import TaskTemplate, Task, InputPort, OutputPort
gbdx = Interface()
# data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003" # WV02 Image over San Francisco
# aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=True, enable_pansharpen=True)
class RasterMetaApp(TaskTemplate):
task = Task("RasterMetaTask")
task.input_raster = InputPort(value="/Users/michaelconnor/demo_image")
task.output_meta = OutputPort(value="/Users/michaelconnor")
def invoke(self):
images = self.task.input_raster.list_files(extensions=[".tif", ".TIF"])
# Magic Starts here
for img in images:
header = "META FOR %s\n\n" % os.path.basename(img)
# gtif = gdal.Open(img)
self.task.output_meta.write('metadata.txt', header)
# self.task.output_meta.write('metadata.txt', json.dumps(gtif.GetMetadata(), indent=2))
ch_task = gbdx.Task(RasterMetaApp)
workflow = gbdx.Workflow([ch_task])
# workflow = gbdx.Workflow([aoptask, ch_task])
workflow.savedata(ch_task.outputs.output_meta, location='CH_OUT')
# workflow.savedata(aoptask.outputs.data, location='AOP_OUT')
workflow.execute()
|
<commit_before><commit_msg>Add example file for running cloud-harness tasks.<commit_after>import json
import os
# from osgeo import gdal
from gbdxtools import Interface
from task_template import TaskTemplate, Task, InputPort, OutputPort
gbdx = Interface()
# data = "s3://receiving-dgcs-tdgplatform-com/054813633050_01_003" # WV02 Image over San Francisco
# aoptask = gbdx.Task("AOP_Strip_Processor", data=data, enable_acomp=True, enable_pansharpen=True)
class RasterMetaApp(TaskTemplate):
task = Task("RasterMetaTask")
task.input_raster = InputPort(value="/Users/michaelconnor/demo_image")
task.output_meta = OutputPort(value="/Users/michaelconnor")
def invoke(self):
images = self.task.input_raster.list_files(extensions=[".tif", ".TIF"])
# Magic Starts here
for img in images:
header = "META FOR %s\n\n" % os.path.basename(img)
# gtif = gdal.Open(img)
self.task.output_meta.write('metadata.txt', header)
# self.task.output_meta.write('metadata.txt', json.dumps(gtif.GetMetadata(), indent=2))
ch_task = gbdx.Task(RasterMetaApp)
workflow = gbdx.Workflow([ch_task])
# workflow = gbdx.Workflow([aoptask, ch_task])
workflow.savedata(ch_task.outputs.output_meta, location='CH_OUT')
# workflow.savedata(aoptask.outputs.data, location='AOP_OUT')
workflow.execute()
|
|
c2ff5912364c0ec94d06416f70868ba7057a26f7
|
tests/app/soc/views/test_root_url.py
|
tests/app/soc/views/test_root_url.py
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the root url.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from tests.test_utils import DjangoTestCase
class RootUrlViewTest(DjangoTestCase):
"""Tests program homepage views.
"""
def setUp(self):
self.init()
def testRootUrl(self):
"""Tests that the root url redirects to the gsoc homepage.
"""
url = '/'
response = self.client.get(url)
homepage = '/gsoc/homepage/' + self.gsoc.key().name()
self.assertResponseRedirect(response, homepage)
|
Add test for the root url
|
Add test for the root url
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Add test for the root url
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the root url.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from tests.test_utils import DjangoTestCase
class RootUrlViewTest(DjangoTestCase):
"""Tests program homepage views.
"""
def setUp(self):
self.init()
def testRootUrl(self):
"""Tests that the root url redirects to the gsoc homepage.
"""
url = '/'
response = self.client.get(url)
homepage = '/gsoc/homepage/' + self.gsoc.key().name()
self.assertResponseRedirect(response, homepage)
|
<commit_before><commit_msg>Add test for the root url<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the root url.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from tests.test_utils import DjangoTestCase
class RootUrlViewTest(DjangoTestCase):
"""Tests program homepage views.
"""
def setUp(self):
self.init()
def testRootUrl(self):
"""Tests that the root url redirects to the gsoc homepage.
"""
url = '/'
response = self.client.get(url)
homepage = '/gsoc/homepage/' + self.gsoc.key().name()
self.assertResponseRedirect(response, homepage)
|
Add test for the root url#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the root url.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from tests.test_utils import DjangoTestCase
class RootUrlViewTest(DjangoTestCase):
"""Tests program homepage views.
"""
def setUp(self):
self.init()
def testRootUrl(self):
"""Tests that the root url redirects to the gsoc homepage.
"""
url = '/'
response = self.client.get(url)
homepage = '/gsoc/homepage/' + self.gsoc.key().name()
self.assertResponseRedirect(response, homepage)
|
<commit_before><commit_msg>Add test for the root url<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2010 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the root url.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from tests.test_utils import DjangoTestCase
class RootUrlViewTest(DjangoTestCase):
"""Tests program homepage views.
"""
def setUp(self):
self.init()
def testRootUrl(self):
"""Tests that the root url redirects to the gsoc homepage.
"""
url = '/'
response = self.client.get(url)
homepage = '/gsoc/homepage/' + self.gsoc.key().name()
self.assertResponseRedirect(response, homepage)
|
|
d34a57041e4a9058ff886431cd54e9d2c17ec468
|
tamper/randomfakeproxy.py
|
tamper/randomfakeproxy.py
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
from random import randrange
__priority__ = PRIORITY.NORMAL
def dependencies():
pass
def generateIP():
blockOne = randrange(0, 255, 1)
blockTwo = randrange(0, 255, 1)
blockThree = randrange(0, 255, 1)
blockFour = randrange(0, 255, 1)
if blockOne == 10:
return generateIP()
elif blockOne == 172:
return generateIP()
elif blockOne == 192:
return generateIP()
else:
return str(blockOne) + '.' + str(blockTwo) + '.' + str(blockThree) + '.' + str(blockFour)
def tamper(payload, **kwargs):
"""
Append an HTTP request header (X-Forwarded-For) to bypass
WAF (usually application-based) IP ban protection.
Mehmet INCE
"""
headers = kwargs.get("headers", {})
headers["X-Forwarded-For"] = generateIP()
return payload
|
Add random X-Forwarded-For to bypass IP Ban.
|
Add random X-Forwarded-For to bypass IP Ban.
|
Python
|
mit
|
dtrip/.ubuntu,dtrip/.ubuntu,RexGene/monsu-server,RexGene/monsu-server
|
Add random X-Forwarded-For to bypass IP Ban.
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
from random import randrange
__priority__ = PRIORITY.NORMAL
def dependencies():
pass
def generateIP():
blockOne = randrange(0, 255, 1)
blockTwo = randrange(0, 255, 1)
blockThree = randrange(0, 255, 1)
blockFour = randrange(0, 255, 1)
if blockOne == 10:
return generateIP()
elif blockOne == 172:
return generateIP()
elif blockOne == 192:
return generateIP()
else:
return str(blockOne) + '.' + str(blockTwo) + '.' + str(blockThree) + '.' + str(blockFour)
def tamper(payload, **kwargs):
"""
Append an HTTP request header (X-Forwarded-For) to bypass
WAF (usually application-based) IP ban protection.
Mehmet INCE
"""
headers = kwargs.get("headers", {})
headers["X-Forwarded-For"] = generateIP()
return payload
|
<commit_before><commit_msg>Add random X-Forwarded-For to bypass IP Ban.<commit_after>
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
from random import randrange
__priority__ = PRIORITY.NORMAL
def dependencies():
pass
def generateIP():
blockOne = randrange(0, 255, 1)
blockTwo = randrange(0, 255, 1)
blockThree = randrange(0, 255, 1)
blockFour = randrange(0, 255, 1)
if blockOne == 10:
return generateIP()
elif blockOne == 172:
return generateIP()
elif blockOne == 192:
return generateIP()
else:
return str(blockOne) + '.' + str(blockTwo) + '.' + str(blockThree) + '.' + str(blockFour)
def tamper(payload, **kwargs):
"""
Append an HTTP request header (X-Forwarded-For) to bypass
WAF (usually application-based) IP ban protection.
Mehmet INCE
"""
headers = kwargs.get("headers", {})
headers["X-Forwarded-For"] = generateIP()
return payload
|
Add random X-Forwarded-For to bypass IP Ban.#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
from random import randrange
__priority__ = PRIORITY.NORMAL
def dependencies():
pass
def generateIP():
blockOne = randrange(0, 255, 1)
blockTwo = randrange(0, 255, 1)
blockThree = randrange(0, 255, 1)
blockFour = randrange(0, 255, 1)
if blockOne == 10:
return generateIP()
elif blockOne == 172:
return generateIP()
elif blockOne == 192:
return generateIP()
else:
return str(blockOne) + '.' + str(blockTwo) + '.' + str(blockThree) + '.' + str(blockFour)
def tamper(payload, **kwargs):
"""
Append an HTTP request header (X-Forwarded-For) to bypass
WAF (usually application-based) IP ban protection.
Mehmet INCE
"""
headers = kwargs.get("headers", {})
headers["X-Forwarded-For"] = generateIP()
return payload
|
<commit_before><commit_msg>Add random X-Forwarded-For to bypass IP Ban.<commit_after>#!/usr/bin/env python
"""
Copyright (c) 2006-2014 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
from lib.core.enums import PRIORITY
from random import randrange
__priority__ = PRIORITY.NORMAL
def dependencies():
pass
def generateIP():
blockOne = randrange(0, 255, 1)
blockTwo = randrange(0, 255, 1)
blockThree = randrange(0, 255, 1)
blockFour = randrange(0, 255, 1)
if blockOne == 10:
return generateIP()
elif blockOne == 172:
return generateIP()
elif blockOne == 192:
return generateIP()
else:
return str(blockOne) + '.' + str(blockTwo) + '.' + str(blockThree) + '.' + str(blockFour)
def tamper(payload, **kwargs):
"""
Append an HTTP request header (X-Forwarded-For) to bypass
WAF (usually application-based) IP ban protection.
Mehmet INCE
"""
headers = kwargs.get("headers", {})
headers["X-Forwarded-For"] = generateIP()
return payload
|
|
c3f8bac3571689349df5340c3ce06b3e4c100b7b
|
django_olcc/olcc/forms.py
|
django_olcc/olcc/forms.py
|
from django import forms
from olcc.models import Store
COUNTIES = (
(u'baker', u'Baker'),
(u'benton', u'Benton'),
(u'clackamas', u'Clackamas'),
(u'clatsop', u'Clatsop'),
(u'columbia', u'Columbia'),
(u'coos', u'Coos'),
(u'crook', u'Crook'),
(u'curry', u'Curry'),
(u'deschutes', u'Deschutes'),
(u'douglas', u'Douglas'),
(u'gilliam', u'Gilliam'),
(u'grant', u'Grant'),
(u'harney', u'Harney'),
(u'hood river', u'Hood River'),
(u'jackson', u'Jackson'),
(u'jefferson', u'Jefferson'),
(u'josephine', u'Josephine'),
(u'klamath', u'Klamath'),
(u'lake', u'Lake'),
(u'lane', u'Lane'),
(u'lincoln', u'Lincoln'),
(u'linn', u'Linn'),
(u'malheur', u'Malheur'),
(u'marion', u'Marion'),
(u'morrow', u'Morrow'),
(u'multnomah', u'Multnomah'),
(u'polk', u'Polk'),
(u'sherman', u'Sherman'),
(u'tillamook', u'Tillamook'),
(u'umatilla', u'Umatilla'),
(u'union', u'Union'),
(u'wallowa', u'Wallowa'),
(u'wasco', u'Wasco'),
(u'washington', u'Washington'),
(u'wheeler', u'Wheeler'),
(u'yamhill', u'Yamhill'))
class CountyForm(forms.Form):
"""
A simple form containing a single choice field with a list
of Oregon counties as choices.
"""
county = forms.ChoiceField(label='County', choices=COUNTIES,)
|
Add a basic CountyForm for allowing the visitor to select from a list of Oregon counties.
|
Add a basic CountyForm for allowing the visitor to select from a list of Oregon counties.
|
Python
|
mit
|
twaddington/django-olcc,twaddington/django-olcc,twaddington/django-olcc
|
Add a basic CountyForm for allowing the visitor to select from a list of Oregon counties.
|
from django import forms
from olcc.models import Store
COUNTIES = (
(u'baker', u'Baker'),
(u'benton', u'Benton'),
(u'clackamas', u'Clackamas'),
(u'clatsop', u'Clatsop'),
(u'columbia', u'Columbia'),
(u'coos', u'Coos'),
(u'crook', u'Crook'),
(u'curry', u'Curry'),
(u'deschutes', u'Deschutes'),
(u'douglas', u'Douglas'),
(u'gilliam', u'Gilliam'),
(u'grant', u'Grant'),
(u'harney', u'Harney'),
(u'hood river', u'Hood River'),
(u'jackson', u'Jackson'),
(u'jefferson', u'Jefferson'),
(u'josephine', u'Josephine'),
(u'klamath', u'Klamath'),
(u'lake', u'Lake'),
(u'lane', u'Lane'),
(u'lincoln', u'Lincoln'),
(u'linn', u'Linn'),
(u'malheur', u'Malheur'),
(u'marion', u'Marion'),
(u'morrow', u'Morrow'),
(u'multnomah', u'Multnomah'),
(u'polk', u'Polk'),
(u'sherman', u'Sherman'),
(u'tillamook', u'Tillamook'),
(u'umatilla', u'Umatilla'),
(u'union', u'Union'),
(u'wallowa', u'Wallowa'),
(u'wasco', u'Wasco'),
(u'washington', u'Washington'),
(u'wheeler', u'Wheeler'),
(u'yamhill', u'Yamhill'))
class CountyForm(forms.Form):
"""
A simple form containing a single choice field with a list
of Oregon counties as choices.
"""
county = forms.ChoiceField(label='County', choices=COUNTIES,)
|
<commit_before><commit_msg>Add a basic CountyForm for allowing the visitor to select from a list of Oregon counties.<commit_after>
|
from django import forms
from olcc.models import Store
COUNTIES = (
(u'baker', u'Baker'),
(u'benton', u'Benton'),
(u'clackamas', u'Clackamas'),
(u'clatsop', u'Clatsop'),
(u'columbia', u'Columbia'),
(u'coos', u'Coos'),
(u'crook', u'Crook'),
(u'curry', u'Curry'),
(u'deschutes', u'Deschutes'),
(u'douglas', u'Douglas'),
(u'gilliam', u'Gilliam'),
(u'grant', u'Grant'),
(u'harney', u'Harney'),
(u'hood river', u'Hood River'),
(u'jackson', u'Jackson'),
(u'jefferson', u'Jefferson'),
(u'josephine', u'Josephine'),
(u'klamath', u'Klamath'),
(u'lake', u'Lake'),
(u'lane', u'Lane'),
(u'lincoln', u'Lincoln'),
(u'linn', u'Linn'),
(u'malheur', u'Malheur'),
(u'marion', u'Marion'),
(u'morrow', u'Morrow'),
(u'multnomah', u'Multnomah'),
(u'polk', u'Polk'),
(u'sherman', u'Sherman'),
(u'tillamook', u'Tillamook'),
(u'umatilla', u'Umatilla'),
(u'union', u'Union'),
(u'wallowa', u'Wallowa'),
(u'wasco', u'Wasco'),
(u'washington', u'Washington'),
(u'wheeler', u'Wheeler'),
(u'yamhill', u'Yamhill'))
class CountyForm(forms.Form):
"""
A simple form containing a single choice field with a list
of Oregon counties as choices.
"""
county = forms.ChoiceField(label='County', choices=COUNTIES,)
|
Add a basic CountyForm for allowing the visitor to select from a list of Oregon counties.from django import forms
from olcc.models import Store
COUNTIES = (
(u'baker', u'Baker'),
(u'benton', u'Benton'),
(u'clackamas', u'Clackamas'),
(u'clatsop', u'Clatsop'),
(u'columbia', u'Columbia'),
(u'coos', u'Coos'),
(u'crook', u'Crook'),
(u'curry', u'Curry'),
(u'deschutes', u'Deschutes'),
(u'douglas', u'Douglas'),
(u'gilliam', u'Gilliam'),
(u'grant', u'Grant'),
(u'harney', u'Harney'),
(u'hood river', u'Hood River'),
(u'jackson', u'Jackson'),
(u'jefferson', u'Jefferson'),
(u'josephine', u'Josephine'),
(u'klamath', u'Klamath'),
(u'lake', u'Lake'),
(u'lane', u'Lane'),
(u'lincoln', u'Lincoln'),
(u'linn', u'Linn'),
(u'malheur', u'Malheur'),
(u'marion', u'Marion'),
(u'morrow', u'Morrow'),
(u'multnomah', u'Multnomah'),
(u'polk', u'Polk'),
(u'sherman', u'Sherman'),
(u'tillamook', u'Tillamook'),
(u'umatilla', u'Umatilla'),
(u'union', u'Union'),
(u'wallowa', u'Wallowa'),
(u'wasco', u'Wasco'),
(u'washington', u'Washington'),
(u'wheeler', u'Wheeler'),
(u'yamhill', u'Yamhill'))
class CountyForm(forms.Form):
"""
A simple form containing a single choice field with a list
of Oregon counties as choices.
"""
county = forms.ChoiceField(label='County', choices=COUNTIES,)
|
<commit_before><commit_msg>Add a basic CountyForm for allowing the visitor to select from a list of Oregon counties.<commit_after>from django import forms
from olcc.models import Store
COUNTIES = (
(u'baker', u'Baker'),
(u'benton', u'Benton'),
(u'clackamas', u'Clackamas'),
(u'clatsop', u'Clatsop'),
(u'columbia', u'Columbia'),
(u'coos', u'Coos'),
(u'crook', u'Crook'),
(u'curry', u'Curry'),
(u'deschutes', u'Deschutes'),
(u'douglas', u'Douglas'),
(u'gilliam', u'Gilliam'),
(u'grant', u'Grant'),
(u'harney', u'Harney'),
(u'hood river', u'Hood River'),
(u'jackson', u'Jackson'),
(u'jefferson', u'Jefferson'),
(u'josephine', u'Josephine'),
(u'klamath', u'Klamath'),
(u'lake', u'Lake'),
(u'lane', u'Lane'),
(u'lincoln', u'Lincoln'),
(u'linn', u'Linn'),
(u'malheur', u'Malheur'),
(u'marion', u'Marion'),
(u'morrow', u'Morrow'),
(u'multnomah', u'Multnomah'),
(u'polk', u'Polk'),
(u'sherman', u'Sherman'),
(u'tillamook', u'Tillamook'),
(u'umatilla', u'Umatilla'),
(u'union', u'Union'),
(u'wallowa', u'Wallowa'),
(u'wasco', u'Wasco'),
(u'washington', u'Washington'),
(u'wheeler', u'Wheeler'),
(u'yamhill', u'Yamhill'))
class CountyForm(forms.Form):
"""
A simple form containing a single choice field with a list
of Oregon counties as choices.
"""
county = forms.ChoiceField(label='County', choices=COUNTIES,)
|
|
b6238e741a9bc476b6b362893b462cb57532e618
|
tests/game_client_test.py
|
tests/game_client_test.py
|
from mock import Mock
from nose.tools import *
from nose.plugins.attrib import attr
import pybomb
from pybomb.response import Response
from pybomb.clients.game_client import GameClient
def setup():
global game_client, bad_response_client, bad_request_client
global return_fields
mock_response = Mock()
mock_response.json.return_value = {
'status_code': GameClient.RESPONSE_STATUS_OK,
'number_of_page_results': 1,
'number_of_total_results': 1,
'results': {},
}
mock_response.raise_for_status.return_value = None
game_client = GameClient('mock_api_key')
game_client._query_api = Mock(return_value=mock_response)
bad_request_client = GameClient('mock_api_key')
bad_request_client.URI_BASE = 'http://httpbin.org/status/404'
mock_bad_response = Mock()
mock_bad_response.json.return_value = {
'status_code':100,
'error': 'Invalid API Key',
}
mock_bad_response.raise_for_status.return_value = None
bad_response_client = GameClient('mock_api_key')
bad_response_client._query_api = Mock(return_value=mock_bad_response)
def test_fetch_returns_response():
response = game_client.fetch(1)
assert isinstance(response, Response)
@raises(pybomb.exceptions.InvalidReturnFieldException)
def test_fetch_invalid_return_field():
invalid_return_field = {'Bob', False}
game_client.fetch(1, invalid_return_field)
@attr('web')
@raises(pybomb.exceptions.BadRequestException)
def test_fetch_bad_request():
bad_request_client.fetch(1)
@raises(pybomb.exceptions.InvalidResponseException)
def test_fetch_bad_response():
bad_response_client.fetch(1)
|
Add 'old style' test for game client
|
Add 'old style' test for game client
|
Python
|
mit
|
steveYeah/PyBomb
|
Add 'old style' test for game client
|
from mock import Mock
from nose.tools import *
from nose.plugins.attrib import attr
import pybomb
from pybomb.response import Response
from pybomb.clients.game_client import GameClient
def setup():
global game_client, bad_response_client, bad_request_client
global return_fields
mock_response = Mock()
mock_response.json.return_value = {
'status_code': GameClient.RESPONSE_STATUS_OK,
'number_of_page_results': 1,
'number_of_total_results': 1,
'results': {},
}
mock_response.raise_for_status.return_value = None
game_client = GameClient('mock_api_key')
game_client._query_api = Mock(return_value=mock_response)
bad_request_client = GameClient('mock_api_key')
bad_request_client.URI_BASE = 'http://httpbin.org/status/404'
mock_bad_response = Mock()
mock_bad_response.json.return_value = {
'status_code':100,
'error': 'Invalid API Key',
}
mock_bad_response.raise_for_status.return_value = None
bad_response_client = GameClient('mock_api_key')
bad_response_client._query_api = Mock(return_value=mock_bad_response)
def test_fetch_returns_response():
response = game_client.fetch(1)
assert isinstance(response, Response)
@raises(pybomb.exceptions.InvalidReturnFieldException)
def test_fetch_invalid_return_field():
invalid_return_field = {'Bob', False}
game_client.fetch(1, invalid_return_field)
@attr('web')
@raises(pybomb.exceptions.BadRequestException)
def test_fetch_bad_request():
bad_request_client.fetch(1)
@raises(pybomb.exceptions.InvalidResponseException)
def test_fetch_bad_response():
bad_response_client.fetch(1)
|
<commit_before><commit_msg>Add 'old style' test for game client<commit_after>
|
from mock import Mock
from nose.tools import *
from nose.plugins.attrib import attr
import pybomb
from pybomb.response import Response
from pybomb.clients.game_client import GameClient
def setup():
global game_client, bad_response_client, bad_request_client
global return_fields
mock_response = Mock()
mock_response.json.return_value = {
'status_code': GameClient.RESPONSE_STATUS_OK,
'number_of_page_results': 1,
'number_of_total_results': 1,
'results': {},
}
mock_response.raise_for_status.return_value = None
game_client = GameClient('mock_api_key')
game_client._query_api = Mock(return_value=mock_response)
bad_request_client = GameClient('mock_api_key')
bad_request_client.URI_BASE = 'http://httpbin.org/status/404'
mock_bad_response = Mock()
mock_bad_response.json.return_value = {
'status_code':100,
'error': 'Invalid API Key',
}
mock_bad_response.raise_for_status.return_value = None
bad_response_client = GameClient('mock_api_key')
bad_response_client._query_api = Mock(return_value=mock_bad_response)
def test_fetch_returns_response():
response = game_client.fetch(1)
assert isinstance(response, Response)
@raises(pybomb.exceptions.InvalidReturnFieldException)
def test_fetch_invalid_return_field():
invalid_return_field = {'Bob', False}
game_client.fetch(1, invalid_return_field)
@attr('web')
@raises(pybomb.exceptions.BadRequestException)
def test_fetch_bad_request():
bad_request_client.fetch(1)
@raises(pybomb.exceptions.InvalidResponseException)
def test_fetch_bad_response():
bad_response_client.fetch(1)
|
Add 'old style' test for game clientfrom mock import Mock
from nose.tools import *
from nose.plugins.attrib import attr
import pybomb
from pybomb.response import Response
from pybomb.clients.game_client import GameClient
def setup():
global game_client, bad_response_client, bad_request_client
global return_fields
mock_response = Mock()
mock_response.json.return_value = {
'status_code': GameClient.RESPONSE_STATUS_OK,
'number_of_page_results': 1,
'number_of_total_results': 1,
'results': {},
}
mock_response.raise_for_status.return_value = None
game_client = GameClient('mock_api_key')
game_client._query_api = Mock(return_value=mock_response)
bad_request_client = GameClient('mock_api_key')
bad_request_client.URI_BASE = 'http://httpbin.org/status/404'
mock_bad_response = Mock()
mock_bad_response.json.return_value = {
'status_code':100,
'error': 'Invalid API Key',
}
mock_bad_response.raise_for_status.return_value = None
bad_response_client = GameClient('mock_api_key')
bad_response_client._query_api = Mock(return_value=mock_bad_response)
def test_fetch_returns_response():
response = game_client.fetch(1)
assert isinstance(response, Response)
@raises(pybomb.exceptions.InvalidReturnFieldException)
def test_fetch_invalid_return_field():
invalid_return_field = {'Bob', False}
game_client.fetch(1, invalid_return_field)
@attr('web')
@raises(pybomb.exceptions.BadRequestException)
def test_fetch_bad_request():
bad_request_client.fetch(1)
@raises(pybomb.exceptions.InvalidResponseException)
def test_fetch_bad_response():
bad_response_client.fetch(1)
|
<commit_before><commit_msg>Add 'old style' test for game client<commit_after>from mock import Mock
from nose.tools import *
from nose.plugins.attrib import attr
import pybomb
from pybomb.response import Response
from pybomb.clients.game_client import GameClient
def setup():
global game_client, bad_response_client, bad_request_client
global return_fields
mock_response = Mock()
mock_response.json.return_value = {
'status_code': GameClient.RESPONSE_STATUS_OK,
'number_of_page_results': 1,
'number_of_total_results': 1,
'results': {},
}
mock_response.raise_for_status.return_value = None
game_client = GameClient('mock_api_key')
game_client._query_api = Mock(return_value=mock_response)
bad_request_client = GameClient('mock_api_key')
bad_request_client.URI_BASE = 'http://httpbin.org/status/404'
mock_bad_response = Mock()
mock_bad_response.json.return_value = {
'status_code':100,
'error': 'Invalid API Key',
}
mock_bad_response.raise_for_status.return_value = None
bad_response_client = GameClient('mock_api_key')
bad_response_client._query_api = Mock(return_value=mock_bad_response)
def test_fetch_returns_response():
response = game_client.fetch(1)
assert isinstance(response, Response)
@raises(pybomb.exceptions.InvalidReturnFieldException)
def test_fetch_invalid_return_field():
invalid_return_field = {'Bob', False}
game_client.fetch(1, invalid_return_field)
@attr('web')
@raises(pybomb.exceptions.BadRequestException)
def test_fetch_bad_request():
bad_request_client.fetch(1)
@raises(pybomb.exceptions.InvalidResponseException)
def test_fetch_bad_response():
bad_response_client.fetch(1)
|
|
cd5a4d0c554c838b6c07af54c98fbac957678820
|
tests/test_wake_losses.py
|
tests/test_wake_losses.py
|
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from windpowerlib.wake_losses import reduce_wind_speed, get_wind_efficiency_curve, display_wind_efficiency_curves
import windpowerlib.wind_turbine as wt
class TestWakeLosses:
def test_reduce_wind_speed(self):
parameters = {'wind_speed': pd.Series(np.arange(0, 26, 1.0)), 'wind_efficiency_curve_name': 'dena_mean'}
wind_speed_exp = pd.Series([
0.0, 0.9949534234119396, 1.9897327884892086, 2.9843374545454546, 3.807636264984227, 4.714931284760845,
5.642507531914893, 6.607021108049704, 7.592423167192429, 8.59498170212766, 9.606135658475111,
10.619828799086758, 11.641291957894737, 12.674012890137966, 13.709490666666666, 14.742508260567297,
15.773293013157893, 16.794615009724474, 17.817683032858028, 18.85294996704484, 19.86509539493748,
20.858807854510186, 21.854369681134507, 22.850700350710902, 23.85962037735849, 24.958125])
assert_series_equal(reduce_wind_speed(**parameters), wind_speed_exp)
# Raise ValueError - misspelling
with pytest.raises(ValueError):
parameters['wind_efficiency_curve_name'] = 'misspelled'
reduce_wind_speed(**parameters)
|
Add test for wake losses
|
Add test for wake losses
|
Python
|
mit
|
wind-python/windpowerlib
|
Add test for wake losses
|
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from windpowerlib.wake_losses import reduce_wind_speed, get_wind_efficiency_curve, display_wind_efficiency_curves
import windpowerlib.wind_turbine as wt
class TestWakeLosses:
def test_reduce_wind_speed(self):
parameters = {'wind_speed': pd.Series(np.arange(0, 26, 1.0)), 'wind_efficiency_curve_name': 'dena_mean'}
wind_speed_exp = pd.Series([
0.0, 0.9949534234119396, 1.9897327884892086, 2.9843374545454546, 3.807636264984227, 4.714931284760845,
5.642507531914893, 6.607021108049704, 7.592423167192429, 8.59498170212766, 9.606135658475111,
10.619828799086758, 11.641291957894737, 12.674012890137966, 13.709490666666666, 14.742508260567297,
15.773293013157893, 16.794615009724474, 17.817683032858028, 18.85294996704484, 19.86509539493748,
20.858807854510186, 21.854369681134507, 22.850700350710902, 23.85962037735849, 24.958125])
assert_series_equal(reduce_wind_speed(**parameters), wind_speed_exp)
# Raise ValueError - misspelling
with pytest.raises(ValueError):
parameters['wind_efficiency_curve_name'] = 'misspelled'
reduce_wind_speed(**parameters)
|
<commit_before><commit_msg>Add test for wake losses<commit_after>
|
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from windpowerlib.wake_losses import reduce_wind_speed, get_wind_efficiency_curve, display_wind_efficiency_curves
import windpowerlib.wind_turbine as wt
class TestWakeLosses:
def test_reduce_wind_speed(self):
parameters = {'wind_speed': pd.Series(np.arange(0, 26, 1.0)), 'wind_efficiency_curve_name': 'dena_mean'}
wind_speed_exp = pd.Series([
0.0, 0.9949534234119396, 1.9897327884892086, 2.9843374545454546, 3.807636264984227, 4.714931284760845,
5.642507531914893, 6.607021108049704, 7.592423167192429, 8.59498170212766, 9.606135658475111,
10.619828799086758, 11.641291957894737, 12.674012890137966, 13.709490666666666, 14.742508260567297,
15.773293013157893, 16.794615009724474, 17.817683032858028, 18.85294996704484, 19.86509539493748,
20.858807854510186, 21.854369681134507, 22.850700350710902, 23.85962037735849, 24.958125])
assert_series_equal(reduce_wind_speed(**parameters), wind_speed_exp)
# Raise ValueError - misspelling
with pytest.raises(ValueError):
parameters['wind_efficiency_curve_name'] = 'misspelled'
reduce_wind_speed(**parameters)
|
Add test for wake lossesimport pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from windpowerlib.wake_losses import reduce_wind_speed, get_wind_efficiency_curve, display_wind_efficiency_curves
import windpowerlib.wind_turbine as wt
class TestWakeLosses:
def test_reduce_wind_speed(self):
parameters = {'wind_speed': pd.Series(np.arange(0, 26, 1.0)), 'wind_efficiency_curve_name': 'dena_mean'}
wind_speed_exp = pd.Series([
0.0, 0.9949534234119396, 1.9897327884892086, 2.9843374545454546, 3.807636264984227, 4.714931284760845,
5.642507531914893, 6.607021108049704, 7.592423167192429, 8.59498170212766, 9.606135658475111,
10.619828799086758, 11.641291957894737, 12.674012890137966, 13.709490666666666, 14.742508260567297,
15.773293013157893, 16.794615009724474, 17.817683032858028, 18.85294996704484, 19.86509539493748,
20.858807854510186, 21.854369681134507, 22.850700350710902, 23.85962037735849, 24.958125])
assert_series_equal(reduce_wind_speed(**parameters), wind_speed_exp)
# Raise ValueError - misspelling
with pytest.raises(ValueError):
parameters['wind_efficiency_curve_name'] = 'misspelled'
reduce_wind_speed(**parameters)
|
<commit_before><commit_msg>Add test for wake losses<commit_after>import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
from windpowerlib.wake_losses import reduce_wind_speed, get_wind_efficiency_curve, display_wind_efficiency_curves
import windpowerlib.wind_turbine as wt
class TestWakeLosses:
def test_reduce_wind_speed(self):
parameters = {'wind_speed': pd.Series(np.arange(0, 26, 1.0)), 'wind_efficiency_curve_name': 'dena_mean'}
wind_speed_exp = pd.Series([
0.0, 0.9949534234119396, 1.9897327884892086, 2.9843374545454546, 3.807636264984227, 4.714931284760845,
5.642507531914893, 6.607021108049704, 7.592423167192429, 8.59498170212766, 9.606135658475111,
10.619828799086758, 11.641291957894737, 12.674012890137966, 13.709490666666666, 14.742508260567297,
15.773293013157893, 16.794615009724474, 17.817683032858028, 18.85294996704484, 19.86509539493748,
20.858807854510186, 21.854369681134507, 22.850700350710902, 23.85962037735849, 24.958125])
assert_series_equal(reduce_wind_speed(**parameters), wind_speed_exp)
# Raise ValueError - misspelling
with pytest.raises(ValueError):
parameters['wind_efficiency_curve_name'] = 'misspelled'
reduce_wind_speed(**parameters)
|
|
7dc579bf170799f5e834cc7caf219c4394aef4a7
|
examples/qspeed.py
|
examples/qspeed.py
|
#!/usr/bin/env python3
"""How fast is the queue implementation?"""
import time
import asyncio
print(asyncio)
N_CONSUMERS = 10
N_PRODUCERS = 1
N_ITEMS = 100000 # Per producer
Q_SIZE = 1
@asyncio.coroutine
def producer(q):
for i in range(N_ITEMS):
yield from q.put(i)
for i in range(N_CONSUMERS):
yield from q.put(None)
@asyncio.coroutine
def consumer(q):
while True:
i = yield from q.get()
if i is None:
break
def main():
q = asyncio.Queue(Q_SIZE)
loop = asyncio.get_event_loop()
consumers = [consumer(q) for _ in range(N_CONSUMERS)]
producers = [producer(q) for _ in range(N_PRODUCERS)]
t0 = time.time()
loop.run_until_complete(asyncio.gather(*consumers, *producers))
t1 = time.time()
dt = t1 - t0
print(N_CONSUMERS, 'consumers;',
N_PRODUCERS, 'producers;',
N_ITEMS, 'items/producer;',
Q_SIZE, 'maxsize;',
'%.3f total seconds;' % dt,
'%.3f usec per item.' % (1e6*dt/N_ITEMS/N_PRODUCERS))
main()
|
Add a little test for queue speed.
|
Add a little test for queue speed.
|
Python
|
apache-2.0
|
vxgmichel/asyncio,Martiusweb/asyncio,ajdavis/asyncio,gvanrossum/asyncio,ajdavis/asyncio,gvanrossum/asyncio,ajdavis/asyncio,gvanrossum/asyncio,Martiusweb/asyncio,vxgmichel/asyncio,vxgmichel/asyncio,Martiusweb/asyncio
|
Add a little test for queue speed.
|
#!/usr/bin/env python3
"""How fast is the queue implementation?"""
import time
import asyncio
print(asyncio)
N_CONSUMERS = 10
N_PRODUCERS = 1
N_ITEMS = 100000 # Per producer
Q_SIZE = 1
@asyncio.coroutine
def producer(q):
for i in range(N_ITEMS):
yield from q.put(i)
for i in range(N_CONSUMERS):
yield from q.put(None)
@asyncio.coroutine
def consumer(q):
while True:
i = yield from q.get()
if i is None:
break
def main():
q = asyncio.Queue(Q_SIZE)
loop = asyncio.get_event_loop()
consumers = [consumer(q) for _ in range(N_CONSUMERS)]
producers = [producer(q) for _ in range(N_PRODUCERS)]
t0 = time.time()
loop.run_until_complete(asyncio.gather(*consumers, *producers))
t1 = time.time()
dt = t1 - t0
print(N_CONSUMERS, 'consumers;',
N_PRODUCERS, 'producers;',
N_ITEMS, 'items/producer;',
Q_SIZE, 'maxsize;',
'%.3f total seconds;' % dt,
'%.3f usec per item.' % (1e6*dt/N_ITEMS/N_PRODUCERS))
main()
|
<commit_before><commit_msg>Add a little test for queue speed.<commit_after>
|
#!/usr/bin/env python3
"""How fast is the queue implementation?"""
import time
import asyncio
print(asyncio)
N_CONSUMERS = 10
N_PRODUCERS = 1
N_ITEMS = 100000 # Per producer
Q_SIZE = 1
@asyncio.coroutine
def producer(q):
for i in range(N_ITEMS):
yield from q.put(i)
for i in range(N_CONSUMERS):
yield from q.put(None)
@asyncio.coroutine
def consumer(q):
while True:
i = yield from q.get()
if i is None:
break
def main():
q = asyncio.Queue(Q_SIZE)
loop = asyncio.get_event_loop()
consumers = [consumer(q) for _ in range(N_CONSUMERS)]
producers = [producer(q) for _ in range(N_PRODUCERS)]
t0 = time.time()
loop.run_until_complete(asyncio.gather(*consumers, *producers))
t1 = time.time()
dt = t1 - t0
print(N_CONSUMERS, 'consumers;',
N_PRODUCERS, 'producers;',
N_ITEMS, 'items/producer;',
Q_SIZE, 'maxsize;',
'%.3f total seconds;' % dt,
'%.3f usec per item.' % (1e6*dt/N_ITEMS/N_PRODUCERS))
main()
|
Add a little test for queue speed.#!/usr/bin/env python3
"""How fast is the queue implementation?"""
import time
import asyncio
print(asyncio)
N_CONSUMERS = 10
N_PRODUCERS = 1
N_ITEMS = 100000 # Per producer
Q_SIZE = 1
@asyncio.coroutine
def producer(q):
for i in range(N_ITEMS):
yield from q.put(i)
for i in range(N_CONSUMERS):
yield from q.put(None)
@asyncio.coroutine
def consumer(q):
while True:
i = yield from q.get()
if i is None:
break
def main():
q = asyncio.Queue(Q_SIZE)
loop = asyncio.get_event_loop()
consumers = [consumer(q) for _ in range(N_CONSUMERS)]
producers = [producer(q) for _ in range(N_PRODUCERS)]
t0 = time.time()
loop.run_until_complete(asyncio.gather(*consumers, *producers))
t1 = time.time()
dt = t1 - t0
print(N_CONSUMERS, 'consumers;',
N_PRODUCERS, 'producers;',
N_ITEMS, 'items/producer;',
Q_SIZE, 'maxsize;',
'%.3f total seconds;' % dt,
'%.3f usec per item.' % (1e6*dt/N_ITEMS/N_PRODUCERS))
main()
|
<commit_before><commit_msg>Add a little test for queue speed.<commit_after>#!/usr/bin/env python3
"""How fast is the queue implementation?"""
import time
import asyncio
print(asyncio)
N_CONSUMERS = 10
N_PRODUCERS = 1
N_ITEMS = 100000 # Per producer
Q_SIZE = 1
@asyncio.coroutine
def producer(q):
for i in range(N_ITEMS):
yield from q.put(i)
for i in range(N_CONSUMERS):
yield from q.put(None)
@asyncio.coroutine
def consumer(q):
while True:
i = yield from q.get()
if i is None:
break
def main():
q = asyncio.Queue(Q_SIZE)
loop = asyncio.get_event_loop()
consumers = [consumer(q) for _ in range(N_CONSUMERS)]
producers = [producer(q) for _ in range(N_PRODUCERS)]
t0 = time.time()
loop.run_until_complete(asyncio.gather(*consumers, *producers))
t1 = time.time()
dt = t1 - t0
print(N_CONSUMERS, 'consumers;',
N_PRODUCERS, 'producers;',
N_ITEMS, 'items/producer;',
Q_SIZE, 'maxsize;',
'%.3f total seconds;' % dt,
'%.3f usec per item.' % (1e6*dt/N_ITEMS/N_PRODUCERS))
main()
|
|
491aae797d6de061fd93a5d1e827422b33f2269a
|
examples/save_user_followers_into_file.py
|
examples/save_user_followers_into_file.py
|
"""
instabot example
Workflow:
Save users' followers into a file.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('users', type=str, nargs='+', help='users')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
fh = open("users_followings.txt", "a+")
for username in args.users:
followers = bot.get_user_followers(username)
for user in followers:
fh.write(user + "\n")
fh.close()
|
Save users' followers into file
|
Save users' followers into file
|
Python
|
apache-2.0
|
ohld/instabot,instagrambot/instabot,instagrambot/instabot
|
Save users' followers into file
|
"""
instabot example
Workflow:
Save users' followers into a file.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('users', type=str, nargs='+', help='users')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
fh = open("users_followings.txt", "a+")
for username in args.users:
followers = bot.get_user_followers(username)
for user in followers:
fh.write(user + "\n")
fh.close()
|
<commit_before><commit_msg>Save users' followers into file<commit_after>
|
"""
instabot example
Workflow:
Save users' followers into a file.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('users', type=str, nargs='+', help='users')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
fh = open("users_followings.txt", "a+")
for username in args.users:
followers = bot.get_user_followers(username)
for user in followers:
fh.write(user + "\n")
fh.close()
|
Save users' followers into file"""
instabot example
Workflow:
Save users' followers into a file.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('users', type=str, nargs='+', help='users')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
fh = open("users_followings.txt", "a+")
for username in args.users:
followers = bot.get_user_followers(username)
for user in followers:
fh.write(user + "\n")
fh.close()
|
<commit_before><commit_msg>Save users' followers into file<commit_after>"""
instabot example
Workflow:
Save users' followers into a file.
"""
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.path.join(sys.path[0], '../'))
from instabot import Bot
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-u', type=str, help="username")
parser.add_argument('-p', type=str, help="password")
parser.add_argument('-proxy', type=str, help="proxy")
parser.add_argument('users', type=str, nargs='+', help='users')
args = parser.parse_args()
bot = Bot()
bot.login(username=args.u, password=args.p,
proxy=args.proxy)
fh = open("users_followings.txt", "a+")
for username in args.users:
followers = bot.get_user_followers(username)
for user in followers:
fh.write(user + "\n")
fh.close()
|
|
eb504eeb8229cd9f3f679349c171e6d93be58b32
|
examples/enable/component_demo.py
|
examples/enable/component_demo.py
|
from __future__ import with_statement
from enthought.enable.api import Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, View
class MyComponent(Component):
def draw(self, gc, **kwargs):
w,h = gc.width(), gc.height()
gc.clear()
# Draw a rounded rect just inside the bounds
gc.set_line_width(2.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
r = 15
b = 3
gc.move_to(b, h/2)
gc.arc_to(b, h-b,
w/2, h-b,
r)
gc.arc_to(w-b, h-b,
w-b, h/2,
r)
gc.arc_to(w-b, b,
w/2, b,
r)
gc.arc_to(b, b,
b, h/2,
r)
gc.line_to(b, h/2)
gc.stroke_path()
return
def normal_key_pressed(self, event):
print "key pressed: ", event.character
class Demo(HasTraits):
canvas = Instance(Component)
traits_view = View(Item('canvas', editor=ComponentEditor(bgcolor="lightgray"),
show_label=False, width=200, height=200),
resizable=True, title="Component Example")
def _canvas_default(self):
return MyComponent()
if __name__ == "__main__":
Demo().configure_traits()
|
Add a simple demo for showing the features of the Component class.
|
Add a simple demo for showing the features of the Component class.
|
Python
|
bsd-3-clause
|
tommy-u/enable,tommy-u/enable,tommy-u/enable,tommy-u/enable
|
Add a simple demo for showing the features of the Component class.
|
from __future__ import with_statement
from enthought.enable.api import Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, View
class MyComponent(Component):
def draw(self, gc, **kwargs):
w,h = gc.width(), gc.height()
gc.clear()
# Draw a rounded rect just inside the bounds
gc.set_line_width(2.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
r = 15
b = 3
gc.move_to(b, h/2)
gc.arc_to(b, h-b,
w/2, h-b,
r)
gc.arc_to(w-b, h-b,
w-b, h/2,
r)
gc.arc_to(w-b, b,
w/2, b,
r)
gc.arc_to(b, b,
b, h/2,
r)
gc.line_to(b, h/2)
gc.stroke_path()
return
def normal_key_pressed(self, event):
print "key pressed: ", event.character
class Demo(HasTraits):
canvas = Instance(Component)
traits_view = View(Item('canvas', editor=ComponentEditor(bgcolor="lightgray"),
show_label=False, width=200, height=200),
resizable=True, title="Component Example")
def _canvas_default(self):
return MyComponent()
if __name__ == "__main__":
Demo().configure_traits()
|
<commit_before><commit_msg>Add a simple demo for showing the features of the Component class.<commit_after>
|
from __future__ import with_statement
from enthought.enable.api import Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, View
class MyComponent(Component):
def draw(self, gc, **kwargs):
w,h = gc.width(), gc.height()
gc.clear()
# Draw a rounded rect just inside the bounds
gc.set_line_width(2.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
r = 15
b = 3
gc.move_to(b, h/2)
gc.arc_to(b, h-b,
w/2, h-b,
r)
gc.arc_to(w-b, h-b,
w-b, h/2,
r)
gc.arc_to(w-b, b,
w/2, b,
r)
gc.arc_to(b, b,
b, h/2,
r)
gc.line_to(b, h/2)
gc.stroke_path()
return
def normal_key_pressed(self, event):
print "key pressed: ", event.character
class Demo(HasTraits):
canvas = Instance(Component)
traits_view = View(Item('canvas', editor=ComponentEditor(bgcolor="lightgray"),
show_label=False, width=200, height=200),
resizable=True, title="Component Example")
def _canvas_default(self):
return MyComponent()
if __name__ == "__main__":
Demo().configure_traits()
|
Add a simple demo for showing the features of the Component class.from __future__ import with_statement
from enthought.enable.api import Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, View
class MyComponent(Component):
def draw(self, gc, **kwargs):
w,h = gc.width(), gc.height()
gc.clear()
# Draw a rounded rect just inside the bounds
gc.set_line_width(2.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
r = 15
b = 3
gc.move_to(b, h/2)
gc.arc_to(b, h-b,
w/2, h-b,
r)
gc.arc_to(w-b, h-b,
w-b, h/2,
r)
gc.arc_to(w-b, b,
w/2, b,
r)
gc.arc_to(b, b,
b, h/2,
r)
gc.line_to(b, h/2)
gc.stroke_path()
return
def normal_key_pressed(self, event):
print "key pressed: ", event.character
class Demo(HasTraits):
canvas = Instance(Component)
traits_view = View(Item('canvas', editor=ComponentEditor(bgcolor="lightgray"),
show_label=False, width=200, height=200),
resizable=True, title="Component Example")
def _canvas_default(self):
return MyComponent()
if __name__ == "__main__":
Demo().configure_traits()
|
<commit_before><commit_msg>Add a simple demo for showing the features of the Component class.<commit_after>from __future__ import with_statement
from enthought.enable.api import Component, ComponentEditor
from enthought.traits.api import HasTraits, Instance
from enthought.traits.ui.api import Item, View
class MyComponent(Component):
def draw(self, gc, **kwargs):
w,h = gc.width(), gc.height()
gc.clear()
# Draw a rounded rect just inside the bounds
gc.set_line_width(2.0)
gc.set_stroke_color((0.0, 0.0, 0.0, 1.0))
r = 15
b = 3
gc.move_to(b, h/2)
gc.arc_to(b, h-b,
w/2, h-b,
r)
gc.arc_to(w-b, h-b,
w-b, h/2,
r)
gc.arc_to(w-b, b,
w/2, b,
r)
gc.arc_to(b, b,
b, h/2,
r)
gc.line_to(b, h/2)
gc.stroke_path()
return
def normal_key_pressed(self, event):
print "key pressed: ", event.character
class Demo(HasTraits):
canvas = Instance(Component)
traits_view = View(Item('canvas', editor=ComponentEditor(bgcolor="lightgray"),
show_label=False, width=200, height=200),
resizable=True, title="Component Example")
def _canvas_default(self):
return MyComponent()
if __name__ == "__main__":
Demo().configure_traits()
|
|
85c56454501e156134ee628f279b7632e38fda04
|
Mathematics/Fundamentals/special-multiple.py
|
Mathematics/Fundamentals/special-multiple.py
|
# Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Observation: If you factor 9 from 9, 90, 99, 900, 909, 990, 999, ...
# you get the binary numbers 1, 10, 11, 100, 101, 110, 111, ...
t = int(raw_input())
for i in range(t):
n = int(raw_input())
j = 1
while(int(str(bin(j))[2:].replace('1','9'))%n!=0):
j += 1
d = str(bin(j))[2:].replace('1','9')
print d
|
Add code taking advantage of binary numbers
|
Add code taking advantage of binary numbers
|
Python
|
mit
|
ugaliguy/HackerRank,ugaliguy/HackerRank,ugaliguy/HackerRank
|
Add code taking advantage of binary numbers
|
# Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Observation: If you factor 9 from 9, 90, 99, 900, 909, 990, 999, ...
# you get the binary numbers 1, 10, 11, 100, 101, 110, 111, ...
t = int(raw_input())
for i in range(t):
n = int(raw_input())
j = 1
while(int(str(bin(j))[2:].replace('1','9'))%n!=0):
j += 1
d = str(bin(j))[2:].replace('1','9')
print d
|
<commit_before><commit_msg>Add code taking advantage of binary numbers<commit_after>
|
# Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Observation: If you factor 9 from 9, 90, 99, 900, 909, 990, 999, ...
# you get the binary numbers 1, 10, 11, 100, 101, 110, 111, ...
t = int(raw_input())
for i in range(t):
n = int(raw_input())
j = 1
while(int(str(bin(j))[2:].replace('1','9'))%n!=0):
j += 1
d = str(bin(j))[2:].replace('1','9')
print d
|
Add code taking advantage of binary numbers# Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Observation: If you factor 9 from 9, 90, 99, 900, 909, 990, 999, ...
# you get the binary numbers 1, 10, 11, 100, 101, 110, 111, ...
t = int(raw_input())
for i in range(t):
n = int(raw_input())
j = 1
while(int(str(bin(j))[2:].replace('1','9'))%n!=0):
j += 1
d = str(bin(j))[2:].replace('1','9')
print d
|
<commit_before><commit_msg>Add code taking advantage of binary numbers<commit_after># Python 2
# Enter your code here. Read input from STDIN. Print output to STDOUT
# Observation: If you factor 9 from 9, 90, 99, 900, 909, 990, 999, ...
# you get the binary numbers 1, 10, 11, 100, 101, 110, 111, ...
t = int(raw_input())
for i in range(t):
n = int(raw_input())
j = 1
while(int(str(bin(j))[2:].replace('1','9'))%n!=0):
j += 1
d = str(bin(j))[2:].replace('1','9')
print d
|
|
6649d71702e7e6dfb0c85d222b841de9bac72f4c
|
dmaws/commands/syncdata.py
|
dmaws/commands/syncdata.py
|
import sys
import click
from ..cli import cli_command
from ..stacks import StackPlan
from ..syncdata import RDS, RDSPostgresClient
@cli_command('syncdata', max_apps=0)
def syncdata_cmd(ctx):
plan = StackPlan.from_ctx(ctx, apps=['database_dev_access'])
status = plan.create(create_dependencies=False)
if not status:
sys.exit(1)
plan.info(['database'])
rds = RDS(ctx.variables['aws_region'], logger=ctx.log)
instance = rds.get_instance(plan.get_value('stacks.database.outputs')['URL'])
snapshot = rds.create_new_snapshot('syncdata', instance.id)
tmp_instance = rds.restore_instance_from_snapshot(
"syncdata", "syncdata",
vpc_security_groups=instance.vpc_security_groups)
pg_client = RDSPostgresClient.from_boto(
tmp_instance,
plan.get_value('database.name'),
plan.get_value('database.user'),
plan.get_value('database.password'),
logger=ctx.log
)
pg_client.clean_database_for_staging()
pg_client.dump("staging.sql")
pg_client.clean_database_for_preview()
pg_client.dump("preview.sql")
pg_client.close()
rds.delete_instance('syncdata')
rds.delete_snapshot('syncdata')
|
Add command to export db
|
Add command to export db
|
Python
|
mit
|
alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws
|
Add command to export db
|
import sys
import click
from ..cli import cli_command
from ..stacks import StackPlan
from ..syncdata import RDS, RDSPostgresClient
@cli_command('syncdata', max_apps=0)
def syncdata_cmd(ctx):
plan = StackPlan.from_ctx(ctx, apps=['database_dev_access'])
status = plan.create(create_dependencies=False)
if not status:
sys.exit(1)
plan.info(['database'])
rds = RDS(ctx.variables['aws_region'], logger=ctx.log)
instance = rds.get_instance(plan.get_value('stacks.database.outputs')['URL'])
snapshot = rds.create_new_snapshot('syncdata', instance.id)
tmp_instance = rds.restore_instance_from_snapshot(
"syncdata", "syncdata",
vpc_security_groups=instance.vpc_security_groups)
pg_client = RDSPostgresClient.from_boto(
tmp_instance,
plan.get_value('database.name'),
plan.get_value('database.user'),
plan.get_value('database.password'),
logger=ctx.log
)
pg_client.clean_database_for_staging()
pg_client.dump("staging.sql")
pg_client.clean_database_for_preview()
pg_client.dump("preview.sql")
pg_client.close()
rds.delete_instance('syncdata')
rds.delete_snapshot('syncdata')
|
<commit_before><commit_msg>Add command to export db<commit_after>
|
import sys
import click
from ..cli import cli_command
from ..stacks import StackPlan
from ..syncdata import RDS, RDSPostgresClient
@cli_command('syncdata', max_apps=0)
def syncdata_cmd(ctx):
plan = StackPlan.from_ctx(ctx, apps=['database_dev_access'])
status = plan.create(create_dependencies=False)
if not status:
sys.exit(1)
plan.info(['database'])
rds = RDS(ctx.variables['aws_region'], logger=ctx.log)
instance = rds.get_instance(plan.get_value('stacks.database.outputs')['URL'])
snapshot = rds.create_new_snapshot('syncdata', instance.id)
tmp_instance = rds.restore_instance_from_snapshot(
"syncdata", "syncdata",
vpc_security_groups=instance.vpc_security_groups)
pg_client = RDSPostgresClient.from_boto(
tmp_instance,
plan.get_value('database.name'),
plan.get_value('database.user'),
plan.get_value('database.password'),
logger=ctx.log
)
pg_client.clean_database_for_staging()
pg_client.dump("staging.sql")
pg_client.clean_database_for_preview()
pg_client.dump("preview.sql")
pg_client.close()
rds.delete_instance('syncdata')
rds.delete_snapshot('syncdata')
|
Add command to export dbimport sys
import click
from ..cli import cli_command
from ..stacks import StackPlan
from ..syncdata import RDS, RDSPostgresClient
@cli_command('syncdata', max_apps=0)
def syncdata_cmd(ctx):
plan = StackPlan.from_ctx(ctx, apps=['database_dev_access'])
status = plan.create(create_dependencies=False)
if not status:
sys.exit(1)
plan.info(['database'])
rds = RDS(ctx.variables['aws_region'], logger=ctx.log)
instance = rds.get_instance(plan.get_value('stacks.database.outputs')['URL'])
snapshot = rds.create_new_snapshot('syncdata', instance.id)
tmp_instance = rds.restore_instance_from_snapshot(
"syncdata", "syncdata",
vpc_security_groups=instance.vpc_security_groups)
pg_client = RDSPostgresClient.from_boto(
tmp_instance,
plan.get_value('database.name'),
plan.get_value('database.user'),
plan.get_value('database.password'),
logger=ctx.log
)
pg_client.clean_database_for_staging()
pg_client.dump("staging.sql")
pg_client.clean_database_for_preview()
pg_client.dump("preview.sql")
pg_client.close()
rds.delete_instance('syncdata')
rds.delete_snapshot('syncdata')
|
<commit_before><commit_msg>Add command to export db<commit_after>import sys
import click
from ..cli import cli_command
from ..stacks import StackPlan
from ..syncdata import RDS, RDSPostgresClient
@cli_command('syncdata', max_apps=0)
def syncdata_cmd(ctx):
plan = StackPlan.from_ctx(ctx, apps=['database_dev_access'])
status = plan.create(create_dependencies=False)
if not status:
sys.exit(1)
plan.info(['database'])
rds = RDS(ctx.variables['aws_region'], logger=ctx.log)
instance = rds.get_instance(plan.get_value('stacks.database.outputs')['URL'])
snapshot = rds.create_new_snapshot('syncdata', instance.id)
tmp_instance = rds.restore_instance_from_snapshot(
"syncdata", "syncdata",
vpc_security_groups=instance.vpc_security_groups)
pg_client = RDSPostgresClient.from_boto(
tmp_instance,
plan.get_value('database.name'),
plan.get_value('database.user'),
plan.get_value('database.password'),
logger=ctx.log
)
pg_client.clean_database_for_staging()
pg_client.dump("staging.sql")
pg_client.clean_database_for_preview()
pg_client.dump("preview.sql")
pg_client.close()
rds.delete_instance('syncdata')
rds.delete_snapshot('syncdata')
|
|
95b400e147b04a904b98769f426bd7bb99e20d5d
|
api/restore_wallet.py
|
api/restore_wallet.py
|
import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def restore_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
req_type = request_dict['type'][0].upper()
if req_type == "RESTOREWALLET":
wallet, error = restore_wallet(request_dict['uuid'][0])
else:
return (None, req_type + ' is not supported')
if error != None:
response = { 'status': error }
else:
response = { 'status': 'OK',
'wallet': wallet }
return (json.dumps(response), None)
def restore_wallet(uuid):
filename = data_dir_root + '/wallets/' + uuid + '.json'
if not os.path.exists(filename):
return (None, "Wallet does not exist")
with open(filename, 'r') as f:
wallet = json.load(f)
return (wallet, None)
def restore_wallet_handler(environ, start_response):
return general_handler(environ, start_response, restore_wallet_response)
|
Add restore endpoint.. was missing for some reason
|
Add restore endpoint.. was missing for some reason
|
Python
|
agpl-3.0
|
Nevtep/omniwallet,achamely/omniwallet,maran/omniwallet,VukDukic/omniwallet,habibmasuro/omniwallet,FuzzyBearBTC/omniwallet,habibmasuro/omniwallet,FuzzyBearBTC/omniwallet,OmniLayer/omniwallet,Nevtep/omniwallet,arowser/omniwallet,habibmasuro/omniwallet,VukDukic/omniwallet,maran/omniwallet,ripper234/omniwallet,curtislacy/omniwallet,OmniLayer/omniwallet,achamely/omniwallet,Nevtep/omniwallet,achamely/omniwallet,dexX7/omniwallet,curtislacy/omniwallet,ripper234/omniwallet,arowser/omniwallet,dexX7/omniwallet,VukDukic/omniwallet,curtislacy/omniwallet,Nevtep/omniwallet,arowser/omniwallet,dexX7/omniwallet,FuzzyBearBTC/omniwallet,OmniLayer/omniwallet,OmniLayer/omniwallet,maran/omniwallet,achamely/omniwallet,habibmasuro/omniwallet,ripper234/omniwallet
|
Add restore endpoint.. was missing for some reason
|
import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def restore_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
req_type = request_dict['type'][0].upper()
if req_type == "RESTOREWALLET":
wallet, error = restore_wallet(request_dict['uuid'][0])
else:
return (None, req_type + ' is not supported')
if error != None:
response = { 'status': error }
else:
response = { 'status': 'OK',
'wallet': wallet }
return (json.dumps(response), None)
def restore_wallet(uuid):
filename = data_dir_root + '/wallets/' + uuid + '.json'
if not os.path.exists(filename):
return (None, "Wallet does not exist")
with open(filename, 'r') as f:
wallet = json.load(f)
return (wallet, None)
def restore_wallet_handler(environ, start_response):
return general_handler(environ, start_response, restore_wallet_response)
|
<commit_before><commit_msg>Add restore endpoint.. was missing for some reason<commit_after>
|
import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def restore_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
req_type = request_dict['type'][0].upper()
if req_type == "RESTOREWALLET":
wallet, error = restore_wallet(request_dict['uuid'][0])
else:
return (None, req_type + ' is not supported')
if error != None:
response = { 'status': error }
else:
response = { 'status': 'OK',
'wallet': wallet }
return (json.dumps(response), None)
def restore_wallet(uuid):
filename = data_dir_root + '/wallets/' + uuid + '.json'
if not os.path.exists(filename):
return (None, "Wallet does not exist")
with open(filename, 'r') as f:
wallet = json.load(f)
return (wallet, None)
def restore_wallet_handler(environ, start_response):
return general_handler(environ, start_response, restore_wallet_response)
|
Add restore endpoint.. was missing for some reasonimport urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def restore_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
req_type = request_dict['type'][0].upper()
if req_type == "RESTOREWALLET":
wallet, error = restore_wallet(request_dict['uuid'][0])
else:
return (None, req_type + ' is not supported')
if error != None:
response = { 'status': error }
else:
response = { 'status': 'OK',
'wallet': wallet }
return (json.dumps(response), None)
def restore_wallet(uuid):
filename = data_dir_root + '/wallets/' + uuid + '.json'
if not os.path.exists(filename):
return (None, "Wallet does not exist")
with open(filename, 'r') as f:
wallet = json.load(f)
return (wallet, None)
def restore_wallet_handler(environ, start_response):
return general_handler(environ, start_response, restore_wallet_response)
|
<commit_before><commit_msg>Add restore endpoint.. was missing for some reason<commit_after>import urlparse
import os, sys
import json
tools_dir = os.environ.get('TOOLSDIR')
lib_path = os.path.abspath(tools_dir)
sys.path.append(lib_path)
from msc_apps import *
data_dir_root = os.environ.get('DATADIR')
def restore_wallet_response(request_dict):
if not request_dict.has_key('type'):
return (None, 'No field type in response dict '+str(request_dict))
req_type = request_dict['type'][0].upper()
if req_type == "RESTOREWALLET":
wallet, error = restore_wallet(request_dict['uuid'][0])
else:
return (None, req_type + ' is not supported')
if error != None:
response = { 'status': error }
else:
response = { 'status': 'OK',
'wallet': wallet }
return (json.dumps(response), None)
def restore_wallet(uuid):
filename = data_dir_root + '/wallets/' + uuid + '.json'
if not os.path.exists(filename):
return (None, "Wallet does not exist")
with open(filename, 'r') as f:
wallet = json.load(f)
return (wallet, None)
def restore_wallet_handler(environ, start_response):
return general_handler(environ, start_response, restore_wallet_response)
|
|
b1fad32c311f106782d08e588f1b186108be5efc
|
CodeFights/palindromeRearranging.py
|
CodeFights/palindromeRearranging.py
|
#!/usr/local/bin/python
# Code Fights Palindrome Rearranging Problem
def palindromeRearranging(inputString):
from collections import Counter
is_even_len = len(inputString) % 2 == 0
letter_freq = Counter(inputString)
odd_counts = sum([freq % 2 for char, freq in letter_freq.items()])
return (is_even_len and odd_counts == 0) or (not is_even_len and odd_counts == 1)
def main():
tests = [
["aabb", True],
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc", False],
["abbcabb", True],
["zyyzzzzz", True],
["z", True],
["zaa", True],
["abca", False]
]
for t in tests:
res = palindromeRearranging(t[0])
if t[1] == res:
print("PASSED: palindromeRearranging({}) returned {}"
.format(t[0], res))
else:
print("FAILED: palindromeRearranging({}) should have returned {}"
.format(t[0], t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights palindrome rearranging problem
|
Solve Code Fights palindrome rearranging problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights palindrome rearranging problem
|
#!/usr/local/bin/python
# Code Fights Palindrome Rearranging Problem
def palindromeRearranging(inputString):
from collections import Counter
is_even_len = len(inputString) % 2 == 0
letter_freq = Counter(inputString)
odd_counts = sum([freq % 2 for char, freq in letter_freq.items()])
return (is_even_len and odd_counts == 0) or (not is_even_len and odd_counts == 1)
def main():
tests = [
["aabb", True],
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc", False],
["abbcabb", True],
["zyyzzzzz", True],
["z", True],
["zaa", True],
["abca", False]
]
for t in tests:
res = palindromeRearranging(t[0])
if t[1] == res:
print("PASSED: palindromeRearranging({}) returned {}"
.format(t[0], res))
else:
print("FAILED: palindromeRearranging({}) should have returned {}"
.format(t[0], t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights palindrome rearranging problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Palindrome Rearranging Problem
def palindromeRearranging(inputString):
from collections import Counter
is_even_len = len(inputString) % 2 == 0
letter_freq = Counter(inputString)
odd_counts = sum([freq % 2 for char, freq in letter_freq.items()])
return (is_even_len and odd_counts == 0) or (not is_even_len and odd_counts == 1)
def main():
tests = [
["aabb", True],
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc", False],
["abbcabb", True],
["zyyzzzzz", True],
["z", True],
["zaa", True],
["abca", False]
]
for t in tests:
res = palindromeRearranging(t[0])
if t[1] == res:
print("PASSED: palindromeRearranging({}) returned {}"
.format(t[0], res))
else:
print("FAILED: palindromeRearranging({}) should have returned {}"
.format(t[0], t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights palindrome rearranging problem#!/usr/local/bin/python
# Code Fights Palindrome Rearranging Problem
def palindromeRearranging(inputString):
from collections import Counter
is_even_len = len(inputString) % 2 == 0
letter_freq = Counter(inputString)
odd_counts = sum([freq % 2 for char, freq in letter_freq.items()])
return (is_even_len and odd_counts == 0) or (not is_even_len and odd_counts == 1)
def main():
tests = [
["aabb", True],
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc", False],
["abbcabb", True],
["zyyzzzzz", True],
["z", True],
["zaa", True],
["abca", False]
]
for t in tests:
res = palindromeRearranging(t[0])
if t[1] == res:
print("PASSED: palindromeRearranging({}) returned {}"
.format(t[0], res))
else:
print("FAILED: palindromeRearranging({}) should have returned {}"
.format(t[0], t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights palindrome rearranging problem<commit_after>#!/usr/local/bin/python
# Code Fights Palindrome Rearranging Problem
def palindromeRearranging(inputString):
from collections import Counter
is_even_len = len(inputString) % 2 == 0
letter_freq = Counter(inputString)
odd_counts = sum([freq % 2 for char, freq in letter_freq.items()])
return (is_even_len and odd_counts == 0) or (not is_even_len and odd_counts == 1)
def main():
tests = [
["aabb", True],
["aaaaaaaaaaaaaaaaaaaaaaaaaaaaaabc", False],
["abbcabb", True],
["zyyzzzzz", True],
["z", True],
["zaa", True],
["abca", False]
]
for t in tests:
res = palindromeRearranging(t[0])
if t[1] == res:
print("PASSED: palindromeRearranging({}) returned {}"
.format(t[0], res))
else:
print("FAILED: palindromeRearranging({}) should have returned {}"
.format(t[0], t[1]))
if __name__ == '__main__':
main()
|
|
a2f2392095b4692384c89a30d5a97e6bb0297dc0
|
pyweaving/generate.py
|
pyweaving/generate.py
|
from . import Draft, Thread
def twill():
# just generates 2/2 twill for now
# we'll need 4 shafts and 4 treadles
draft = Draft(num_shafts=4, num_treadles=4)
# do tie-up
for ii in range(4):
draft.treadles[ii].shafts.add(draft.shafts[ii])
draft.treadles[ii].shafts.add(draft.shafts[(ii + 1) % 4])
for ii in range(20):
draft.warp.append(Thread(
dir='warp',
color=(0, 160, 0),
shafts=set([draft.shafts[ii % 4]]),
))
draft.weft.append(Thread(
dir='weft',
color=(160, 0, 0),
treadles=set([draft.treadles[ii % 4]]),
))
return draft
|
Add a simple 2/2 twill draft generator
|
Add a simple 2/2 twill draft generator
|
Python
|
mit
|
storborg/pyweaving
|
Add a simple 2/2 twill draft generator
|
from . import Draft, Thread
def twill():
# just generates 2/2 twill for now
# we'll need 4 shafts and 4 treadles
draft = Draft(num_shafts=4, num_treadles=4)
# do tie-up
for ii in range(4):
draft.treadles[ii].shafts.add(draft.shafts[ii])
draft.treadles[ii].shafts.add(draft.shafts[(ii + 1) % 4])
for ii in range(20):
draft.warp.append(Thread(
dir='warp',
color=(0, 160, 0),
shafts=set([draft.shafts[ii % 4]]),
))
draft.weft.append(Thread(
dir='weft',
color=(160, 0, 0),
treadles=set([draft.treadles[ii % 4]]),
))
return draft
|
<commit_before><commit_msg>Add a simple 2/2 twill draft generator<commit_after>
|
from . import Draft, Thread
def twill():
# just generates 2/2 twill for now
# we'll need 4 shafts and 4 treadles
draft = Draft(num_shafts=4, num_treadles=4)
# do tie-up
for ii in range(4):
draft.treadles[ii].shafts.add(draft.shafts[ii])
draft.treadles[ii].shafts.add(draft.shafts[(ii + 1) % 4])
for ii in range(20):
draft.warp.append(Thread(
dir='warp',
color=(0, 160, 0),
shafts=set([draft.shafts[ii % 4]]),
))
draft.weft.append(Thread(
dir='weft',
color=(160, 0, 0),
treadles=set([draft.treadles[ii % 4]]),
))
return draft
|
Add a simple 2/2 twill draft generatorfrom . import Draft, Thread
def twill():
# just generates 2/2 twill for now
# we'll need 4 shafts and 4 treadles
draft = Draft(num_shafts=4, num_treadles=4)
# do tie-up
for ii in range(4):
draft.treadles[ii].shafts.add(draft.shafts[ii])
draft.treadles[ii].shafts.add(draft.shafts[(ii + 1) % 4])
for ii in range(20):
draft.warp.append(Thread(
dir='warp',
color=(0, 160, 0),
shafts=set([draft.shafts[ii % 4]]),
))
draft.weft.append(Thread(
dir='weft',
color=(160, 0, 0),
treadles=set([draft.treadles[ii % 4]]),
))
return draft
|
<commit_before><commit_msg>Add a simple 2/2 twill draft generator<commit_after>from . import Draft, Thread
def twill():
# just generates 2/2 twill for now
# we'll need 4 shafts and 4 treadles
draft = Draft(num_shafts=4, num_treadles=4)
# do tie-up
for ii in range(4):
draft.treadles[ii].shafts.add(draft.shafts[ii])
draft.treadles[ii].shafts.add(draft.shafts[(ii + 1) % 4])
for ii in range(20):
draft.warp.append(Thread(
dir='warp',
color=(0, 160, 0),
shafts=set([draft.shafts[ii % 4]]),
))
draft.weft.append(Thread(
dir='weft',
color=(160, 0, 0),
treadles=set([draft.treadles[ii % 4]]),
))
return draft
|
|
7645d98247df22dbd4a5af19d89174d347d827e6
|
python/challenges/plusMinus.py
|
python/challenges/plusMinus.py
|
"""
Problem Statement:
Given an array of integers, calculate which fraction of the elements are positive, negative, and zeroes, respectively. Print the decimal value of each fraction.
Input Format:
The first line, N, is the size of the array.
The second line contains N space-separated integers describing the array of numbers (A1,A2,A3,⋯,AN).
Output Format:
Print each value on its own line with the fraction of positive numbers first, negative numbers second, and zeroes third.
"""
|
Create main challenge file with problem statement and i/o expectations
|
Create main challenge file with problem statement and i/o expectations
|
Python
|
mit
|
markthethomas/algorithms,markthethomas/algorithms,markthethomas/algorithms,markthethomas/algorithms
|
Create main challenge file with problem statement and i/o expectations
|
"""
Problem Statement:
Given an array of integers, calculate which fraction of the elements are positive, negative, and zeroes, respectively. Print the decimal value of each fraction.
Input Format:
The first line, N, is the size of the array.
The second line contains N space-separated integers describing the array of numbers (A1,A2,A3,⋯,AN).
Output Format:
Print each value on its own line with the fraction of positive numbers first, negative numbers second, and zeroes third.
"""
|
<commit_before><commit_msg>Create main challenge file with problem statement and i/o expectations<commit_after>
|
"""
Problem Statement:
Given an array of integers, calculate which fraction of the elements are positive, negative, and zeroes, respectively. Print the decimal value of each fraction.
Input Format:
The first line, N, is the size of the array.
The second line contains N space-separated integers describing the array of numbers (A1,A2,A3,⋯,AN).
Output Format:
Print each value on its own line with the fraction of positive numbers first, negative numbers second, and zeroes third.
"""
|
Create main challenge file with problem statement and i/o expectations"""
Problem Statement:
Given an array of integers, calculate which fraction of the elements are positive, negative, and zeroes, respectively. Print the decimal value of each fraction.
Input Format:
The first line, N, is the size of the array.
The second line contains N space-separated integers describing the array of numbers (A1,A2,A3,⋯,AN).
Output Format:
Print each value on its own line with the fraction of positive numbers first, negative numbers second, and zeroes third.
"""
|
<commit_before><commit_msg>Create main challenge file with problem statement and i/o expectations<commit_after>"""
Problem Statement:
Given an array of integers, calculate which fraction of the elements are positive, negative, and zeroes, respectively. Print the decimal value of each fraction.
Input Format:
The first line, N, is the size of the array.
The second line contains N space-separated integers describing the array of numbers (A1,A2,A3,⋯,AN).
Output Format:
Print each value on its own line with the fraction of positive numbers first, negative numbers second, and zeroes third.
"""
|
|
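Editorial aside (not part of the dataset record above or of the markthethomas/algorithms repository): the plusMinus.py entry contains only the problem statement, with no implementation. A minimal sketch of the described stdin/stdout behavior could look like the following; printing six decimal places is an assumption, since the statement does not fix the output precision, and the helper name plus_minus is introduced here purely for illustration.
import sys
def plus_minus(numbers):
    # Return the fractions of positive, negative, and zero elements, in that order.
    total = float(len(numbers))
    positives = sum(1 for x in numbers if x > 0)
    negatives = sum(1 for x in numbers if x < 0)
    zeroes = len(numbers) - positives - negatives
    return positives / total, negatives / total, zeroes / total
if __name__ == '__main__':
    data = sys.stdin.read().split()
    n = int(data[0])  # first value is the size of the array
    values = [int(x) for x in data[1:n + 1]]
    for fraction in plus_minus(values):
        print('%.6f' % fraction)  # positive, negative, then zero fraction, one per line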
62f3171c463fc8827f9fa2a363314ab3caec4bb7
|
openedx/core/djangoapps/user_api/management/tests/test_bulk_user_org_email_optout.py
|
openedx/core/djangoapps/user_api/management/tests/test_bulk_user_org_email_optout.py
|
"""
Test the test_bulk_user_org_email_optout management command
"""
import os
import tempfile
from contextlib import contextmanager
import mock
import pytest
from django.core.management import call_command
pytestmark = pytest.mark.django_db
CSV_DATA = """1,UniversityX
2,CollegeX
3,StateUX
"""
@contextmanager
def _create_test_csv(csv_data):
"""
Context manager to create and populate a CSV file - and delete it after usage.
"""
__, file_name = tempfile.mkstemp(text=True)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
yield file_name
os.unlink(file_name)
@mock.patch('openedx.core.djangoapps.user_api.management.commands.bulk_user_org_email_optout.log.info')
def test_successful_dry_run(mock_logger):
"""
Run the command with default states for a successful initial population
"""
with _create_test_csv(CSV_DATA) as tmp_csv_file:
args = ['--dry_run', '--optout_csv_path={}'.format(tmp_csv_file)]
call_command('bulk_user_org_email_optout', *args)
assert mock_logger.call_count == 3
mock_logger.assert_any_call("Read %s opt-out rows from CSV file '%s'.", 3, tmp_csv_file)
mock_logger.assert_any_call(
'Attempting opt-out for rows (%s, %s) through (%s, %s)...', '1', 'UniversityX', '3', 'StateUX'
)
mock_logger.assert_any_call(
'INSERT INTO user_api_userorgtag (`user_id`, `org`, `key`, `value`, `created`, `modified`) \
VALUES (1,"UniversityX","email-optin","False",NOW(),NOW()),(2,"CollegeX","email-optin","False",NOW(),NOW()),\
(3,"StateUX","email-optin","False",NOW(),NOW()) ON DUPLICATE KEY UPDATE value="False", modified=NOW();')
|
Add test for bulk email optout mgmt cmd.
|
Add test for bulk email optout mgmt cmd.
|
Python
|
agpl-3.0
|
stvstnfrd/edx-platform,a-parhom/edx-platform,edx/edx-platform,philanthropy-u/edx-platform,philanthropy-u/edx-platform,cpennington/edx-platform,jolyonb/edx-platform,teltek/edx-platform,msegado/edx-platform,EDUlib/edx-platform,appsembler/edx-platform,mitocw/edx-platform,cpennington/edx-platform,edx-solutions/edx-platform,appsembler/edx-platform,msegado/edx-platform,edx/edx-platform,teltek/edx-platform,msegado/edx-platform,eduNEXT/edunext-platform,arbrandes/edx-platform,msegado/edx-platform,jolyonb/edx-platform,ESOedX/edx-platform,jolyonb/edx-platform,angelapper/edx-platform,angelapper/edx-platform,eduNEXT/edunext-platform,angelapper/edx-platform,EDUlib/edx-platform,cpennington/edx-platform,edx/edx-platform,mitocw/edx-platform,cpennington/edx-platform,jolyonb/edx-platform,ESOedX/edx-platform,appsembler/edx-platform,ESOedX/edx-platform,a-parhom/edx-platform,arbrandes/edx-platform,EDUlib/edx-platform,mitocw/edx-platform,arbrandes/edx-platform,mitocw/edx-platform,stvstnfrd/edx-platform,appsembler/edx-platform,EDUlib/edx-platform,stvstnfrd/edx-platform,arbrandes/edx-platform,eduNEXT/edx-platform,eduNEXT/edx-platform,ESOedX/edx-platform,edx-solutions/edx-platform,a-parhom/edx-platform,edx-solutions/edx-platform,eduNEXT/edunext-platform,angelapper/edx-platform,eduNEXT/edx-platform,eduNEXT/edunext-platform,edx-solutions/edx-platform,philanthropy-u/edx-platform,teltek/edx-platform,stvstnfrd/edx-platform,a-parhom/edx-platform,msegado/edx-platform,philanthropy-u/edx-platform,edx/edx-platform,teltek/edx-platform,eduNEXT/edx-platform
|
Add test for bulk email optout mgmt cmd.
|
"""
Test the test_bulk_user_org_email_optout management command
"""
import os
import tempfile
from contextlib import contextmanager
import mock
import pytest
from django.core.management import call_command
pytestmark = pytest.mark.django_db
CSV_DATA = """1,UniversityX
2,CollegeX
3,StateUX
"""
@contextmanager
def _create_test_csv(csv_data):
"""
Context manager to create and populate a CSV file - and delete it after usage.
"""
__, file_name = tempfile.mkstemp(text=True)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
yield file_name
os.unlink(file_name)
@mock.patch('openedx.core.djangoapps.user_api.management.commands.bulk_user_org_email_optout.log.info')
def test_successful_dry_run(mock_logger):
"""
Run the command with default states for a successful initial population
"""
with _create_test_csv(CSV_DATA) as tmp_csv_file:
args = ['--dry_run', '--optout_csv_path={}'.format(tmp_csv_file)]
call_command('bulk_user_org_email_optout', *args)
assert mock_logger.call_count == 3
mock_logger.assert_any_call("Read %s opt-out rows from CSV file '%s'.", 3, tmp_csv_file)
mock_logger.assert_any_call(
'Attempting opt-out for rows (%s, %s) through (%s, %s)...', '1', 'UniversityX', '3', 'StateUX'
)
mock_logger.assert_any_call(
'INSERT INTO user_api_userorgtag (`user_id`, `org`, `key`, `value`, `created`, `modified`) \
VALUES (1,"UniversityX","email-optin","False",NOW(),NOW()),(2,"CollegeX","email-optin","False",NOW(),NOW()),\
(3,"StateUX","email-optin","False",NOW(),NOW()) ON DUPLICATE KEY UPDATE value="False", modified=NOW();')
|
<commit_before><commit_msg>Add test for bulk email optout mgmt cmd.<commit_after>
|
"""
Test the test_bulk_user_org_email_optout management command
"""
import os
import tempfile
from contextlib import contextmanager
import mock
import pytest
from django.core.management import call_command
pytestmark = pytest.mark.django_db
CSV_DATA = """1,UniversityX
2,CollegeX
3,StateUX
"""
@contextmanager
def _create_test_csv(csv_data):
"""
Context manager to create and populate a CSV file - and delete it after usage.
"""
__, file_name = tempfile.mkstemp(text=True)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
yield file_name
os.unlink(file_name)
@mock.patch('openedx.core.djangoapps.user_api.management.commands.bulk_user_org_email_optout.log.info')
def test_successful_dry_run(mock_logger):
"""
Run the command with default states for a successful initial population
"""
with _create_test_csv(CSV_DATA) as tmp_csv_file:
args = ['--dry_run', '--optout_csv_path={}'.format(tmp_csv_file)]
call_command('bulk_user_org_email_optout', *args)
assert mock_logger.call_count == 3
mock_logger.assert_any_call("Read %s opt-out rows from CSV file '%s'.", 3, tmp_csv_file)
mock_logger.assert_any_call(
'Attempting opt-out for rows (%s, %s) through (%s, %s)...', '1', 'UniversityX', '3', 'StateUX'
)
mock_logger.assert_any_call(
'INSERT INTO user_api_userorgtag (`user_id`, `org`, `key`, `value`, `created`, `modified`) \
VALUES (1,"UniversityX","email-optin","False",NOW(),NOW()),(2,"CollegeX","email-optin","False",NOW(),NOW()),\
(3,"StateUX","email-optin","False",NOW(),NOW()) ON DUPLICATE KEY UPDATE value="False", modified=NOW();')
|
Add test for bulk email optout mgmt cmd."""
Test the test_bulk_user_org_email_optout management command
"""
import os
import tempfile
from contextlib import contextmanager
import mock
import pytest
from django.core.management import call_command
pytestmark = pytest.mark.django_db
CSV_DATA = """1,UniversityX
2,CollegeX
3,StateUX
"""
@contextmanager
def _create_test_csv(csv_data):
"""
Context manager to create and populate a CSV file - and delete it after usage.
"""
__, file_name = tempfile.mkstemp(text=True)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
yield file_name
os.unlink(file_name)
@mock.patch('openedx.core.djangoapps.user_api.management.commands.bulk_user_org_email_optout.log.info')
def test_successful_dry_run(mock_logger):
"""
Run the command with default states for a successful initial population
"""
with _create_test_csv(CSV_DATA) as tmp_csv_file:
args = ['--dry_run', '--optout_csv_path={}'.format(tmp_csv_file)]
call_command('bulk_user_org_email_optout', *args)
assert mock_logger.call_count == 3
mock_logger.assert_any_call("Read %s opt-out rows from CSV file '%s'.", 3, tmp_csv_file)
mock_logger.assert_any_call(
'Attempting opt-out for rows (%s, %s) through (%s, %s)...', '1', 'UniversityX', '3', 'StateUX'
)
mock_logger.assert_any_call(
'INSERT INTO user_api_userorgtag (`user_id`, `org`, `key`, `value`, `created`, `modified`) \
VALUES (1,"UniversityX","email-optin","False",NOW(),NOW()),(2,"CollegeX","email-optin","False",NOW(),NOW()),\
(3,"StateUX","email-optin","False",NOW(),NOW()) ON DUPLICATE KEY UPDATE value="False", modified=NOW();')
|
<commit_before><commit_msg>Add test for bulk email optout mgmt cmd.<commit_after>"""
Test the test_bulk_user_org_email_optout management command
"""
import os
import tempfile
from contextlib import contextmanager
import mock
import pytest
from django.core.management import call_command
pytestmark = pytest.mark.django_db
CSV_DATA = """1,UniversityX
2,CollegeX
3,StateUX
"""
@contextmanager
def _create_test_csv(csv_data):
"""
Context manager to create and populate a CSV file - and delete it after usage.
"""
__, file_name = tempfile.mkstemp(text=True)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
yield file_name
os.unlink(file_name)
@mock.patch('openedx.core.djangoapps.user_api.management.commands.bulk_user_org_email_optout.log.info')
def test_successful_dry_run(mock_logger):
"""
Run the command with default states for a successful initial population
"""
with _create_test_csv(CSV_DATA) as tmp_csv_file:
args = ['--dry_run', '--optout_csv_path={}'.format(tmp_csv_file)]
call_command('bulk_user_org_email_optout', *args)
assert mock_logger.call_count == 3
mock_logger.assert_any_call("Read %s opt-out rows from CSV file '%s'.", 3, tmp_csv_file)
mock_logger.assert_any_call(
'Attempting opt-out for rows (%s, %s) through (%s, %s)...', '1', 'UniversityX', '3', 'StateUX'
)
mock_logger.assert_any_call(
'INSERT INTO user_api_userorgtag (`user_id`, `org`, `key`, `value`, `created`, `modified`) \
VALUES (1,"UniversityX","email-optin","False",NOW(),NOW()),(2,"CollegeX","email-optin","False",NOW(),NOW()),\
(3,"StateUX","email-optin","False",NOW(),NOW()) ON DUPLICATE KEY UPDATE value="False", modified=NOW();')
|
|
05a0340919b8d7affc369201afd2bed931559516
|
djangae/contrib/contenttypes/tests.py
|
djangae/contrib/contenttypes/tests.py
|
# SYSTEM
from __future__ import absolute_import
# LIBRARIES
from django.db import models
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from djangae.contrib.contenttypes.models import SimulatedContentTypeManager
class DummyModel(models.Model):
pass
class SimulatedContentTypesTests(TestCase):
def test_contenttypes_patch_is_applied(self):
self.assertEqual(ContentType.objects.__class__, SimulatedContentTypeManager)
def test_passing_model_to_simulated_manager_work(self):
manager = SimulatedContentTypeManager(model=DummyModel)
self.assertEqual(manager._get_model(), DummyModel)
def test_get_all_contenttype_objects(self):
self.assertTrue(len(ContentType.objects.all()) > 0)
def test_get_for_model(self):
ct = ContentType.objects.get_for_model(DummyModel)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_by_natural_key(self):
ct = ContentType.objects.get_by_natural_key(
DummyModel._meta.app_label, DummyModel._meta.model_name)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_for_id(self):
ct = ContentType.objects.get_for_model(DummyModel)
by_id = ContentType.objects.get_for_id(ct.id)
self.assertEqual(by_id.model, DummyModel._meta.model_name)
self.assertEqual(by_id.app_label, DummyModel._meta.app_label)
def test_create_contenttype(self):
ct = ContentType.objects.create(app_label='test', model='test')
self.assertEqual(ct.app_label, 'test')
self.assertEqual(ct.model, 'test')
self.assertIsNotNone(ct.pk)
def test_get_or_create_contenttype(self):
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model=DummyModel._meta.model_name
)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertFalse(created)
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model='different_model'
)
self.assertEqual(ct.model, 'different_model')
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertTrue(created)
def test_filter_contenttypes(self):
self.assertTrue(len(ContentType.objects.all()) > 1)
self.assertEqual(1, len(ContentType.objects.filter(app_label=DummyModel._meta.app_label)))
|
Add basic test suite for SimulatedContentTypeManager
|
Add basic test suite for SimulatedContentTypeManager
|
Python
|
bsd-3-clause
|
grzes/djangae,potatolondon/djangae,kirberich/djangae,kirberich/djangae,grzes/djangae,kirberich/djangae,potatolondon/djangae,grzes/djangae
|
Add basic test suite for SimulatedContentTypeManager
|
# SYSTEM
from __future__ import absolute_import
# LIBRARIES
from django.db import models
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from djangae.contrib.contenttypes.models import SimulatedContentTypeManager
class DummyModel(models.Model):
pass
class SimulatedContentTypesTests(TestCase):
def test_contenttypes_patch_is_applied(self):
self.assertEqual(ContentType.objects.__class__, SimulatedContentTypeManager)
def test_passing_model_to_simulated_manager_work(self):
manager = SimulatedContentTypeManager(model=DummyModel)
self.assertEqual(manager._get_model(), DummyModel)
def test_get_all_contenttype_objects(self):
self.assertTrue(len(ContentType.objects.all()) > 0)
def test_get_for_model(self):
ct = ContentType.objects.get_for_model(DummyModel)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_by_natural_key(self):
ct = ContentType.objects.get_by_natural_key(
DummyModel._meta.app_label, DummyModel._meta.model_name)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_for_id(self):
ct = ContentType.objects.get_for_model(DummyModel)
by_id = ContentType.objects.get_for_id(ct.id)
self.assertEqual(by_id.model, DummyModel._meta.model_name)
self.assertEqual(by_id.app_label, DummyModel._meta.app_label)
def test_create_contenttype(self):
ct = ContentType.objects.create(app_label='test', model='test')
self.assertEqual(ct.app_label, 'test')
self.assertEqual(ct.model, 'test')
self.assertIsNotNone(ct.pk)
def test_get_or_create_contenttype(self):
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model=DummyModel._meta.model_name
)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertFalse(created)
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model='different_model'
)
self.assertEqual(ct.model, 'different_model')
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertTrue(created)
def test_filter_contenttypes(self):
self.assertTrue(len(ContentType.objects.all()) > 1)
self.assertEqual(1, len(ContentType.objects.filter(app_label=DummyModel._meta.app_label)))
|
<commit_before><commit_msg>Add basic test suite for SimulatedContentTypeManager<commit_after>
|
# SYSTEM
from __future__ import absolute_import
# LIBRARIES
from django.db import models
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from djangae.contrib.contenttypes.models import SimulatedContentTypeManager
class DummyModel(models.Model):
pass
class SimulatedContentTypesTests(TestCase):
def test_contenttypes_patch_is_applied(self):
self.assertEqual(ContentType.objects.__class__, SimulatedContentTypeManager)
def test_passing_model_to_simulated_manager_work(self):
manager = SimulatedContentTypeManager(model=DummyModel)
self.assertEqual(manager._get_model(), DummyModel)
def test_get_all_contenttype_objects(self):
self.assertTrue(len(ContentType.objects.all()) > 0)
def test_get_for_model(self):
ct = ContentType.objects.get_for_model(DummyModel)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_by_natural_key(self):
ct = ContentType.objects.get_by_natural_key(
DummyModel._meta.app_label, DummyModel._meta.model_name)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_for_id(self):
ct = ContentType.objects.get_for_model(DummyModel)
by_id = ContentType.objects.get_for_id(ct.id)
self.assertEqual(by_id.model, DummyModel._meta.model_name)
self.assertEqual(by_id.app_label, DummyModel._meta.app_label)
def test_create_contenttype(self):
ct = ContentType.objects.create(app_label='test', model='test')
self.assertEqual(ct.app_label, 'test')
self.assertEqual(ct.model, 'test')
self.assertIsNotNone(ct.pk)
def test_get_or_create_contenttype(self):
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model=DummyModel._meta.model_name
)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertFalse(created)
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model='different_model'
)
self.assertEqual(ct.model, 'different_model')
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertTrue(created)
def test_filter_contenttypes(self):
self.assertTrue(len(ContentType.objects.all()) > 1)
self.assertEqual(1, len(ContentType.objects.filter(app_label=DummyModel._meta.app_label)))
|
Add basic test suite for SimulatedContentTypeManager# SYSTEM
from __future__ import absolute_import
# LIBRARIES
from django.db import models
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from djangae.contrib.contenttypes.models import SimulatedContentTypeManager
class DummyModel(models.Model):
pass
class SimulatedContentTypesTests(TestCase):
def test_contenttypes_patch_is_applied(self):
self.assertEqual(ContentType.objects.__class__, SimulatedContentTypeManager)
def test_passing_model_to_simulated_manager_work(self):
manager = SimulatedContentTypeManager(model=DummyModel)
self.assertEqual(manager._get_model(), DummyModel)
def test_get_all_contenttype_objects(self):
self.assertTrue(len(ContentType.objects.all()) > 0)
def test_get_for_model(self):
ct = ContentType.objects.get_for_model(DummyModel)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_by_natural_key(self):
ct = ContentType.objects.get_by_natural_key(
DummyModel._meta.app_label, DummyModel._meta.model_name)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_for_id(self):
ct = ContentType.objects.get_for_model(DummyModel)
by_id = ContentType.objects.get_for_id(ct.id)
self.assertEqual(by_id.model, DummyModel._meta.model_name)
self.assertEqual(by_id.app_label, DummyModel._meta.app_label)
def test_create_contenttype(self):
ct = ContentType.objects.create(app_label='test', model='test')
self.assertEqual(ct.app_label, 'test')
self.assertEqual(ct.model, 'test')
self.assertIsNotNone(ct.pk)
def test_get_or_create_contenttype(self):
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model=DummyModel._meta.model_name
)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertFalse(created)
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model='different_model'
)
self.assertEqual(ct.model, 'different_model')
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertTrue(created)
def test_filter_contenttypes(self):
self.assertTrue(len(ContentType.objects.all()) > 1)
self.assertEqual(1, len(ContentType.objects.filter(app_label=DummyModel._meta.app_label)))
|
<commit_before><commit_msg>Add basic test suite for SimulatedContentTypeManager<commit_after># SYSTEM
from __future__ import absolute_import
# LIBRARIES
from django.db import models
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from djangae.contrib.contenttypes.models import SimulatedContentTypeManager
class DummyModel(models.Model):
pass
class SimulatedContentTypesTests(TestCase):
def test_contenttypes_patch_is_applied(self):
self.assertEqual(ContentType.objects.__class__, SimulatedContentTypeManager)
def test_passing_model_to_simulated_manager_work(self):
manager = SimulatedContentTypeManager(model=DummyModel)
self.assertEqual(manager._get_model(), DummyModel)
def test_get_all_contenttype_objects(self):
self.assertTrue(len(ContentType.objects.all()) > 0)
def test_get_for_model(self):
ct = ContentType.objects.get_for_model(DummyModel)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_by_natural_key(self):
ct = ContentType.objects.get_by_natural_key(
DummyModel._meta.app_label, DummyModel._meta.model_name)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
def test_get_for_id(self):
ct = ContentType.objects.get_for_model(DummyModel)
by_id = ContentType.objects.get_for_id(ct.id)
self.assertEqual(by_id.model, DummyModel._meta.model_name)
self.assertEqual(by_id.app_label, DummyModel._meta.app_label)
def test_create_contenttype(self):
ct = ContentType.objects.create(app_label='test', model='test')
self.assertEqual(ct.app_label, 'test')
self.assertEqual(ct.model, 'test')
self.assertIsNotNone(ct.pk)
def test_get_or_create_contenttype(self):
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model=DummyModel._meta.model_name
)
self.assertEqual(ct.model, DummyModel._meta.model_name)
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertFalse(created)
ct, created = ContentType.objects.get_or_create(
app_label=DummyModel._meta.app_label,
model='different_model'
)
self.assertEqual(ct.model, 'different_model')
self.assertEqual(ct.app_label, DummyModel._meta.app_label)
self.assertTrue(created)
def test_filter_contenttypes(self):
self.assertTrue(len(ContentType.objects.all()) > 1)
self.assertEqual(1, len(ContentType.objects.filter(app_label=DummyModel._meta.app_label)))
|
|
0bc52971191d2fa698032912f0ed1ffcfc8fc4c9
|
elements/cost_functions.py
|
elements/cost_functions.py
|
"""
a set of cost functions for Neural Network layers.
"""
import theano.tensor as T
def l1_norm(w):
    """
    Returns the L1 norm of the given matrix (w).
    The L1 norm is simply the sum of the absolute values of the matrix elements.
    @input: w, a theano shared variable.
    @output: L1 norm of w
    """
    return abs(w).sum()
def l2_norm(w):
    """
    Returns the L2 norm of the given matrix (w).
    The L2 norm of a matrix is simply the square root of the sum of the squared elements of the matrix.
    In other words, it is the length of the matrix.
    @input: w, a theano shared variable.
    @output: L2 norm of w
    """
    return T.sqrt((w ** 2).sum())
def l2_norm_sqr(w):
    """
    Returns the square of the L2 norm of the given matrix (w).
    The square of the L2 norm of a matrix is simply the sum of the squared elements of the matrix.
    @input: w, a theano shared variable.
    @output: squared L2 norm of w
    """
    return (w ** 2).sum()
# TODO
# add contractive cost function.
|
Add some basic cost functions (L1 & L2 norms)
|
Add some basic cost functions (L1 & L2 norms)
|
Python
|
mit
|
mmohaveri/DeepNetTookKit
|
Add some basic cost functions (L1 & L2 norms)
|
"""
a set of cost functions for Neural Network layers.
"""
import theano.tensor as T
def l1_norm(w):
    """
    Returns the L1 norm of the given matrix (w).
    The L1 norm is simply the sum of the absolute values of the matrix elements.
    @input: w, a theano shared variable.
    @output: L1 norm of w
    """
    return abs(w).sum()
def l2_norm(w):
    """
    Returns the L2 norm of the given matrix (w).
    The L2 norm of a matrix is simply the square root of the sum of the squared elements of the matrix.
    In other words, it is the length of the matrix.
    @input: w, a theano shared variable.
    @output: L2 norm of w
    """
    return T.sqrt((w ** 2).sum())
def l2_norm_sqr(w):
    """
    Returns the square of the L2 norm of the given matrix (w).
    The square of the L2 norm of a matrix is simply the sum of the squared elements of the matrix.
    @input: w, a theano shared variable.
    @output: squared L2 norm of w
    """
    return (w ** 2).sum()
# TODO
# add contractive cost function.
|
<commit_before><commit_msg>Add some basic cost functions (L1 & L2 norms)<commit_after>
|
"""
a set of cost functions for Neural Network layers.
"""
import theano.tensor as T
def l1_norm(w):
    """
    Returns the L1 norm of the given matrix (w).
    The L1 norm is simply the sum of the absolute values of the matrix elements.
    @input: w, a theano shared variable.
    @output: L1 norm of w
    """
    return abs(w).sum()
def l2_norm(w):
    """
    Returns the L2 norm of the given matrix (w).
    The L2 norm of a matrix is simply the square root of the sum of the squared elements of the matrix.
    In other words, it is the length of the matrix.
    @input: w, a theano shared variable.
    @output: L2 norm of w
    """
    return T.sqrt((w ** 2).sum())
def l2_norm_sqr(w):
    """
    Returns the square of the L2 norm of the given matrix (w).
    The square of the L2 norm of a matrix is simply the sum of the squared elements of the matrix.
    @input: w, a theano shared variable.
    @output: squared L2 norm of w
    """
    return (w ** 2).sum()
# TODO
# add contractive cost function.
|
Add some basic cost functions (L1 & L2 norms)"""
a set of cost functions for Neural Network layers.
"""
import theano.tensor as T
def l1_norm(w):
    """
    Returns the L1 norm of the given matrix (w).
    The L1 norm is simply the sum of the absolute values of the matrix elements.
    @input: w, a theano shared variable.
    @output: L1 norm of w
    """
    return abs(w).sum()
def l2_norm(w):
    """
    Returns the L2 norm of the given matrix (w).
    The L2 norm of a matrix is simply the square root of the sum of the squared elements of the matrix.
    In other words, it is the length of the matrix.
    @input: w, a theano shared variable.
    @output: L2 norm of w
    """
    return T.sqrt((w ** 2).sum())
def l2_norm_sqr(w):
    """
    Returns the square of the L2 norm of the given matrix (w).
    The square of the L2 norm of a matrix is simply the sum of the squared elements of the matrix.
    @input: w, a theano shared variable.
    @output: squared L2 norm of w
    """
    return (w ** 2).sum()
# TODO
# add contractive cost function.
|
<commit_before><commit_msg>Add some basic cost functions (L1 & L2 norms)<commit_after>"""
a set of cost functions for Neural Network layers.
"""
import theano.tensor as T
def l1_norm(w):
    """
    Returns the L1 norm of the given matrix (w).
    The L1 norm is simply the sum of the absolute values of the matrix elements.
    @input: w, a theano shared variable.
    @output: L1 norm of w
    """
    return abs(w).sum()
def l2_norm(w):
    """
    Returns the L2 norm of the given matrix (w).
    The L2 norm of a matrix is simply the square root of the sum of the squared elements of the matrix.
    In other words, it is the length of the matrix.
    @input: w, a theano shared variable.
    @output: L2 norm of w
    """
    return T.sqrt((w ** 2).sum())
def l2_norm_sqr(w):
    """
    Returns the square of the L2 norm of the given matrix (w).
    The square of the L2 norm of a matrix is simply the sum of the squared elements of the matrix.
    @input: w, a theano shared variable.
    @output: squared L2 norm of w
    """
    return (w ** 2).sum()
# TODO
# add contractive cost function.
|
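A quick usage sketch for the helpers above (not part of the commit itself; the import path follows the elements/cost_functions.py location, and the weight shape and penalty coefficients are made-up examples):
import numpy as np
import theano
from elements.cost_functions import l1_norm, l2_norm_sqr

# Hypothetical shared weight matrix standing in for a layer's parameters.
W = theano.shared(np.zeros((5, 3)), name='W')
# Symbolic regularization term; the 1e-3 and 1e-4 weights are illustrative only.
penalty = 1e-3 * l1_norm(W) + 1e-4 * l2_norm_sqr(W)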
|
2b7f32b725c46a504ce74c4bf06c8865613cdfe7
|
example_code/client_adc.py
|
example_code/client_adc.py
|
# Main program for ESP8266 to sample a potentiometer sensor and send sensor
# events to an MQTT broker.
# Configuration is stored in a separate config.py
from config import SENSOR_ID, WIFI_ESSID, WIFI_PASSWORD, MQTT_HOST,\
MQTT_TOPIC, SLEEP_TIME
from wifi import wifi_connect, disable_wifi_ap
from thingflow import *
from adc_esp8266 import ADCSensor
from mqtt_writer import MQTTWriter
# setup network
disable_wifi_ap()
wifi_connect(WIFI_ESSID, WIFI_PASSWORD)
# create objects
m = MQTTWriter(SENSOR_ID, MQTT_HOST, 1883, MQTT_TOPIC)
sensor = ADCSensor(SENSOR_ID, min_rd=4, max_rd=1024)
sched = Scheduler()
# schedule and run
sched.schedule_sensor(sensor, SLEEP_TIME, m)
print("Running main loop with sample every %s seconds..." % SLEEP_TIME)
sched.run_forever()
|
Add esp8266 ADC sensor client example
|
Add esp8266 ADC sensor client example
|
Python
|
mit
|
jfischer/micropython-iot-hackathon,jfischer/micropython-iot-hackathon
|
Add esp8266 ADC sensor client example
|
# Main program for ESP8266 to sample a potentiometer sensor and send sensor
# events to an MQTT broker.
# Configuration is stored in a separate config.py
from config import SENSOR_ID, WIFI_ESSID, WIFI_PASSWORD, MQTT_HOST,\
MQTT_TOPIC, SLEEP_TIME
from wifi import wifi_connect, disable_wifi_ap
from thingflow import *
from adc_esp8266 import ADCSensor
from mqtt_writer import MQTTWriter
# setup network
disable_wifi_ap()
wifi_connect(WIFI_ESSID, WIFI_PASSWORD)
# create objects
m = MQTTWriter(SENSOR_ID, MQTT_HOST, 1883, MQTT_TOPIC)
sensor = ADCSensor(SENSOR_ID, min_rd=4, max_rd=1024)
sched = Scheduler()
# schedule and run
sched.schedule_sensor(sensor, SLEEP_TIME, m)
print("Running main loop with sample every %s seconds..." % SLEEP_TIME)
sched.run_forever()
|
<commit_before><commit_msg>Add esp8266 ADC sensor client example<commit_after>
|
# Main program for ESP8266 to sample a potentiometer sensor and send sensor
# events to an MQTT broker.
# Configuration is stored in a separate config.py
from config import SENSOR_ID, WIFI_ESSID, WIFI_PASSWORD, MQTT_HOST,\
MQTT_TOPIC, SLEEP_TIME
from wifi import wifi_connect, disable_wifi_ap
from thingflow import *
from adc_esp8266 import ADCSensor
from mqtt_writer import MQTTWriter
# setup network
disable_wifi_ap()
wifi_connect(WIFI_ESSID, WIFI_PASSWORD)
# create objects
m = MQTTWriter(SENSOR_ID, MQTT_HOST, 1883, MQTT_TOPIC)
sensor = ADCSensor(SENSOR_ID, min_rd=4, max_rd=1024)
sched = Scheduler()
# schedule and run
sched.schedule_sensor(sensor, SLEEP_TIME, m)
print("Running main loop with sample every %s seconds..." % SLEEP_TIME)
sched.run_forever()
|
Add esp8266 ADC sensor client example# Main program for ESP8266 to sample a potentiometer sensor and send sensor
# events to an MQTT broker.
# Configuration is stored in a separate config.py
from config import SENSOR_ID, WIFI_ESSID, WIFI_PASSWORD, MQTT_HOST,\
MQTT_TOPIC, SLEEP_TIME
from wifi import wifi_connect, disable_wifi_ap
from thingflow import *
from adc_esp8266 import ADCSensor
from mqtt_writer import MQTTWriter
# setup network
disable_wifi_ap()
wifi_connect(WIFI_ESSID, WIFI_PASSWORD)
# create objects
m = MQTTWriter(SENSOR_ID, MQTT_HOST, 1883, MQTT_TOPIC)
sensor = ADCSensor(SENSOR_ID, min_rd=4, max_rd=1024)
sched = Scheduler()
# schedule and run
sched.schedule_sensor(sensor, SLEEP_TIME, m)
print("Running main loop with sample every %s seconds..." % SLEEP_TIME)
sched.run_forever()
|
<commit_before><commit_msg>Add esp8266 ADC sensor client example<commit_after># Main program for ESP8266 to sample a potentiometer sensor and send sensor
# events to an MQTT broker.
# Configuration is stored in a separate config.py
from config import SENSOR_ID, WIFI_ESSID, WIFI_PASSWORD, MQTT_HOST,\
MQTT_TOPIC, SLEEP_TIME
from wifi import wifi_connect, disable_wifi_ap
from thingflow import *
from adc_esp8266 import ADCSensor
from mqtt_writer import MQTTWriter
# setup network
disable_wifi_ap()
wifi_connect(WIFI_ESSID, WIFI_PASSWORD)
# create objects
m = MQTTWriter(SENSOR_ID, MQTT_HOST, 1883, MQTT_TOPIC)
sensor = ADCSensor(SENSOR_ID, min_rd=4, max_rd=1024)
sched = Scheduler()
# schedule and run
sched.schedule_sensor(sensor, SLEEP_TIME, m)
print("Running main loop with sample every %s seconds..." % SLEEP_TIME)
sched.run_forever()
|
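For reference, the config.py imported at the top of this script is not included in the commit; a minimal placeholder version would look something like the following, where every value is an example to be replaced with your own network and broker details:
# config.py -- example values only
SENSOR_ID = 'adc-sensor-1'
WIFI_ESSID = 'my-essid'
WIFI_PASSWORD = 'my-password'
MQTT_HOST = '192.168.0.10'
MQTT_TOPIC = 'sensors/adc'
SLEEP_TIME = 60  # seconds between samples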
|
9d7a393cbc981dc3cae94c6e4df25344718def06
|
alembic/versions/3a98a6674cb2_add_published_column_to_project.py
|
alembic/versions/3a98a6674cb2_add_published_column_to_project.py
|
"""Add published column to project
Revision ID: 3a98a6674cb2
Revises: 35f8b948e98d
Create Date: 2015-08-07 10:24:31.558995
"""
# revision identifiers, used by Alembic.
revision = '3a98a6674cb2'
down_revision = '35f8b948e98d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('project', sa.Column('published', sa.Boolean, default=False))
query = 'UPDATE "project" SET published=false;'
op.execute(query)
op.alter_column('project', 'published', nullable=False)
query = """UPDATE "project" SET published=true
WHERE project.id IN
(SELECT project.id FROM project, task WHERE
project.id=task.project_id AND
(project.info->>'task_presenter') IS NOT NULL
GROUP BY project.id);"""
op.execute(query)
def downgrade():
op.drop_column('project', 'published')
|
Add migration for adding published column in project
|
Add migration for adding published column in project
|
Python
|
agpl-3.0
|
PyBossa/pybossa,geotagx/pybossa,PyBossa/pybossa,Scifabric/pybossa,Scifabric/pybossa,geotagx/pybossa
|
Add migration for adding published column in project
|
"""Add published column to project
Revision ID: 3a98a6674cb2
Revises: 35f8b948e98d
Create Date: 2015-08-07 10:24:31.558995
"""
# revision identifiers, used by Alembic.
revision = '3a98a6674cb2'
down_revision = '35f8b948e98d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('project', sa.Column('published', sa.Boolean, default=False))
query = 'UPDATE "project" SET published=false;'
op.execute(query)
op.alter_column('project', 'published', nullable=False)
query = """UPDATE "project" SET published=true
WHERE project.id IN
(SELECT project.id FROM project, task WHERE
project.id=task.project_id AND
(project.info->>'task_presenter') IS NOT NULL
GROUP BY project.id);"""
op.execute(query)
def downgrade():
op.drop_column('project', 'published')
|
<commit_before><commit_msg>Add migration for adding published column in project<commit_after>
|
"""Add published column to project
Revision ID: 3a98a6674cb2
Revises: 35f8b948e98d
Create Date: 2015-08-07 10:24:31.558995
"""
# revision identifiers, used by Alembic.
revision = '3a98a6674cb2'
down_revision = '35f8b948e98d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('project', sa.Column('published', sa.Boolean, default=False))
query = 'UPDATE "project" SET published=false;'
op.execute(query)
op.alter_column('project', 'published', nullable=False)
query = """UPDATE "project" SET published=true
WHERE project.id IN
(SELECT project.id FROM project, task WHERE
project.id=task.project_id AND
(project.info->>'task_presenter') IS NOT NULL
GROUP BY project.id);"""
op.execute(query)
def downgrade():
op.drop_column('project', 'published')
|
Add migration for adding published column in project"""Add published column to project
Revision ID: 3a98a6674cb2
Revises: 35f8b948e98d
Create Date: 2015-08-07 10:24:31.558995
"""
# revision identifiers, used by Alembic.
revision = '3a98a6674cb2'
down_revision = '35f8b948e98d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('project', sa.Column('published', sa.Boolean, default=False))
query = 'UPDATE "project" SET published=false;'
op.execute(query)
op.alter_column('project', 'published', nullable=False)
query = """UPDATE "project" SET published=true
WHERE project.id IN
(SELECT project.id FROM project, task WHERE
project.id=task.project_id AND
(project.info->>'task_presenter') IS NOT NULL
GROUP BY project.id);"""
op.execute(query)
def downgrade():
op.drop_column('project', 'published')
|
<commit_before><commit_msg>Add migration for adding published column in project<commit_after>"""Add published column to project
Revision ID: 3a98a6674cb2
Revises: 35f8b948e98d
Create Date: 2015-08-07 10:24:31.558995
"""
# revision identifiers, used by Alembic.
revision = '3a98a6674cb2'
down_revision = '35f8b948e98d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('project', sa.Column('published', sa.Boolean, default=False))
query = 'UPDATE "project" SET published=false;'
op.execute(query)
op.alter_column('project', 'published', nullable=False)
query = """UPDATE "project" SET published=true
WHERE project.id IN
(SELECT project.id FROM project, task WHERE
project.id=task.project_id AND
(project.info->>'task_presenter') IS NOT NULL
GROUP BY project.id);"""
op.execute(query)
def downgrade():
op.drop_column('project', 'published')
|
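For orientation only (not shown in the commit): on the model side, the column added by this migration would typically be declared along these lines, matching the default and NOT NULL constraint set up above. The class below is an illustrative stand-in, not PyBossa's real Project model.
from sqlalchemy import Boolean, Column, Integer
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Project(Base):  # illustrative stand-in for the real model
    __tablename__ = 'project'
    id = Column(Integer, primary_key=True)
    published = Column(Boolean, default=False, nullable=False)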
|
21df7f5837566d8bb9a17c7847fa10ec1570adb3
|
osf/migrations/0031_auto_20170202_0943.py
|
osf/migrations/0031_auto_20170202_0943.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-02 15:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0030_auto_20170130_1608'),
]
operations = [
migrations.AlterField(
model_name='osfuser',
name='affiliated_institutions',
field=models.ManyToManyField(blank=True, to='osf.Institution'),
),
migrations.AlterField(
model_name='osfuser',
name='external_accounts',
field=models.ManyToManyField(blank=True, to='osf.ExternalAccount'),
),
]
|
Add blank=True to external account and Institution so that permissions can be successfully added in the admin's admin interface
|
Add blank=True to external account and Institution so that permissions can be successfully added in the admin's admin interface
|
Python
|
apache-2.0
|
erinspace/osf.io,pattisdr/osf.io,crcresearch/osf.io,acshi/osf.io,hmoco/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,cslzchen/osf.io,Nesiehr/osf.io,sloria/osf.io,felliott/osf.io,mfraezz/osf.io,erinspace/osf.io,acshi/osf.io,pattisdr/osf.io,baylee-d/osf.io,chrisseto/osf.io,mfraezz/osf.io,saradbowman/osf.io,hmoco/osf.io,monikagrabowska/osf.io,mfraezz/osf.io,caseyrollins/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,icereval/osf.io,sloria/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,hmoco/osf.io,HalcyonChimera/osf.io,felliott/osf.io,acshi/osf.io,sloria/osf.io,cwisecarver/osf.io,saradbowman/osf.io,binoculars/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,chennan47/osf.io,caneruguz/osf.io,Nesiehr/osf.io,acshi/osf.io,icereval/osf.io,monikagrabowska/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,caseyrollins/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,chennan47/osf.io,binoculars/osf.io,laurenrevere/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,aaxelb/osf.io,chennan47/osf.io,icereval/osf.io,felliott/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,Nesiehr/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,adlius/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,brianjgeiger/osf.io,hmoco/osf.io,erinspace/osf.io,mfraezz/osf.io,chrisseto/osf.io,baylee-d/osf.io,aaxelb/osf.io,cwisecarver/osf.io,aaxelb/osf.io,aaxelb/osf.io,caneruguz/osf.io,baylee-d/osf.io,cslzchen/osf.io,mattclark/osf.io,adlius/osf.io,binoculars/osf.io,acshi/osf.io,leb2dg/osf.io,crcresearch/osf.io,adlius/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,cwisecarver/osf.io,mattclark/osf.io,adlius/osf.io,laurenrevere/osf.io,chrisseto/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,felliott/osf.io
|
Add blank=True to external account and Institution so that permissions can be successfully added in the admin's admin interface
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-02 15:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0030_auto_20170130_1608'),
]
operations = [
migrations.AlterField(
model_name='osfuser',
name='affiliated_institutions',
field=models.ManyToManyField(blank=True, to='osf.Institution'),
),
migrations.AlterField(
model_name='osfuser',
name='external_accounts',
field=models.ManyToManyField(blank=True, to='osf.ExternalAccount'),
),
]
|
<commit_before><commit_msg>Add blank=True to external account and Institution so that permissions can be successfully added in the admin's admin interface<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-02 15:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0030_auto_20170130_1608'),
]
operations = [
migrations.AlterField(
model_name='osfuser',
name='affiliated_institutions',
field=models.ManyToManyField(blank=True, to='osf.Institution'),
),
migrations.AlterField(
model_name='osfuser',
name='external_accounts',
field=models.ManyToManyField(blank=True, to='osf.ExternalAccount'),
),
]
|
Add blank=True to external account and Institution so that permissions can be successfully added in the admin's admin interface# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-02 15:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0030_auto_20170130_1608'),
]
operations = [
migrations.AlterField(
model_name='osfuser',
name='affiliated_institutions',
field=models.ManyToManyField(blank=True, to='osf.Institution'),
),
migrations.AlterField(
model_name='osfuser',
name='external_accounts',
field=models.ManyToManyField(blank=True, to='osf.ExternalAccount'),
),
]
|
<commit_before><commit_msg>Add blank=True to external account and Institution so that permissions can be successfully added in the admin's admin interface<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-02 15:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('osf', '0030_auto_20170130_1608'),
]
operations = [
migrations.AlterField(
model_name='osfuser',
name='affiliated_institutions',
field=models.ManyToManyField(blank=True, to='osf.Institution'),
),
migrations.AlterField(
model_name='osfuser',
name='external_accounts',
field=models.ManyToManyField(blank=True, to='osf.ExternalAccount'),
),
]
|
|
121d908e72fc36752d42489aaa65db5897881eb4
|
softwareindex/handlers/coreapi.py
|
softwareindex/handlers/coreapi.py
|
import requests, json, urllib
SEARCH_URL = 'http://core.ac.uk:80/api-v2/articles/search/'
API_KEY = 'FILL THIS IN'
def getCOREMentions(identifier, **kwargs):
"""Return the number of mentions in CORE and a descriptor, as a tuple.
Needs an API key, which can be obtained here: http://core.ac.uk/api-keys/register"""
params = {
'apiKey': API_KEY,
'metadata': 'false',
'pageSize': 100,
'page': 1
}
params.update(kwargs)
response = requests.get(SEARCH_URL + urllib.quote_plus(identifier), params=params)
response.raise_for_status()
results = response.json()
return (len(results['data'] or []),
'mentions in Open Access articles (via http://core.ac.uk/)')
|
Add initial version of CORE API handler.
|
Add initial version of CORE API handler.
This uses the CORE v2 API to get the number of open access articles with
the given software identifier mentioned in the full text.
Currently the maximum returned is 100, since the v2 API doesn't return
the total number of search results. It doesn't do much error handling
yet either.
|
Python
|
bsd-3-clause
|
softwaresaved/softwareindex,softwaresaved/softwareindex
|
Add initial version of CORE API handler.
This uses the CORE v2 API to get the number of open access articles with
the given software identifier mentioned in the full text.
Currently the maximum returned is 100, since the v2 API doesn't return
the total number of search results. It doesn't do much error handling
yet either.
|
import requests, json, urllib
SEARCH_URL = 'http://core.ac.uk:80/api-v2/articles/search/'
API_KEY = 'FILL THIS IN'
def getCOREMentions(identifier, **kwargs):
"""Return the number of mentions in CORE and a descriptor, as a tuple.
Needs an API key, which can be obtained here: http://core.ac.uk/api-keys/register"""
params = {
'apiKey': API_KEY,
'metadata': 'false',
'pageSize': 100,
'page': 1
}
params.update(kwargs)
response = requests.get(SEARCH_URL + urllib.quote_plus(identifier), params=params)
response.raise_for_status()
results = response.json()
return (len(results['data'] or []),
'mentions in Open Access articles (via http://core.ac.uk/)')
|
<commit_before><commit_msg>Add initial version of CORE API handler.
This uses the CORE v2 API to get the number of open access articles with
the given software identifier mentioned in the full text.
Currently the maximum returned is 100, since the v2 API doesn't return
the total number of search results. It doesn't do much error handling
yet either.<commit_after>
|
import requests, json, urllib
SEARCH_URL = 'http://core.ac.uk:80/api-v2/articles/search/'
API_KEY = 'FILL THIS IN'
def getCOREMentions(identifier, **kwargs):
"""Return the number of mentions in CORE and a descriptor, as a tuple.
Needs an API key, which can be obtained here: http://core.ac.uk/api-keys/register"""
params = {
'apiKey': API_KEY,
'metadata': 'false',
'pageSize': 100,
'page': 1
}
params.update(kwargs)
response = requests.get(SEARCH_URL + urllib.quote_plus(identifier), params=params)
response.raise_for_status()
results = response.json()
return (len(results['data'] or []),
'mentions in Open Access articles (via http://core.ac.uk/)')
|
Add initial version of CORE API handler.
This uses the CORE v2 API to get the number of open access articles with
the given software identifier mentioned in the full text.
Currently the maximum returned is 100, since the v2 API doesn't return
the total number of search results. It doesn't do much error handling
yet either.import requests, json, urllib
SEARCH_URL = 'http://core.ac.uk:80/api-v2/articles/search/'
API_KEY = 'FILL THIS IN'
def getCOREMentions(identifier, **kwargs):
"""Return the number of mentions in CORE and a descriptor, as a tuple.
Needs an API key, which can be obtained here: http://core.ac.uk/api-keys/register"""
params = {
'apiKey': API_KEY,
'metadata': 'false',
'pageSize': 100,
'page': 1
}
params.update(kwargs)
response = requests.get(SEARCH_URL + urllib.quote_plus(identifier), params=params)
response.raise_for_status()
results = response.json()
return (len(results['data'] or []),
'mentions in Open Access articles (via http://core.ac.uk/)')
|
<commit_before><commit_msg>Add initial version of CORE API handler.
This uses the CORE v2 API to get the number of open access articles with
the given software identifier mentioned in the full text.
Currently the maximum returned is 100, since the v2 API doesn't return
the total number of search results. It doesn't do much error handling
yet either.<commit_after>import requests, json, urllib
SEARCH_URL = 'http://core.ac.uk:80/api-v2/articles/search/'
API_KEY = 'FILL THIS IN'
def getCOREMentions(identifier, **kwargs):
"""Return the number of mentions in CORE and a descriptor, as a tuple.
Needs an API key, which can be obtained here: http://core.ac.uk/api-keys/register"""
params = {
'apiKey': API_KEY,
'metadata': 'false',
'pageSize': 100,
'page': 1
}
params.update(kwargs)
response = requests.get(SEARCH_URL + urllib.quote_plus(identifier), params=params)
response.raise_for_status()
results = response.json()
return (len(results['data'] or []),
'mentions in Open Access articles (via http://core.ac.uk/)')
|
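A rough usage sketch for the handler above (not part of the commit; it assumes API_KEY has been replaced with a real key from the registration page mentioned in the docstring, and the identifier string is an arbitrary example):
from softwareindex.handlers.coreapi import getCOREMentions

count, description = getCOREMentions('scikit-learn')
print('%d %s' % (count, description))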
|
ca2d37ad158dc996a15f5a2724d57ed0f6f298dd
|
Python/Templates/Django/ProjectTemplates/Python/Web/WebRoleDjango/urls.py
|
Python/Templates/Django/ProjectTemplates/Python/Web/WebRoleDjango/urls.py
|
"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
import $safeprojectname$.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
|
"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
|
Remove import of non-existing file.
|
Remove import of non-existing file.
|
Python
|
apache-2.0
|
huguesv/PTVS,huguesv/PTVS,DEVSENSE/PTVS,huguesv/PTVS,int19h/PTVS,int19h/PTVS,zooba/PTVS,Microsoft/PTVS,int19h/PTVS,Microsoft/PTVS,huguesv/PTVS,DEVSENSE/PTVS,Microsoft/PTVS,huguesv/PTVS,Microsoft/PTVS,int19h/PTVS,zooba/PTVS,Microsoft/PTVS,DEVSENSE/PTVS,DEVSENSE/PTVS,int19h/PTVS,Microsoft/PTVS,zooba/PTVS,huguesv/PTVS,zooba/PTVS,DEVSENSE/PTVS,zooba/PTVS,zooba/PTVS,DEVSENSE/PTVS,int19h/PTVS
|
"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
import $safeprojectname$.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
Remove import of non-existing file.
|
"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
|
<commit_before>"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
import $safeprojectname$.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
<commit_msg>Remove import of non-existing file.<commit_after>
|
"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
|
"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
import $safeprojectname$.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
Remove import of non-existing file."""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
|
<commit_before>"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
import $safeprojectname$.views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
<commit_msg>Remove import of non-existing file.<commit_after>"""
Definition of urls for $safeprojectname$.
"""
from django.conf.urls import include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', $safeprojectname$.views.home, name='home'),
# url(r'^$safeprojectname$/', include('$safeprojectname$.$safeprojectname$.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
]
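For reference, once a concrete project replaces $safeprojectname$, the commented examples above are enabled roughly as follows; this is illustrative only, for a hypothetical project named MyApp:
from django.conf.urls import url
import MyApp.views

urlpatterns = [
    url(r'^$', MyApp.views.home, name='home'),
]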
|
f0a0985ac3c5b9b77d8cd0b3bf7b8b028c7acf42
|
scripts/check_data.py
|
scripts/check_data.py
|
#!/usr/bin/env python3
import sys
import json
from glob import glob
from os.path import relpath, abspath, dirname
from pathlib import Path
BASE_DIR = str(Path(dirname(abspath(__file__))).parent)
def get_json_files(base):
'''Returns a list of all JSON files in the /data/ directory'''
files = glob(f'{base}/data/**/*.json', recursive=True)
files.sort()
return files
def read_json_file(file):
'''Reads a single JSON and returns a dict of its contents'''
with open(file) as json_file:
return json.load(json_file)
def print_json_error(file, err):
'''Outputs error status'''
print(f'{file}: {err}')
def main(base):
'''Runs a check on all the project's JSON files to see if they have valid syntax'''
files = get_json_files(base)
errors = False
for file in files:
fpath = relpath(file, base)
try:
read_json_file(file)
except json.decoder.JSONDecodeError as err:
print_json_error(fpath, err)
errors = True
except:
print_json_error(fpath, 'Unknown error')
errors = True
if errors:
sys.exit(1)
if __name__== "__main__":
main(BASE_DIR)
|
Add script for checking whether data files have valid JSON syntax
|
Add script for checking whether data files have valid JSON syntax
|
Python
|
mit
|
msikma/pokesprite,msikma/pokesprite,msikma/pokesprite
|
Add script for checking whether data files have valid JSON syntax
|
#!/usr/bin/env python3
import sys
import json
from glob import glob
from os.path import relpath, abspath, dirname
from pathlib import Path
BASE_DIR = str(Path(dirname(abspath(__file__))).parent)
def get_json_files(base):
'''Returns a list of all JSON files in the /data/ directory'''
files = glob(f'{base}/data/**/*.json', recursive=True)
files.sort()
return files
def read_json_file(file):
'''Reads a single JSON and returns a dict of its contents'''
with open(file) as json_file:
return json.load(json_file)
def print_json_error(file, err):
'''Outputs error status'''
print(f'{file}: {err}')
def main(base):
'''Runs a check on all the project's JSON files to see if they have valid syntax'''
files = get_json_files(base)
errors = False
for file in files:
fpath = relpath(file, base)
try:
read_json_file(file)
except json.decoder.JSONDecodeError as err:
print_json_error(fpath, err)
errors = True
except:
print_json_error(fpath, 'Unknown error')
errors = True
if errors:
sys.exit(1)
if __name__== "__main__":
main(BASE_DIR)
|
<commit_before><commit_msg>Add script for checking whether data files have valid JSON syntax<commit_after>
|
#!/usr/bin/env python3
import sys
import json
from glob import glob
from os.path import relpath, abspath, dirname
from pathlib import Path
BASE_DIR = str(Path(dirname(abspath(__file__))).parent)
def get_json_files(base):
'''Returns a list of all JSON files in the /data/ directory'''
files = glob(f'{base}/data/**/*.json', recursive=True)
files.sort()
return files
def read_json_file(file):
'''Reads a single JSON and returns a dict of its contents'''
with open(file) as json_file:
return json.load(json_file)
def print_json_error(file, err):
'''Outputs error status'''
print(f'{file}: {err}')
def main(base):
'''Runs a check on all the project's JSON files to see if they have valid syntax'''
files = get_json_files(base)
errors = False
for file in files:
fpath = relpath(file, base)
try:
read_json_file(file)
except json.decoder.JSONDecodeError as err:
print_json_error(fpath, err)
errors = True
except:
print_json_error(fpath, 'Unknown error')
errors = True
if errors:
sys.exit(1)
if __name__== "__main__":
main(BASE_DIR)
|
Add script for checking whether data files have valid JSON syntax#!/usr/bin/env python3
import sys
import json
from glob import glob
from os.path import relpath, abspath, dirname
from pathlib import Path
BASE_DIR = str(Path(dirname(abspath(__file__))).parent)
def get_json_files(base):
'''Returns a list of all JSON files in the /data/ directory'''
files = glob(f'{base}/data/**/*.json', recursive=True)
files.sort()
return files
def read_json_file(file):
'''Reads a single JSON and returns a dict of its contents'''
with open(file) as json_file:
return json.load(json_file)
def print_json_error(file, err):
'''Outputs error status'''
print(f'{file}: {err}')
def main(base):
'''Runs a check on all the project's JSON files to see if they have valid syntax'''
files = get_json_files(base)
errors = False
for file in files:
fpath = relpath(file, base)
try:
read_json_file(file)
except json.decoder.JSONDecodeError as err:
print_json_error(fpath, err)
errors = True
except:
print_json_error(fpath, 'Unknown error')
errors = True
if errors:
sys.exit(1)
if __name__== "__main__":
main(BASE_DIR)
|
<commit_before><commit_msg>Add script for checking whether data files have valid JSON syntax<commit_after>#!/usr/bin/env python3
import sys
import json
from glob import glob
from os.path import relpath, abspath, dirname
from pathlib import Path
BASE_DIR = str(Path(dirname(abspath(__file__))).parent)
def get_json_files(base):
'''Returns a list of all JSON files in the /data/ directory'''
files = glob(f'{base}/data/**/*.json', recursive=True)
files.sort()
return files
def read_json_file(file):
'''Reads a single JSON and returns a dict of its contents'''
with open(file) as json_file:
return json.load(json_file)
def print_json_error(file, err):
'''Outputs error status'''
print(f'{file}: {err}')
def main(base):
'''Runs a check on all the project's JSON files to see if they have valid syntax'''
files = get_json_files(base)
errors = False
for file in files:
fpath = relpath(file, base)
try:
read_json_file(file)
except json.decoder.JSONDecodeError as err:
print_json_error(fpath, err)
errors = True
except:
print_json_error(fpath, 'Unknown error')
errors = True
if errors:
sys.exit(1)
if __name__== "__main__":
main(BASE_DIR)
|
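The checker can also be driven directly from Python rather than via the command line; a minimal sketch, assuming scripts/check_data.py is importable and using an illustrative repository path:
import check_data  # scripts/check_data.py

# Exits with status 1 if any file under <repo>/data/ contains invalid JSON.
check_data.main('/path/to/pokesprite')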
|
928ac17b69b810b85f3cfbe168f6b3fdb8c6cd6e
|
src/ggrc/migrations/versions/20170214101700_ff4ebc0d89c_add_column_to_fulltext_records_table.py
|
src/ggrc/migrations/versions/20170214101700_ff4ebc0d89c_add_column_to_fulltext_records_table.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add 'subproperty' column into 'fulltext_record_properties' table to make
search by property subtype possible.
For example:
We have two subtypes of the property Person:
- name
- email
Create Date: 2017-02-14 10:17:00.155675
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import op
# revision identifiers, used by Alembic.
revision = '5335453abae'
down_revision = '341f8a645b2f'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column('fulltext_record_properties',
sa.Column('subproperty', mysql.VARCHAR(length=64),
nullable=False, server_default=''))
op.execute("""
ALTER TABLE fulltext_record_properties
DROP PRIMARY KEY,
ADD PRIMARY KEY (`key`, `type`, property, subproperty);
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.execute("""
TRUNCATE TABLE fulltext_record_properties;
""")
op.drop_column('fulltext_record_properties', 'subproperty')
|
Add column for property subtype
|
Add column for property subtype
Add column 'subproperty' into fulltext_record_properties table
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core
|
Add column for property subtype
Add column 'subproperty' into fulltext_record_properties table
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add 'subproperty' column into 'fulltext_record_properties' table to make
search by property subtype possible.
For example:
We have two subtypes of the property Person:
- name
- email
Create Date: 2017-02-14 10:17:00.155675
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import op
# revision identifiers, used by Alembic.
revision = '5335453abae'
down_revision = '341f8a645b2f'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column('fulltext_record_properties',
sa.Column('subproperty', mysql.VARCHAR(length=64),
nullable=False, server_default=''))
op.execute("""
ALTER TABLE fulltext_record_properties
DROP PRIMARY KEY,
ADD PRIMARY KEY (`key`, `type`, property, subproperty);
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.execute("""
TRUNCATE TABLE fulltext_record_properties;
""")
op.drop_column('fulltext_record_properties', 'subproperty')
|
<commit_before><commit_msg>Add column for property subtype
Add column 'subproperty' into fulltext_record_properties table<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add 'subproperty' column into 'fulltext_record_properties' table to make
search by property subtype possible.
For example:
We have two subtypes of the property Person:
- name
- email
Create Date: 2017-02-14 10:17:00.155675
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import op
# revision identifiers, used by Alembic.
revision = '5335453abae'
down_revision = '341f8a645b2f'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column('fulltext_record_properties',
sa.Column('subproperty', mysql.VARCHAR(length=64),
nullable=False, server_default=''))
op.execute("""
ALTER TABLE fulltext_record_properties
DROP PRIMARY KEY,
ADD PRIMARY KEY (`key`, `type`, property, subproperty);
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.execute("""
TRUNCATE TABLE fulltext_record_properties;
""")
op.drop_column('fulltext_record_properties', 'subproperty')
|
Add column for property subtype
Add column 'subproperty' into fulltext_record_properties table# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add 'subproperty' column into 'fulltext_record_properties' table to make
search by property subtype possible.
For example:
We have two subtypes of the property Person:
- name
- email
Create Date: 2017-02-14 10:17:00.155675
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import op
# revision identifiers, used by Alembic.
revision = '5335453abae'
down_revision = '341f8a645b2f'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column('fulltext_record_properties',
sa.Column('subproperty', mysql.VARCHAR(length=64),
nullable=False, server_default=''))
op.execute("""
ALTER TABLE fulltext_record_properties
DROP PRIMARY KEY,
ADD PRIMARY KEY (`key`, `type`, property, subproperty);
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.execute("""
TRUNCATE TABLE fulltext_record_properties;
""")
op.drop_column('fulltext_record_properties', 'subproperty')
|
<commit_before><commit_msg>Add column for property subtype
Add column 'subproperty' into fulltext_record_properties table<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add 'subproperty' column into 'fulltext_record_properties' table to make
search by property subtype possible.
For example:
We have two subtypes of the property Person:
- name
- email
Create Date: 2017-02-14 10:17:00.155675
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
from alembic import op
# revision identifiers, used by Alembic.
revision = '5335453abae'
down_revision = '341f8a645b2f'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.add_column('fulltext_record_properties',
sa.Column('subproperty', mysql.VARCHAR(length=64),
nullable=False, server_default=''))
op.execute("""
ALTER TABLE fulltext_record_properties
DROP PRIMARY KEY,
ADD PRIMARY KEY (`key`, `type`, property, subproperty);
""")
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
op.execute("""
TRUNCATE TABLE fulltext_record_properties;
""")
op.drop_column('fulltext_record_properties', 'subproperty')
|
|
d4972ffd1e0b9ba42800234b847c6704b9b2146e
|
jisho.py
|
jisho.py
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from workflow import Workflow, web, ICON_WEB
API_URL = 'http://jisho.org/api/v1/search/words'
SEP_COMMA = u'、 ' # Separator for subtitle kana readings.
MAX_NUM_RESULTS = 9 # Maximum number of results that Alfred can display.
def get_results(query):
"""Fetches query search results from Jisho.org API.
Args:
query: A string representing the search query for Jisho.org.
Returns:
An array of JSON results from Jisho.org based on search query.
"""
params = dict(keyword=query)
request = web.get(API_URL, params)
# Throw an error if request failed.
request.raise_for_status()
# Parse response as JSON and extract results.
response = request.json()
return response['data']
def add_alfred_result(wf, result):
"""Adds the result to Alfred.
Args:
wf: An instance of Workflow.
result: A dict representation of info about the Japanese word.
"""
japanese_info = result['japanese']
# Prefer kanji as title over kana.
if 'word' in japanese_info[0]:
title = japanese_info[0]['word']
subtitle = combine_kana_readings(japanese_info)
else:
title = japanese_info[0]['reading']
# Ignore first reading since it was used as the title.
subtitle = combine_kana_readings(japanese_info[1:])
wf.add_item(title=title,
subtitle=subtitle,
arg=title,
valid=True,
largetext=title,
icon=ICON_WEB)
def combine_kana_readings(japanese_info):
"""Combines the kana readings for the japanese info with the SEP_COMMA.
Args:
japanese_info: An array with dict elements with reading info.
"""
return SEP_COMMA.join([word['reading'] for word in japanese_info])
def main(wf):
"""Main function to handle query and request info from Jisho.org.
Args:
wf: An instance of Workflow.
"""
# Get query from Alfred.
query = wf.args[0] if len(wf.args) else None
# Retrieve results from Jisho.org.
results = get_results(query)
# Add results, up to the maximum number of results, to Alfred.
for i in range(min(len(results), MAX_NUM_RESULTS)):
add_alfred_result(wf, results[i])
# Send the results to Alfred as XML.
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
Add ability to get and display Jisho.org results
|
Add ability to get and display Jisho.org results
|
Python
|
mit
|
janclarin/jisho-alfred-workflow
|
Add ability to get and display Jisho.org results
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from workflow import Workflow, web, ICON_WEB
API_URL = 'http://jisho.org/api/v1/search/words'
SEP_COMMA = u'、 ' # Separator for subtitle kana readings.
MAX_NUM_RESULTS = 9 # Maximum number of results that Alfred can display.
def get_results(query):
"""Fetches query search results from Jisho.org API.
Args:
query: A string representing the search query for Jisho.org.
Returns:
An array of JSON results from Jisho.org based on search query.
"""
params = dict(keyword=query)
request = web.get(API_URL, params)
# Throw an error if request failed.
request.raise_for_status()
# Parse response as JSON and extract results.
response = request.json()
return response['data']
def add_alfred_result(wf, result):
"""Adds the result to Alfred.
Args:
wf: An instance of Workflow.
result: A dict representation of info about the Japanese word.
"""
japanese_info = result['japanese']
# Prefer kanji as title over kana.
if 'word' in japanese_info[0]:
title = japanese_info[0]['word']
subtitle = combine_kana_readings(japanese_info)
else:
title = japanese_info[0]['reading']
# Ignore first reading since it was used as the title.
subtitle = combine_kana_readings(japanese_info[1:])
wf.add_item(title=title,
subtitle=subtitle,
arg=title,
valid=True,
largetext=title,
icon=ICON_WEB)
def combine_kana_readings(japanese_info):
"""Combines the kana readings for the japanese info with the SEP_COMMA.
Args:
japanese_info: An array with dict elements with reading info.
"""
return SEP_COMMA.join([word['reading'] for word in japanese_info])
def main(wf):
"""Main function to handle query and request info from Jisho.org.
Args:
wf: An instance of Workflow.
"""
# Get query from Alfred.
query = wf.args[0] if len(wf.args) else None
# Retrieve results from Jisho.org.
results = get_results(query)
# Add results, up to the maximum number of results, to Alfred.
for i in range(min(len(results), MAX_NUM_RESULTS)):
add_alfred_result(wf, results[i])
# Send the results to Alfred as XML.
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
<commit_before><commit_msg>Add ability to get and display Jisho.org results<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from workflow import Workflow, web, ICON_WEB
API_URL = 'http://jisho.org/api/v1/search/words'
SEP_COMMA = u'、 ' # Separator for subtitle kana readings.
MAX_NUM_RESULTS = 9 # Maximum number of results that Alfred can display.
def get_results(query):
"""Fetches query search results from Jisho.org API.
Args:
query: A string representing the search query for Jisho.org.
Returns:
An array of JSON results from Jisho.org based on search query.
"""
params = dict(keyword=query)
request = web.get(API_URL, params)
# Throw an error if request failed.
request.raise_for_status()
# Parse response as JSON and extract results.
response = request.json()
return response['data']
def add_alfred_result(wf, result):
"""Adds the result to Alfred.
Args:
wf: An instance of Workflow.
result: A dict representation of info about the Japanese word.
"""
japanese_info = result['japanese']
# Prefer kanji as title over kana.
if 'word' in japanese_info[0]:
title = japanese_info[0]['word']
subtitle = combine_kana_readings(japanese_info)
else:
title = japanese_info[0]['reading']
# Ignore first reading since it was used as the title.
subtitle = combine_kana_readings(japanese_info[1:])
wf.add_item(title=title,
subtitle=subtitle,
arg=title,
valid=True,
largetext=title,
icon=ICON_WEB)
def combine_kana_readings(japanese_info):
"""Combines the kana readings for the japanese info with the SEP_COMMA.
Args:
japanese_info: An array with dict elements with reading info.
"""
return SEP_COMMA.join([word['reading'] for word in japanese_info])
def main(wf):
"""Main function to handle query and request info from Jisho.org.
Args:
wf: An instance of Workflow.
"""
# Get query from Alfred.
query = wf.args[0] if len(wf.args) else None
# Retrieve results from Jisho.org.
results = get_results(query)
# Add results, up to the maximum number of results, to Alfred.
for i in range(min(len(results), MAX_NUM_RESULTS)):
add_alfred_result(wf, results[i])
# Send the results to Alfred as XML.
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
Add ability to get and display Jisho.org results#!/usr/bin/env python
# encoding: utf-8
import sys
from workflow import Workflow, web, ICON_WEB
API_URL = 'http://jisho.org/api/v1/search/words'
SEP_COMMA = u'、 ' # Separator for subtitle kana readings.
MAX_NUM_RESULTS = 9 # Maximum number of results that Alfred can display.
def get_results(query):
"""Fetches query search results from Jisho.org API.
Args:
query: A string representing the search query for Jisho.org.
Returns:
An array of JSON results from Jisho.org based on search query.
"""
params = dict(keyword=query)
request = web.get(API_URL, params)
# Throw an error if request failed.
request.raise_for_status()
# Parse response as JSON and extract results.
response = request.json()
return response['data']
def add_alfred_result(wf, result):
"""Adds the result to Alfred.
Args:
wf: An instance of Workflow.
result: A dict representation of info about the Japanese word.
"""
japanese_info = result['japanese']
# Prefer kanji as title over kana.
if 'word' in japanese_info[0]:
title = japanese_info[0]['word']
subtitle = combine_kana_readings(japanese_info)
else:
title = japanese_info[0]['reading']
# Ignore first reading since it was used as the title.
subtitle = combine_kana_readings(japanese_info[1:])
wf.add_item(title=title,
subtitle=subtitle,
arg=title,
valid=True,
largetext=title,
icon=ICON_WEB)
def combine_kana_readings(japanese_info):
"""Combines the kana readings for the japanese info with the SEP_COMMA.
Args:
japanese_info: An array with dict elements with reading info.
"""
return SEP_COMMA.join([word['reading'] for word in japanese_info])
def main(wf):
"""Main function to handle query and request info from Jisho.org.
Args:
wf: An instance of Workflow.
"""
# Get query from Alfred.
query = wf.args[0] if len(wf.args) else None
# Retrieve results from Jisho.org.
results = get_results(query)
# Add results, up to the maximum number of results, to Alfred.
for i in range(min(len(results), MAX_NUM_RESULTS)):
add_alfred_result(wf, results[i])
# Send the results to Alfred as XML.
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
<commit_before><commit_msg>Add ability to get and display Jisho.org results<commit_after>#!/usr/bin/env python
# encoding: utf-8
import sys
from workflow import Workflow, web, ICON_WEB
API_URL = 'http://jisho.org/api/v1/search/words'
SEP_COMMA = u'、 ' # Separator for subtitle kana readings.
MAX_NUM_RESULTS = 9 # Maximum number of results that Alfred can display.
def get_results(query):
"""Fetches query search results from Jisho.org API.
Args:
query: A string representing the search query for Jisho.org.
Returns:
An array of JSON results from Jisho.org based on search query.
"""
params = dict(keyword=query)
request = web.get(API_URL, params)
# Throw an error if request failed.
request.raise_for_status()
# Parse response as JSON and extract results.
response = request.json()
return response['data']
def add_alfred_result(wf, result):
"""Adds the result to Alfred.
Args:
wf: An instance of Workflow.
result: A dict representation of info about the Japanese word.
"""
japanese_info = result['japanese']
# Prefer kanji as title over kana.
if 'word' in japanese_info[0]:
title = japanese_info[0]['word']
subtitle = combine_kana_readings(japanese_info)
else:
title = japanese_info[0]['reading']
# Ignore first reading since it was used as the title.
subtitle = combine_kana_readings(japanese_info[1:])
wf.add_item(title=title,
subtitle=subtitle,
arg=title,
valid=True,
largetext=title,
icon=ICON_WEB)
def combine_kana_readings(japanese_info):
"""Combines the kana readings for the japanese info with the SEP_COMMA.
Args:
japanese_info: An array with dict elements with reading info.
"""
return SEP_COMMA.join([word['reading'] for word in japanese_info])
def main(wf):
"""Main function to handle query and request info from Jisho.org.
Args:
wf: An instance of Workflow.
"""
# Get query from Alfred.
query = wf.args[0] if len(wf.args) else None
# Retrieve results from Jisho.org.
results = get_results(query)
# Add results, up to the maximum number of results, to Alfred.
for i in range(min(len(results), MAX_NUM_RESULTS)):
add_alfred_result(wf, results[i])
# Send the results to Alfred as XML.
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
|
2a6a7b6fff73e6622ac8a4cdc97fa2701225691d
|
vigir_ltl_specification/src/vigir_ltl_specification/task_specification.py
|
vigir_ltl_specification/src/vigir_ltl_specification/task_specification.py
|
#!/usr/bin/env python
import os
import pprint
import preconditions as precond
from gr1_specification import GR1Specification
from gr1_formulas import GR1Formula, FastSlowFormula
"""
Module's docstring #TODO
"""
VIGIR_ROOT_DIR = os.environ['VIGIR_ROOT_DIR']
class TaskSpecification(GR1Specification):
"""..."""
def __init__(self, spec_name = '', env_props = [], sys_props = []):
super(TaskSpecification, self).__init__(spec_name, env_props, sys_props)
# =========================================================
# Entry point
# =========================================================
def main():
my_spec = TaskSpecification()
print 'Environment props:\t', my_spec.env_props
print 'System props:\t\t', my_spec.sys_props
if __name__ == "__main__":
main()
|
Add module for task-specific specs
|
[vigir_ltl_specification] Add module for task-specific specs
|
Python
|
bsd-3-clause
|
team-vigir/vigir_behavior_synthesis,team-vigir/vigir_behavior_synthesis
|
[vigir_ltl_specification] Add module for task-specific specs
|
#!/usr/bin/env python
import os
import pprint
import preconditions as precond
from gr1_specification import GR1Specification
from gr1_formulas import GR1Formula, FastSlowFormula
"""
Module's docstring #TODO
"""
VIGIR_ROOT_DIR = os.environ['VIGIR_ROOT_DIR']
class TaskSpecification(GR1Specification):
"""..."""
def __init__(self, spec_name = '', env_props = [], sys_props = []):
super(TaskSpecification, self).__init__(spec_name, env_props, sys_props)
# =========================================================
# Entry point
# =========================================================
def main():
my_spec = TaskSpecification()
print 'Environment props:\t', my_spec.env_props
print 'System props:\t\t', my_spec.sys_props
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>[vigir_ltl_specification] Add module for task-specific specs<commit_after>
|
#!/usr/bin/env python
import os
import pprint
import preconditions as precond
from gr1_specification import GR1Specification
from gr1_formulas import GR1Formula, FastSlowFormula
"""
Module's docstring #TODO
"""
VIGIR_ROOT_DIR = os.environ['VIGIR_ROOT_DIR']
class TaskSpecification(GR1Specification):
"""..."""
def __init__(self, spec_name = '', env_props = [], sys_props = []):
super(TaskSpecification, self).__init__(spec_name, env_props, sys_props)
# =========================================================
# Entry point
# =========================================================
def main():
my_spec = TaskSpecification()
print 'Environment props:\t', my_spec.env_props
print 'System props:\t\t', my_spec.sys_props
if __name__ == "__main__":
main()
|
[vigir_ltl_specification] Add module for task-specific specs#!/usr/bin/env python
import os
import pprint
import preconditions as precond
from gr1_specification import GR1Specification
from gr1_formulas import GR1Formula, FastSlowFormula
"""
Module's docstring #TODO
"""
VIGIR_ROOT_DIR = os.environ['VIGIR_ROOT_DIR']
class TaskSpecification(GR1Specification):
"""..."""
def __init__(self, spec_name = '', env_props = [], sys_props = []):
super(TaskSpecification, self).__init__(spec_name, env_props, sys_props)
# =========================================================
# Entry point
# =========================================================
def main():
my_spec = TaskSpecification()
print 'Environment props:\t', my_spec.env_props
print 'System props:\t\t', my_spec.sys_props
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>[vigir_ltl_specification] Add module for task-specific specs<commit_after>#!/usr/bin/env python
import os
import pprint
import preconditions as precond
from gr1_specification import GR1Specification
from gr1_formulas import GR1Formula, FastSlowFormula
"""
Module's docstring #TODO
"""
VIGIR_ROOT_DIR = os.environ['VIGIR_ROOT_DIR']
class TaskSpecification(GR1Specification):
"""..."""
def __init__(self, spec_name = '', env_props = [], sys_props = []):
super(TaskSpecification, self).__init__(spec_name, env_props, sys_props)
# =========================================================
# Entry point
# =========================================================
def main():
my_spec = TaskSpecification()
print 'Environment props:\t', my_spec.env_props
print 'System props:\t\t', my_spec.sys_props
if __name__ == "__main__":
main()
|
|
a37a721666551bc91743d36605073d97eb7a1f5d
|
package_control.py
|
package_control.py
|
import sys
package_name = 'My Package'
def plugin_loaded():
from package_control import events
if events.install(package_name):
print('Installed %s!' % events.install(package_name))
elif events.post_upgrade(package_name):
print('Upgraded to %s!' % events.post_upgrade(package_name))
def plugin_unloaded():
from package_control import events
if events.pre_upgrade(package_name):
print('Upgrading from %s!' % events.pre_upgrade(package_name))
elif events.remove(package_name):
print('Removing %s!' % events.remove(package_name))
|
Package control APIs for installing/removing.
|
Package control APIs for installing/removing.
|
Python
|
mit
|
niosus/EasyClangComplete,kgizdov/EasyClangComplete,kgizdov/EasyClangComplete,kgizdov/EasyClangComplete,kgizdov/EasyClangComplete,niosus/EasyClangComplete,niosus/EasyClangComplete
|
Package control APIs for installing/removing.
|
import sys
package_name = 'My Package'
def plugin_loaded():
from package_control import events
if events.install(package_name):
print('Installed %s!' % events.install(package_name))
elif events.post_upgrade(package_name):
print('Upgraded to %s!' % events.post_upgrade(package_name))
def plugin_unloaded():
from package_control import events
if events.pre_upgrade(package_name):
print('Upgrading from %s!' % events.pre_upgrade(package_name))
elif events.remove(package_name):
print('Removing %s!' % events.remove(package_name))
|
<commit_before><commit_msg>Package control APIs for installing/removing.<commit_after>
|
import sys
package_name = 'My Package'
def plugin_loaded():
from package_control import events
if events.install(package_name):
print('Installed %s!' % events.install(package_name))
elif events.post_upgrade(package_name):
print('Upgraded to %s!' % events.post_upgrade(package_name))
def plugin_unloaded():
from package_control import events
if events.pre_upgrade(package_name):
print('Upgrading from %s!' % events.pre_upgrade(package_name))
elif events.remove(package_name):
print('Removing %s!' % events.remove(package_name))
|
Package control APIs for installing/removing.import sys
package_name = 'My Package'
def plugin_loaded():
from package_control import events
if events.install(package_name):
print('Installed %s!' % events.install(package_name))
elif events.post_upgrade(package_name):
print('Upgraded to %s!' % events.post_upgrade(package_name))
def plugin_unloaded():
from package_control import events
if events.pre_upgrade(package_name):
print('Upgrading from %s!' % events.pre_upgrade(package_name))
elif events.remove(package_name):
print('Removing %s!' % events.remove(package_name))
|
<commit_before><commit_msg>Package control APIs for installing/removing.<commit_after>import sys
package_name = 'My Package'
def plugin_loaded():
from package_control import events
if events.install(package_name):
print('Installed %s!' % events.install(package_name))
elif events.post_upgrade(package_name):
print('Upgraded to %s!' % events.post_upgrade(package_name))
def plugin_unloaded():
from package_control import events
if events.pre_upgrade(package_name):
print('Upgrading from %s!' % events.pre_upgrade(package_name))
elif events.remove(package_name):
print('Removing %s!' % events.remove(package_name))
|
|
c713210011772bbf5afbb88dc4bb62a7e9496f2b
|
new_post.py
|
new_post.py
|
import argparse, datetime, unicodedata, re
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '-', value)
parser = argparse.ArgumentParser(description='Create a new post.')
parser.add_argument('-title', help='Title of the post', required=True)
parser.add_argument('-date', help='Date of the post formatted [yyyy-mm-dd] (defaults to today)', required=False)
parser.add_argument('-draft', help='Draft post (defaults to false)', required=False, action='store_true')
args = parser.parse_args()
dateformat = "%Y-%m-%d"
today = datetime.datetime.today()
filedate = today.strftime(dateformat)
if args.date:
try:
t = datetime.datetime.strptime(args.date, dateformat)
filedate = t.strftime(dateformat)
except Exception:
print("Not a valid date, using today", filedate)
filename = filedate + '-' + slugify(args.title) + '.md'
f = open(filename, 'w')
print("---", file=f)
print("layout: post", file=f)
print("title: " + args.title, file=f)
if args.draft:
print("published: false", file=f)
if args.date:
print("date: " + filedate, file=f)
print("---", file=f)
|
Add python helper to add new post
|
Add python helper to add new post
|
Python
|
mit
|
tuvokki/tuvokki.github.com,tuvokki/tuvokki.github.com,tuvokki/tuvokki.github.com,tuvokki/tuvokki.github.com
|
Add python helper to add new post
|
import argparse, datetime, unicodedata, re
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '-', value)
parser = argparse.ArgumentParser(description='Create a new post.')
parser.add_argument('-title', help='Title of the post', required=True)
parser.add_argument('-date', help='Date of the post formatted [yyyy-mm-dd] (defaults to today)', required=False)
parser.add_argument('-draft', help='Draft post (defaults to false)', required=False, action='store_true')
args = parser.parse_args()
dateformat = "%Y-%m-%d"
today = datetime.datetime.today()
filedate = today.strftime(dateformat)
if args.date:
try:
t = datetime.datetime.strptime(args.date, dateformat)
filedate = t.strftime(dateformat)
except Exception:
print("Not a valid date, using today", filedate)
filename = filedate + '-' + slugify(args.title) + '.md'
f = open(filename, 'w')
print("---", file=f)
print("layout: post", file=f)
print("title: " + args.title, file=f)
if args.draft:
print("published: false", file=f)
if args.date:
print("date: " + filedate, file=f)
print("---", file=f)
|
<commit_before><commit_msg>Add python helper to add new post<commit_after>
|
import argparse, datetime, unicodedata, re
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '-', value)
parser = argparse.ArgumentParser(description='Create a new post.')
parser.add_argument('-title', help='Title of the post', required=True)
parser.add_argument('-date', help='Date of the post formatted [yyyy-mm-dd] (defaults to today)', required=False)
parser.add_argument('-draft', help='Draft post (defaults to false)', required=False, action='store_true')
args = parser.parse_args()
dateformat = "%Y-%m-%d"
today = datetime.datetime.today()
filedate = today.strftime(dateformat)
if args.date:
try:
t = datetime.datetime.strptime(args.date, dateformat)
filedate = t.strftime(dateformat)
except Exception:
print("Not a valid date, using today", filedate)
filename = filedate + '-' + slugify(args.title) + '.md'
f = open(filename, 'w')
print("---", file=f)
print("layout: post", file=f)
print("title: " + args.title, file=f)
if args.draft:
print("published: false", file=f)
if args.date:
print("date: " + filedate, file=f)
print("---", file=f)
|
Add python helper to add new postimport argparse, datetime, unicodedata, re
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '-', value)
parser = argparse.ArgumentParser(description='Create a new post.')
parser.add_argument('-title', help='Title of the post', required=True)
parser.add_argument('-date', help='Date of the post formatted [yyyy-mm-dd] (defaults to today)', required=False)
parser.add_argument('-draft', help='Draft post (defaults to false)', required=False, action='store_true')
args = parser.parse_args()
dateformat = "%Y-%m-%d"
today = datetime.datetime.today()
filedate = today.strftime(dateformat)
if args.date:
try:
t = datetime.datetime.strptime(args.date, dateformat)
filedate = t.strftime(dateformat)
except Exception:
print("Not a valid date, using today", filedate)
filename = filedate + '-' + slugify(args.title) + '.md'
f = open(filename, 'w')
print("---", file=f)
print("layout: post", file=f)
print("title: " + args.title, file=f)
if args.draft:
print("published: false", file=f)
if args.date:
print("date: " + filedate, file=f)
print("---", file=f)
|
<commit_before><commit_msg>Add python helper to add new post<commit_after>import argparse, datetime, unicodedata, re
def slugify(value):
"""
Converts to lowercase, removes non-word characters (alphanumerics and
underscores) and converts spaces to hyphens. Also strips leading and
trailing whitespace.
"""
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return re.sub('[-\s]+', '-', value)
parser = argparse.ArgumentParser(description='Create a new post.')
parser.add_argument('-title', help='Title of the post', required=True)
parser.add_argument('-date', help='Date of the post formatted [yyyy-mm-dd] (defaults to today)', required=False)
parser.add_argument('-draft', help='Draft post (defaults to false)', required=False, action='store_true')
args = parser.parse_args()
dateformat = "%Y-%m-%d"
today = datetime.datetime.today()
filedate = today.strftime(dateformat)
if args.date:
try:
t = datetime.datetime.strptime(args.date, dateformat)
filedate = t.strftime(dateformat)
except Exception:
print("Not a valid date, using today", filedate)
filename = filedate + '-' + slugify(args.title) + '.md'
f = open(filename, 'w')
print("---", file=f)
print("layout: post", file=f)
print("title: " + args.title, file=f)
if args.draft:
print("published: false", file=f)
if args.date:
print("date: " + filedate, file=f)
print("---", file=f)
|
|
fcd1f4c93b9f3108e1711d5dca0506549c4ba2f7
|
src/ggrc/migrations/versions/20150521091609_29d21b3c24b4_migrate_object_controls_to_relationships.py
|
src/ggrc/migrations/versions/20150521091609_29d21b3c24b4_migrate_object_controls_to_relationships.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate object_controls to relationships
Revision ID: 29d21b3c24b4
Revises: b0c3361797a
Create Date: 2015-05-21 09:16:09.620314
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '29d21b3c24b4'
down_revision = 'b0c3361797a'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT modified_by_id, created_at, updated_at, control_id as source_id,
'Control' as source_type, controllable_id as destination_id,
controllable_type as destination_type, context_id
FROM object_controls;
"""
op.execute(sql)
op.drop_constraint(
'object_controls_ibfk_2', 'object_controls', type_='foreignkey')
def downgrade():
op.create_foreign_key(
'object_controls_ibfk_2',
'object_controls',
'controls',
['control_id'],
['id']
)
|
Add a migration for object_controls -> relationships
|
Add a migration for object_controls -> relationships
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,uskudnik/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,uskudnik/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core
|
Add a migration for object_controls -> relationships
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate object_controls to relationships
Revision ID: 29d21b3c24b4
Revises: b0c3361797a
Create Date: 2015-05-21 09:16:09.620314
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '29d21b3c24b4'
down_revision = 'b0c3361797a'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT modified_by_id, created_at, updated_at, control_id as source_id,
'Control' as source_type, controllable_id as destination_id,
controllable_type as destination_type, context_id
FROM object_controls;
"""
op.execute(sql)
op.drop_constraint(
'object_controls_ibfk_2', 'object_controls', type_='foreignkey')
def downgrade():
op.create_foreign_key(
'object_controls_ibfk_2',
'object_controls',
'controls',
['control_id'],
['id']
)
|
<commit_before><commit_msg>Add a migration for object_controls -> relationships<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate object_controls to relationships
Revision ID: 29d21b3c24b4
Revises: b0c3361797a
Create Date: 2015-05-21 09:16:09.620314
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '29d21b3c24b4'
down_revision = 'b0c3361797a'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT modified_by_id, created_at, updated_at, control_id as source_id,
'Control' as source_type, controllable_id as destination_id,
controllable_type as destination_type, context_id
FROM object_controls;
"""
op.execute(sql)
op.drop_constraint(
'object_controls_ibfk_2', 'object_controls', type_='foreignkey')
def downgrade():
op.create_foreign_key(
'object_controls_ibfk_2',
'object_controls',
'controls',
['control_id'],
['id']
)
|
Add a migration for object_controls -> relationships# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate object_controls to relationships
Revision ID: 29d21b3c24b4
Revises: b0c3361797a
Create Date: 2015-05-21 09:16:09.620314
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '29d21b3c24b4'
down_revision = 'b0c3361797a'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT modified_by_id, created_at, updated_at, control_id as source_id,
'Control' as source_type, controllable_id as destination_id,
controllable_type as destination_type, context_id
FROM object_controls;
"""
op.execute(sql)
op.drop_constraint(
'object_controls_ibfk_2', 'object_controls', type_='foreignkey')
def downgrade():
op.create_foreign_key(
'object_controls_ibfk_2',
'object_controls',
'controls',
['control_id'],
['id']
)
|
<commit_before><commit_msg>Add a migration for object_controls -> relationships<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Migrate object_controls to relationships
Revision ID: 29d21b3c24b4
Revises: b0c3361797a
Create Date: 2015-05-21 09:16:09.620314
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '29d21b3c24b4'
down_revision = 'b0c3361797a'
def upgrade():
sql = """
REPLACE INTO relationships (
modified_by_id, created_at, updated_at, source_id,
source_type, destination_id, destination_type, context_id
)
SELECT modified_by_id, created_at, updated_at, control_id as source_id,
'Control' as source_type, controllable_id as destination_id,
controllable_type as destination_type, context_id
FROM object_controls;
"""
op.execute(sql)
op.drop_constraint(
'object_controls_ibfk_2', 'object_controls', type_='foreignkey')
def downgrade():
op.create_foreign_key(
'object_controls_ibfk_2',
'object_controls',
'controls',
['control_id'],
['id']
)
|
|
3047958a14f8e428404a4a29c43600b85fce6621
|
packages/syft/src/syft/core/node/common/node_service/simple/obj_exists.py
|
packages/syft/src/syft/core/node/common/node_service/simple/obj_exists.py
|
# stdlib
from typing import Any
from typing import Optional
# third party
from nacl.signing import VerifyKey
# relative
from ... import UID
from ....abstract.node import AbstractNode
from .simple_messages import NodeRunnableMessageWithReply
class DoesObjectExistMessage(NodeRunnableMessageWithReply):
__attr_allowlist__ = ["obj_id"]
def __init__(self, obj_id: UID):
self.obj_id = obj_id
def run(self, node: AbstractNode, verify_key: Optional[VerifyKey] = None) -> Any:
try:
storable_object = node.store[self.obj_id] # type: ignore
return True
except Exception as e:
return False
|
Add new message which checks whether an object exists
|
Add new message which checks whether an object exists
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
Add new message which checks whether an object exists
|
# stdlib
from typing import Any
from typing import Optional
# third party
from nacl.signing import VerifyKey
# relative
from ... import UID
from ....abstract.node import AbstractNode
from .simple_messages import NodeRunnableMessageWithReply
class DoesObjectExistMessage(NodeRunnableMessageWithReply):
__attr_allowlist__ = ["obj_id"]
def __init__(self, obj_id: UID):
self.obj_id = obj_id
def run(self, node: AbstractNode, verify_key: Optional[VerifyKey] = None) -> Any:
try:
storable_object = node.store[self.obj_id] # type: ignore
return True
except Exception as e:
return False
|
<commit_before><commit_msg>Add new message which checks whether an object exists<commit_after>
|
# stdlib
from typing import Any
from typing import Optional
# third party
from nacl.signing import VerifyKey
# relative
from ... import UID
from ....abstract.node import AbstractNode
from .simple_messages import NodeRunnableMessageWithReply
class DoesObjectExistMessage(NodeRunnableMessageWithReply):
__attr_allowlist__ = ["obj_id"]
def __init__(self, obj_id: UID):
self.obj_id = obj_id
def run(self, node: AbstractNode, verify_key: Optional[VerifyKey] = None) -> Any:
try:
storable_object = node.store[self.obj_id] # type: ignore
return True
except Exception as e:
return False
|
Add new message which checks whether an object exists# stdlib
from typing import Any
from typing import Optional
# third party
from nacl.signing import VerifyKey
# relative
from ... import UID
from ....abstract.node import AbstractNode
from .simple_messages import NodeRunnableMessageWithReply
class DoesObjectExistMessage(NodeRunnableMessageWithReply):
__attr_allowlist__ = ["obj_id"]
def __init__(self, obj_id: UID):
self.obj_id = obj_id
def run(self, node: AbstractNode, verify_key: Optional[VerifyKey] = None) -> Any:
try:
storable_object = node.store[self.obj_id] # type: ignore
return True
except Exception as e:
return False
|
<commit_before><commit_msg>Add new message which checks whether an object exists<commit_after># stdlib
from typing import Any
from typing import Optional
# third party
from nacl.signing import VerifyKey
# relative
from ... import UID
from ....abstract.node import AbstractNode
from .simple_messages import NodeRunnableMessageWithReply
class DoesObjectExistMessage(NodeRunnableMessageWithReply):
__attr_allowlist__ = ["obj_id"]
def __init__(self, obj_id: UID):
self.obj_id = obj_id
def run(self, node: AbstractNode, verify_key: Optional[VerifyKey] = None) -> Any:
try:
storable_object = node.store[self.obj_id] # type: ignore
return True
except Exception as e:
return False
|
|
cf8f70e61af991ef45f2528ad1e18c017b3fef67
|
processjsontree.py
|
processjsontree.py
|
#!/usr/bin/env python
"""Process a JSON file tree."""
import os
import json
import logging
import sys
def main():
logging.basicConfig(level=logging.INFO)
input = sys.stdin.read()
obj = json.loads(input)
ProcessJsonTree(obj)
def ProcessJsonTree(json_obj):
pass
if __name__ == '__main__':
main()
|
Add script to process a jsontree (this will soon parse with esprima).
|
Add script to process a jsontree (this will soon parse with esprima).
|
Python
|
apache-2.0
|
nanaze/jsdoctor,Prachigarg1/Prachi,Prachigarg1/Prachi,nanaze/jsdoctor,Prachigarg1/Prachi,nanaze/jsdoctor
|
Add script to process a jsontree (this will soon parse with esprima).
|
#!/usr/bin/env python
"""Process a JSON file tree."""
import os
import json
import logging
import sys
def main():
logging.basicConfig(level=logging.INFO)
input = sys.stdin.read()
obj = json.loads(input)
ProcessJsonTree(obj)
def ProcessJsonTree(json_obj):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to process a jsontree (this will soon parse with esprima).<commit_after>
|
#!/usr/bin/env python
"""Process a JSON file tree."""
import os
import json
import logging
import sys
def main():
logging.basicConfig(level=logging.INFO)
input = sys.stdin.read()
obj = json.loads(input)
ProcessJsonTree(obj)
def ProcessJsonTree(json_obj):
pass
if __name__ == '__main__':
main()
|
Add script to process a jsontree (this will soon parse with esprima).#!/usr/bin/env python
"""Process a JSON file tree."""
import os
import json
import logging
import sys
def main():
logging.basicConfig(level=logging.INFO)
input = sys.stdin.read()
obj = json.loads(input)
ProcessJsonTree(obj)
def ProcessJsonTree(json_obj):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to process a jsontree (this will soon parse with esprima).<commit_after>#!/usr/bin/env python
"""Process a JSON file tree."""
import os
import json
import logging
import sys
def main():
logging.basicConfig(level=logging.INFO)
input = sys.stdin.read()
obj = json.loads(input)
ProcessJsonTree(obj)
def ProcessJsonTree(json_obj):
pass
if __name__ == '__main__':
main()
|
|
51868ecddb39e20bdf6fb5ad242267d421d799ca
|
alice3/rombasic/lst2prn.py
|
alice3/rombasic/lst2prn.py
|
#!python
import sys
print '',
print_labels = False
for line in sys.stdin:
if line.strip() == "; +++ global symbols +++":
break;
dummy = sys.stdin.next()
for line in sys.stdin:
if len(line.strip()) == 0:
break
else:
parts = line.strip().split("=");
if len(parts) > 1:
label = parts[0].strip()
value = parts[1].strip()[1:]
print "%s %s" % (value, label)
print '',
print
|
Add tool to convert zasm .lst to az80 .prn for simulator symbols
|
Add tool to convert zasm .lst to az80 .prn for simulator symbols
|
Python
|
apache-2.0
|
lkesteloot/alice,lkesteloot/alice,lkesteloot/alice,lkesteloot/alice,lkesteloot/alice,lkesteloot/alice
|
Add tool to convert zasm .lst to az80 .prn for simulator symbols
|
#!python
import sys
print '',
print_labels = False
for line in sys.stdin:
if line.strip() == "; +++ global symbols +++":
break;
dummy = sys.stdin.next()
for line in sys.stdin:
if len(line.strip()) == 0:
break
else:
parts = line.strip().split("=");
if len(parts) > 1:
label = parts[0].strip()
value = parts[1].strip()[1:]
print "%s %s" % (value, label)
print '',
print
|
<commit_before><commit_msg>Add tool to convert zasm .lst to az80 .prn for simulator symbols<commit_after>
|
#!python
import sys
print '',
print_labels = False
for line in sys.stdin:
if line.strip() == "; +++ global symbols +++":
break;
dummy = sys.stdin.next()
for line in sys.stdin:
if len(line.strip()) == 0:
break
else:
parts = line.strip().split("=");
if len(parts) > 1:
label = parts[0].strip()
value = parts[1].strip()[1:]
print "%s %s" % (value, label)
print '',
print
|
Add tool to convert zasm .lst to az80 .prn for simulator symbols#!python
import sys
print '',
print_labels = False
for line in sys.stdin:
if line.strip() == "; +++ global symbols +++":
break;
dummy = sys.stdin.next()
for line in sys.stdin:
if len(line.strip()) == 0:
break
else:
parts = line.strip().split("=");
if len(parts) > 1:
label = parts[0].strip()
value = parts[1].strip()[1:]
print "%s %s" % (value, label)
print '',
print
|
<commit_before><commit_msg>Add tool to convert zasm .lst to az80 .prn for simulator symbols<commit_after>#!python
import sys
print '',
print_labels = False
for line in sys.stdin:
if line.strip() == "; +++ global symbols +++":
break;
dummy = sys.stdin.next()
for line in sys.stdin:
if len(line.strip()) == 0:
break
else:
parts = line.strip().split("=");
if len(parts) > 1:
label = parts[0].strip()
value = parts[1].strip()[1:]
print "%s %s" % (value, label)
print '',
print
|
|
b6e5d994d5db9db1fb3d732074f37177824bd594
|
src/lib/apply_json_metadata.py
|
src/lib/apply_json_metadata.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
import sys
def apply_json_metadata(bucket_name, prefix_name):
"""
Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix
In order to allow for decompressive transcoding and serving of gzipped assets to clients
who can decompress themselves, both the content type and content encoding meta data need
to be set on JSON objects. Most methods of transferring objects into a bucket do not
correctly set this meta data, so we have this utility to correct for this after the fact.
See also: https://cloud.google.com/storage/docs/transcoding
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
for blob in bucket.list_blobs(prefix=prefix_name):
if(blob.name.endswith("json")):
print(blob.name)
if(blob.content_type != "application/json" or
blob.content_encoding != "gzip" or
blob.content_disposition != "inline"):
blob.content_type = "application/json"
blob.content_encoding = "gzip"
blob.content_disposition = "inline"
blob.patch()
if __name__ == "__main__":
if(len(sys.argv) != 3):
print("Usage: apply_json_meta [bucket_name] [prefix_name]")
else:
apply_json_metadata(sys.argv[1],sys.argv[2])
|
Add utility to set GCS JSON metadata
|
Add utility to set GCS JSON metadata
|
Python
|
apache-2.0
|
GoogleCloudPlatform/covid-19-open-data,GoogleCloudPlatform/covid-19-open-data
|
Add utility to set GCS JSON metadata
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
import sys
def apply_json_metadata(bucket_name, prefix_name):
"""
Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix
In order to allow for decompressive transcoding and serving of gzipped assets to clients
who can decompress themselves, both the content type and content encoding meta data need
to be set on JSON objects. Most methods of transferring objects into a bucket do not
correctly set this meta data, so we have this utility to correct for this after the fact.
See also: https://cloud.google.com/storage/docs/transcoding
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
for blob in bucket.list_blobs(prefix=prefix_name):
if(blob.name.endswith("json")):
print(blob.name)
if(blob.content_type != "application/json" or
blob.content_encoding != "gzip" or
blob.content_disposition != "inline"):
blob.content_type = "application/json"
blob.content_encoding = "gzip"
blob.content_disposition = "inline"
blob.patch()
if __name__ == "__main__":
if(len(sys.argv) != 3):
print("Usage: apply_json_meta [bucket_name] [prefix_name]")
else:
apply_json_metadata(sys.argv[1],sys.argv[2])
|
<commit_before><commit_msg>Add utility to set GCS JSON metadata<commit_after>
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
import sys
def apply_json_metadata(bucket_name, prefix_name):
"""
Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix
In order to allow for decompressive transcoding and serving of gzipped assets to clients
who can decompress themselves, both the content type and content encoding meta data need
to be set on JSON objects. Most methods of transferring objects into a bucket do not
correctly set this meta data, so we have this utility to correct for this after the fact.
See also: https://cloud.google.com/storage/docs/transcoding
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
for blob in bucket.list_blobs(prefix=prefix_name):
if(blob.name.endswith("json")):
print(blob.name)
if(blob.content_type != "application/json" or
blob.content_encoding != "gzip" or
blob.content_disposition != "inline"):
blob.content_type = "application/json"
blob.content_encoding = "gzip"
blob.content_disposition = "inline"
blob.patch()
if __name__ == "__main__":
if(len(sys.argv) != 3):
print("Usage: apply_json_meta [bucket_name] [prefix_name]")
else:
apply_json_metadata(sys.argv[1],sys.argv[2])
|
Add utility to set GCS JSON metadata# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
import sys
def apply_json_metadata(bucket_name, prefix_name):
"""
Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix
In order to allow for decompressive transcoding and serving of gzipped assets to clients
who can decompress themselves, both the content type and content encoding meta data need
to be set on JSON objects. Most methods of transferring objects into a bucket do not
correctly set this meta data, so we have this utility to correct for this after the fact.
See also: https://cloud.google.com/storage/docs/transcoding
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
for blob in bucket.list_blobs(prefix=prefix_name):
if(blob.name.endswith("json")):
print(blob.name)
if(blob.content_type != "application/json" or
blob.content_encoding != "gzip" or
blob.content_disposition != "inline"):
blob.content_type = "application/json"
blob.content_encoding = "gzip"
blob.content_disposition = "inline"
blob.patch()
if __name__ == "__main__":
if(len(sys.argv) != 3):
print("Usage: apply_json_meta [bucket_name] [prefix_name]")
else:
apply_json_metadata(sys.argv[1],sys.argv[2])
|
<commit_before><commit_msg>Add utility to set GCS JSON metadata<commit_after># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import storage
import sys
def apply_json_metadata(bucket_name, prefix_name):
"""
Applies Content-Type and gzip Content-Encoding to json files in a bucket prefix
In order to allow for decompressive transcoding and serving of gzipped assets to clients
who can decompress themselves, both the content type and content encoding meta data need
to be set on JSON objects. Most methods of transferring objects into a bucket do not
correctly set this meta data, so we have this utility to correct for this after the fact.
See also: https://cloud.google.com/storage/docs/transcoding
"""
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
for blob in bucket.list_blobs(prefix=prefix_name):
if(blob.name.endswith("json")):
print(blob.name)
if(blob.content_type != "application/json" or
blob.content_encoding != "gzip" or
blob.content_disposition != "inline"):
blob.content_type = "application/json"
blob.content_encoding = "gzip"
blob.content_disposition = "inline"
blob.patch()
if __name__ == "__main__":
if(len(sys.argv) != 3):
print("Usage: apply_json_meta [bucket_name] [prefix_name]")
else:
apply_json_metadata(sys.argv[1],sys.argv[2])
|
|
f31f65ed111ad2ec7e7502ad8ae147d3fca4a39d
|
tests/util_test.py
|
tests/util_test.py
|
import os
from photoshell import util
def test_hash_file(tmpdir):
tmpdir.join('file.test').write("Test")
assert (util.hash_file(os.path.join(tmpdir.strpath, 'file.test')) ==
'640ab2bae07bedc4c163f679a746f7ab7fb5d1fa')
|
Add a test for file hashing
|
Add a test for file hashing
|
Python
|
mit
|
SamWhited/photoshell,campaul/photoshell,photoshell/photoshell
|
Add a test for file hashing
|
import os
from photoshell import util
def test_hash_file(tmpdir):
tmpdir.join('file.test').write("Test")
assert (util.hash_file(os.path.join(tmpdir.strpath, 'file.test')) ==
'640ab2bae07bedc4c163f679a746f7ab7fb5d1fa')
|
<commit_before><commit_msg>Add a test for file hashing<commit_after>
|
import os
from photoshell import util
def test_hash_file(tmpdir):
tmpdir.join('file.test').write("Test")
assert (util.hash_file(os.path.join(tmpdir.strpath, 'file.test')) ==
'640ab2bae07bedc4c163f679a746f7ab7fb5d1fa')
|
Add a test for file hashingimport os
from photoshell import util
def test_hash_file(tmpdir):
tmpdir.join('file.test').write("Test")
assert (util.hash_file(os.path.join(tmpdir.strpath, 'file.test')) ==
'640ab2bae07bedc4c163f679a746f7ab7fb5d1fa')
|
<commit_before><commit_msg>Add a test for file hashing<commit_after>import os
from photoshell import util
def test_hash_file(tmpdir):
tmpdir.join('file.test').write("Test")
assert (util.hash_file(os.path.join(tmpdir.strpath, 'file.test')) ==
'640ab2bae07bedc4c163f679a746f7ab7fb5d1fa')
|
|
45122a9e1af09cf79391801e0c8728e7a881aa34
|
tests/alternate_encoding_test.py
|
tests/alternate_encoding_test.py
|
import redisdl
import unittest
import json
import os.path
from . import util
class RedisdlTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis(charset='latin1')
for key in self.r.keys('*'):
self.r.delete(key)
def test_dump_unicode_value(self):
self.r.set('key', util.b('\xa9'))
dump = redisdl.dumps(encoding='latin1')
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': u"\u00a9"}}
self.assertEqual(expected, actual)
def test_load_unicode_value(self):
dump = '{"key":{"type":"string","value":"\\u00a9"}}'
redisdl.loads(dump, encoding='latin1')
value = self.r.get('key')
self.assertEqual(util.b('\xa9'), value)
|
Test alternate encodings when dumping and loading
|
Test alternate encodings when dumping and loading
|
Python
|
bsd-2-clause
|
p/redis-dump-load,p/redis-dump-load,hyunchel/redis-dump-load,hyunchel/redis-dump-load
|
Test alternate encodings when dumping and loading
|
import redisdl
import unittest
import json
import os.path
from . import util
class RedisdlTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis(charset='latin1')
for key in self.r.keys('*'):
self.r.delete(key)
def test_dump_unicode_value(self):
self.r.set('key', util.b('\xa9'))
dump = redisdl.dumps(encoding='latin1')
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': u"\u00a9"}}
self.assertEqual(expected, actual)
def test_load_unicode_value(self):
dump = '{"key":{"type":"string","value":"\\u00a9"}}'
redisdl.loads(dump, encoding='latin1')
value = self.r.get('key')
self.assertEqual(util.b('\xa9'), value)
|
<commit_before><commit_msg>Test alternate encodings when dumping and loading<commit_after>
|
import redisdl
import unittest
import json
import os.path
from . import util
class RedisdlTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis(charset='latin1')
for key in self.r.keys('*'):
self.r.delete(key)
def test_dump_unicode_value(self):
self.r.set('key', util.b('\xa9'))
dump = redisdl.dumps(encoding='latin1')
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': u"\u00a9"}}
self.assertEqual(expected, actual)
def test_load_unicode_value(self):
dump = '{"key":{"type":"string","value":"\\u00a9"}}'
redisdl.loads(dump, encoding='latin1')
value = self.r.get('key')
self.assertEqual(util.b('\xa9'), value)
|
Test alternate encodings when dumping and loadingimport redisdl
import unittest
import json
import os.path
from . import util
class RedisdlTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis(charset='latin1')
for key in self.r.keys('*'):
self.r.delete(key)
def test_dump_unicode_value(self):
self.r.set('key', util.b('\xa9'))
dump = redisdl.dumps(encoding='latin1')
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': u"\u00a9"}}
self.assertEqual(expected, actual)
def test_load_unicode_value(self):
dump = '{"key":{"type":"string","value":"\\u00a9"}}'
redisdl.loads(dump, encoding='latin1')
value = self.r.get('key')
self.assertEqual(util.b('\xa9'), value)
|
<commit_before><commit_msg>Test alternate encodings when dumping and loading<commit_after>import redisdl
import unittest
import json
import os.path
from . import util
class RedisdlTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis(charset='latin1')
for key in self.r.keys('*'):
self.r.delete(key)
def test_dump_unicode_value(self):
self.r.set('key', util.b('\xa9'))
dump = redisdl.dumps(encoding='latin1')
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': u"\u00a9"}}
self.assertEqual(expected, actual)
def test_load_unicode_value(self):
dump = '{"key":{"type":"string","value":"\\u00a9"}}'
redisdl.loads(dump, encoding='latin1')
value = self.r.get('key')
self.assertEqual(util.b('\xa9'), value)
|
|
d4a36fb392139f1eb30524c1cf99d939be5542b7
|
tests/test_commands/test_update.py
|
tests/test_commands/test_update.py
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from project_generator.commands.update import update
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
pass
def tearDown(self):
pass
def test_update(self):
# force update. This assumes that repository is alive and that travis has
# internet connection
update(True)
|
Test - add commands test folder, first test - update command
|
Test - add commands test folder, first test - update command
|
Python
|
apache-2.0
|
ohagendorf/project_generator,project-generator/project_generator,0xc0170/project_generator,molejar/project_generator,hwfwgrp/project_generator,sarahmarshy/project_generator
|
Test - add commands test folder, first test - update command
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from project_generator.commands.update import update
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
pass
def tearDown(self):
pass
def test_update(self):
# force update. This assumes that repository is alive and that travis has
# internet connection
update(True)
|
<commit_before><commit_msg>Test - add commands test folder, first test - update command<commit_after>
|
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from project_generator.commands.update import update
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
pass
def tearDown(self):
pass
def test_update(self):
# force update. This assumes that repository is alive and that travis has
# internet connection
update(True)
|
Test - add commands test folder, first test - update command# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from project_generator.commands.update import update
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
pass
def tearDown(self):
pass
def test_update(self):
# force update. This assumes that repository is alive and that travis has
# internet connection
update(True)
|
<commit_before><commit_msg>Test - add commands test folder, first test - update command<commit_after># Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from project_generator.commands.update import update
class TestProject(TestCase):
"""test things related to the Project class"""
def setUp(self):
pass
def tearDown(self):
pass
def test_update(self):
# force update. This assumes that repository is alive and that travis has
# internet connection
update(True)
|
|
547fb9e7290295848f81e62d26483b2576fe720f
|
skan/test/test_nx.py
|
skan/test/test_nx.py
|
import os, sys
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from skan import nx
rundir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(rundir)
from skan._testdata import tinycycle, skeleton1, skeleton2
def test_tiny_cycle():
g, degimg, skel_labels = nx.skeleton_to_nx(tinycycle)
expected_edges = [(1, 2), (1, 3), (2, 4), (3, 4)]
assert sorted(g.edges()) == expected_edges
assert_almost_equal([g[a][b]['weight'] for a, b in g.edges()], np.sqrt(2))
expected_degrees = np.array([[0, 2, 0], [2, 0, 2], [0, 2, 0]])
assert_equal(degimg, expected_degrees)
assert all(g.node[n]['type'] == 'path' for n in g)
def test_skeleton1_stats():
g = nx.skeleton_to_nx(skeleton1)[0]
stats = nx.branch_statistics(g)
assert_equal(stats.shape, (4, 4))
keys = map(tuple, stats[:, :2].astype(int))
dists = stats[:, 2]
types = stats[:, 3].astype(int)
ids2dist = dict(zip(keys, dists))
assert (13, 8) in ids2dist
assert (8, 13) in ids2dist
d0, d1 = sorted((ids2dist[(13, 8)], ids2dist[(8, 13)]))
assert_almost_equal(d0, 1 + np.sqrt(2))
assert_almost_equal(d1, 5*d0)
assert_equal(np.bincount(types), [0, 2, 2])
assert_almost_equal(np.unique(dists), [d0, 2 + np.sqrt(2), d1])
def test_3skeletons():
df = nx.summarise(skeleton2)
assert_almost_equal(np.unique(df['euclidean-distance']),
np.sqrt([5, 10]))
assert_equal(np.unique(df['skeleton-id']), [0, 1])
assert_equal(np.bincount(df['branch-type']), [0, 4, 4])
|
Add tests for nx backend
|
Add tests for nx backend
|
Python
|
bsd-3-clause
|
jni/skan
|
Add tests for nx backend
|
import os, sys
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from skan import nx
rundir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(rundir)
from skan._testdata import tinycycle, skeleton1, skeleton2
def test_tiny_cycle():
g, degimg, skel_labels = nx.skeleton_to_nx(tinycycle)
expected_edges = [(1, 2), (1, 3), (2, 4), (3, 4)]
assert sorted(g.edges()) == expected_edges
assert_almost_equal([g[a][b]['weight'] for a, b in g.edges()], np.sqrt(2))
expected_degrees = np.array([[0, 2, 0], [2, 0, 2], [0, 2, 0]])
assert_equal(degimg, expected_degrees)
assert all(g.node[n]['type'] == 'path' for n in g)
def test_skeleton1_stats():
g = nx.skeleton_to_nx(skeleton1)[0]
stats = nx.branch_statistics(g)
assert_equal(stats.shape, (4, 4))
keys = map(tuple, stats[:, :2].astype(int))
dists = stats[:, 2]
types = stats[:, 3].astype(int)
ids2dist = dict(zip(keys, dists))
assert (13, 8) in ids2dist
assert (8, 13) in ids2dist
d0, d1 = sorted((ids2dist[(13, 8)], ids2dist[(8, 13)]))
assert_almost_equal(d0, 1 + np.sqrt(2))
assert_almost_equal(d1, 5*d0)
assert_equal(np.bincount(types), [0, 2, 2])
assert_almost_equal(np.unique(dists), [d0, 2 + np.sqrt(2), d1])
def test_3skeletons():
df = nx.summarise(skeleton2)
assert_almost_equal(np.unique(df['euclidean-distance']),
np.sqrt([5, 10]))
assert_equal(np.unique(df['skeleton-id']), [0, 1])
assert_equal(np.bincount(df['branch-type']), [0, 4, 4])
|
<commit_before><commit_msg>Add tests for nx backend<commit_after>
|
import os, sys
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from skan import nx
rundir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(rundir)
from skan._testdata import tinycycle, skeleton1, skeleton2
def test_tiny_cycle():
g, degimg, skel_labels = nx.skeleton_to_nx(tinycycle)
expected_edges = [(1, 2), (1, 3), (2, 4), (3, 4)]
assert sorted(g.edges()) == expected_edges
assert_almost_equal([g[a][b]['weight'] for a, b in g.edges()], np.sqrt(2))
expected_degrees = np.array([[0, 2, 0], [2, 0, 2], [0, 2, 0]])
assert_equal(degimg, expected_degrees)
assert all(g.node[n]['type'] == 'path' for n in g)
def test_skeleton1_stats():
g = nx.skeleton_to_nx(skeleton1)[0]
stats = nx.branch_statistics(g)
assert_equal(stats.shape, (4, 4))
keys = map(tuple, stats[:, :2].astype(int))
dists = stats[:, 2]
types = stats[:, 3].astype(int)
ids2dist = dict(zip(keys, dists))
assert (13, 8) in ids2dist
assert (8, 13) in ids2dist
d0, d1 = sorted((ids2dist[(13, 8)], ids2dist[(8, 13)]))
assert_almost_equal(d0, 1 + np.sqrt(2))
assert_almost_equal(d1, 5*d0)
assert_equal(np.bincount(types), [0, 2, 2])
assert_almost_equal(np.unique(dists), [d0, 2 + np.sqrt(2), d1])
def test_3skeletons():
df = nx.summarise(skeleton2)
assert_almost_equal(np.unique(df['euclidean-distance']),
np.sqrt([5, 10]))
assert_equal(np.unique(df['skeleton-id']), [0, 1])
assert_equal(np.bincount(df['branch-type']), [0, 4, 4])
|
Add tests for nx backendimport os, sys
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from skan import nx
rundir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(rundir)
from skan._testdata import tinycycle, skeleton1, skeleton2
def test_tiny_cycle():
g, degimg, skel_labels = nx.skeleton_to_nx(tinycycle)
expected_edges = [(1, 2), (1, 3), (2, 4), (3, 4)]
assert sorted(g.edges()) == expected_edges
assert_almost_equal([g[a][b]['weight'] for a, b in g.edges()], np.sqrt(2))
expected_degrees = np.array([[0, 2, 0], [2, 0, 2], [0, 2, 0]])
assert_equal(degimg, expected_degrees)
assert all(g.node[n]['type'] == 'path' for n in g)
def test_skeleton1_stats():
g = nx.skeleton_to_nx(skeleton1)[0]
stats = nx.branch_statistics(g)
assert_equal(stats.shape, (4, 4))
keys = map(tuple, stats[:, :2].astype(int))
dists = stats[:, 2]
types = stats[:, 3].astype(int)
ids2dist = dict(zip(keys, dists))
assert (13, 8) in ids2dist
assert (8, 13) in ids2dist
d0, d1 = sorted((ids2dist[(13, 8)], ids2dist[(8, 13)]))
assert_almost_equal(d0, 1 + np.sqrt(2))
assert_almost_equal(d1, 5*d0)
assert_equal(np.bincount(types), [0, 2, 2])
assert_almost_equal(np.unique(dists), [d0, 2 + np.sqrt(2), d1])
def test_3skeletons():
df = nx.summarise(skeleton2)
assert_almost_equal(np.unique(df['euclidean-distance']),
np.sqrt([5, 10]))
assert_equal(np.unique(df['skeleton-id']), [0, 1])
assert_equal(np.bincount(df['branch-type']), [0, 4, 4])
|
<commit_before><commit_msg>Add tests for nx backend<commit_after>import os, sys
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from skan import nx
rundir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(rundir)
from skan._testdata import tinycycle, skeleton1, skeleton2
def test_tiny_cycle():
g, degimg, skel_labels = nx.skeleton_to_nx(tinycycle)
expected_edges = [(1, 2), (1, 3), (2, 4), (3, 4)]
assert sorted(g.edges()) == expected_edges
assert_almost_equal([g[a][b]['weight'] for a, b in g.edges()], np.sqrt(2))
expected_degrees = np.array([[0, 2, 0], [2, 0, 2], [0, 2, 0]])
assert_equal(degimg, expected_degrees)
assert all(g.node[n]['type'] == 'path' for n in g)
def test_skeleton1_stats():
g = nx.skeleton_to_nx(skeleton1)[0]
stats = nx.branch_statistics(g)
assert_equal(stats.shape, (4, 4))
keys = map(tuple, stats[:, :2].astype(int))
dists = stats[:, 2]
types = stats[:, 3].astype(int)
ids2dist = dict(zip(keys, dists))
assert (13, 8) in ids2dist
assert (8, 13) in ids2dist
d0, d1 = sorted((ids2dist[(13, 8)], ids2dist[(8, 13)]))
assert_almost_equal(d0, 1 + np.sqrt(2))
assert_almost_equal(d1, 5*d0)
assert_equal(np.bincount(types), [0, 2, 2])
assert_almost_equal(np.unique(dists), [d0, 2 + np.sqrt(2), d1])
def test_3skeletons():
df = nx.summarise(skeleton2)
assert_almost_equal(np.unique(df['euclidean-distance']),
np.sqrt([5, 10]))
assert_equal(np.unique(df['skeleton-id']), [0, 1])
assert_equal(np.bincount(df['branch-type']), [0, 4, 4])
|
|
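Aside (a sketch inferred by the editor, not part of the commit records): the nx tests above import tinycycle from skan._testdata without showing it; from the expected degree image [[0, 2, 0], [2, 0, 2], [0, 2, 0]] in test_tiny_cycle one can infer a minimal four-pixel ring, roughly:
import numpy as np
# Inferred, not copied from skan._testdata: every skeleton pixel has exactly
# two neighbours, which a 3x3 ring with an empty centre provides.
tinycycle = np.array([[0, 1, 0],
                      [1, 0, 1],
                      [0, 1, 0]], dtype=bool)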
ff23ae7705aa2841ff92d273d4e4851e3b7411c5
|
ws-tests/test_invalid_study_put.py
|
ws-tests/test_invalid_study_put.py
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
# this makes it easier to test concurrent pushes to different branches
if len(sys.argv) > 1:
study_id = sys.argv[1]
else:
study_id = 1003
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/%s' % study_id
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
Add a test for invalid PUTs which do not have a valid auth_token
|
Add a test for invalid PUTs which do not have a valid auth_token
|
Python
|
bsd-2-clause
|
OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api,OpenTreeOfLife/phylesystem-api
|
Add a test for invalid PUTs which do not have a valid auth_token
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
# this makes it easier to test concurrent pushes to different branches
if len(sys.argv) > 1:
study_id = sys.argv[1]
else:
study_id = 1003
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/%s' % study_id
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for invalid PUTs which do not have a valid auth_token<commit_after>
|
#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
# this makes it easier to test concurrent pushes to different branches
if len(sys.argv) > 1:
study_id = sys.argv[1]
else:
study_id = 1003
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/%s' % study_id
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
Add a test for invalid PUTs which do not have a valid auth_token#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
# this makes it easier to test concurrent pushes to different branches
if len(sys.argv) > 1:
study_id = sys.argv[1]
else:
study_id = 1003
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/%s' % study_id
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
<commit_before><commit_msg>Add a test for invalid PUTs which do not have a valid auth_token<commit_after>#!/usr/bin/env python
from opentreetesting import test_http_json_method, config
import datetime
import codecs
import json
import sys
import os
# this makes it easier to test concurrent pushes to different branches
if len(sys.argv) > 1:
study_id = sys.argv[1]
else:
study_id = 1003
DOMAIN = config('host', 'apihost')
SUBMIT_URI = DOMAIN + '/v1/study/%s' % study_id
inpf = codecs.open('../nexson-validator/tests/single/input/1003.json', 'rU', encoding='utf-8')
n = json.load(inpf)
# refresh a timestamp so that the test generates a commit
m = n['nexml']['meta']
short_list = [i for i in m if i.get('@property') == 'bogus_timestamp']
if short_list:
el = short_list[0]
else:
el = {'@property': 'bogus_timestamp', '@xsi:type': 'nex:LiteralMeta'}
m.append(el)
el['$'] = datetime.datetime.utcnow().isoformat()
data = { 'nexson' : n,
'auth_token': 'bogus'
}
if test_http_json_method(SUBMIT_URI,
'PUT',
data=data,
expected_status=400):
sys.exit(0)
sys.exit(1)
|
|
24d4168f34a50814a72886acde1a99d063324744
|
unit_tests/magic_mock_example.py
|
unit_tests/magic_mock_example.py
|
from mock import MagicMock
from calculator import Calculator
thing = Calculator()
thing.mymethod = MagicMock(return_value = {'x':'X'})
thing.mymethod(1,2,3,4,5,6,7,8,9,0, k='val')
thing.mymethod.assert_called_with(1,2,3,4,5,6,7,8,9,0, k='val')
|
Add new example with magic mock
|
Add new example with magic mock
|
Python
|
mit
|
rolandovillca/python_introduction_basic,rolandovillca/python_basic_introduction,rolandovillca/python_basis,rolandovillca/python_basic_concepts
|
Add new example with magic mock
|
from mock import MagicMock
from calculator import Calculator
thing = Calculator()
thing.mymethod = MagicMock(return_value = {'x':'X'})
thing.mymethod(1,2,3,4,5,6,7,8,9,0, k='val')
thing.mymethod.assert_called_with(1,2,3,4,5,6,7,8,9,0, k='val')
|
<commit_before><commit_msg>Add new example with maginc mock<commit_after>
|
from mock import MagicMock
from calculator import Calculator
thing = Calculator()
thing.mymethod = MagicMock(return_value = {'x':'X'})
thing.mymethod(1,2,3,4,5,6,7,8,9,0, k='val')
thing.mymethod.assert_called_with(1,2,3,4,5,6,7,8,9,0, k='val')
|
Add new example with magic mockfrom mock import MagicMock
from calculator import Calculator
thing = Calculator()
thing.mymethod = MagicMock(return_value = {'x':'X'})
thing.mymethod(1,2,3,4,5,6,7,8,9,0, k='val')
thing.mymethod.assert_called_with(1,2,3,4,5,6,7,8,9,0, k='val')
|
<commit_before><commit_msg>Add new example with magic mock<commit_after>from mock import MagicMock
from calculator import Calculator
thing = Calculator()
thing.mymethod = MagicMock(return_value = {'x':'X'})
thing.mymethod(1,2,3,4,5,6,7,8,9,0, k='val')
thing.mymethod.assert_called_with(1,2,3,4,5,6,7,8,9,0, k='val')
|
|
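Aside (a sketch with illustrative names, not part of the commit records): the magic-mock example above pins a return value and asserts on the exact call; the standard library's unittest.mock extends the same pattern with autospec'd mocks, which also reject calls that do not match the real signature:
from unittest.mock import create_autospec
def add(a, b):
    return a + b
# create_autospec keeps the mock's call signature in sync with the real function
mock_add = create_autospec(add, return_value=42)
assert mock_add(1, 2) == 42
mock_add.assert_called_once_with(1, 2)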
a56400f6b503aaba19fa5a1969831db9c6b0552d
|
tests/framework/test_bmi_ugrid.py
|
tests/framework/test_bmi_ugrid.py
|
"""Unit tests for the pymt.framwork.bmi_ugrid module."""
import numpy as np
import xarray as xr
from pymt.framework.bmi_ugrid import Scalar, Vector
from pymt.framework.bmi_bridge import _BmiCap
grid_id = 0
class TestScalar:
def get_grid_rank(self, grid_id):
return 0
class ScalarBmi(_BmiCap):
_cls = TestScalar
def test_scalar_grid():
"""Testing creating a scalar grid."""
bmi = ScalarBmi()
grid = Scalar(bmi, grid_id)
assert grid.ndim == 0
assert grid.metadata["type"] == "scalar"
assert isinstance(grid, xr.Dataset) == True
class TestVector:
def get_grid_rank(self, grid_id):
return 1
class VectorBmi(_BmiCap):
_cls = TestVector
def test_vector_grid():
"""Testing creating a vector grid."""
bmi = VectorBmi()
grid = Vector(bmi, grid_id)
assert grid.ndim == 1
assert grid.metadata["type"] == "vector"
assert isinstance(grid, xr.Dataset) == True
|
Write unit tests for scalar and vector classes
|
Write unit tests for scalar and vector classes
|
Python
|
mit
|
csdms/pymt
|
Write unit tests for scalar and vector classes
|
"""Unit tests for the pymt.framwork.bmi_ugrid module."""
import numpy as np
import xarray as xr
from pymt.framework.bmi_ugrid import Scalar, Vector
from pymt.framework.bmi_bridge import _BmiCap
grid_id = 0
class TestScalar:
def get_grid_rank(self, grid_id):
return 0
class ScalarBmi(_BmiCap):
_cls = TestScalar
def test_scalar_grid():
"""Testing creating a scalar grid."""
bmi = ScalarBmi()
grid = Scalar(bmi, grid_id)
assert grid.ndim == 0
assert grid.metadata["type"] == "scalar"
assert isinstance(grid, xr.Dataset) == True
class TestVector:
def get_grid_rank(self, grid_id):
return 1
class VectorBmi(_BmiCap):
_cls = TestVector
def test_vector_grid():
"""Testing creating a vector grid."""
bmi = VectorBmi()
grid = Vector(bmi, grid_id)
assert grid.ndim == 1
assert grid.metadata["type"] == "vector"
assert isinstance(grid, xr.Dataset) == True
|
<commit_before><commit_msg>Write unit tests for scalar and vector classes<commit_after>
|
"""Unit tests for the pymt.framwork.bmi_ugrid module."""
import numpy as np
import xarray as xr
from pymt.framework.bmi_ugrid import Scalar, Vector
from pymt.framework.bmi_bridge import _BmiCap
grid_id = 0
class TestScalar:
def get_grid_rank(self, grid_id):
return 0
class ScalarBmi(_BmiCap):
_cls = TestScalar
def test_scalar_grid():
"""Testing creating a scalar grid."""
bmi = ScalarBmi()
grid = Scalar(bmi, grid_id)
assert grid.ndim == 0
assert grid.metadata["type"] == "scalar"
assert isinstance(grid, xr.Dataset) == True
class TestVector:
def get_grid_rank(self, grid_id):
return 1
class VectorBmi(_BmiCap):
_cls = TestVector
def test_vector_grid():
"""Testing creating a vector grid."""
bmi = VectorBmi()
grid = Vector(bmi, grid_id)
assert grid.ndim == 1
assert grid.metadata["type"] == "vector"
assert isinstance(grid, xr.Dataset) == True
|
Write unit tests for scalar and vector classes"""Unit tests for the pymt.framework.bmi_ugrid module."""
import numpy as np
import xarray as xr
from pymt.framework.bmi_ugrid import Scalar, Vector
from pymt.framework.bmi_bridge import _BmiCap
grid_id = 0
class TestScalar:
def get_grid_rank(self, grid_id):
return 0
class ScalarBmi(_BmiCap):
_cls = TestScalar
def test_scalar_grid():
"""Testing creating a scalar grid."""
bmi = ScalarBmi()
grid = Scalar(bmi, grid_id)
assert grid.ndim == 0
assert grid.metadata["type"] == "scalar"
assert isinstance(grid, xr.Dataset) == True
class TestVector:
def get_grid_rank(self, grid_id):
return 1
class VectorBmi(_BmiCap):
_cls = TestVector
def test_vector_grid():
"""Testing creating a vector grid."""
bmi = VectorBmi()
grid = Vector(bmi, grid_id)
assert grid.ndim == 1
assert grid.metadata["type"] == "vector"
assert isinstance(grid, xr.Dataset) == True
|
<commit_before><commit_msg>Write unit tests for scalar and vector classes<commit_after>"""Unit tests for the pymt.framework.bmi_ugrid module."""
import numpy as np
import xarray as xr
from pymt.framework.bmi_ugrid import Scalar, Vector
from pymt.framework.bmi_bridge import _BmiCap
grid_id = 0
class TestScalar:
def get_grid_rank(self, grid_id):
return 0
class ScalarBmi(_BmiCap):
_cls = TestScalar
def test_scalar_grid():
"""Testing creating a scalar grid."""
bmi = ScalarBmi()
grid = Scalar(bmi, grid_id)
assert grid.ndim == 0
assert grid.metadata["type"] == "scalar"
assert isinstance(grid, xr.Dataset) == True
class TestVector:
def get_grid_rank(self, grid_id):
return 1
class VectorBmi(_BmiCap):
_cls = TestVector
def test_vector_grid():
"""Testing creating a vector grid."""
bmi = VectorBmi()
grid = Vector(bmi, grid_id)
assert grid.ndim == 1
assert grid.metadata["type"] == "vector"
assert isinstance(grid, xr.Dataset) == True
|
|
05de65933792140703bcdd14f5c7f7239251e1b1
|
thinc/tests/unit/test_loss.py
|
thinc/tests/unit/test_loss.py
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from mock import MagicMock
from numpy import ndarray
from ...loss import categorical_crossentropy
@pytest.mark.parametrize('shape,labels', [([100, 100, 100], [-1, -1, -1])])
def test_loss(shape, labels):
scores = MagicMock(spec=ndarray, shape=shape)
loss = categorical_crossentropy(scores, labels)
assert len(loss) == 2
|
Add test for loss function
|
Add test for loss function
|
Python
|
mit
|
spacy-io/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,explosion/thinc
|
Add test for loss function
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from mock import MagicMock
from numpy import ndarray
from ...loss import categorical_crossentropy
@pytest.mark.parametrize('shape,labels', [([100, 100, 100], [-1, -1, -1])])
def test_loss(shape, labels):
scores = MagicMock(spec=ndarray, shape=shape)
loss = categorical_crossentropy(scores, labels)
assert len(loss) == 2
|
<commit_before><commit_msg>Add test for loss function<commit_after>
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from mock import MagicMock
from numpy import ndarray
from ...loss import categorical_crossentropy
@pytest.mark.parametrize('shape,labels', [([100, 100, 100], [-1, -1, -1])])
def test_loss(shape, labels):
scores = MagicMock(spec=ndarray, shape=shape)
loss = categorical_crossentropy(scores, labels)
assert len(loss) == 2
|
Add test for loss function# coding: utf-8
from __future__ import unicode_literals
import pytest
from mock import MagicMock
from numpy import ndarray
from ...loss import categorical_crossentropy
@pytest.mark.parametrize('shape,labels', [([100, 100, 100], [-1, -1, -1])])
def test_loss(shape, labels):
scores = MagicMock(spec=ndarray, shape=shape)
loss = categorical_crossentropy(scores, labels)
assert len(loss) == 2
|
<commit_before><commit_msg>Add test for loss function<commit_after># coding: utf-8
from __future__ import unicode_literals
import pytest
from mock import MagicMock
from numpy import ndarray
from ...loss import categorical_crossentropy
@pytest.mark.parametrize('shape,labels', [([100, 100, 100], [-1, -1, -1])])
def test_loss(shape, labels):
scores = MagicMock(spec=ndarray, shape=shape)
loss = categorical_crossentropy(scores, labels)
assert len(loss) == 2
|
|
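Aside (a toy sketch, not the thinc implementation): the loss test above only checks that categorical_crossentropy returns a two-element result for mocked scores and all-missing labels; a dense NumPy version of that (gradient, loss) contract, treating negative labels as missing, could look like:
import numpy as np
def toy_categorical_crossentropy(scores, labels):
    # softmax over the last axis
    probs = np.exp(scores - scores.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    target = np.zeros_like(probs)
    for i, label in enumerate(labels):
        if label >= 0:  # negative labels mean "no gold label"
            target[i, label] = 1.0
    d_scores = probs - target  # gradient of cross-entropy w.r.t. the logits
    loss = float(-(target * np.log(probs + 1e-12)).sum())
    return d_scores, loss
grad, loss = toy_categorical_crossentropy(np.zeros((3, 4)), [-1, -1, -1])  # all labels missing -> loss 0.0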
8f30914f7c16aa56db20c612a382ee5ea5c67a5e
|
tools/debugging/migrate_db.py
|
tools/debugging/migrate_db.py
|
"""
"""
import click
import gevent
import structlog
from raiden.exceptions import InvalidDBData, RaidenDBUpgradeError
from raiden.storage import serialize, sqlite
from raiden.utils.upgrades import UpgradeManager
log = structlog.get_logger(__name__)
database_path = ""
def upgrade_db(current_version: int, new_version: int):
log.debug(f'Upgrading database from v{current_version} to v{new_version}')
# Prevent unique constraint error in DB when recording raiden "runs"
gevent.sleep(1)
manager = UpgradeManager(
db_filename=database_path,
current_version=current_version,
new_version=new_version,
)
try:
manager.run()
except (RaidenDBUpgradeError, InvalidDBData) as e:
manager.restore_backup()
log.error(f'Failed to upgrade database: {str(e)}')
def migrate_db(storage):
storage.register_upgrade_callback(upgrade_db)
storage.maybe_upgrade()
@click.command(help=__doc__)
@click.argument(
'db-file',
type=click.Path(exists=True),
)
def main(db_file):
global database_path
database_path = db_file
migrate_db(
storage=sqlite.SQLiteStorage(db_file, serialize.JSONSerializer()),
)
if __name__ == "__main__":
main()
|
Add DB upgrade debugging script
|
Add DB upgrade debugging script
[skip ci]
|
Python
|
mit
|
hackaugusto/raiden,hackaugusto/raiden
|
Add DB upgrade debugging script
[skip ci]
|
"""
"""
import click
import gevent
import structlog
from raiden.exceptions import InvalidDBData, RaidenDBUpgradeError
from raiden.storage import serialize, sqlite
from raiden.utils.upgrades import UpgradeManager
log = structlog.get_logger(__name__)
database_path = ""
def upgrade_db(current_version: int, new_version: int):
log.debug(f'Upgrading database from v{current_version} to v{new_version}')
# Prevent unique constraint error in DB when recording raiden "runs"
gevent.sleep(1)
manager = UpgradeManager(
db_filename=database_path,
current_version=current_version,
new_version=new_version,
)
try:
manager.run()
except (RaidenDBUpgradeError, InvalidDBData) as e:
manager.restore_backup()
log.error(f'Failed to upgrade database: {str(e)}')
def migrate_db(storage):
storage.register_upgrade_callback(upgrade_db)
storage.maybe_upgrade()
@click.command(help=__doc__)
@click.argument(
'db-file',
type=click.Path(exists=True),
)
def main(db_file):
global database_path
database_path = db_file
migrate_db(
storage=sqlite.SQLiteStorage(db_file, serialize.JSONSerializer()),
)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add DB upgrade debugging script
[skip ci]<commit_after>
|
"""
"""
import click
import gevent
import structlog
from raiden.exceptions import InvalidDBData, RaidenDBUpgradeError
from raiden.storage import serialize, sqlite
from raiden.utils.upgrades import UpgradeManager
log = structlog.get_logger(__name__)
database_path = ""
def upgrade_db(current_version: int, new_version: int):
log.debug(f'Upgrading database from v{current_version} to v{new_version}')
# Prevent unique constraint error in DB when recording raiden "runs"
gevent.sleep(1)
manager = UpgradeManager(
db_filename=database_path,
current_version=current_version,
new_version=new_version,
)
try:
manager.run()
except (RaidenDBUpgradeError, InvalidDBData) as e:
manager.restore_backup()
log.error(f'Failed to upgrade database: {str(e)}')
def migrate_db(storage):
storage.register_upgrade_callback(upgrade_db)
storage.maybe_upgrade()
@click.command(help=__doc__)
@click.argument(
'db-file',
type=click.Path(exists=True),
)
def main(db_file):
global database_path
database_path = db_file
migrate_db(
storage=sqlite.SQLiteStorage(db_file, serialize.JSONSerializer()),
)
if __name__ == "__main__":
main()
|
Add DB upgrade debugging script
[skip ci]"""
"""
import click
import gevent
import structlog
from raiden.exceptions import InvalidDBData, RaidenDBUpgradeError
from raiden.storage import serialize, sqlite
from raiden.utils.upgrades import UpgradeManager
log = structlog.get_logger(__name__)
database_path = ""
def upgrade_db(current_version: int, new_version: int):
log.debug(f'Upgrading database from v{current_version} to v{new_version}')
# Prevent unique constraint error in DB when recording raiden "runs"
gevent.sleep(1)
manager = UpgradeManager(
db_filename=database_path,
current_version=current_version,
new_version=new_version,
)
try:
manager.run()
except (RaidenDBUpgradeError, InvalidDBData) as e:
manager.restore_backup()
log.error(f'Failed to upgrade database: {str(e)}')
def migrate_db(storage):
storage.register_upgrade_callback(upgrade_db)
storage.maybe_upgrade()
@click.command(help=__doc__)
@click.argument(
'db-file',
type=click.Path(exists=True),
)
def main(db_file):
global database_path
database_path = db_file
migrate_db(
storage=sqlite.SQLiteStorage(db_file, serialize.JSONSerializer()),
)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add DB upgrade debugging script
[skip ci]<commit_after>"""
"""
import click
import gevent
import structlog
from raiden.exceptions import InvalidDBData, RaidenDBUpgradeError
from raiden.storage import serialize, sqlite
from raiden.utils.upgrades import UpgradeManager
log = structlog.get_logger(__name__)
database_path = ""
def upgrade_db(current_version: int, new_version: int):
log.debug(f'Upgrading database from v{current_version} to v{new_version}')
# Prevent unique constraint error in DB when recording raiden "runs"
gevent.sleep(1)
manager = UpgradeManager(
db_filename=database_path,
current_version=current_version,
new_version=new_version,
)
try:
manager.run()
except (RaidenDBUpgradeError, InvalidDBData) as e:
manager.restore_backup()
log.error(f'Failed to upgrade database: {str(e)}')
def migrate_db(storage):
storage.register_upgrade_callback(upgrade_db)
storage.maybe_upgrade()
@click.command(help=__doc__)
@click.argument(
'db-file',
type=click.Path(exists=True),
)
def main(db_file):
global database_path
database_path = db_file
migrate_db(
storage=sqlite.SQLiteStorage(db_file, serialize.JSONSerializer()),
)
if __name__ == "__main__":
main()
|
|
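Aside (a sketch; the import path and database filename are assumptions, not taken from the record): because the migration script above is a click command, it can be exercised without a shell through click's test runner, given a real state database on disk:
from click.testing import CliRunner
from migrate_db import main  # assumes the script above is importable under this name
runner = CliRunner()
result = runner.invoke(main, ["raiden_state.db"])  # the path must exist, per click.Path(exists=True)
print(result.exit_code, result.output)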
5a67efecccb91f68efbe7b14406a14c8151a2e9e
|
spacy/tests/parser/test_preset_sbd.py
|
spacy/tests/parser/test_preset_sbd.py
|
'''Test that the parser respects preset sentence boundaries.'''
import pytest
from thinc.neural.optimizers import Adam
from thinc.neural.ops import NumpyOps
from ...attrs import NORM
from ...gold import GoldParse
from ...vocab import Vocab
from ...tokens import Doc
from ...pipeline import NeuralDependencyParser
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
@pytest.fixture
def parser(vocab):
parser = NeuralDependencyParser(vocab)
parser.cfg['token_vector_width'] = 4
parser.cfg['hidden_width'] = 32
#parser.add_label('right')
parser.add_label('left')
parser.begin_training([], **parser.cfg)
sgd = Adam(NumpyOps(), 0.001)
for i in range(10):
losses = {}
doc = Doc(vocab, words=['a', 'b', 'c', 'd'])
gold = GoldParse(doc, heads=[1, 1, 3, 3],
deps=['left', 'ROOT', 'left', 'ROOT'])
parser.update([doc], [gold], sgd=sgd, losses=losses)
return parser
def test_no_sentences(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = False
doc[2].sent_start = True
doc[3].sent_start = False
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1_2(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
def test_sents_1_3(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 4
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = False
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
|
Add tests for sentence segmentation presetting
|
Add tests for sentence segmentation presetting
|
Python
|
mit
|
explosion/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,spacy-io/spaCy,aikramer2/spaCy,honnibal/spaCy,recognai/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy
|
Add tests for sentence segmentation presetting
|
'''Test that the parser respects preset sentence boundaries.'''
import pytest
from thinc.neural.optimizers import Adam
from thinc.neural.ops import NumpyOps
from ...attrs import NORM
from ...gold import GoldParse
from ...vocab import Vocab
from ...tokens import Doc
from ...pipeline import NeuralDependencyParser
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
@pytest.fixture
def parser(vocab):
parser = NeuralDependencyParser(vocab)
parser.cfg['token_vector_width'] = 4
parser.cfg['hidden_width'] = 32
#parser.add_label('right')
parser.add_label('left')
parser.begin_training([], **parser.cfg)
sgd = Adam(NumpyOps(), 0.001)
for i in range(10):
losses = {}
doc = Doc(vocab, words=['a', 'b', 'c', 'd'])
gold = GoldParse(doc, heads=[1, 1, 3, 3],
deps=['left', 'ROOT', 'left', 'ROOT'])
parser.update([doc], [gold], sgd=sgd, losses=losses)
return parser
def test_no_sentences(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = False
doc[2].sent_start = True
doc[3].sent_start = False
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1_2(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
def test_sents_1_3(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 4
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = False
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
|
<commit_before><commit_msg>Add tests for sentence segmentation presetting<commit_after>
|
'''Test that the parser respects preset sentence boundaries.'''
import pytest
from thinc.neural.optimizers import Adam
from thinc.neural.ops import NumpyOps
from ...attrs import NORM
from ...gold import GoldParse
from ...vocab import Vocab
from ...tokens import Doc
from ...pipeline import NeuralDependencyParser
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
@pytest.fixture
def parser(vocab):
parser = NeuralDependencyParser(vocab)
parser.cfg['token_vector_width'] = 4
parser.cfg['hidden_width'] = 32
#parser.add_label('right')
parser.add_label('left')
parser.begin_training([], **parser.cfg)
sgd = Adam(NumpyOps(), 0.001)
for i in range(10):
losses = {}
doc = Doc(vocab, words=['a', 'b', 'c', 'd'])
gold = GoldParse(doc, heads=[1, 1, 3, 3],
deps=['left', 'ROOT', 'left', 'ROOT'])
parser.update([doc], [gold], sgd=sgd, losses=losses)
return parser
def test_no_sentences(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = False
doc[2].sent_start = True
doc[3].sent_start = False
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1_2(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
def test_sents_1_3(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 4
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = False
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
|
Add tests for sentence segmentation presetting'''Test that the parser respects preset sentence boundaries.'''
import pytest
from thinc.neural.optimizers import Adam
from thinc.neural.ops import NumpyOps
from ...attrs import NORM
from ...gold import GoldParse
from ...vocab import Vocab
from ...tokens import Doc
from ...pipeline import NeuralDependencyParser
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
@pytest.fixture
def parser(vocab):
parser = NeuralDependencyParser(vocab)
parser.cfg['token_vector_width'] = 4
parser.cfg['hidden_width'] = 32
#parser.add_label('right')
parser.add_label('left')
parser.begin_training([], **parser.cfg)
sgd = Adam(NumpyOps(), 0.001)
for i in range(10):
losses = {}
doc = Doc(vocab, words=['a', 'b', 'c', 'd'])
gold = GoldParse(doc, heads=[1, 1, 3, 3],
deps=['left', 'ROOT', 'left', 'ROOT'])
parser.update([doc], [gold], sgd=sgd, losses=losses)
return parser
def test_no_sentences(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = False
doc[2].sent_start = True
doc[3].sent_start = False
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1_2(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
def test_sents_1_3(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 4
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = False
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
|
<commit_before><commit_msg>Add tests for sentence segmentation presetting<commit_after>'''Test that the parser respects preset sentence boundaries.'''
import pytest
from thinc.neural.optimizers import Adam
from thinc.neural.ops import NumpyOps
from ...attrs import NORM
from ...gold import GoldParse
from ...vocab import Vocab
from ...tokens import Doc
from ...pipeline import NeuralDependencyParser
@pytest.fixture
def vocab():
return Vocab(lex_attr_getters={NORM: lambda s: s})
@pytest.fixture
def parser(vocab):
parser = NeuralDependencyParser(vocab)
parser.cfg['token_vector_width'] = 4
parser.cfg['hidden_width'] = 32
#parser.add_label('right')
parser.add_label('left')
parser.begin_training([], **parser.cfg)
sgd = Adam(NumpyOps(), 0.001)
for i in range(10):
losses = {}
doc = Doc(vocab, words=['a', 'b', 'c', 'd'])
gold = GoldParse(doc, heads=[1, 1, 3, 3],
deps=['left', 'ROOT', 'left', 'ROOT'])
parser.update([doc], [gold], sgd=sgd, losses=losses)
return parser
def test_no_sentences(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = False
doc[2].sent_start = True
doc[3].sent_start = False
doc = parser(doc)
assert len(list(doc.sents)) == 2
def test_sents_1_2(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
def test_sents_1_3(parser):
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 4
doc = Doc(parser.vocab, words=['a', 'b', 'c', 'd'])
doc[1].sent_start = True
doc[2].sent_start = False
doc[3].sent_start = True
doc = parser(doc)
assert len(list(doc.sents)) == 3
|
|
ad425bed540e3bfdb5e825dc58eb96cea3f04903
|
tests/test_ruby.py
|
tests/test_ruby.py
|
import json
from lints.ruby import Ruby
def test_ruby():
msg = [
'app/models/message:50: syntax error, unexpected end-of-input, expecting keyword_end',
]
res = Ruby().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "50",
"bufnr": 1,
"enum": 1,
"text": '[ruby]syntax error, unexpected end-of-input, expecting keyword_end',
"type": "E",
}
|
Add test for ruby linter
|
Add test for ruby linter
|
Python
|
mit
|
maralla/vim-fixup,maralla/vim-linter,maralla/validator.vim,maralla/vim-linter,maralla/vim-fixup
|
Add test for ruby linter
|
import json
from lints.ruby import Ruby
def test_ruby():
msg = [
'app/models/message:50: syntax error, unexpected end-of-input, expecting keyword_end',
]
res = Ruby().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "50",
"bufnr": 1,
"enum": 1,
"text": '[ruby]syntax error, unexpected end-of-input, expecting keyword_end',
"type": "E",
}
|
<commit_before><commit_msg>Add test for ruby linter<commit_after>
|
import json
from lints.ruby import Ruby
def test_ruby():
msg = [
'app/models/message:50: syntax error, unexpected end-of-input, expecting keyword_end',
]
res = Ruby().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "50",
"bufnr": 1,
"enum": 1,
"text": '[ruby]syntax error, unexpected end-of-input, expecting keyword_end',
"type": "E",
}
|
Add test for ruby linterimport json
from lints.ruby import Ruby
def test_ruby():
msg = [
'app/models/message:50: syntax error, unexpected end-of-input, expecting keyword_end',
]
res = Ruby().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "50",
"bufnr": 1,
"enum": 1,
"text": '[ruby]syntax error, unexpected end-of-input, expecting keyword_end',
"type": "E",
}
|
<commit_before><commit_msg>Add test for ruby linter<commit_after>import json
from lints.ruby import Ruby
def test_ruby():
msg = [
'app/models/message:50: syntax error, unexpected end-of-input, expecting keyword_end',
]
res = Ruby().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "50",
"bufnr": 1,
"enum": 1,
"text": '[ruby]syntax error, unexpected end-of-input, expecting keyword_end',
"type": "E",
}
|
|
d395bd17d9f3776beb0cc5205e791d5be363d87d
|
Examples/BouncyBall/BouncyBall.py
|
Examples/BouncyBall/BouncyBall.py
|
#Imports
from tphysics import VerletCircle, Rectangle, Game
from Tkinter import TclError
#Create the game
g = Game("Bouncy Ball", 600, 600, "grey")
#Create the walls
walls = [Rectangle(-290, 0, 20, 600), Rectangle(290, 0, 20, 600), Rectangle(0, 290, 600, 20), Rectangle(0, -290, 600, 20)]
#Add the walls to the game
for w in walls:
g.add_shape(w)
#Create a bouncy ball
ball = VerletCircle(0, 0, 10, 10, 50)
g.add_shape(ball)
#Set the physics constants
elasticity = 1
friction = 0
gravity = 0.2
max_fall = 10
#Game loop
while True:
#Update the ball
ball.update()
#Implement gravity
if ball.getyspeed() > -max_fall:
#Change the y speed
ball.setyspeed(ball.getyspeed() - gravity)
#If the ball hits a side wall
for i in range(2):
if ball.collide(walls[i]) != 0:
#Bounce on the x
ball.bouncex(elasticity)
#If the ball hits a vertical wall
for i in range(2,4):
if ball.collide(walls[i]):
#Bounce on the y
ball.bouncey(elasticity, friction)
#If the window hasn't been closed update the game
try:
g.update()
except TclError:
print("Program closed successfully.")
break
|
Create a bouncy ball example that uses verlet integration.
|
Create a bouncy ball example that uses verlet integration.
|
Python
|
mit
|
thebillington/tphysics
|
Create a bouncy ball example that uses verlet integration.
|
#Imports
from tphysics import VerletCircle, Rectangle, Game
from Tkinter import TclError
#Create the game
g = Game("Bouncy Ball", 600, 600, "grey")
#Create the walls
walls = [Rectangle(-290, 0, 20, 600), Rectangle(290, 0, 20, 600), Rectangle(0, 290, 600, 20), Rectangle(0, -290, 600, 20)]
#Add the walls to the game
for w in walls:
g.add_shape(w)
#Create a bouncy ball
ball = VerletCircle(0, 0, 10, 10, 50)
g.add_shape(ball)
#Set the physics constants
elasticity = 1
friction = 0
gravity = 0.2
max_fall = 10
#Game loop
while True:
#Update the ball
ball.update()
#Implement gravity
if ball.getyspeed() > -max_fall:
#Change the y speed
ball.setyspeed(ball.getyspeed() - gravity)
#If the ball hits a side wall
for i in range(2):
if ball.collide(walls[i]) != 0:
#Bounce on the x
ball.bouncex(elasticity)
#If the ball hits a vertical wall
for i in range(2,4):
if ball.collide(walls[i]):
#Bounce on the y
ball.bouncey(elasticity, friction)
#If the window hasn't been closed update the game
try:
g.update()
except TclError:
print("Program closed successfully.")
break
|
<commit_before><commit_msg>Create a bouncy ball example that uses verlet integration.<commit_after>
|
#Imports
from tphysics import VerletCircle, Rectangle, Game
from Tkinter import TclError
#Create the game
g = Game("Bouncy Ball", 600, 600, "grey")
#Create the walls
walls = [Rectangle(-290, 0, 20, 600), Rectangle(290, 0, 20, 600), Rectangle(0, 290, 600, 20), Rectangle(0, -290, 600, 20)]
#Add the walls to the game
for w in walls:
g.add_shape(w)
#Create a bouncy ball
ball = VerletCircle(0, 0, 10, 10, 50)
g.add_shape(ball)
#Set the physics constants
elasticity = 1
friction = 0
gravity = 0.2
max_fall = 10
#Game loop
while True:
#Update the ball
ball.update()
#Implement gravity
if ball.getyspeed() > -max_fall:
#Change the y speed
ball.setyspeed(ball.getyspeed() - gravity)
#If the ball hits a side wall
for i in range(2):
if ball.collide(walls[i]) != 0:
#Bounce on the x
ball.bouncex(elasticity)
#If the ball hits a vertical wall
for i in range(2,4):
if ball.collide(walls[i]):
#Bounce on the y
ball.bouncey(elasticity, friction)
#If the window hasn't been closed update the game
try:
g.update()
except TclError:
print("Program closed successfully.")
break
|
Create a bouncy ball example that uses verlet integration.#Imports
from tphysics import VerletCircle, Rectangle, Game
from Tkinter import TclError
#Create the game
g = Game("Bouncy Ball", 600, 600, "grey")
#Create the walls
walls = [Rectangle(-290, 0, 20, 600), Rectangle(290, 0, 20, 600), Rectangle(0, 290, 600, 20), Rectangle(0, -290, 600, 20)]
#Add the walls to the game
for w in walls:
g.add_shape(w)
#Create a bouncy ball
ball = VerletCircle(0, 0, 10, 10, 50)
g.add_shape(ball)
#Set the physics constants
elasticity = 1
friction = 0
gravity = 0.2
max_fall = 10
#Game loop
while True:
#Update the ball
ball.update()
#Implement gravity
if ball.getyspeed() > -max_fall:
#Change the y speed
ball.setyspeed(ball.getyspeed() - gravity)
#If the ball hits a side wall
for i in range(2):
if ball.collide(walls[i]) != 0:
#Bounce on the x
ball.bouncex(elasticity)
#If the ball hits a vertical wall
for i in range(2,4):
if ball.collide(walls[i]):
#Bounce on the y
ball.bouncey(elasticity, friction)
#If the window hasn't been closed update the game
try:
g.update()
except TclError:
print("Program closed successfully.")
break
|
<commit_before><commit_msg>Create a bouncy ball example that uses verlet integration.<commit_after>#Imports
from tphysics import VerletCircle, Rectangle, Game
from Tkinter import TclError
#Create the game
g = Game("Bouncy Ball", 600, 600, "grey")
#Create the walls
walls = [Rectangle(-290, 0, 20, 600), Rectangle(290, 0, 20, 600), Rectangle(0, 290, 600, 20), Rectangle(0, -290, 600, 20)]
#Add the walls to the game
for w in walls:
g.add_shape(w)
#Create a bouncy ball
ball = VerletCircle(0, 0, 10, 10, 50)
g.add_shape(ball)
#Set the physics constants
elasticity = 1
friction = 0
gravity = 0.2
max_fall = 10
#Game loop
while True:
#Update the ball
ball.update()
#Implement gravity
if ball.getyspeed() > -max_fall:
#Change the y speed
ball.setyspeed(ball.getyspeed() - gravity)
#If the ball hits a side wall
for i in range(2):
if ball.collide(walls[i]) != 0:
#Bounce on the x
ball.bouncex(elasticity)
#If the ball hits a vertical wall
for i in range(2,4):
if ball.collide(walls[i]):
#Bounce on the y
ball.bouncey(elasticity, friction)
#If the window hasn't been closed update the game
try:
g.update()
except TclError:
print("Program closed successfully.")
break
|
|
12929fe96de4f7892856b72d86eb82217ad2972e
|
test/test_serve.py
|
test/test_serve.py
|
import unittest
import asyncio
import io
import multiprocessing
import urllib.request
import time
import grole
def simple_server():
app = grole.Grole()
@app.route('/')
def hello(env, req):
return 'Hello, World!'
app.run()
class TestServe(unittest.TestCase):
def test_simple(self):
p = multiprocessing.Process(target=simple_server)
p.start()
time.sleep(0.1)
with urllib.request.urlopen('http://localhost:1234') as response:
html = response.read()
self.assertEqual(html, b'Hello, World!')
p.terminate()
|
Add test of running the server
|
Add test of running the server
|
Python
|
mit
|
witchard/grole
|
Add test of running the server
|
import unittest
import asyncio
import io
import multiprocessing
import urllib.request
import time
import grole
def simple_server():
app = grole.Grole()
@app.route('/')
def hello(env, req):
return 'Hello, World!'
app.run()
class TestServe(unittest.TestCase):
def test_simple(self):
p = multiprocessing.Process(target=simple_server)
p.start()
time.sleep(0.1)
with urllib.request.urlopen('http://localhost:1234') as response:
html = response.read()
self.assertEqual(html, b'Hello, World!')
p.terminate()
|
<commit_before><commit_msg>Add test of running the server<commit_after>
|
import unittest
import asyncio
import io
import multiprocessing
import urllib.request
import time
import grole
def simple_server():
app = grole.Grole()
@app.route('/')
def hello(env, req):
return 'Hello, World!'
app.run()
class TestServe(unittest.TestCase):
def test_simple(self):
p = multiprocessing.Process(target=simple_server)
p.start()
time.sleep(0.1)
with urllib.request.urlopen('http://localhost:1234') as response:
html = response.read()
self.assertEqual(html, b'Hello, World!')
p.terminate()
|
Add test of running the serverimport unittest
import asyncio
import io
import multiprocessing
import urllib.request
import time
import grole
def simple_server():
app = grole.Grole()
@app.route('/')
def hello(env, req):
return 'Hello, World!'
app.run()
class TestServe(unittest.TestCase):
def test_simple(self):
p = multiprocessing.Process(target=simple_server)
p.start()
time.sleep(0.1)
with urllib.request.urlopen('http://localhost:1234') as response:
html = response.read()
self.assertEqual(html, b'Hello, World!')
p.terminate()
|
<commit_before><commit_msg>Add test of running the server<commit_after>import unittest
import asyncio
import io
import multiprocessing
import urllib.request
import time
import grole
def simple_server():
app = grole.Grole()
@app.route('/')
def hello(env, req):
return 'Hello, World!'
app.run()
class TestServe(unittest.TestCase):
def test_simple(self):
p = multiprocessing.Process(target=simple_server)
p.start()
time.sleep(0.1)
with urllib.request.urlopen('http://localhost:1234') as response:
html = response.read()
self.assertEqual(html, b'Hello, World!')
p.terminate()
|
|
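Aside (a sketch, not part of the commit records): the grole test above waits a fixed 0.1 s before connecting, which can be flaky on slow machines; one common alternative is to poll the server until it answers or a deadline passes:
import time
import urllib.error
import urllib.request
def wait_for_server(url, timeout=5.0, interval=0.05):
    # Poll url until it responds or timeout seconds elapse.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url) as response:
                return response.read()
        except (urllib.error.URLError, ConnectionError):
            time.sleep(interval)
    raise TimeoutError(f"no response from {url} within {timeout}s")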
dfde3b4bff462acdcfb4436c898110fda889b415
|
src/tests/orientated_bins_test.py
|
src/tests/orientated_bins_test.py
|
import unittest
import nose.tools
import numpy as np
import skimage.io as io
from scipy.ndimage.filters import gaussian_filter
from mammogram.orientated_bins import orientated_bins
class OrientatedBinsTest(unittest.TestCase):
def test_with_pure_structure(self):
size = 20
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,size/2] = np.ones(size)
line_strength = orientated_bins(linear_structure, 7)
# line_strength[line_strength<0.15] = 0 #threshold
io.imshow(line_strength)
io.show()
nose.tools.assert_equal(np.count_nonzero(line_strength), size)
def test_with_burred_structure(self):
linear_structure = np.identity(20)
img = gaussian_filter(linear_structure, 0.8)
line_strength = orientated_bins(img, 10)
line_strength[line_strength<0.15] = 0 #threshold
nose.tools.assert_equal(np.count_nonzero(line_strength), 20)
def test_with_multiple_windows(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 10)
def test_with_more_bins(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 5, nbins=8)
# def test_real(self):
# path="../../data/p214-010-60001-cl.png"
# mask="../../data/f214-010-60001-cl_mask.png"
#
# img = io.imread(path)
# msk = io.imread(mask, as_grey=True)
#
# pyramid = pyramid_gaussian(img, downscale=4)
# pyramid.next()
# img = pyramid.next()
#
# img = img * msk
#
# line_strength = orientated_bins(img, 5, nbins=12)
#
# # io.imshow(line_strength)
# # io.show()
|
Add some tests for the orientated bins
|
Add some tests for the orientated bins
|
Python
|
mit
|
samueljackson92/major-project,samueljackson92/major-project,samueljackson92/major-project,samueljackson92/major-project
|
Add some tests for the orientated bins
|
import unittest
import nose.tools
import numpy as np
import skimage.io as io
from scipy.ndimage.filters import gaussian_filter
from mammogram.orientated_bins import orientated_bins
class OrientatedBinsTest(unittest.TestCase):
def test_with_pure_structure(self):
size = 20
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,size/2] = np.ones(size)
line_strength = orientated_bins(linear_structure, 7)
# line_strength[line_strength<0.15] = 0 #threshold
io.imshow(line_strength)
io.show()
nose.tools.assert_equal(np.count_nonzero(line_strength), size)
def test_with_burred_structure(self):
linear_structure = np.identity(20)
img = gaussian_filter(linear_structure, 0.8)
line_strength = orientated_bins(img, 10)
line_strength[line_strength<0.15] = 0 #threshold
nose.tools.assert_equal(np.count_nonzero(line_strength), 20)
def test_with_multiple_windows(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 10)
def test_with_more_bins(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 5, nbins=8)
# def test_real(self):
# path="../../data/p214-010-60001-cl.png"
# mask="../../data/f214-010-60001-cl_mask.png"
#
# img = io.imread(path)
# msk = io.imread(mask, as_grey=True)
#
# pyramid = pyramid_gaussian(img, downscale=4)
# pyramid.next()
# img = pyramid.next()
#
# img = img * msk
#
# line_strength = orientated_bins(img, 5, nbins=12)
#
# # io.imshow(line_strength)
# # io.show()
|
<commit_before><commit_msg>Add some tests for the orientated bins<commit_after>
|
import unittest
import nose.tools
import numpy as np
import skimage.io as io
from scipy.ndimage.filters import gaussian_filter
from mammogram.orientated_bins import orientated_bins
class OrientatedBinsTest(unittest.TestCase):
def test_with_pure_structure(self):
size = 20
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,size/2] = np.ones(size)
line_strength = orientated_bins(linear_structure, 7)
# line_strength[line_strength<0.15] = 0 #threshold
io.imshow(line_strength)
io.show()
nose.tools.assert_equal(np.count_nonzero(line_strength), size)
def test_with_burred_structure(self):
linear_structure = np.identity(20)
img = gaussian_filter(linear_structure, 0.8)
line_strength = orientated_bins(img, 10)
line_strength[line_strength<0.15] = 0 #threshold
nose.tools.assert_equal(np.count_nonzero(line_strength), 20)
def test_with_multiple_windows(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 10)
def test_with_more_bins(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 5, nbins=8)
# def test_real(self):
# path="../../data/p214-010-60001-cl.png"
# mask="../../data/f214-010-60001-cl_mask.png"
#
# img = io.imread(path)
# msk = io.imread(mask, as_grey=True)
#
# pyramid = pyramid_gaussian(img, downscale=4)
# pyramid.next()
# img = pyramid.next()
#
# img = img * msk
#
# line_strength = orientated_bins(img, 5, nbins=12)
#
# # io.imshow(line_strength)
# # io.show()
|
Add some tests for the orientated binsimport unittest
import nose.tools
import numpy as np
import skimage.io as io
from scipy.ndimage.filters import gaussian_filter
from mammogram.orientated_bins import orientated_bins
class OrientatedBinsTest(unittest.TestCase):
def test_with_pure_structure(self):
size = 20
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,size/2] = np.ones(size)
line_strength = orientated_bins(linear_structure, 7)
# line_strength[line_strength<0.15] = 0 #threshold
io.imshow(line_strength)
io.show()
nose.tools.assert_equal(np.count_nonzero(line_strength), size)
def test_with_burred_structure(self):
linear_structure = np.identity(20)
img = gaussian_filter(linear_structure, 0.8)
line_strength = orientated_bins(img, 10)
line_strength[line_strength<0.15] = 0 #threshold
nose.tools.assert_equal(np.count_nonzero(line_strength), 20)
def test_with_multiple_windows(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 10)
def test_with_more_bins(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 5, nbins=8)
# def test_real(self):
# path="../../data/p214-010-60001-cl.png"
# mask="../../data/f214-010-60001-cl_mask.png"
#
# img = io.imread(path)
# msk = io.imread(mask, as_grey=True)
#
# pyramid = pyramid_gaussian(img, downscale=4)
# pyramid.next()
# img = pyramid.next()
#
# img = img * msk
#
# line_strength = orientated_bins(img, 5, nbins=12)
#
# # io.imshow(line_strength)
# # io.show()
|
<commit_before><commit_msg>Add some tests for the orientated bins<commit_after>import unittest
import nose.tools
import numpy as np
import skimage.io as io
from scipy.ndimage.filters import gaussian_filter
from mammogram.orientated_bins import orientated_bins
class OrientatedBinsTest(unittest.TestCase):
def test_with_pure_structure(self):
size = 20
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,size/2] = np.ones(size)
line_strength = orientated_bins(linear_structure, 7)
# line_strength[line_strength<0.15] = 0 #threshold
io.imshow(line_strength)
io.show()
nose.tools.assert_equal(np.count_nonzero(line_strength), size)
def test_with_burred_structure(self):
linear_structure = np.identity(20)
img = gaussian_filter(linear_structure, 0.8)
line_strength = orientated_bins(img, 10)
line_strength[line_strength<0.15] = 0 #threshold
nose.tools.assert_equal(np.count_nonzero(line_strength), 20)
def test_with_multiple_windows(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 10)
def test_with_more_bins(self):
size = 100
linear_structure = np.zeros(shape=(size,size))
linear_structure[:,15] = np.ones(size)
linear_structure = np.identity(size)
noise = np.random.rand(size, size) * 0.5
linear_structure += noise
img = gaussian_filter(linear_structure, 1.5)
line_strength = orientated_bins(img, 5, nbins=8)
# def test_real(self):
# path="../../data/p214-010-60001-cl.png"
# mask="../../data/f214-010-60001-cl_mask.png"
#
# img = io.imread(path)
# msk = io.imread(mask, as_grey=True)
#
# pyramid = pyramid_gaussian(img, downscale=4)
# pyramid.next()
# img = pyramid.next()
#
# img = img * msk
#
# line_strength = orientated_bins(img, 5, nbins=12)
#
# # io.imshow(line_strength)
# # io.show()
|
|
4cc819e76cad1e873ea16e0b8bf0a64260967af4
|
server/lib/python/cartodb_services/cartodb_services/here/service_factory.py
|
server/lib/python/cartodb_services/cartodb_services/here/service_factory.py
|
from cartodb_services.here.geocoder import HereMapsGeocoder, HereMapsGeocoderV7
from cartodb_services.here.bulk_geocoder import HereMapsBulkGeocoder, HereMapsBulkGeocoderV7
from cartodb_services.here.routing import HereMapsRoutingIsoline, HereMapsRoutingIsolineV8
GEOCODING_DEFAULT_MAXRESULTS = 1
def get_geocoder(logger, app_id=None, app_code=None, service_params=None, maxresults=GEOCODING_DEFAULT_MAXRESULTS, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params,
limit=maxresults)
else:
return HereMapsGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params,
maxresults=maxresults)
def get_bulk_geocoder(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsBulkGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsBulkGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
def get_routing_isoline(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsRoutingIsolineV8(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsRoutingIsoline(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
|
Add service factory module to return appropriate service version
|
Add service factory module to return appropriate service version
|
Python
|
bsd-3-clause
|
CartoDB/dataservices-api,CartoDB/geocoder-api,CartoDB/dataservices-api,CartoDB/dataservices-api,CartoDB/dataservices-api,CartoDB/geocoder-api,CartoDB/geocoder-api,CartoDB/geocoder-api
|
Add service factory module to return appropriate service version
|
from cartodb_services.here.geocoder import HereMapsGeocoder, HereMapsGeocoderV7
from cartodb_services.here.bulk_geocoder import HereMapsBulkGeocoder, HereMapsBulkGeocoderV7
from cartodb_services.here.routing import HereMapsRoutingIsoline, HereMapsRoutingIsolineV8
GEOCODING_DEFAULT_MAXRESULTS = 1
def get_geocoder(logger, app_id=None, app_code=None, service_params=None, maxresults=GEOCODING_DEFAULT_MAXRESULTS, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params,
limit=maxresults)
else:
return HereMapsGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params,
maxresults=maxresults)
def get_bulk_geocoder(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsBulkGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsBulkGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
def get_routing_isoline(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsRoutingIsolineV8(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsRoutingIsoline(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
|
<commit_before><commit_msg>Add service factory module to return appropriate service version<commit_after>
|
from cartodb_services.here.geocoder import HereMapsGeocoder, HereMapsGeocoderV7
from cartodb_services.here.bulk_geocoder import HereMapsBulkGeocoder, HereMapsBulkGeocoderV7
from cartodb_services.here.routing import HereMapsRoutingIsoline, HereMapsRoutingIsolineV8
GEOCODING_DEFAULT_MAXRESULTS = 1
def get_geocoder(logger, app_id=None, app_code=None, service_params=None, maxresults=GEOCODING_DEFAULT_MAXRESULTS, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params,
limit=maxresults)
else:
return HereMapsGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params,
maxresults=maxresults)
def get_bulk_geocoder(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsBulkGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsBulkGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
def get_routing_isoline(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsRoutingIsolineV8(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsRoutingIsoline(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
|
Add service factory module to return appropriate service versionfrom cartodb_services.here.geocoder import HereMapsGeocoder, HereMapsGeocoderV7
from cartodb_services.here.bulk_geocoder import HereMapsBulkGeocoder, HereMapsBulkGeocoderV7
from cartodb_services.here.routing import HereMapsRoutingIsoline, HereMapsRoutingIsolineV8
GEOCODING_DEFAULT_MAXRESULTS = 1
def get_geocoder(logger, app_id=None, app_code=None, service_params=None, maxresults=GEOCODING_DEFAULT_MAXRESULTS, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params,
limit=maxresults)
else:
return HereMapsGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params,
maxresults=maxresults)
def get_bulk_geocoder(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsBulkGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsBulkGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
def get_routing_isoline(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsRoutingIsolineV8(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsRoutingIsoline(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
|
<commit_before><commit_msg>Add service factory module to return appropriate service version<commit_after>from cartodb_services.here.geocoder import HereMapsGeocoder, HereMapsGeocoderV7
from cartodb_services.here.bulk_geocoder import HereMapsBulkGeocoder, HereMapsBulkGeocoderV7
from cartodb_services.here.routing import HereMapsRoutingIsoline, HereMapsRoutingIsolineV8
GEOCODING_DEFAULT_MAXRESULTS = 1
def get_geocoder(logger, app_id=None, app_code=None, service_params=None, maxresults=GEOCODING_DEFAULT_MAXRESULTS, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params,
limit=maxresults)
else:
return HereMapsGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params,
maxresults=maxresults)
def get_bulk_geocoder(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsBulkGeocoderV7(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsBulkGeocoder(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
def get_routing_isoline(logger, app_id=None, app_code=None, service_params=None, use_apikey=False, apikey=None):
if use_apikey is True:
return HereMapsRoutingIsolineV8(apikey=apikey, logger=logger,
service_params=service_params)
else:
return HereMapsRoutingIsoline(app_id=app_id, app_code=app_code,
logger=logger,
service_params=service_params)
|
|
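A hedged usage sketch for the factory functions above. The logger name and all credential values are placeholders, and it assumes the `cartodb_services` package is importable as laid out in the repository:

import logging
from cartodb_services.here.service_factory import (
    get_geocoder, get_bulk_geocoder, get_routing_isoline)

logger = logging.getLogger('here_services')

# apikey path -> the V7 geocoder / V8 isoline clients, per the branches above
geocoder = get_geocoder(logger, use_apikey=True, apikey='PLACEHOLDER_APIKEY')
isolines = get_routing_isoline(logger, use_apikey=True, apikey='PLACEHOLDER_APIKEY')

# legacy app_id/app_code path -> the older clients
bulk = get_bulk_geocoder(logger, app_id='PLACEHOLDER_ID', app_code='PLACEHOLDER_CODE')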
04717bf2b84b62e1f6de5f5a34307474abefec1e
|
run_task.py
|
run_task.py
|
import sys
import logging
import logging.config
import traceback
import bson.objectid
import config.global_configuration as global_conf
import database.client
import util.database_helpers as dh
def main(*args):
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
if len(args) >= 1:
task_id = bson.objectid.ObjectId(args[0])
config = global_conf.load_global_config('config.yml')
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
task = dh.load_object(db_client, db_client.tasks_collection, task_id)
if task is not None:
try:
task.run_task(db_client)
except Exception:
logging.getLogger(__name__).error("Exception occurred while running {0}: {1}".format(
type(task).__name__, traceback.format_exc()
))
task.mark_job_failed()
task.save_updates(db_client.tasks_collection)
if __name__ == '__main__':
main(*sys.argv[1:])
|
Add a common run task script for all tasks. Not supported by db_client yet
|
Add a common run task script for all tasks. Not supported by db_client yet
|
Python
|
bsd-2-clause
|
jskinn/robot-vision-experiment-framework,jskinn/robot-vision-experiment-framework
|
Add a common run task script for all tasks. Not supported by db_client yet
|
import sys
import logging
import logging.config
import traceback
import bson.objectid
import config.global_configuration as global_conf
import database.client
import util.database_helpers as dh
def main(*args):
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
if len(args) >= 1:
task_id = bson.objectid.ObjectId(args[0])
config = global_conf.load_global_config('config.yml')
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
task = dh.load_object(db_client, db_client.tasks_collection, task_id)
if task is not None:
try:
task.run_task(db_client)
except Exception:
logging.getLogger(__name__).error("Exception occurred while running {0}: {1}".format(
type(task).__name__, traceback.format_exc()
))
task.mark_job_failed()
task.save_updates(db_client.tasks_collection)
if __name__ == '__main__':
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add a common run task script for all tasks. Not supported by db_client yet<commit_after>
|
import sys
import logging
import logging.config
import traceback
import bson.objectid
import config.global_configuration as global_conf
import database.client
import util.database_helpers as dh
def main(*args):
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
if len(args) >= 1:
task_id = bson.objectid.ObjectId(args[0])
config = global_conf.load_global_config('config.yml')
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
task = dh.load_object(db_client, db_client.tasks_collection, task_id)
if task is not None:
try:
task.run_task(db_client)
except Exception:
logging.getLogger(__name__).error("Exception occurred while running {0}: {1}".format(
type(task).__name__, traceback.format_exc()
))
task.mark_job_failed()
task.save_updates(db_client.tasks_collection)
if __name__ == '__main__':
main(*sys.argv[1:])
|
Add a common run task script for all tasks. Not supported by db_client yetimport sys
import logging
import logging.config
import traceback
import bson.objectid
import config.global_configuration as global_conf
import database.client
import util.database_helpers as dh
def main(*args):
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
if len(args) >= 1:
task_id = bson.objectid.ObjectId(args[0])
config = global_conf.load_global_config('config.yml')
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
task = dh.load_object(db_client, db_client.tasks_collection, task_id)
if task is not None:
try:
task.run_task(db_client)
except Exception:
logging.getLogger(__name__).error("Exception occurred while running {0}: {1}".format(
type(task).__name__, traceback.format_exc()
))
task.mark_job_failed()
task.save_updates(db_client.tasks_collection)
if __name__ == '__main__':
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add a common run task script for all tasks. Not supported by db_client yet<commit_after>import sys
import logging
import logging.config
import traceback
import bson.objectid
import config.global_configuration as global_conf
import database.client
import util.database_helpers as dh
def main(*args):
"""
Run a particular task.
:args: Only argument is the id of the task to run
:return:
"""
if len(args) >= 1:
task_id = bson.objectid.ObjectId(args[0])
config = global_conf.load_global_config('config.yml')
logging.config.dictConfig(config['logging'])
db_client = database.client.DatabaseClient(config=config)
task = dh.load_object(db_client, db_client.tasks_collection, task_id)
if task is not None:
try:
task.run_task(db_client)
except Exception:
logging.getLogger(__name__).error("Exception occurred while running {0}: {1}".format(
type(task).__name__, traceback.format_exc()
))
task.mark_job_failed()
task.save_updates(db_client.tasks_collection)
if __name__ == '__main__':
main(*sys.argv[1:])
|
|
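A sketch of invoking the script above. It assumes a `config.yml` next to the script and an existing task document in the tasks collection; the 24-character hex id is a placeholder, not a real ObjectId:

# shell equivalent: python run_task.py 5a2f9c3e8f1b2c0012345678
import run_task

run_task.main('5a2f9c3e8f1b2c0012345678')  # placeholder id of the task to run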
97f5933e6f6b03bc7b0cc9b070316e2264359700
|
tests/infrastructure/test_utils.py
|
tests/infrastructure/test_utils.py
|
import random
import string
INDENT = '\n' + ' ' * 8
def generate_simple_output_program(source):
return """thing Program
setup{source}
""".format(source=INDENT + INDENT.join([source] if isinstance(source, str) else source))
def generate_test_case_structure(dct):
lst = []
for name, groups in list(dct.items()):
for idx, group in enumerate(groups):
lst.append(('{} #{}'.format(name, idx + 1), group[0], group[1]))
return lst
def random_string():
def get_string():
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=random.randint(2, 50)))
return [get_string() for _ in range(random.randint(3, 8))]
|
Move test utils to infra module
|
Move test utils to infra module
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Move test utils to infra module
|
import random
import string
INDENT = '\n' + ' ' * 8
def generate_simple_output_program(source):
return """thing Program
setup{source}
""".format(source=INDENT + INDENT.join([source] if isinstance(source, str) else source))
def generate_test_case_structure(dct):
lst = []
for name, groups in list(dct.items()):
for idx, group in enumerate(groups):
lst.append(('{} #{}'.format(name, idx + 1), group[0], group[1]))
return lst
def random_string():
def get_string():
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=random.randint(2, 50)))
return [get_string() for _ in range(random.randint(3, 8))]
|
<commit_before><commit_msg>Move test utils to infra module<commit_after>
|
import random
import string
INDENT = '\n' + ' ' * 8
def generate_simple_output_program(source):
return """thing Program
setup{source}
""".format(source=INDENT + INDENT.join([source] if isinstance(source, str) else source))
def generate_test_case_structure(dct):
lst = []
for name, groups in list(dct.items()):
for idx, group in enumerate(groups):
lst.append(('{} #{}'.format(name, idx + 1), group[0], group[1]))
return lst
def random_string():
def get_string():
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=random.randint(2, 50)))
return [get_string() for _ in range(random.randint(3, 8))]
|
Move test utils to infra moduleimport random
import string
INDENT = '\n' + ' ' * 8
def generate_simple_output_program(source):
return """thing Program
setup{source}
""".format(source=INDENT + INDENT.join([source] if isinstance(source, str) else source))
def generate_test_case_structure(dct):
lst = []
for name, groups in list(dct.items()):
for idx, group in enumerate(groups):
lst.append(('{} #{}'.format(name, idx + 1), group[0], group[1]))
return lst
def random_string():
def get_string():
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=random.randint(2, 50)))
return [get_string() for _ in range(random.randint(3, 8))]
|
<commit_before><commit_msg>Move test utils to infra module<commit_after>import random
import string
INDENT = '\n' + ' ' * 8
def generate_simple_output_program(source):
return """thing Program
setup{source}
""".format(source=INDENT + INDENT.join([source] if isinstance(source, str) else source))
def generate_test_case_structure(dct):
lst = []
for name, groups in list(dct.items()):
for idx, group in enumerate(groups):
lst.append(('{} #{}'.format(name, idx + 1), group[0], group[1]))
return lst
def random_string():
def get_string():
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=random.randint(2, 50)))
return [get_string() for _ in range(random.randint(3, 8))]
|
|
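A small illustration of the relocated helpers, assuming `tests.infrastructure.test_utils` is importable as a package module; the thinglang snippet and case names are made up for the example:

from tests.infrastructure.test_utils import (
    generate_simple_output_program, generate_test_case_structure)

program = generate_simple_output_program('Output.write("hello")')
print(program)  # a one-statement "thing Program" with an indented setup block

cases = generate_test_case_structure({
    'addition': [('1 + 1', 2), ('2 + 3', 5)],
})
print(cases)  # [('addition #1', '1 + 1', 2), ('addition #2', '2 + 3', 5)]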
9591911cf98348a771f7fffc8951bfd578cc02ce
|
send_sms.py
|
send_sms.py
|
# Download the twilio-python library from http://twilio.com/docs/libraries
from twilio.rest import TwilioRestClient
import config
# Find these values at https://twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "YYYYYYYYYYYYYYYYYY"
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+12316851234", from_="+15555555555",
body="Hello there!")
|
Add snippet for sending sms.
|
Add snippet for sending sms.
|
Python
|
mit
|
mattstibbs/twilio-snippets
|
Add snippet for sending sms.
|
# Download the twilio-python library from http://twilio.com/docs/libraries
from twilio.rest import TwilioRestClient
import config
# Find these values at https://twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "YYYYYYYYYYYYYYYYYY"
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+12316851234", from_="+15555555555",
body="Hello there!")
|
<commit_before><commit_msg>Add snippet for sending sms.<commit_after>
|
# Download the twilio-python library from http://twilio.com/docs/libraries
from twilio.rest import TwilioRestClient
import config
# Find these values at https://twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "YYYYYYYYYYYYYYYYYY"
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+12316851234", from_="+15555555555",
body="Hello there!")
|
Add snippet for sending sms.# Download the twilio-python library from http://twilio.com/docs/libraries
from twilio.rest import TwilioRestClient
import config
# Find these values at https://twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "YYYYYYYYYYYYYYYYYY"
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+12316851234", from_="+15555555555",
body="Hello there!")
|
<commit_before><commit_msg>Add snippet for sending sms.<commit_after># Download the twilio-python library from http://twilio.com/docs/libraries
from twilio.rest import TwilioRestClient
import config
# Find these values at https://twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXX"
auth_token = "YYYYYYYYYYYYYYYYYY"
client = TwilioRestClient(account_sid, auth_token)
message = client.messages.create(to="+12316851234", from_="+15555555555",
body="Hello there!")
|
|
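The snippet above imports `config` but never uses it. One plausible intent — assuming that module exposes `account_sid`, `auth_token` and the two phone numbers, which is an assumption rather than part of the original — would be to read the credentials from it instead of hardcoding them:

from twilio.rest import TwilioRestClient
import config  # assumed to define account_sid, auth_token, from_number, to_number

client = TwilioRestClient(config.account_sid, config.auth_token)
message = client.messages.create(to=config.to_number, from_=config.from_number,
                                 body="Hello there!")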
4fa3db89bd5a8a00a654cb294ca3b0acf080dd3e
|
bluebottle/wallposts/migrations/0003_mediawallpost_results_page.py
|
bluebottle/wallposts/migrations/0003_mediawallpost_results_page.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 15:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wallposts', '0002_auto_20161115_1601'),
]
operations = [
migrations.AddField(
model_name='mediawallpost',
name='results_page',
field=models.BooleanField(default=True),
),
]
|
Add results boolean to wallpost pics
|
Add results boolean to wallpost pics
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add results boolean to wallpost pics
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 15:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wallposts', '0002_auto_20161115_1601'),
]
operations = [
migrations.AddField(
model_name='mediawallpost',
name='results_page',
field=models.BooleanField(default=True),
),
]
|
<commit_before><commit_msg>Add results boolean to wallpost pics<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 15:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wallposts', '0002_auto_20161115_1601'),
]
operations = [
migrations.AddField(
model_name='mediawallpost',
name='results_page',
field=models.BooleanField(default=True),
),
]
|
Add results boolean to wallpost pics# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 15:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wallposts', '0002_auto_20161115_1601'),
]
operations = [
migrations.AddField(
model_name='mediawallpost',
name='results_page',
field=models.BooleanField(default=True),
),
]
|
<commit_before><commit_msg>Add results boolean to wallpost pics<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 15:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wallposts', '0002_auto_20161115_1601'),
]
operations = [
migrations.AddField(
model_name='mediawallpost',
name='results_page',
field=models.BooleanField(default=True),
),
]
|
|
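Once the migration file is in place it still has to be applied. A sketch, assuming a configured Django settings module; the usual route is simply `python manage.py migrate wallposts` from the project root:

import django
from django.core.management import call_command

django.setup()                         # requires DJANGO_SETTINGS_MODULE to be set
call_command('migrate', 'wallposts')   # applies 0003_mediawallpost_results_page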
5e0ff1ce486cc4281919ed9c8e96a71723300b07
|
test/examples/kissgp_gp_classification_test.py
|
test/examples/kissgp_gp_classification_test.py
|
import math
import torch
import gpytorch
from torch import nn, optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel, GridInterpolationKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.random_variables import GaussianRandomVariable
from gpytorch.inference import Inference
train_x = Variable(torch.linspace(0, 1, 10))
train_y = Variable(torch.sign(torch.cos(train_x.data * (4 * math.pi))))
class GPClassificationModel(gpytorch.GPModel):
def __init__(self):
super(GPClassificationModel, self).__init__(BernoulliLikelihood())
self.mean_module = ConstantMean(constant_bounds=[-1e-5, 1e-5])
self.covar_module = RBFKernel(log_lengthscale_bounds=(-5, 6))
self.grid_covar_module = GridInterpolationKernel(self.covar_module, 50)
self.register_parameter('log_outputscale', nn.Parameter(torch.Tensor([0])), bounds=(-5, 6))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.grid_covar_module(x)
covar_x = covar_x.mul(self.log_outputscale.exp())
latent_pred = GaussianRandomVariable(mean_x, covar_x)
return latent_pred
def test_kissgp_classification_error():
prior_model = GPClassificationModel()
infer = Inference(prior_model)
posterior_model = infer.run(train_x, train_y)
# Find optimal model hyperparameters
posterior_model.train()
optimizer = optim.Adam(posterior_model.parameters(), lr=0.15)
optimizer.n_iter = 0
for i in range(150):
optimizer.zero_grad()
output = posterior_model.forward(train_x)
loss = -posterior_model.marginal_log_likelihood(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Set back to eval mode
posterior_model.eval()
test_preds = posterior_model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
assert(mean_abs_error.data.squeeze()[0] < 1e-5)
|
Add KISS-GP classification unit test.
|
Add KISS-GP classification unit test.
|
Python
|
mit
|
jrg365/gpytorch,jrg365/gpytorch,jrg365/gpytorch
|
Add KISS-GP classification unit test.
|
import math
import torch
import gpytorch
from torch import nn, optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel, GridInterpolationKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.random_variables import GaussianRandomVariable
from gpytorch.inference import Inference
train_x = Variable(torch.linspace(0, 1, 10))
train_y = Variable(torch.sign(torch.cos(train_x.data * (4 * math.pi))))
class GPClassificationModel(gpytorch.GPModel):
def __init__(self):
super(GPClassificationModel, self).__init__(BernoulliLikelihood())
self.mean_module = ConstantMean(constant_bounds=[-1e-5, 1e-5])
self.covar_module = RBFKernel(log_lengthscale_bounds=(-5, 6))
self.grid_covar_module = GridInterpolationKernel(self.covar_module, 50)
self.register_parameter('log_outputscale', nn.Parameter(torch.Tensor([0])), bounds=(-5, 6))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.grid_covar_module(x)
covar_x = covar_x.mul(self.log_outputscale.exp())
latent_pred = GaussianRandomVariable(mean_x, covar_x)
return latent_pred
def test_kissgp_classification_error():
prior_model = GPClassificationModel()
infer = Inference(prior_model)
posterior_model = infer.run(train_x, train_y)
# Find optimal model hyperparameters
posterior_model.train()
optimizer = optim.Adam(posterior_model.parameters(), lr=0.15)
optimizer.n_iter = 0
for i in range(150):
optimizer.zero_grad()
output = posterior_model.forward(train_x)
loss = -posterior_model.marginal_log_likelihood(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Set back to eval mode
posterior_model.eval()
test_preds = posterior_model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
assert(mean_abs_error.data.squeeze()[0] < 1e-5)
|
<commit_before><commit_msg>Add KISS-GP classification unit test.<commit_after>
|
import math
import torch
import gpytorch
from torch import nn, optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel, GridInterpolationKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.random_variables import GaussianRandomVariable
from gpytorch.inference import Inference
train_x = Variable(torch.linspace(0, 1, 10))
train_y = Variable(torch.sign(torch.cos(train_x.data * (4 * math.pi))))
class GPClassificationModel(gpytorch.GPModel):
def __init__(self):
super(GPClassificationModel, self).__init__(BernoulliLikelihood())
self.mean_module = ConstantMean(constant_bounds=[-1e-5, 1e-5])
self.covar_module = RBFKernel(log_lengthscale_bounds=(-5, 6))
self.grid_covar_module = GridInterpolationKernel(self.covar_module, 50)
self.register_parameter('log_outputscale', nn.Parameter(torch.Tensor([0])), bounds=(-5, 6))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.grid_covar_module(x)
covar_x = covar_x.mul(self.log_outputscale.exp())
latent_pred = GaussianRandomVariable(mean_x, covar_x)
return latent_pred
def test_kissgp_classification_error():
prior_model = GPClassificationModel()
infer = Inference(prior_model)
posterior_model = infer.run(train_x, train_y)
# Find optimal model hyperparameters
posterior_model.train()
optimizer = optim.Adam(posterior_model.parameters(), lr=0.15)
optimizer.n_iter = 0
for i in range(150):
optimizer.zero_grad()
output = posterior_model.forward(train_x)
loss = -posterior_model.marginal_log_likelihood(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Set back to eval mode
posterior_model.eval()
test_preds = posterior_model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
assert(mean_abs_error.data.squeeze()[0] < 1e-5)
|
Add KISS-GP classification unit test.import math
import torch
import gpytorch
from torch import nn, optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel, GridInterpolationKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.random_variables import GaussianRandomVariable
from gpytorch.inference import Inference
train_x = Variable(torch.linspace(0, 1, 10))
train_y = Variable(torch.sign(torch.cos(train_x.data * (4 * math.pi))))
class GPClassificationModel(gpytorch.GPModel):
def __init__(self):
super(GPClassificationModel, self).__init__(BernoulliLikelihood())
self.mean_module = ConstantMean(constant_bounds=[-1e-5, 1e-5])
self.covar_module = RBFKernel(log_lengthscale_bounds=(-5, 6))
self.grid_covar_module = GridInterpolationKernel(self.covar_module, 50)
self.register_parameter('log_outputscale', nn.Parameter(torch.Tensor([0])), bounds=(-5, 6))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.grid_covar_module(x)
covar_x = covar_x.mul(self.log_outputscale.exp())
latent_pred = GaussianRandomVariable(mean_x, covar_x)
return latent_pred
def test_kissgp_classification_error():
prior_model = GPClassificationModel()
infer = Inference(prior_model)
posterior_model = infer.run(train_x, train_y)
# Find optimal model hyperparameters
posterior_model.train()
optimizer = optim.Adam(posterior_model.parameters(), lr=0.15)
optimizer.n_iter = 0
for i in range(150):
optimizer.zero_grad()
output = posterior_model.forward(train_x)
loss = -posterior_model.marginal_log_likelihood(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Set back to eval mode
posterior_model.eval()
test_preds = posterior_model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
assert(mean_abs_error.data.squeeze()[0] < 1e-5)
|
<commit_before><commit_msg>Add KISS-GP classification unit test.<commit_after>import math
import torch
import gpytorch
from torch import nn, optim
from torch.autograd import Variable
from gpytorch.kernels import RBFKernel, GridInterpolationKernel
from gpytorch.means import ConstantMean
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.random_variables import GaussianRandomVariable
from gpytorch.inference import Inference
train_x = Variable(torch.linspace(0, 1, 10))
train_y = Variable(torch.sign(torch.cos(train_x.data * (4 * math.pi))))
class GPClassificationModel(gpytorch.GPModel):
def __init__(self):
super(GPClassificationModel, self).__init__(BernoulliLikelihood())
self.mean_module = ConstantMean(constant_bounds=[-1e-5, 1e-5])
self.covar_module = RBFKernel(log_lengthscale_bounds=(-5, 6))
self.grid_covar_module = GridInterpolationKernel(self.covar_module, 50)
self.register_parameter('log_outputscale', nn.Parameter(torch.Tensor([0])), bounds=(-5, 6))
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.grid_covar_module(x)
covar_x = covar_x.mul(self.log_outputscale.exp())
latent_pred = GaussianRandomVariable(mean_x, covar_x)
return latent_pred
def test_kissgp_classification_error():
prior_model = GPClassificationModel()
infer = Inference(prior_model)
posterior_model = infer.run(train_x, train_y)
# Find optimal model hyperparameters
posterior_model.train()
optimizer = optim.Adam(posterior_model.parameters(), lr=0.15)
optimizer.n_iter = 0
for i in range(150):
optimizer.zero_grad()
output = posterior_model.forward(train_x)
loss = -posterior_model.marginal_log_likelihood(output, train_y)
loss.backward()
optimizer.n_iter += 1
optimizer.step()
# Set back to eval mode
posterior_model.eval()
test_preds = posterior_model(train_x).mean().ge(0.5).float().mul(2).sub(1).squeeze()
mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
assert(mean_abs_error.data.squeeze()[0] < 1e-5)
|
|
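A hedged sketch of reusing the model above for predictions at unseen inputs. It follows the same (older) gpytorch API the test relies on and assumes the test module's names are importable; the hyperparameter optimisation loop from the test is omitted for brevity, so none of this is part of the original commit:

import torch
from torch.autograd import Variable
from gpytorch.inference import Inference
# names defined in the test module above (assumed importable)
from kissgp_gp_classification_test import GPClassificationModel, train_x, train_y

posterior_model = Inference(GPClassificationModel()).run(train_x, train_y)
posterior_model.eval()

test_x = Variable(torch.linspace(0, 1, 51))
pred_labels = posterior_model(test_x).mean().ge(0.5).float().mul(2).sub(1)  # {-1, +1}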
1245e0aeaf5cd37e6f6c5c0feddbedededd3a458
|
tests/test_crypto.py
|
tests/test_crypto.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import base64
import credsmash.aes_ctr
import credsmash.aes_gcm
class DummyKeyService(object):
def generate_key_data(self, number_of_bytes):
key = os.urandom(int(number_of_bytes))
return key, base64.b64encode(key)
def decrypt(self, encoded_key):
return base64.b64decode(encoded_key)
def test_aes_ctr_legacy():
"""
Basic test to ensure `cryptography` is installed/working
"""
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr_legacy(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr_legacy(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_ctr():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_gcm():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_gcm.seal_aes_gcm(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_gcm.open_aes_gcm(
key_service, material
)
assert plaintext == recovered_plaintext
|
Add test to show crypto working
|
Add test to show crypto working
|
Python
|
apache-2.0
|
3stack-software/credsmash
|
Add test to show crypto working
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import base64
import credsmash.aes_ctr
import credsmash.aes_gcm
class DummyKeyService(object):
def generate_key_data(self, number_of_bytes):
key = os.urandom(int(number_of_bytes))
return key, base64.b64encode(key)
def decrypt(self, encoded_key):
return base64.b64decode(encoded_key)
def test_aes_ctr_legacy():
"""
Basic test to ensure `cryptography` is installed/working
"""
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr_legacy(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr_legacy(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_ctr():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_gcm():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_gcm.seal_aes_gcm(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_gcm.open_aes_gcm(
key_service, material
)
assert plaintext == recovered_plaintext
|
<commit_before><commit_msg>Add test to show crypto working<commit_after>
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import base64
import credsmash.aes_ctr
import credsmash.aes_gcm
class DummyKeyService(object):
def generate_key_data(self, number_of_bytes):
key = os.urandom(int(number_of_bytes))
return key, base64.b64encode(key)
def decrypt(self, encoded_key):
return base64.b64decode(encoded_key)
def test_aes_ctr_legacy():
"""
Basic test to ensure `cryptography` is installed/working
"""
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr_legacy(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr_legacy(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_ctr():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_gcm():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_gcm.seal_aes_gcm(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_gcm.open_aes_gcm(
key_service, material
)
assert plaintext == recovered_plaintext
|
Add test to show crypto workingfrom __future__ import absolute_import, division, print_function, unicode_literals
import os
import base64
import credsmash.aes_ctr
import credsmash.aes_gcm
class DummyKeyService(object):
def generate_key_data(self, number_of_bytes):
key = os.urandom(int(number_of_bytes))
return key, base64.b64encode(key)
def decrypt(self, encoded_key):
return base64.b64decode(encoded_key)
def test_aes_ctr_legacy():
"""
Basic test to ensure `cryptography` is installed/working
"""
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr_legacy(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr_legacy(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_ctr():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_gcm():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_gcm.seal_aes_gcm(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_gcm.open_aes_gcm(
key_service, material
)
assert plaintext == recovered_plaintext
|
<commit_before><commit_msg>Add test to show crypto working<commit_after>from __future__ import absolute_import, division, print_function, unicode_literals
import os
import base64
import credsmash.aes_ctr
import credsmash.aes_gcm
class DummyKeyService(object):
def generate_key_data(self, number_of_bytes):
key = os.urandom(int(number_of_bytes))
return key, base64.b64encode(key)
def decrypt(self, encoded_key):
return base64.b64decode(encoded_key)
def test_aes_ctr_legacy():
"""
Basic test to ensure `cryptography` is installed/working
"""
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr_legacy(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr_legacy(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_ctr():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_ctr.seal_aes_ctr(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_ctr.open_aes_ctr(
key_service, material
)
assert plaintext == recovered_plaintext
def test_aes_gcm():
key_service = DummyKeyService()
plaintext = b'abcdefghi'
material = credsmash.aes_gcm.seal_aes_gcm(
key_service,
plaintext
)
recovered_plaintext = credsmash.aes_gcm.open_aes_gcm(
key_service, material
)
assert plaintext == recovered_plaintext
|
|
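The same seal/open round trip in script form, outside pytest — a sketch assuming `credsmash` and its `cryptography` backend are installed; the plaintext is arbitrary:

import os
import base64
import credsmash.aes_gcm

class DummyKeyService(object):
    def generate_key_data(self, number_of_bytes):
        key = os.urandom(int(number_of_bytes))
        return key, base64.b64encode(key)
    def decrypt(self, encoded_key):
        return base64.b64decode(encoded_key)

key_service = DummyKeyService()
material = credsmash.aes_gcm.seal_aes_gcm(key_service, b'super secret value')
print(credsmash.aes_gcm.open_aes_gcm(key_service, material))  # b'super secret value'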
db8a08d29c81ae9add1e55b7fb4aada6154dadfa
|
scipy/_lib/tests/test_warnings.py
|
scipy/_lib/tests/test_warnings.py
|
"""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all. This file was adapted
from numpy.
"""
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import scipy
from numpy.testing import run_module_suite
from numpy.testing.decorators import slow
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
raise AssertionError(
"ignore filter should not be used; found in "
"{} on line {}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if "_lib/tests/test_warnings.py" is self.__filename:
# This file
return
# See if stacklevel exists:
# if len(node.args) == 3:
# return
# args = {kw.arg for kw in node.keywords}
# if "stacklevel" in args:
# return
# raise AssertionError(
# "warnings should have an appropriate stacklevel; found in "
# "{} on line {}".format(self.__filename, node.lineno))
@slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(scipy.__file__).parent
for path in base.rglob("*.py"):
# There is still one missing occurrence in optimize.py,
# this is one that should be fixed and this removed then.
if path == base / "optimize" / "optimize.py":
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
if __name__ == "__main__":
run_module_suite()
|
Add a test for "ignore" warning filters
|
TST: Add a test for "ignore" warning filters
This file currently ignores the scipy/optimize/optimize.py file
because of one (or actually two identical) remaining filters there.
A commented-out part can be used to find all occurrences of missing
stacklevels to `warnings.warn`.
This will not find errors in cython files though.
|
Python
|
bsd-3-clause
|
perimosocordiae/scipy,vigna/scipy,anntzer/scipy,aarchiba/scipy,aarchiba/scipy,jamestwebber/scipy,gfyoung/scipy,perimosocordiae/scipy,ilayn/scipy,WarrenWeckesser/scipy,WarrenWeckesser/scipy,Stefan-Endres/scipy,scipy/scipy,gfyoung/scipy,lhilt/scipy,rgommers/scipy,gertingold/scipy,matthew-brett/scipy,jor-/scipy,pizzathief/scipy,aeklant/scipy,endolith/scipy,aarchiba/scipy,rgommers/scipy,andyfaff/scipy,vigna/scipy,Eric89GXL/scipy,scipy/scipy,andyfaff/scipy,gertingold/scipy,anntzer/scipy,ilayn/scipy,nmayorov/scipy,andyfaff/scipy,grlee77/scipy,mdhaber/scipy,gfyoung/scipy,Eric89GXL/scipy,pizzathief/scipy,mdhaber/scipy,person142/scipy,scipy/scipy,gertingold/scipy,anntzer/scipy,mdhaber/scipy,perimosocordiae/scipy,Stefan-Endres/scipy,ilayn/scipy,rgommers/scipy,arokem/scipy,WarrenWeckesser/scipy,zerothi/scipy,perimosocordiae/scipy,lhilt/scipy,zerothi/scipy,aarchiba/scipy,endolith/scipy,jor-/scipy,aeklant/scipy,Eric89GXL/scipy,endolith/scipy,person142/scipy,lhilt/scipy,andyfaff/scipy,arokem/scipy,e-q/scipy,gertingold/scipy,andyfaff/scipy,endolith/scipy,zerothi/scipy,andyfaff/scipy,gfyoung/scipy,lhilt/scipy,e-q/scipy,perimosocordiae/scipy,Stefan-Endres/scipy,e-q/scipy,person142/scipy,matthew-brett/scipy,scipy/scipy,jor-/scipy,endolith/scipy,e-q/scipy,gfyoung/scipy,ilayn/scipy,lhilt/scipy,pizzathief/scipy,mdhaber/scipy,pizzathief/scipy,perimosocordiae/scipy,anntzer/scipy,WarrenWeckesser/scipy,jamestwebber/scipy,anntzer/scipy,matthew-brett/scipy,nmayorov/scipy,nmayorov/scipy,aeklant/scipy,ilayn/scipy,endolith/scipy,nmayorov/scipy,person142/scipy,grlee77/scipy,grlee77/scipy,mdhaber/scipy,person142/scipy,rgommers/scipy,Stefan-Endres/scipy,tylerjereddy/scipy,scipy/scipy,WarrenWeckesser/scipy,arokem/scipy,arokem/scipy,jor-/scipy,gertingold/scipy,aeklant/scipy,grlee77/scipy,matthew-brett/scipy,tylerjereddy/scipy,aarchiba/scipy,tylerjereddy/scipy,anntzer/scipy,aeklant/scipy,vigna/scipy,zerothi/scipy,grlee77/scipy,e-q/scipy,arokem/scipy,jamestwebber/scipy,Eric89GXL/scipy,jamestwebber/scipy,Stefan-Endres/scipy,matthew-brett/scipy,ilayn/scipy,zerothi/scipy,Stefan-Endres/scipy,vigna/scipy,jor-/scipy,tylerjereddy/scipy,Eric89GXL/scipy,Eric89GXL/scipy,vigna/scipy,nmayorov/scipy,zerothi/scipy,rgommers/scipy,pizzathief/scipy,tylerjereddy/scipy,mdhaber/scipy,WarrenWeckesser/scipy,jamestwebber/scipy,scipy/scipy
|
TST: Add a test for "ignore" warning filters
This file currently ignores the scipy/optimize/optimize.py file
because of one (or actually two identical) remaining filters there.
A commented-out part can be used to find all occurrences of missing
stacklevels to `warnings.warn`.
This will not find errors in cython files though.
|
"""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all. This file was adapted
from numpy.
"""
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import scipy
from numpy.testing import run_module_suite
from numpy.testing.decorators import slow
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
raise AssertionError(
"ignore filter should not be used; found in "
"{} on line {}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if "_lib/tests/test_warnings.py" is self.__filename:
# This file
return
# See if stacklevel exists:
# if len(node.args) == 3:
# return
# args = {kw.arg for kw in node.keywords}
# if "stacklevel" in args:
# return
# raise AssertionError(
# "warnings should have an appropriate stacklevel; found in "
# "{} on line {}".format(self.__filename, node.lineno))
@slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(scipy.__file__).parent
for path in base.rglob("*.py"):
# There is still one missing occurrence in optimize.py,
# this is one that should be fixed and this removed then.
if path == base / "optimize" / "optimize.py":
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>TST: Add a test for "ignore" warning filters
This file currently ignores the scipy/optimize/optimize.py file
because of one (or actually two identical) remaining filters there.
A commented-out part can be used to find all occurrences of missing
stacklevels to `warnings.warn`.
This will not find errors in cython files though.<commit_after>
|
"""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all. This file was adapted
from numpy.
"""
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import scipy
from numpy.testing import run_module_suite
from numpy.testing.decorators import slow
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
raise AssertionError(
"ignore filter should not be used; found in "
"{} on line {}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if "_lib/tests/test_warnings.py" is self.__filename:
# This file
return
# See if stacklevel exists:
# if len(node.args) == 3:
# return
# args = {kw.arg for kw in node.keywords}
# if "stacklevel" in args:
# return
# raise AssertionError(
# "warnings should have an appropriate stacklevel; found in "
# "{} on line {}".format(self.__filename, node.lineno))
@slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(scipy.__file__).parent
for path in base.rglob("*.py"):
# There is still one missing occurrence in optimize.py,
# this is one that should be fixed and this removed then.
if path == base / "optimize" / "optimize.py":
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
if __name__ == "__main__":
run_module_suite()
|
TST: Add a test for "ignore" warning filters
This file currently ignores the scipy/optimize/optimize.py file
because of one (or actually two identical) remaining filters there.
A commented-out part can be used to find all occurrences of missing
stacklevels to `warnings.warn`.
This will not find errors in cython files though."""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all. This file was adapted
from numpy.
"""
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import scipy
from numpy.testing import run_module_suite
from numpy.testing.decorators import slow
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
raise AssertionError(
"ignore filter should not be used; found in "
"{} on line {}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if "_lib/tests/test_warnings.py" is self.__filename:
# This file
return
# See if stacklevel exists:
# if len(node.args) == 3:
# return
# args = {kw.arg for kw in node.keywords}
# if "stacklevel" in args:
# return
# raise AssertionError(
# "warnings should have an appropriate stacklevel; found in "
# "{} on line {}".format(self.__filename, node.lineno))
@slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(scipy.__file__).parent
for path in base.rglob("*.py"):
# There is still one missing occurrence in optimize.py,
# this is one that should be fixed and this removed then.
if path == base / "optimize" / "optimize.py":
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>TST: Add a test for "ignore" warning filters
This file currently ignores the scipy/optimize/optimize.py file
because of one (or actually two identical) remaining filters there.
A commented-out part can be used to find all occurrences of missing
stacklevels to `warnings.warn`.
This will not find errors in cython files though.<commit_after>"""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all. This file was adapted
from numpy.
"""
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import scipy
from numpy.testing import run_module_suite
from numpy.testing.decorators import slow
class ParseCall(ast.NodeVisitor):
def __init__(self):
self.ls = []
def visit_Attribute(self, node):
ast.NodeVisitor.generic_visit(self, node)
self.ls.append(node.attr)
def visit_Name(self, node):
self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
def __init__(self, filename):
super().__init__()
self.__filename = filename
def visit_Call(self, node):
p = ParseCall()
p.visit(node.func)
ast.NodeVisitor.generic_visit(self, node)
if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
if node.args[0].s == "ignore":
raise AssertionError(
"ignore filter should not be used; found in "
"{} on line {}".format(self.__filename, node.lineno))
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
if "_lib/tests/test_warnings.py" is self.__filename:
# This file
return
# See if stacklevel exists:
# if len(node.args) == 3:
# return
# args = {kw.arg for kw in node.keywords}
# if "stacklevel" in args:
# return
# raise AssertionError(
# "warnings should have an appropriate stacklevel; found in "
# "{} on line {}".format(self.__filename, node.lineno))
@slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(scipy.__file__).parent
for path in base.rglob("*.py"):
# There is still one missing occurrence in optimize.py,
# this is one that should be fixed and this removed then.
if path == base / "optimize" / "optimize.py":
continue
# use tokenize to auto-detect encoding on systems where no
# default encoding is defined (e.g. LANG='C')
with tokenize.open(str(path)) as file:
tree = ast.parse(file.read())
FindFuncs(path).visit(tree)
if __name__ == "__main__":
run_module_suite()
|
|
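A small demonstration of the visitor in isolation (Python 3 only): it parses an in-memory snippet instead of walking the scipy tree, so the offending "ignore" filter trips the assertion. `FindFuncs` is assumed importable from the test module above; the snippet and the filename label are made up:

import ast
from scipy._lib.tests.test_warnings import FindFuncs  # assumed importable on Python 3

snippet = (
    'import warnings\n'
    'warnings.filterwarnings("ignore", category=DeprecationWarning)\n'
)

try:
    FindFuncs('example.py').visit(ast.parse(snippet))
except AssertionError as exc:
    print(exc)  # ignore filter should not be used; found in example.py on line 2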
70291f0f276d0ae2ade1161d89627dd43e4df975
|
app/migrations/0002_brewpidevice_time_profile_started.py
|
app/migrations/0002_brewpidevice_time_profile_started.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 08:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_squashed_0005_brewpidevice_active_profile'),
]
operations = [
migrations.AddField(
model_name='brewpidevice',
name='time_profile_started',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
|
Add migrations for latest model changes.
|
Add migrations for latest model changes.
|
Python
|
mit
|
thorrak/fermentrack,thorrak/fermentrack,thorrak/fermentrack,thorrak/fermentrack,thorrak/fermentrack
|
Add migrations for latest model changes.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 08:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_squashed_0005_brewpidevice_active_profile'),
]
operations = [
migrations.AddField(
model_name='brewpidevice',
name='time_profile_started',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
|
<commit_before><commit_msg>Add migrations for latest model changes.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 08:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_squashed_0005_brewpidevice_active_profile'),
]
operations = [
migrations.AddField(
model_name='brewpidevice',
name='time_profile_started',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
|
Add migrations for latest model changes.# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 08:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_squashed_0005_brewpidevice_active_profile'),
]
operations = [
migrations.AddField(
model_name='brewpidevice',
name='time_profile_started',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
|
<commit_before><commit_msg>Add migrations for latest model changes.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-09 08:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_squashed_0005_brewpidevice_active_profile'),
]
operations = [
migrations.AddField(
model_name='brewpidevice',
name='time_profile_started',
field=models.DateTimeField(blank=True, default=None, null=True),
),
]
|
|
524aa867c43ddb44728e1df483a88f334e1e0716
|
standing.py
|
standing.py
|
from requirement import GenEd
class Standing:
def __init__(self, credits_needed=35, credits_taken=0.0):
self.credits_needed = credits_needed
self.credits_taken = credits_taken
self.list = [
GenEd("FYW", 1),
GenEd("WRI", 4),
# TODO: Support requirements that have a variable number of courses needed.
GenEd("FOL", 4),
GenEd("ORC", 1),
GenEd("AQR", 1),
# TODO: support using 'number of courses' instead of 'number of credits'
GenEd("SPM", 2),
GenEd("HWC", 1),
# TODO: support requiring that courses be from different departments
GenEd("MCG", 1),
GenEd("MCD", 1),
GenEd("ALS-A", 1),
GenEd("ALS-L", 1),
GenEd("BTS-B", 1),
GenEd("BTS-T", 1),
GenEd("SED", 1),
GenEd("IST", 1),
GenEd("HBS", 1),
GenEd("EIN", 1)
]
def increment(self, i=1.0):
self.credits_taken += i
def checkStanding(self):
return (self.credits_taken >= self.credits_needed)
def __str__(self):
self.checkStanding()
output = "You have " + str(self.credits_taken) + " credits "
output += "out of " + str(self.credits_needed) + " credits needed."
return output
if __name__ == '__main__':
tmp = [
Standing()
]
for i in tmp:
print(i)
if "FYW" in tmp[0].list:
print("success")
|
Implement a rough draft of Standing
|
Implement a rough draft of Standing
|
Python
|
agpl-3.0
|
hawkrives/gobbldygook,hawkrives/gobbldygook,hawkrives/gobbldygook
|
Implement a rough draft of Standing
|
from requirement import GenEd
class Standing:
def __init__(self, credits_needed=35, credits_taken=0.0):
self.credits_needed = credits_needed
self.credits_taken = credits_taken
self.list = [
GenEd("FYW", 1),
GenEd("WRI", 4),
# TODO: Support requirements that have a variable number of courses needed.
GenEd("FOL", 4),
GenEd("ORC", 1),
GenEd("AQR", 1),
# TODO: support using 'number of courses' instead of 'number of credits'
GenEd("SPM", 2),
GenEd("HWC", 1),
# TODO: support requiring that courses be from different departments
GenEd("MCG", 1),
GenEd("MCD", 1),
GenEd("ALS-A", 1),
GenEd("ALS-L", 1),
GenEd("BTS-B", 1),
GenEd("BTS-T", 1),
GenEd("SED", 1),
GenEd("IST", 1),
GenEd("HBS", 1),
GenEd("EIN", 1)
]
def increment(self, i=1.0):
self.credits_taken += i
def checkStanding(self):
return (self.credits_taken >= self.credits_needed)
def __str__(self):
self.checkStanding()
output = "You have " + str(self.credits_taken) + " credits "
output += "out of " + str(self.credits_needed) + " credits needed."
return output
if __name__ == '__main__':
tmp = [
Standing()
]
for i in tmp:
print(i)
if "FYW" in tmp[0].list:
print("success")
|
<commit_before><commit_msg>Implement a rough draft of Standing<commit_after>
|
from requirement import GenEd
class Standing:
def __init__(self, credits_needed=35, credits_taken=0.0):
self.credits_needed = credits_needed
self.credits_taken = credits_taken
self.list = [
GenEd("FYW", 1),
GenEd("WRI", 4),
# TODO: Support requirements that have a variable number of courses needed.
GenEd("FOL", 4),
GenEd("ORC", 1),
GenEd("AQR", 1),
# TODO: support using 'number of courses' instead of 'number of credits'
GenEd("SPM", 2),
GenEd("HWC", 1),
# TODO: support requiring that courses be from different departments
GenEd("MCG", 1),
GenEd("MCD", 1),
GenEd("ALS-A", 1),
GenEd("ALS-L", 1),
GenEd("BTS-B", 1),
GenEd("BTS-T", 1),
GenEd("SED", 1),
GenEd("IST", 1),
GenEd("HBS", 1),
GenEd("EIN", 1)
]
def increment(self, i=1.0):
self.credits_taken += i
def checkStanding(self):
return (self.credits_taken >= self.credits_needed)
def __str__(self):
self.checkStanding()
output = "You have " + str(self.credits_taken) + " credits "
output += "out of " + str(self.credits_needed) + " credits needed."
return output
if __name__ == '__main__':
tmp = [
Standing()
]
for i in tmp:
print(i)
if "FYW" in tmp[0].list:
print("success")
|
Implement a rough draft of Standingfrom requirement import GenEd
class Standing:
def __init__(self, credits_needed=35, credits_taken=0.0):
self.credits_needed = credits_needed
self.credits_taken = credits_taken
self.list = [
GenEd("FYW", 1),
GenEd("WRI", 4),
# TODO: Support requirements that have a variable number of courses needed.
GenEd("FOL", 4),
GenEd("ORC", 1),
GenEd("AQR", 1),
# TODO: support using 'number of courses' instead of 'number of credits'
GenEd("SPM", 2),
GenEd("HWC", 1),
# TODO: support requiring that courses be from different departments
GenEd("MCG", 1),
GenEd("MCD", 1),
GenEd("ALS-A", 1),
GenEd("ALS-L", 1),
GenEd("BTS-B", 1),
GenEd("BTS-T", 1),
GenEd("SED", 1),
GenEd("IST", 1),
GenEd("HBS", 1),
GenEd("EIN", 1)
]
def increment(self, i=1.0):
self.credits_taken += i
def checkStanding(self):
return (self.credits_taken >= self.credits_needed)
def __str__(self):
self.checkStanding()
output = "You have " + str(self.credits_taken) + " credits "
output += "out of " + str(self.credits_needed) + " credits needed."
return output
if __name__ == '__main__':
tmp = [
Standing()
]
for i in tmp:
print(i)
if "FYW" in tmp[0].list:
print("success")
|
<commit_before><commit_msg>Implement a rough draft of Standing<commit_after>from requirement import GenEd
class Standing:
def __init__(self, credits_needed=35, credits_taken=0.0):
self.credits_needed = credits_needed
self.credits_taken = credits_taken
self.list = [
GenEd("FYW", 1),
GenEd("WRI", 4),
# TODO: Support requirements that have a variable number of courses needed.
GenEd("FOL", 4),
GenEd("ORC", 1),
GenEd("AQR", 1),
# TODO: support using 'number of courses' instead of 'number of credits'
GenEd("SPM", 2),
GenEd("HWC", 1),
# TODO: support requiring that courses be from different departments
GenEd("MCG", 1),
GenEd("MCD", 1),
GenEd("ALS-A", 1),
GenEd("ALS-L", 1),
GenEd("BTS-B", 1),
GenEd("BTS-T", 1),
GenEd("SED", 1),
GenEd("IST", 1),
GenEd("HBS", 1),
GenEd("EIN", 1)
]
def increment(self, i=1.0):
self.credits_taken += i
def checkStanding(self):
return (self.credits_taken >= self.credits_needed)
def __str__(self):
self.checkStanding()
output = "You have " + str(self.credits_taken) + " credits "
output += "out of " + str(self.credits_needed) + " credits needed."
return output
if __name__ == '__main__':
tmp = [
Standing()
]
for i in tmp:
print(i)
if "FYW" in tmp[0].list:
print("success")
|
|
00c3a71edc3fd50a3f98ed61afc1544c7aede786
|
ideas/TestPipe.py
|
ideas/TestPipe.py
|
import multiprocessing
def worker(procnum, send_end):
'''worker function'''
result = str(procnum) + ' represent!'
print result
send_end.send(result)
def main():
jobs = []
pipe_list = []
for i in range(5):
recv_end, send_end = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=worker, args=(i, send_end))
jobs.append(p)
pipe_list.append(recv_end)
p.start()
for proc in jobs:
proc.join()
result_list = [x.recv() for x in pipe_list]
print result_list
if __name__ == '__main__':
main()
|
Add test script to explore using pipes instead of queues
|
Add test script to explore using pipes instead of queues
|
Python
|
bsd-3-clause
|
dkoslicki/CMash,dkoslicki/CMash
|
Add test script to explore using pipes instead of queues
|
import multiprocessing
def worker(procnum, send_end):
'''worker function'''
result = str(procnum) + ' represent!'
print result
send_end.send(result)
def main():
jobs = []
pipe_list = []
for i in range(5):
recv_end, send_end = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=worker, args=(i, send_end))
jobs.append(p)
pipe_list.append(recv_end)
p.start()
for proc in jobs:
proc.join()
result_list = [x.recv() for x in pipe_list]
print result_list
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test script to explore using pipes instead of queues<commit_after>
|
import multiprocessing
def worker(procnum, send_end):
'''worker function'''
result = str(procnum) + ' represent!'
print result
send_end.send(result)
def main():
jobs = []
pipe_list = []
for i in range(5):
recv_end, send_end = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=worker, args=(i, send_end))
jobs.append(p)
pipe_list.append(recv_end)
p.start()
for proc in jobs:
proc.join()
result_list = [x.recv() for x in pipe_list]
print result_list
if __name__ == '__main__':
main()
|
Add test script to explore using pipes instead of queuesimport multiprocessing
def worker(procnum, send_end):
'''worker function'''
result = str(procnum) + ' represent!'
print result
send_end.send(result)
def main():
jobs = []
pipe_list = []
for i in range(5):
recv_end, send_end = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=worker, args=(i, send_end))
jobs.append(p)
pipe_list.append(recv_end)
p.start()
for proc in jobs:
proc.join()
result_list = [x.recv() for x in pipe_list]
print result_list
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test script to explore using pipes instead of queues<commit_after>import multiprocessing
def worker(procnum, send_end):
'''worker function'''
result = str(procnum) + ' represent!'
print result
send_end.send(result)
def main():
jobs = []
pipe_list = []
for i in range(5):
recv_end, send_end = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=worker, args=(i, send_end))
jobs.append(p)
pipe_list.append(recv_end)
p.start()
for proc in jobs:
proc.join()
result_list = [x.recv() for x in pipe_list]
print result_list
if __name__ == '__main__':
main()
|
|
0301bc813059473838137b75bc7503cb0fba4af0
|
tempest/tests/common/utils/test_file_utils.py
|
tempest/tests/common/utils/test_file_utils.py
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
from tempest.common.utils import file_utils
from tempest.tests import base
class TestFileUtils(base.TestCase):
def test_have_effective_read_path(self):
with patch('__builtin__.open', mock.mock_open(), create=True):
result = file_utils.have_effective_read_access('fake_path')
self.assertTrue(result)
def test_not_effective_read_path(self):
result = file_utils.have_effective_read_access('fake_path')
self.assertFalse(result)
|
Add unit tests for the tempest.common.utils.file_utils
|
Add unit tests for the tempest.common.utils.file_utils
This commit adds a positive and negative unit tests for the single
method from file_utils in tempest.common.utils.
Partially implements bp unit-tests
Change-Id: Ic19428a10785afd8849442f4d1f8f8e0a87f549b
|
Python
|
apache-2.0
|
LIS/lis-tempest,Lilywei123/tempest,akash1808/tempest,cloudbase/lis-tempest,cisco-openstack/tempest,redhat-cip/tempest,CiscoSystems/tempest,ebagdasa/tempest,afaheem88/tempest,Juniper/tempest,Vaidyanath/tempest,jamielennox/tempest,tudorvio/tempest,xbezdick/tempest,Vaidyanath/tempest,jamielennox/tempest,zsoltdudas/lis-tempest,sebrandon1/tempest,openstack/tempest,alinbalutoiu/tempest,hayderimran7/tempest,eggmaster/tempest,yamt/tempest,Tesora/tesora-tempest,eggmaster/tempest,hpcloud-mon/tempest,openstack/tempest,Tesora/tesora-tempest,tudorvio/tempest,izadorozhna/tempest,alinbalutoiu/tempest,rzarzynski/tempest,dkalashnik/tempest,afaheem88/tempest_neutron,afaheem88/tempest_neutron,ebagdasa/tempest,sebrandon1/tempest,rakeshmi/tempest,izadorozhna/tempest,vmahuli/tempest,JioCloud/tempest,Juraci/tempest,vmahuli/tempest,CiscoSystems/tempest,vedujoshi/os_tempest,Mirantis/tempest,masayukig/tempest,nunogt/tempest,hpcloud-mon/tempest,Mirantis/tempest,neerja28/Tempest,cloudbase/lis-tempest,rakeshmi/tempest,manasi24/jiocloud-tempest-qatempest,dkalashnik/tempest,pczerkas/tempest,vedujoshi/tempest,afaheem88/tempest,tonyli71/tempest,bigswitch/tempest,akash1808/tempest,rzarzynski/tempest,roopali8/tempest,neerja28/Tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,flyingfish007/tempest,roopali8/tempest,pandeyop/tempest,Juniper/tempest,Juraci/tempest,pandeyop/tempest,yamt/tempest,masayukig/tempest,danielmellado/tempest,cisco-openstack/tempest,pczerkas/tempest,nunogt/tempest,manasi24/tempest,tonyli71/tempest,manasi24/jiocloud-tempest-qatempest,jaspreetw/tempest,JioCloud/tempest,FujitsuEnablingSoftwareTechnologyGmbH/tempest,NexusIS/tempest,hayderimran7/tempest,queria/my-tempest,vedujoshi/os_tempest,redhat-cip/tempest,zsoltdudas/lis-tempest,varunarya10/tempest,queria/my-tempest,manasi24/tempest,bigswitch/tempest,LIS/lis-tempest,flyingfish007/tempest,danielmellado/tempest,varunarya10/tempest,xbezdick/tempest,jaspreetw/tempest,vedujoshi/tempest,Lilywei123/tempest,NexusIS/tempest
|
Add unit tests for the tempest.common.utils.file_utils
This commit adds a positive and negative unit tests for the single
method from file_utils in tempest.common.utils.
Partially implements bp unit-tests
Change-Id: Ic19428a10785afd8849442f4d1f8f8e0a87f549b
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
from tempest.common.utils import file_utils
from tempest.tests import base
class TestFileUtils(base.TestCase):
def test_have_effective_read_path(self):
with patch('__builtin__.open', mock.mock_open(), create=True):
result = file_utils.have_effective_read_access('fake_path')
self.assertTrue(result)
def test_not_effective_read_path(self):
result = file_utils.have_effective_read_access('fake_path')
self.assertFalse(result)
|
<commit_before><commit_msg>Add unit tests for the tempest.common.utils.file_utils
This commit adds a positive and negative unit tests for the single
method from file_utils in tempest.common.utils.
Partially implements bp unit-tests
Change-Id: Ic19428a10785afd8849442f4d1f8f8e0a87f549b<commit_after>
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
from tempest.common.utils import file_utils
from tempest.tests import base
class TestFileUtils(base.TestCase):
def test_have_effective_read_path(self):
with patch('__builtin__.open', mock.mock_open(), create=True):
result = file_utils.have_effective_read_access('fake_path')
self.assertTrue(result)
def test_not_effective_read_path(self):
result = file_utils.have_effective_read_access('fake_path')
self.assertFalse(result)
|
Add unit tests for the tempest.common.utils.file_utils
This commit adds a positive and negative unit tests for the single
method from file_utils in tempest.common.utils.
Partially implements bp unit-tests
Change-Id: Ic19428a10785afd8849442f4d1f8f8e0a87f549b# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
from tempest.common.utils import file_utils
from tempest.tests import base
class TestFileUtils(base.TestCase):
def test_have_effective_read_path(self):
with patch('__builtin__.open', mock.mock_open(), create=True):
result = file_utils.have_effective_read_access('fake_path')
self.assertTrue(result)
def test_not_effective_read_path(self):
result = file_utils.have_effective_read_access('fake_path')
self.assertFalse(result)
|
<commit_before><commit_msg>Add unit tests for the tempest.common.utils.file_utils
This commit adds a positive and negative unit tests for the single
method from file_utils in tempest.common.utils.
Partially implements bp unit-tests
Change-Id: Ic19428a10785afd8849442f4d1f8f8e0a87f549b<commit_after># Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
from tempest.common.utils import file_utils
from tempest.tests import base
class TestFileUtils(base.TestCase):
def test_have_effective_read_path(self):
with patch('__builtin__.open', mock.mock_open(), create=True):
result = file_utils.have_effective_read_access('fake_path')
self.assertTrue(result)
def test_not_effective_read_path(self):
result = file_utils.have_effective_read_access('fake_path')
self.assertFalse(result)
|
|
a08977e69fa2121de95efc63f686aae983f0062e
|
hoomd/typeparameterdict.py
|
hoomd/typeparameterdict.py
|
from collections import defaultdict
RequiredArg = None
class TypeParameterDict:
def __init__(self, len_keys=1, **kwargs):
self._dict = defaultdict(kwargs)
self._len_keys = len_keys
def __getitem__(self, key):
keys = self._validate_and_split_key(key)
vals = dict()
for key in keys:
vals[key] = self._dict[key]
return vals
def __setitem__(self, key, val):
keys = self._validate_and_split_key(key)
val = self.validate_values(val)
for key in keys:
self._dict[key] = val
def _validate_and_split_key(self, key):
pass
def _validate_values(self, val):
pass
class AttachedTypeParameterDict:
def __init__(self, types, type_param_dict, cpp_obj, sim):
# add all types to c++
pass
def to_dettached(self):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, val):
pass
|
Add generic dictionary based on types
|
Add generic dictionary based on types
|
Python
|
bsd-3-clause
|
joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue
|
Add generic dictionary based on types
|
from collections import defaultdict
RequiredArg = None
class TypeParameterDict:
def __init__(self, len_keys=1, **kwargs):
self._dict = defaultdict(kwargs)
self._len_keys = len_keys
def __getitem__(self, key):
keys = self._validate_and_split_key(key)
vals = dict()
for key in keys:
vals[key] = self._dict[key]
return vals
def __setitem__(self, key, val):
keys = self._validate_and_split_key(key)
val = self.validate_values(val)
for key in keys:
self._dict[key] = val
def _validate_and_split_key(self, key):
pass
def _validate_values(self, val):
pass
class AttachedTypeParameterDict:
def __init__(self, types, type_param_dict, cpp_obj, sim):
# add all types to c++
pass
def to_dettached(self):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, val):
pass
|
<commit_before><commit_msg>Add generic dictionary based on types<commit_after>
|
from collections import defaultdict
RequiredArg = None
class TypeParameterDict:
def __init__(self, len_keys=1, **kwargs):
self._dict = defaultdict(kwargs)
self._len_keys = len_keys
def __getitem__(self, key):
keys = self._validate_and_split_key(key)
vals = dict()
for key in keys:
vals[key] = self._dict[key]
return vals
def __setitem__(self, key, val):
keys = self._validate_and_split_key(key)
val = self.validate_values(val)
for key in keys:
self._dict[key] = val
def _validate_and_split_key(self, key):
pass
def _validate_values(self, val):
pass
class AttachedTypeParameterDict:
def __init__(self, types, type_param_dict, cpp_obj, sim):
# add all types to c++
pass
def to_dettached(self):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, val):
pass
|
Add generic dictionary based on typesfrom collections import defaultdict
RequiredArg = None
class TypeParameterDict:
def __init__(self, len_keys=1, **kwargs):
self._dict = defaultdict(kwargs)
self._len_keys = len_keys
def __getitem__(self, key):
keys = self._validate_and_split_key(key)
vals = dict()
for key in keys:
vals[key] = self._dict[key]
return vals
def __setitem__(self, key, val):
keys = self._validate_and_split_key(key)
val = self.validate_values(val)
for key in keys:
self._dict[key] = val
def _validate_and_split_key(self, key):
pass
def _validate_values(self, val):
pass
class AttachedTypeParameterDict:
def __init__(self, types, type_param_dict, cpp_obj, sim):
# add all types to c++
pass
def to_dettached(self):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, val):
pass
|
<commit_before><commit_msg>Add generic dictionary based on types<commit_after>from collections import defaultdict
RequiredArg = None
class TypeParameterDict:
def __init__(self, len_keys=1, **kwargs):
self._dict = defaultdict(kwargs)
self._len_keys = len_keys
def __getitem__(self, key):
keys = self._validate_and_split_key(key)
vals = dict()
for key in keys:
vals[key] = self._dict[key]
return vals
def __setitem__(self, key, val):
keys = self._validate_and_split_key(key)
val = self.validate_values(val)
for key in keys:
self._dict[key] = val
def _validate_and_split_key(self, key):
pass
def _validate_values(self, val):
pass
class AttachedTypeParameterDict:
def __init__(self, types, type_param_dict, cpp_obj, sim):
# add all types to c++
pass
def to_dettached(self):
pass
def __getitem__(self, key):
pass
def __setitem__(self, key, val):
pass
|
|
fc80d75dd04c9a5058c687c038308f99d3d254b3
|
config/sublime/toggle_vintageous.py
|
config/sublime/toggle_vintageous.py
|
import sublime
import sublime_plugin
class ToggleVintageousCommand(sublime_plugin.WindowCommand):
def run(self):
settings = sublime.load_settings('Preferences.sublime-settings')
ignored = settings.get('ignored_packages')
if 'Vintageous' in ignored:
ignored.remove('Vintageous')
else:
ignored.append('Vintageous')
settings.set('ignored_packages', ignored)
sublime.save_settings('Preferences.sublime-settings')
|
Add plugin to toggle vintageous
|
Add plugin to toggle vintageous
|
Python
|
mit
|
Rypac/dotfiles,Rypac/dotfiles,Rypac/dotfiles
|
Add plugin to toggle vintageous
|
import sublime
import sublime_plugin
class ToggleVintageousCommand(sublime_plugin.WindowCommand):
def run(self):
settings = sublime.load_settings('Preferences.sublime-settings')
ignored = settings.get('ignored_packages')
if 'Vintageous' in ignored:
ignored.remove('Vintageous')
else:
ignored.append('Vintageous')
settings.set('ignored_packages', ignored)
sublime.save_settings('Preferences.sublime-settings')
|
<commit_before><commit_msg>Add plugin to toggle vintageous<commit_after>
|
import sublime
import sublime_plugin
class ToggleVintageousCommand(sublime_plugin.WindowCommand):
def run(self):
settings = sublime.load_settings('Preferences.sublime-settings')
ignored = settings.get('ignored_packages')
if 'Vintageous' in ignored:
ignored.remove('Vintageous')
else:
ignored.append('Vintageous')
settings.set('ignored_packages', ignored)
sublime.save_settings('Preferences.sublime-settings')
|
Add plugin to toggle vintageousimport sublime
import sublime_plugin
class ToggleVintageousCommand(sublime_plugin.WindowCommand):
def run(self):
settings = sublime.load_settings('Preferences.sublime-settings')
ignored = settings.get('ignored_packages')
if 'Vintageous' in ignored:
ignored.remove('Vintageous')
else:
ignored.append('Vintageous')
settings.set('ignored_packages', ignored)
sublime.save_settings('Preferences.sublime-settings')
|
<commit_before><commit_msg>Add plugin to toggle vintageous<commit_after>import sublime
import sublime_plugin
class ToggleVintageousCommand(sublime_plugin.WindowCommand):
def run(self):
settings = sublime.load_settings('Preferences.sublime-settings')
ignored = settings.get('ignored_packages')
if 'Vintageous' in ignored:
ignored.remove('Vintageous')
else:
ignored.append('Vintageous')
settings.set('ignored_packages', ignored)
sublime.save_settings('Preferences.sublime-settings')
|
|
e4485a24312c447814fb78fa4d4ff8c08e99ced8
|
corehq/apps/locations/management/commands/remove_couch_loc_types.py
|
corehq/apps/locations/management/commands/remove_couch_loc_types.py
|
from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
class IterativeSaver(object):
"""
Bulk save docs in chunks.
with IterativeSaver(db) as iter_db:
for doc in iter_docs(db)
iter_db.save(doc)
"""
def __init__(self, database, chunksize=100):
self.db = database
self.chunksize = chunksize
def __enter__(self):
self.to_save = []
return self
def commit(self):
self.db.bulk_save(self.to_save)
self.to_save = []
def save(self, doc):
self.to_save.append(doc)
if len(self.to_save) >= self.chunksize:
self.commit()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.to_save:
self.commit()
class Command(BaseCommand):
help = "(2015-04-02) Delete the location_types property on Domain"
def handle(self, *args, **kwargs):
domain_ids = [d['id'] for d in Domain.get_all(include_docs=False)]
with IterativeSaver(Domain.get_db()) as iter_db:
for domain_doc in iter_docs(Domain.get_db(), domain_ids):
if (
domain_doc.pop('location_types', None) or
domain_doc.pop('obsolete_location_types', None)
):
print ("Removing location_types from domain {} - {}"
.format(domain_doc['name'], domain_doc['_id']))
iter_db.save(domain_doc)
|
Add mgmt cmd to delete location_types from couch
|
Add mgmt cmd to delete location_types from couch
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq
|
Add mgmt cmd to delete location_types from couch
|
from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
class IterativeSaver(object):
"""
Bulk save docs in chunks.
with IterativeSaver(db) as iter_db:
for doc in iter_docs(db)
iter_db.save(doc)
"""
def __init__(self, database, chunksize=100):
self.db = database
self.chunksize = chunksize
def __enter__(self):
self.to_save = []
return self
def commit(self):
self.db.bulk_save(self.to_save)
self.to_save = []
def save(self, doc):
self.to_save.append(doc)
if len(self.to_save) >= self.chunksize:
self.commit()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.to_save:
self.commit()
class Command(BaseCommand):
help = "(2015-04-02) Delete the location_types property on Domain"
def handle(self, *args, **kwargs):
domain_ids = [d['id'] for d in Domain.get_all(include_docs=False)]
with IterativeSaver(Domain.get_db()) as iter_db:
for domain_doc in iter_docs(Domain.get_db(), domain_ids):
if (
domain_doc.pop('location_types', None) or
domain_doc.pop('obsolete_location_types', None)
):
print ("Removing location_types from domain {} - {}"
.format(domain_doc['name'], domain_doc['_id']))
iter_db.save(domain_doc)
|
<commit_before><commit_msg>Add mgmt cmd to delete location_types from couch<commit_after>
|
from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
class IterativeSaver(object):
"""
Bulk save docs in chunks.
with IterativeSaver(db) as iter_db:
for doc in iter_docs(db)
iter_db.save(doc)
"""
def __init__(self, database, chunksize=100):
self.db = database
self.chunksize = chunksize
def __enter__(self):
self.to_save = []
return self
def commit(self):
self.db.bulk_save(self.to_save)
self.to_save = []
def save(self, doc):
self.to_save.append(doc)
if len(self.to_save) >= self.chunksize:
self.commit()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.to_save:
self.commit()
class Command(BaseCommand):
help = "(2015-04-02) Delete the location_types property on Domain"
def handle(self, *args, **kwargs):
domain_ids = [d['id'] for d in Domain.get_all(include_docs=False)]
with IterativeSaver(Domain.get_db()) as iter_db:
for domain_doc in iter_docs(Domain.get_db(), domain_ids):
if (
domain_doc.pop('location_types', None) or
domain_doc.pop('obsolete_location_types', None)
):
print ("Removing location_types from domain {} - {}"
.format(domain_doc['name'], domain_doc['_id']))
iter_db.save(domain_doc)
|
Add mgmt cmd to delete location_types from couchfrom django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
class IterativeSaver(object):
"""
Bulk save docs in chunks.
with IterativeSaver(db) as iter_db:
for doc in iter_docs(db)
iter_db.save(doc)
"""
def __init__(self, database, chunksize=100):
self.db = database
self.chunksize = chunksize
def __enter__(self):
self.to_save = []
return self
def commit(self):
self.db.bulk_save(self.to_save)
self.to_save = []
def save(self, doc):
self.to_save.append(doc)
if len(self.to_save) >= self.chunksize:
self.commit()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.to_save:
self.commit()
class Command(BaseCommand):
help = "(2015-04-02) Delete the location_types property on Domain"
def handle(self, *args, **kwargs):
domain_ids = [d['id'] for d in Domain.get_all(include_docs=False)]
with IterativeSaver(Domain.get_db()) as iter_db:
for domain_doc in iter_docs(Domain.get_db(), domain_ids):
if (
domain_doc.pop('location_types', None) or
domain_doc.pop('obsolete_location_types', None)
):
print ("Removing location_types from domain {} - {}"
.format(domain_doc['name'], domain_doc['_id']))
iter_db.save(domain_doc)
|
<commit_before><commit_msg>Add mgmt cmd to delete location_types from couch<commit_after>from django.core.management.base import BaseCommand
from dimagi.utils.couch.database import iter_docs
from corehq.apps.domain.models import Domain
class IterativeSaver(object):
"""
Bulk save docs in chunks.
with IterativeSaver(db) as iter_db:
for doc in iter_docs(db)
iter_db.save(doc)
"""
def __init__(self, database, chunksize=100):
self.db = database
self.chunksize = chunksize
def __enter__(self):
self.to_save = []
return self
def commit(self):
self.db.bulk_save(self.to_save)
self.to_save = []
def save(self, doc):
self.to_save.append(doc)
if len(self.to_save) >= self.chunksize:
self.commit()
def __exit__(self, exc_type, exc_val, exc_tb):
if self.to_save:
self.commit()
class Command(BaseCommand):
help = "(2015-04-02) Delete the location_types property on Domain"
def handle(self, *args, **kwargs):
domain_ids = [d['id'] for d in Domain.get_all(include_docs=False)]
with IterativeSaver(Domain.get_db()) as iter_db:
for domain_doc in iter_docs(Domain.get_db(), domain_ids):
if (
domain_doc.pop('location_types', None) or
domain_doc.pop('obsolete_location_types', None)
):
print ("Removing location_types from domain {} - {}"
.format(domain_doc['name'], domain_doc['_id']))
iter_db.save(domain_doc)
|
|
acdb366fb578b798d27e9207aa4306c9082e2458
|
backend/populate_dimkarakostas.py
|
backend/populate_dimkarakostas.py
|
from string import ascii_lowercase
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.70'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
Add test population script for noiseless 'dimkarakostas' endpoint
|
Add test population script for noiseless 'dimkarakostas' endpoint
|
Python
|
mit
|
esarafianou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,dimkarakostas/rupture,dimriou/rupture,dionyziz/rupture,dionyziz/rupture,dimkarakostas/rupture,dimriou/rupture,esarafianou/rupture,dimriou/rupture,dimkarakostas/rupture,dionyziz/rupture,dionyziz/rupture,esarafianou/rupture,esarafianou/rupture,dionyziz/rupture,dimriou/rupture
|
Add test population script for noiseless 'dimkarakostas' endpoint
|
from string import ascii_lowercase
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.70'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
<commit_before><commit_msg>Add test population script for noiseless 'dimkarakostas' endpoint<commit_after>
|
from string import ascii_lowercase
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.70'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
Add test population script for noiseless 'dimkarakostas' endpointfrom string import ascii_lowercase
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.70'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
<commit_before><commit_msg>Add test population script for noiseless 'dimkarakostas' endpoint<commit_after>from string import ascii_lowercase
import django
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dimkarakostas.com/rupture/test.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.70'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
|
19ca3135add52010d4d171af79174033b7e7d680
|
bluesky/tests/test_legacy_plans.py
|
bluesky/tests/test_legacy_plans.py
|
import pytest
import bluesky.plans as bp
def test_legacy_plan_names():
assert bp.outer_product_scan is bp.grid_scan
assert bp.relative_outer_product_scan is bp.rel_grid_scan
assert bp.relative_scan is bp.rel_scan
assert bp.relative_spiral is bp.rel_spiral
assert bp.relative_spiral_fermat is bp.rel_spiral_fermat
assert bp.relative_list_scan is bp.rel_list_scan
assert relative_log_scan is rel_log_scan
assert relative_adaptive_scan is rel_adaptive_scan
|
Add test to check for legacy plan names
|
TST: Add test to check for legacy plan names
|
Python
|
bsd-3-clause
|
ericdill/bluesky,ericdill/bluesky
|
TST: Add test to check for legacy plan names
|
import pytest
import bluesky.plans as bp
def test_legacy_plan_names():
assert bp.outer_product_scan is bp.grid_scan
assert bp.relative_outer_product_scan is bp.rel_grid_scan
assert bp.relative_scan is bp.rel_scan
assert bp.relative_spiral is bp.rel_spiral
assert bp.relative_spiral_fermat is bp.rel_spiral_fermat
assert bp.relative_list_scan is bp.rel_list_scan
assert relative_log_scan is rel_log_scan
assert relative_adaptive_scan is rel_adaptive_scan
|
<commit_before><commit_msg>TST: Add test to check for legacy plan names<commit_after>
|
import pytest
import bluesky.plans as bp
def test_legacy_plan_names():
assert bp.outer_product_scan is bp.grid_scan
assert bp.relative_outer_product_scan is bp.rel_grid_scan
assert bp.relative_scan is bp.rel_scan
assert bp.relative_spiral is bp.rel_spiral
assert bp.relative_spiral_fermat is bp.rel_spiral_fermat
assert bp.relative_list_scan is bp.rel_list_scan
assert relative_log_scan is rel_log_scan
assert relative_adaptive_scan is rel_adaptive_scan
|
TST: Add test to check for legacy plan namesimport pytest
import bluesky.plans as bp
def test_legacy_plan_names():
assert bp.outer_product_scan is bp.grid_scan
assert bp.relative_outer_product_scan is bp.rel_grid_scan
assert bp.relative_scan is bp.rel_scan
assert bp.relative_spiral is bp.rel_spiral
assert bp.relative_spiral_fermat is bp.rel_spiral_fermat
assert bp.relative_list_scan is bp.rel_list_scan
assert relative_log_scan is rel_log_scan
assert relative_adaptive_scan is rel_adaptive_scan
|
<commit_before><commit_msg>TST: Add test to check for legacy plan names<commit_after>import pytest
import bluesky.plans as bp
def test_legacy_plan_names():
assert bp.outer_product_scan is bp.grid_scan
assert bp.relative_outer_product_scan is bp.rel_grid_scan
assert bp.relative_scan is bp.rel_scan
assert bp.relative_spiral is bp.rel_spiral
assert bp.relative_spiral_fermat is bp.rel_spiral_fermat
assert bp.relative_list_scan is bp.rel_list_scan
assert relative_log_scan is rel_log_scan
assert relative_adaptive_scan is rel_adaptive_scan
|
|
ea98ee9c0a4d7e49a6c8200d02533d12ab01f664
|
tests/test_kdf.py
|
tests/test_kdf.py
|
from zerodb.crypto import kdf
import ZEO
test_key = b'x' * 32
test_args_1 = dict(
username='user1', password='password1',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
test_args_2 = dict(
username='user1', password='password2',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
def test_kdfs():
# Test that all methods give the same password hash
kfp_password, kfp_key = kdf.key_from_password(**test_args_1)
kfc_password, kfc_key = kdf.key_from_cert(**test_args_1)
hash_password, hash_key = kdf.hash_password(**test_args_1)
# Password hash should be always the same (!)
assert kfp_password == kfc_password == hash_password
# Last one doesn't touch key
assert hash_key == test_key
# All methods make different encryption keys
assert len(set([kfp_key, kfc_key, hash_key])) == 3
kfp_password_2, kfp_key_2 = kdf.key_from_password(**test_args_2)
assert kfp_password_2 != kfp_password
assert kfp_key_2 != kfp_key
kfc_password_2, kfc_key_2 = kdf.key_from_cert(**test_args_2)
assert kfc_password_2 != kfc_password
assert kfc_key_2 == kfc_key
hash_password_2, hash_key_2 = kdf.hash_password(**test_args_2)
assert hash_password_2 != hash_password
assert hash_key == test_key
assert kfp_password_2 == kfc_password_2 == hash_password_2
assert kfp_password != test_args_1['password']
assert kfp_password_2 != test_args_2['password']
|
Test kdfs just in case
|
Test kdfs just in case
|
Python
|
agpl-3.0
|
zerodb/zerodb,zero-db/zerodb,zerodb/zerodb,zero-db/zerodb
|
Test kdfs just in case
|
from zerodb.crypto import kdf
import ZEO
test_key = b'x' * 32
test_args_1 = dict(
username='user1', password='password1',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
test_args_2 = dict(
username='user1', password='password2',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
def test_kdfs():
# Test that all methods give the same password hash
kfp_password, kfp_key = kdf.key_from_password(**test_args_1)
kfc_password, kfc_key = kdf.key_from_cert(**test_args_1)
hash_password, hash_key = kdf.hash_password(**test_args_1)
# Password hash should be always the same (!)
assert kfp_password == kfc_password == hash_password
# Last one doesn't touch key
assert hash_key == test_key
# All methods make different encryption keys
assert len(set([kfp_key, kfc_key, hash_key])) == 3
kfp_password_2, kfp_key_2 = kdf.key_from_password(**test_args_2)
assert kfp_password_2 != kfp_password
assert kfp_key_2 != kfp_key
kfc_password_2, kfc_key_2 = kdf.key_from_cert(**test_args_2)
assert kfc_password_2 != kfc_password
assert kfc_key_2 == kfc_key
hash_password_2, hash_key_2 = kdf.hash_password(**test_args_2)
assert hash_password_2 != hash_password
assert hash_key == test_key
assert kfp_password_2 == kfc_password_2 == hash_password_2
assert kfp_password != test_args_1['password']
assert kfp_password_2 != test_args_2['password']
|
<commit_before><commit_msg>Test kdfs just in case<commit_after>
|
from zerodb.crypto import kdf
import ZEO
test_key = b'x' * 32
test_args_1 = dict(
username='user1', password='password1',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
test_args_2 = dict(
username='user1', password='password2',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
def test_kdfs():
# Test that all methods give the same password hash
kfp_password, kfp_key = kdf.key_from_password(**test_args_1)
kfc_password, kfc_key = kdf.key_from_cert(**test_args_1)
hash_password, hash_key = kdf.hash_password(**test_args_1)
# Password hash should be always the same (!)
assert kfp_password == kfc_password == hash_password
# Last one doesn't touch key
assert hash_key == test_key
# All methods make different encryption keys
assert len(set([kfp_key, kfc_key, hash_key])) == 3
kfp_password_2, kfp_key_2 = kdf.key_from_password(**test_args_2)
assert kfp_password_2 != kfp_password
assert kfp_key_2 != kfp_key
kfc_password_2, kfc_key_2 = kdf.key_from_cert(**test_args_2)
assert kfc_password_2 != kfc_password
assert kfc_key_2 == kfc_key
hash_password_2, hash_key_2 = kdf.hash_password(**test_args_2)
assert hash_password_2 != hash_password
assert hash_key == test_key
assert kfp_password_2 == kfc_password_2 == hash_password_2
assert kfp_password != test_args_1['password']
assert kfp_password_2 != test_args_2['password']
|
Test kdfs just in casefrom zerodb.crypto import kdf
import ZEO
test_key = b'x' * 32
test_args_1 = dict(
username='user1', password='password1',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
test_args_2 = dict(
username='user1', password='password2',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
def test_kdfs():
# Test that all methods give the same password hash
kfp_password, kfp_key = kdf.key_from_password(**test_args_1)
kfc_password, kfc_key = kdf.key_from_cert(**test_args_1)
hash_password, hash_key = kdf.hash_password(**test_args_1)
# Password hash should be always the same (!)
assert kfp_password == kfc_password == hash_password
# Last one doesn't touch key
assert hash_key == test_key
# All methods make different encryption keys
assert len(set([kfp_key, kfc_key, hash_key])) == 3
kfp_password_2, kfp_key_2 = kdf.key_from_password(**test_args_2)
assert kfp_password_2 != kfp_password
assert kfp_key_2 != kfp_key
kfc_password_2, kfc_key_2 = kdf.key_from_cert(**test_args_2)
assert kfc_password_2 != kfc_password
assert kfc_key_2 == kfc_key
hash_password_2, hash_key_2 = kdf.hash_password(**test_args_2)
assert hash_password_2 != hash_password
assert hash_key == test_key
assert kfp_password_2 == kfc_password_2 == hash_password_2
assert kfp_password != test_args_1['password']
assert kfp_password_2 != test_args_2['password']
|
<commit_before><commit_msg>Test kdfs just in case<commit_after>from zerodb.crypto import kdf
import ZEO
test_key = b'x' * 32
test_args_1 = dict(
username='user1', password='password1',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
test_args_2 = dict(
username='user1', password='password2',
key_file=ZEO.tests.testssl.client_key,
cert_file=ZEO.tests.testssl.client_cert,
appname='zerodb.com', key=test_key)
def test_kdfs():
# Test that all methods give the same password hash
kfp_password, kfp_key = kdf.key_from_password(**test_args_1)
kfc_password, kfc_key = kdf.key_from_cert(**test_args_1)
hash_password, hash_key = kdf.hash_password(**test_args_1)
# Password hash should be always the same (!)
assert kfp_password == kfc_password == hash_password
# Last one doesn't touch key
assert hash_key == test_key
# All methods make different encryption keys
assert len(set([kfp_key, kfc_key, hash_key])) == 3
kfp_password_2, kfp_key_2 = kdf.key_from_password(**test_args_2)
assert kfp_password_2 != kfp_password
assert kfp_key_2 != kfp_key
kfc_password_2, kfc_key_2 = kdf.key_from_cert(**test_args_2)
assert kfc_password_2 != kfc_password
assert kfc_key_2 == kfc_key
hash_password_2, hash_key_2 = kdf.hash_password(**test_args_2)
assert hash_password_2 != hash_password
assert hash_key == test_key
assert kfp_password_2 == kfc_password_2 == hash_password_2
assert kfp_password != test_args_1['password']
assert kfp_password_2 != test_args_2['password']
|
|
75a2d7d8602c62a303b1ef0c4e75b337e08d8f02
|
utils/studs_member_picture_url.py
|
utils/studs_member_picture_url.py
|
#!/usr/bin/python3
import pymongo
import unicodedata as ud
DB_USER = ''
DB_PASSWORD = ''
DB_URL = ''
# Add Studs members here
STUDS_MEMBERS = [
'Micky Mick',
'Lerp Lerpsson',
]
CDN_MEMBERS_URL = ''
uri = 'mongodb://{}:{}@{}'.format(DB_USER, DB_PASSWORD, DB_URL)
normalize = lambda name: ud.normalize('NFKD', name).encode('ASCII', 'ignore').decode('utf-8')
success = '\x1b[6;30;42m[SUCCESS]\x1b[0m '
error = '\x1b[6;30;41m[ERROR]\x1b[0m '
def generate_picture_url(first_name, last_name, file_format='jpg'):
names = map(lambda n: n.split(' '), [first_name, last_name])
flattened = [val for sublist in names for val in sublist]
normalized_name = '-'.join(map(lambda n: normalize(n.lower()), flattened))
return '{}{}.{}'.format(CDN_MEMBERS_URL, normalized_name, file_format)
def main():
client = pymongo.MongoClient(uri)
db = client.get_database()
user_collection = db['users']
processed = 0
print('Processing {} members'.format(len(STUDS_MEMBERS)))
print()
for member in STUDS_MEMBERS:
# Accounting for first names that have spaces
first_name, last_name = member.rsplit(' ', 1)
query = {
'profile.firstName': first_name,
'profile.lastName': last_name,
'profile.memberType': 'studs_member',
}
count = user_collection.count_documents(query)
if count > 0:
if count == 1:
url = generate_picture_url(first_name, last_name)
user_collection.update_one(query,
{
'$set': {
'profile.picture': url
}
}
)
print(success + 'Updated picture URL for {} {} ({})'.format(first_name, last_name, url))
processed += 1
else:
print(error + 'More than one member with the name {} {}, skipping'.format(first_name, last_name))
else:
print(error + 'Could not find {} {}'.format(first_name, last_name))
print()
client.close()
print(success + 'Done... updated {}/{} members'.format(processed, len(STUDS_MEMBERS)))
if __name__ == '__main__':
main()
|
Add script for setting image urls
|
Add script for setting image urls
In case anyone wants to use it in the future.
|
Python
|
mit
|
studieresan/overlord,studieresan/overlord,studieresan/overlord,studieresan/overlord,studieresan/overlord
|
Add script for setting image urls
In case anyone wants to use it in the future.
|
#!/usr/bin/python3
import pymongo
import unicodedata as ud
DB_USER = ''
DB_PASSWORD = ''
DB_URL = ''
# Add Studs members here
STUDS_MEMBERS = [
'Micky Mick',
'Lerp Lerpsson',
]
CDN_MEMBERS_URL = ''
uri = 'mongodb://{}:{}@{}'.format(DB_USER, DB_PASSWORD, DB_URL)
normalize = lambda name: ud.normalize('NFKD', name).encode('ASCII', 'ignore').decode('utf-8')
success = '\x1b[6;30;42m[SUCCESS]\x1b[0m '
error = '\x1b[6;30;41m[ERROR]\x1b[0m '
def generate_picture_url(first_name, last_name, file_format='jpg'):
names = map(lambda n: n.split(' '), [first_name, last_name])
flattened = [val for sublist in names for val in sublist]
normalized_name = '-'.join(map(lambda n: normalize(n.lower()), flattened))
return '{}{}.{}'.format(CDN_MEMBERS_URL, normalized_name, file_format)
def main():
client = pymongo.MongoClient(uri)
db = client.get_database()
user_collection = db['users']
processed = 0
print('Processing {} members'.format(len(STUDS_MEMBERS)))
print()
for member in STUDS_MEMBERS:
# Accounting for first names that have spaces
first_name, last_name = member.rsplit(' ', 1)
query = {
'profile.firstName': first_name,
'profile.lastName': last_name,
'profile.memberType': 'studs_member',
}
count = user_collection.count_documents(query)
if count > 0:
if count == 1:
url = generate_picture_url(first_name, last_name)
user_collection.update_one(query,
{
'$set': {
'profile.picture': url
}
}
)
print(success + 'Updated picture URL for {} {} ({})'.format(first_name, last_name, url))
processed += 1
else:
print(error + 'More than one member with the name {} {}, skipping'.format(first_name, last_name))
else:
print(error + 'Could not find {} {}'.format(first_name, last_name))
print()
client.close()
print(success + 'Done... updated {}/{} members'.format(processed, len(STUDS_MEMBERS)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for setting image urls
In case anyone wants to use it in the future.<commit_after>
|
#!/usr/bin/python3
import pymongo
import unicodedata as ud
DB_USER = ''
DB_PASSWORD = ''
DB_URL = ''
# Add Studs members here
STUDS_MEMBERS = [
'Micky Mick',
'Lerp Lerpsson',
]
CDN_MEMBERS_URL = ''
uri = 'mongodb://{}:{}@{}'.format(DB_USER, DB_PASSWORD, DB_URL)
normalize = lambda name: ud.normalize('NFKD', name).encode('ASCII', 'ignore').decode('utf-8')
success = '\x1b[6;30;42m[SUCCESS]\x1b[0m '
error = '\x1b[6;30;41m[ERROR]\x1b[0m '
def generate_picture_url(first_name, last_name, file_format='jpg'):
names = map(lambda n: n.split(' '), [first_name, last_name])
flattened = [val for sublist in names for val in sublist]
normalized_name = '-'.join(map(lambda n: normalize(n.lower()), flattened))
return '{}{}.{}'.format(CDN_MEMBERS_URL, normalized_name, file_format)
def main():
client = pymongo.MongoClient(uri)
db = client.get_database()
user_collection = db['users']
processed = 0
print('Processing {} members'.format(len(STUDS_MEMBERS)))
print()
for member in STUDS_MEMBERS:
# Accounting for first names that have spaces
first_name, last_name = member.rsplit(' ', 1)
query = {
'profile.firstName': first_name,
'profile.lastName': last_name,
'profile.memberType': 'studs_member',
}
count = user_collection.count_documents(query)
if count > 0:
if count == 1:
url = generate_picture_url(first_name, last_name)
user_collection.update_one(query,
{
'$set': {
'profile.picture': url
}
}
)
print(success + 'Updated picture URL for {} {} ({})'.format(first_name, last_name, url))
processed += 1
else:
print(error + 'More than one member with the name {} {}, skipping'.format(first_name, last_name))
else:
print(error + 'Could not find {} {}'.format(first_name, last_name))
print()
client.close()
print(success + 'Done... updated {}/{} members'.format(processed, len(STUDS_MEMBERS)))
if __name__ == '__main__':
main()
|
Add script for setting image urls
In case anyone wants to use it in the future.#!/usr/bin/python3
import pymongo
import unicodedata as ud
DB_USER = ''
DB_PASSWORD = ''
DB_URL = ''
# Add Studs members here
STUDS_MEMBERS = [
'Micky Mick',
'Lerp Lerpsson',
]
CDN_MEMBERS_URL = ''
uri = 'mongodb://{}:{}@{}'.format(DB_USER, DB_PASSWORD, DB_URL)
normalize = lambda name: ud.normalize('NFKD', name).encode('ASCII', 'ignore').decode('utf-8')
success = '\x1b[6;30;42m[SUCCESS]\x1b[0m '
error = '\x1b[6;30;41m[ERROR]\x1b[0m '
def generate_picture_url(first_name, last_name, file_format='jpg'):
names = map(lambda n: n.split(' '), [first_name, last_name])
flattened = [val for sublist in names for val in sublist]
normalized_name = '-'.join(map(lambda n: normalize(n.lower()), flattened))
return '{}{}.{}'.format(CDN_MEMBERS_URL, normalized_name, file_format)
def main():
client = pymongo.MongoClient(uri)
db = client.get_database()
user_collection = db['users']
processed = 0
print('Processing {} members'.format(len(STUDS_MEMBERS)))
print()
for member in STUDS_MEMBERS:
# Accounting for first names that have spaces
first_name, last_name = member.rsplit(' ', 1)
query = {
'profile.firstName': first_name,
'profile.lastName': last_name,
'profile.memberType': 'studs_member',
}
count = user_collection.count_documents(query)
if count > 0:
if count == 1:
url = generate_picture_url(first_name, last_name)
user_collection.update_one(query,
{
'$set': {
'profile.picture': url
}
}
)
print(success + 'Updated picture URL for {} {} ({})'.format(first_name, last_name, url))
processed += 1
else:
print(error + 'More than one member with the name {} {}, skipping'.format(first_name, last_name))
else:
print(error + 'Could not find {} {}'.format(first_name, last_name))
print()
client.close()
print(success + 'Done... updated {}/{} members'.format(processed, len(STUDS_MEMBERS)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for setting image urls
In case anyone wants to use it in the future.<commit_after>#!/usr/bin/python3
import pymongo
import unicodedata as ud
DB_USER = ''
DB_PASSWORD = ''
DB_URL = ''
# Add Studs members here
STUDS_MEMBERS = [
'Micky Mick',
'Lerp Lerpsson',
]
CDN_MEMBERS_URL = ''
uri = 'mongodb://{}:{}@{}'.format(DB_USER, DB_PASSWORD, DB_URL)
normalize = lambda name: ud.normalize('NFKD', name).encode('ASCII', 'ignore').decode('utf-8')
success = '\x1b[6;30;42m[SUCCESS]\x1b[0m '
error = '\x1b[6;30;41m[ERROR]\x1b[0m '
def generate_picture_url(first_name, last_name, file_format='jpg'):
names = map(lambda n: n.split(' '), [first_name, last_name])
flattened = [val for sublist in names for val in sublist]
normalized_name = '-'.join(map(lambda n: normalize(n.lower()), flattened))
return '{}{}.{}'.format(CDN_MEMBERS_URL, normalized_name, file_format)
def main():
client = pymongo.MongoClient(uri)
db = client.get_database()
user_collection = db['users']
processed = 0
print('Processing {} members'.format(len(STUDS_MEMBERS)))
print()
for member in STUDS_MEMBERS:
# Accounting for first names that have spaces
first_name, last_name = member.rsplit(' ', 1)
query = {
'profile.firstName': first_name,
'profile.lastName': last_name,
'profile.memberType': 'studs_member',
}
count = user_collection.count_documents(query)
if count > 0:
if count == 1:
url = generate_picture_url(first_name, last_name)
user_collection.update_one(query,
{
'$set': {
'profile.picture': url
}
}
)
print(success + 'Updated picture URL for {} {} ({})'.format(first_name, last_name, url))
processed += 1
else:
print(error + 'More than one member with the name {} {}, skipping'.format(first_name, last_name))
else:
print(error + 'Could not find {} {}'.format(first_name, last_name))
print()
client.close()
print(success + 'Done... updated {}/{} members'.format(processed, len(STUDS_MEMBERS)))
if __name__ == '__main__':
main()
|