Dataset columns and value statistics:

| column | type | value statistics |
|---|---|---|
| commit | string | 40 chars |
| old_file | string | 4-118 chars |
| new_file | string | 4-118 chars |
| old_contents | string | 0-2.94k chars |
| new_contents | string | 1-4.43k chars |
| subject | string | 15-444 chars |
| message | string | 16-3.45k chars |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5-43.2k chars |
| prompt | string | 17-4.58k chars |
| response | string | 1-4.43k chars |
| prompt_tagged | string | 58-4.62k chars |
| response_tagged | string | 1-4.43k chars |
| text | string | 132-7.29k chars |
| text_tagged | string | 173-7.33k chars |
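The rest of this page lists example records, one field per block separated by `|` lines. As a minimal sketch of how a record could be consumed, assuming the table is published as a standard Hugging Face dataset (the repository id below is a placeholder; only the column names above come from this page), the tagged fields appear to be simple concatenations of the other columns:

```python
# Sketch only: assumes the table is available via the `datasets` library.
# "user/commitpack-style-python" is a placeholder repository id, not the real name.
from datasets import load_dataset

ds = load_dataset("user/commitpack-style-python", split="train")
row = ds[0]

# In the example records below, text_tagged concatenates the old contents,
# the commit subject, and the new contents with marker tokens.
rebuilt = (
    "<commit_before>" + row["old_contents"]
    + "<commit_msg>" + row["subject"]
    + "<commit_after>" + row["new_contents"]
)
print(rebuilt == row["text_tagged"])  # expected to match, up to whitespace this preview drops
```

The `text` column appears to follow the same pattern without the marker tokens: the subject immediately followed by the new contents.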
f02f8f5a68bd26d1ece32c50482729b7774b6e2a
|
scripts/simple-analysis.py
|
scripts/simple-analysis.py
|
#!/usr/bin/python
from __future__ import print_function, division
import networkx
from reddit_meatspace.models import MeetupConnections
connections = MeetupConnections._byID("2013")
digraph = networkx.DiGraph()
for connection, timestamp in connections._values().iteritems():
left, right = connection.split(":")
digraph.add_edge(left, right)
lenient = digraph.to_undirected(reciprocal=False)
strict = digraph.to_undirected(reciprocal=True)
meetups = networkx.connected_component_subgraphs(lenient)
print("{0} people @ {1} meetups (avg. {2:.2} per meetup)".format(
len(lenient), len(meetups), len(lenient) / len(meetups)))
print("{0} connections of {1} distinct meetings ({2:.2%})".format(strict.size(), lenient.size(), strict.size() / lenient.size()))
|
Add a simple script for looking at connections.
|
Add a simple script for looking at connections.
|
Python
|
bsd-3-clause
|
reddit/reddit-plugin-meatspace,reddit/reddit-plugin-meatspace,reddit/reddit-plugin-meatspace
|
Add a simple script for looking at connections.
|
#!/usr/bin/python
from __future__ import print_function, division
import networkx
from reddit_meatspace.models import MeetupConnections
connections = MeetupConnections._byID("2013")
digraph = networkx.DiGraph()
for connection, timestamp in connections._values().iteritems():
left, right = connection.split(":")
digraph.add_edge(left, right)
lenient = digraph.to_undirected(reciprocal=False)
strict = digraph.to_undirected(reciprocal=True)
meetups = networkx.connected_component_subgraphs(lenient)
print("{0} people @ {1} meetups (avg. {2:.2} per meetup)".format(
len(lenient), len(meetups), len(lenient) / len(meetups)))
print("{0} connections of {1} distinct meetings ({2:.2%})".format(strict.size(), lenient.size(), strict.size() / lenient.size()))
|
<commit_before><commit_msg>Add a simple script for looking at connections.<commit_after>
|
#!/usr/bin/python
from __future__ import print_function, division
import networkx
from reddit_meatspace.models import MeetupConnections
connections = MeetupConnections._byID("2013")
digraph = networkx.DiGraph()
for connection, timestamp in connections._values().iteritems():
left, right = connection.split(":")
digraph.add_edge(left, right)
lenient = digraph.to_undirected(reciprocal=False)
strict = digraph.to_undirected(reciprocal=True)
meetups = networkx.connected_component_subgraphs(lenient)
print("{0} people @ {1} meetups (avg. {2:.2} per meetup)".format(
len(lenient), len(meetups), len(lenient) / len(meetups)))
print("{0} connections of {1} distinct meetings ({2:.2%})".format(strict.size(), lenient.size(), strict.size() / lenient.size()))
|
Add a simple script for looking at connections.#!/usr/bin/python
from __future__ import print_function, division
import networkx
from reddit_meatspace.models import MeetupConnections
connections = MeetupConnections._byID("2013")
digraph = networkx.DiGraph()
for connection, timestamp in connections._values().iteritems():
left, right = connection.split(":")
digraph.add_edge(left, right)
lenient = digraph.to_undirected(reciprocal=False)
strict = digraph.to_undirected(reciprocal=True)
meetups = networkx.connected_component_subgraphs(lenient)
print("{0} people @ {1} meetups (avg. {2:.2} per meetup)".format(
len(lenient), len(meetups), len(lenient) / len(meetups)))
print("{0} connections of {1} distinct meetings ({2:.2%})".format(strict.size(), lenient.size(), strict.size() / lenient.size()))
|
<commit_before><commit_msg>Add a simple script for looking at connections.<commit_after>#!/usr/bin/python
from __future__ import print_function, division
import networkx
from reddit_meatspace.models import MeetupConnections
connections = MeetupConnections._byID("2013")
digraph = networkx.DiGraph()
for connection, timestamp in connections._values().iteritems():
left, right = connection.split(":")
digraph.add_edge(left, right)
lenient = digraph.to_undirected(reciprocal=False)
strict = digraph.to_undirected(reciprocal=True)
meetups = networkx.connected_component_subgraphs(lenient)
print("{0} people @ {1} meetups (avg. {2:.2} per meetup)".format(
len(lenient), len(meetups), len(lenient) / len(meetups)))
print("{0} connections of {1} distinct meetings ({2:.2%})".format(strict.size(), lenient.size(), strict.size() / lenient.size()))
|
|
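The script in the record above targets Python 2 (`iteritems`) and an old networkx API; `connected_component_subgraphs()` was removed in networkx 2.4. A hedged sketch of the same analysis against current Python and networkx, with the `reddit_meatspace` data access left out of scope:

```python
# Hedged sketch: same connection analysis, updated for Python 3 and networkx >= 2.4.
# `connections` is placeholder input in the "left:right" key format used above.
import networkx as nx

def summarize(connections):
    digraph = nx.DiGraph()
    for connection in connections:          # keys look like "userA:userB"
        left, right = connection.split(":")
        digraph.add_edge(left, right)
    lenient = digraph.to_undirected(reciprocal=False)
    strict = digraph.to_undirected(reciprocal=True)
    # Replacement for the removed connected_component_subgraphs() helper.
    meetups = [lenient.subgraph(c) for c in nx.connected_components(lenient)]
    print(f"{len(lenient)} people @ {len(meetups)} meetups "
          f"(avg. {len(lenient) / len(meetups):.2f} per meetup)")
    print(f"{strict.size()} reciprocal connections of {lenient.size()} "
          f"distinct meetings ({strict.size() / lenient.size():.2%})")

summarize(["alice:bob", "bob:alice", "carol:dave"])  # toy placeholder data
```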
39b63523634801fe8ef2cca03e11b3875d84cdbd
|
flare/flare_io.py
|
flare/flare_io.py
|
from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
f = open(filename, 'w')
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
f.close()
def md_trajectory_from_file(filename):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
f = open(filename, 'r')
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
|
from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename: str, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
with open(filename, 'w') as f:
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
def md_trajectory_from_file(filename: str):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
with open(filename, 'r') as f:
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
|
Tweak syntax for f.close() concision, add typehints
|
Tweak syntax for f.close() concision, add typehints
|
Python
|
mit
|
mir-group/flare,mir-group/flare
|
from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
f = open(filename, 'w')
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
f.close()
def md_trajectory_from_file(filename):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
f = open(filename, 'r')
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
Tweak syntax for f.close() concision, add typehints
|
from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename: str, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
with open(filename, 'w') as f:
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
def md_trajectory_from_file(filename: str):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
with open(filename, 'r') as f:
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
|
<commit_before>from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
f = open(filename, 'w')
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
f.close()
def md_trajectory_from_file(filename):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
f = open(filename, 'r')
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
<commit_msg>Tweak syntax for f.close() concision, add typehints<commit_after>
|
from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename: str, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
with open(filename, 'w') as f:
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
def md_trajectory_from_file(filename: str):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
with open(filename, 'r') as f:
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
|
from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
f = open(filename, 'w')
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
f.close()
def md_trajectory_from_file(filename):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
f = open(filename, 'r')
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
Tweak syntax for f.close() concision, add typehintsfrom flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename: str, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
with open(filename, 'w') as f:
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
def md_trajectory_from_file(filename: str):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
with open(filename, 'r') as f:
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
|
<commit_before>from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
f = open(filename, 'w')
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
f.close()
def md_trajectory_from_file(filename):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
f = open(filename, 'r')
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
<commit_msg>Tweak syntax for f.close() concision, add typehints<commit_after>from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename: str, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
with open(filename, 'w') as f:
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
def md_trajectory_from_file(filename: str):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
with open(filename, 'r') as f:
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
|
cb0d6124ea31e8fb9ff8957072a2b881b882127e
|
examples/hero9_timelapse_webcam.py
|
examples/hero9_timelapse_webcam.py
|
import sys
import time
from goprocam import GoProCamera, constants
import threading
import logging
"""
I use PM2 to start my GoPro cameras, using a Raspberry Pi 4, works perfectly.
pm2 start timelapse.py --cron "30 7 * * *" --log timelapse.log --no-autorestart
This script will override some settings for reliability:
Voice control: OFF
AutoPower off: NEVER
Beeps: OFF (Do not want the camera beeping at 6AM)
NightLapse configuration left untouched, I recommend always using Auto shutter for sunrise and locking the White Balance to 4000k or higher.
"""
def start_timelapse(interface):
gopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP(
interface), camera=constants.gpcontrol, webcam_device=interface)
logging.info(
"Started goprocam instance with interface {}".format(interface))
gopro.gpControlSet(constants.Setup.VOICE_CONTROL,
constants.Setup.VoiceControl.OFF)
gopro.gpControlSet(constants.Setup.AUTO_OFF, constants.Setup.AutoOff.Never)
logging.info("All config set")
gopro.mode(constants.Mode.MultiShotMode,
constants.Mode.SubMode.MultiShot.NightLapse)
gopro.shutter(constants.start)
logging.info("Started timelapse")
cameras = sys.argv[1]
cameras = cameras.split(",")
for interface in cameras:
thr = threading.Thread(target=start_timelapse, args=(interface,))
thr.start()
|
Add Timelapse script for sunrise timelapses
|
Add Timelapse script for sunrise timelapses
|
Python
|
mit
|
KonradIT/gopro-py-api,KonradIT/gopro-py-api
|
Add Timelapse script for sunrise timelapses
|
import sys
import time
from goprocam import GoProCamera, constants
import threading
import logging
"""
I use PM2 to start my GoPro cameras, using a Raspberry Pi 4, works perfectly.
pm2 start timelapse.py --cron "30 7 * * *" --log timelapse.log --no-autorestart
This script will override some settings for reliability:
Voice control: OFF
AutoPower off: NEVER
Beeps: OFF (Do not want the camera beeping at 6AM)
NightLapse configuration left untouched, I recommend always using Auto shutter for sunrise and locking the White Balance to 4000k or higher.
"""
def start_timelapse(interface):
gopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP(
interface), camera=constants.gpcontrol, webcam_device=interface)
logging.info(
"Started goprocam instance with interface {}".format(interface))
gopro.gpControlSet(constants.Setup.VOICE_CONTROL,
constants.Setup.VoiceControl.OFF)
gopro.gpControlSet(constants.Setup.AUTO_OFF, constants.Setup.AutoOff.Never)
logging.info("All config set")
gopro.mode(constants.Mode.MultiShotMode,
constants.Mode.SubMode.MultiShot.NightLapse)
gopro.shutter(constants.start)
logging.info("Started timelapse")
cameras = sys.argv[1]
cameras = cameras.split(",")
for interface in cameras:
thr = threading.Thread(target=start_timelapse, args=(interface,))
thr.start()
|
<commit_before><commit_msg>Add Timelapse script for sunrise timelapses<commit_after>
|
import sys
import time
from goprocam import GoProCamera, constants
import threading
import logging
"""
I use PM2 to start my GoPro cameras, using a Raspberry Pi 4, works perfectly.
pm2 start timelapse.py --cron "30 7 * * *" --log timelapse.log --no-autorestart
This script will override some settings for reliability:
Voice control: OFF
AutoPower off: NEVER
Beeps: OFF (Do not want the camera beeping at 6AM)
NightLapse configuration left untouched, I recommend always using Auto shutter for sunrise and locking the White Balance to 4000k or higher.
"""
def start_timelapse(interface):
gopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP(
interface), camera=constants.gpcontrol, webcam_device=interface)
logging.info(
"Started goprocam instance with interface {}".format(interface))
gopro.gpControlSet(constants.Setup.VOICE_CONTROL,
constants.Setup.VoiceControl.OFF)
gopro.gpControlSet(constants.Setup.AUTO_OFF, constants.Setup.AutoOff.Never)
logging.info("All config set")
gopro.mode(constants.Mode.MultiShotMode,
constants.Mode.SubMode.MultiShot.NightLapse)
gopro.shutter(constants.start)
logging.info("Started timelapse")
cameras = sys.argv[1]
cameras = cameras.split(",")
for interface in cameras:
thr = threading.Thread(target=start_timelapse, args=(interface,))
thr.start()
|
Add Timelapse script for sunrise timelapsesimport sys
import time
from goprocam import GoProCamera, constants
import threading
import logging
"""
I use PM2 to start my GoPro cameras, using a Raspberry Pi 4, works perfectly.
pm2 start timelapse.py --cron "30 7 * * *" --log timelapse.log --no-autorestart
This script will override some settings for reliability:
Voice control: OFF
AutoPower off: NEVER
Beeps: OFF (Do not want the camera beeping at 6AM)
NightLapse configuration left untouched, I recommend always using Auto shutter for sunrise and locking the White Balance to 4000k or higher.
"""
def start_timelapse(interface):
gopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP(
interface), camera=constants.gpcontrol, webcam_device=interface)
logging.info(
"Started goprocam instance with interface {}".format(interface))
gopro.gpControlSet(constants.Setup.VOICE_CONTROL,
constants.Setup.VoiceControl.OFF)
gopro.gpControlSet(constants.Setup.AUTO_OFF, constants.Setup.AutoOff.Never)
logging.info("All config set")
gopro.mode(constants.Mode.MultiShotMode,
constants.Mode.SubMode.MultiShot.NightLapse)
gopro.shutter(constants.start)
logging.info("Started timelapse")
cameras = sys.argv[1]
cameras = cameras.split(",")
for interface in cameras:
thr = threading.Thread(target=start_timelapse, args=(interface,))
thr.start()
|
<commit_before><commit_msg>Add Timelapse script for sunrise timelapses<commit_after>import sys
import time
from goprocam import GoProCamera, constants
import threading
import logging
"""
I use PM2 to start my GoPro cameras, using a Raspberry Pi 4, works perfectly.
pm2 start timelapse.py --cron "30 7 * * *" --log timelapse.log --no-autorestart
This script will override some settings for reliability:
Voice control: OFF
AutoPower off: NEVER
Beeps: OFF (Do not want the camera beeping at 6AM)
NightLapse configuration left untouched, I recommend always using Auto shutter for sunrise and locking the White Balance to 4000k or higher.
"""
def start_timelapse(interface):
gopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP(
interface), camera=constants.gpcontrol, webcam_device=interface)
logging.info(
"Started goprocam instance with interface {}".format(interface))
gopro.gpControlSet(constants.Setup.VOICE_CONTROL,
constants.Setup.VoiceControl.OFF)
gopro.gpControlSet(constants.Setup.AUTO_OFF, constants.Setup.AutoOff.Never)
logging.info("All config set")
gopro.mode(constants.Mode.MultiShotMode,
constants.Mode.SubMode.MultiShot.NightLapse)
gopro.shutter(constants.start)
logging.info("Started timelapse")
cameras = sys.argv[1]
cameras = cameras.split(",")
for interface in cameras:
thr = threading.Thread(target=start_timelapse, args=(interface,))
thr.start()
|
|
4651e178ddbeac9211f8170e2e20f8a35ff0e3ab
|
ocradmin/plugins/test_nodetree.py
|
ocradmin/plugins/test_nodetree.py
|
#!/usr/bin/python
import os
import sys
import json
sys.path.append(os.path.abspath(".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocradmin.settings'
sys.path.insert(0, "lib")
from nodetree import script
from nodetree.manager import ModuleManager
def run(nodelist, outpath):
manager = ModuleManager()
manager.register_module("ocradmin.plugins.ocropus_nodes")
manager.register_module("ocradmin.plugins.tesseract_nodes")
manager.register_module("ocradmin.plugins.cuneiform_nodes")
manager.register_module("ocradmin.plugins.numpy_nodes")
manager.register_module("ocradmin.plugins.pil_nodes")
s = script.Script(nodelist, manager=manager)
term = s.get_terminals()[0]
print "Rendering to %s" % outpath
out = manager.get_new_node("Ocropus::FileOut", label="Output",
params=[("path", os.path.abspath(outpath))])
out.set_input(0, term)
out.eval()
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <script> <output>" % sys.argv[0]
sys.exit(1)
nodes = None
with open(sys.argv[1], "r") as f:
nodes = json.load(f)
if nodes is None:
print "No nodes found in script"
sys.exit(1)
run(nodes, sys.argv[2])
|
Add a simple CLI script for writing out results of scripts
|
Add a simple CLI script for writing out results of scripts
|
Python
|
apache-2.0
|
vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium,vitorio/ocropodium
|
Add a simple CLI script for writing out results of scripts
|
#!/usr/bin/python
import os
import sys
import json
sys.path.append(os.path.abspath(".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocradmin.settings'
sys.path.insert(0, "lib")
from nodetree import script
from nodetree.manager import ModuleManager
def run(nodelist, outpath):
manager = ModuleManager()
manager.register_module("ocradmin.plugins.ocropus_nodes")
manager.register_module("ocradmin.plugins.tesseract_nodes")
manager.register_module("ocradmin.plugins.cuneiform_nodes")
manager.register_module("ocradmin.plugins.numpy_nodes")
manager.register_module("ocradmin.plugins.pil_nodes")
s = script.Script(nodelist, manager=manager)
term = s.get_terminals()[0]
print "Rendering to %s" % outpath
out = manager.get_new_node("Ocropus::FileOut", label="Output",
params=[("path", os.path.abspath(outpath))])
out.set_input(0, term)
out.eval()
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <script> <output>" % sys.argv[0]
sys.exit(1)
nodes = None
with open(sys.argv[1], "r") as f:
nodes = json.load(f)
if nodes is None:
print "No nodes found in script"
sys.exit(1)
run(nodes, sys.argv[2])
|
<commit_before><commit_msg>Add a simple CLI script for writing out results of scripts<commit_after>
|
#!/usr/bin/python
import os
import sys
import json
sys.path.append(os.path.abspath(".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocradmin.settings'
sys.path.insert(0, "lib")
from nodetree import script
from nodetree.manager import ModuleManager
def run(nodelist, outpath):
manager = ModuleManager()
manager.register_module("ocradmin.plugins.ocropus_nodes")
manager.register_module("ocradmin.plugins.tesseract_nodes")
manager.register_module("ocradmin.plugins.cuneiform_nodes")
manager.register_module("ocradmin.plugins.numpy_nodes")
manager.register_module("ocradmin.plugins.pil_nodes")
s = script.Script(nodelist, manager=manager)
term = s.get_terminals()[0]
print "Rendering to %s" % outpath
out = manager.get_new_node("Ocropus::FileOut", label="Output",
params=[("path", os.path.abspath(outpath))])
out.set_input(0, term)
out.eval()
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <script> <output>" % sys.argv[0]
sys.exit(1)
nodes = None
with open(sys.argv[1], "r") as f:
nodes = json.load(f)
if nodes is None:
print "No nodes found in script"
sys.exit(1)
run(nodes, sys.argv[2])
|
Add a simple CLI script for writing out results of scripts#!/usr/bin/python
import os
import sys
import json
sys.path.append(os.path.abspath(".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocradmin.settings'
sys.path.insert(0, "lib")
from nodetree import script
from nodetree.manager import ModuleManager
def run(nodelist, outpath):
manager = ModuleManager()
manager.register_module("ocradmin.plugins.ocropus_nodes")
manager.register_module("ocradmin.plugins.tesseract_nodes")
manager.register_module("ocradmin.plugins.cuneiform_nodes")
manager.register_module("ocradmin.plugins.numpy_nodes")
manager.register_module("ocradmin.plugins.pil_nodes")
s = script.Script(nodelist, manager=manager)
term = s.get_terminals()[0]
print "Rendering to %s" % outpath
out = manager.get_new_node("Ocropus::FileOut", label="Output",
params=[("path", os.path.abspath(outpath))])
out.set_input(0, term)
out.eval()
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <script> <output>" % sys.argv[0]
sys.exit(1)
nodes = None
with open(sys.argv[1], "r") as f:
nodes = json.load(f)
if nodes is None:
print "No nodes found in script"
sys.exit(1)
run(nodes, sys.argv[2])
|
<commit_before><commit_msg>Add a simple CLI script for writing out results of scripts<commit_after>#!/usr/bin/python
import os
import sys
import json
sys.path.append(os.path.abspath(".."))
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocradmin.settings'
sys.path.insert(0, "lib")
from nodetree import script
from nodetree.manager import ModuleManager
def run(nodelist, outpath):
manager = ModuleManager()
manager.register_module("ocradmin.plugins.ocropus_nodes")
manager.register_module("ocradmin.plugins.tesseract_nodes")
manager.register_module("ocradmin.plugins.cuneiform_nodes")
manager.register_module("ocradmin.plugins.numpy_nodes")
manager.register_module("ocradmin.plugins.pil_nodes")
s = script.Script(nodelist, manager=manager)
term = s.get_terminals()[0]
print "Rendering to %s" % outpath
out = manager.get_new_node("Ocropus::FileOut", label="Output",
params=[("path", os.path.abspath(outpath))])
out.set_input(0, term)
out.eval()
if __name__ == "__main__":
if len(sys.argv) < 3:
print "Usage: %s <script> <output>" % sys.argv[0]
sys.exit(1)
nodes = None
with open(sys.argv[1], "r") as f:
nodes = json.load(f)
if nodes is None:
print "No nodes found in script"
sys.exit(1)
run(nodes, sys.argv[2])
|
|
23138ab91e5ac0ecf92a0968bf8e4abfa7d0c763
|
removedups.py
|
removedups.py
|
import hashlib, csv, os
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def process_directory_csv(current_dir_fullpath, sub_dir_list, files, csvwriter):
for file in files:
full_name = current_dir_fullpath + '/' + file
# print(" " + full_name)
csvwriter.writerow([md5(full_name), str(os.path.getsize(full_name)), full_name])
def walk_all_subdirectories(path, output_file_name):
# count = 0
with open(output_file_name, "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for current_dir, sub_dirs, files in os.walk(path):
print(current_dir)
process_directory_csv(current_dir, sub_dirs, files, csvwriter)
csvfile.flush()
# DEBUG CODE - process only 5 directories
# count += 1
# if count >= 10:
# csvfile.close()
# break;
csvfile.close()
def sort_file(inname, outname):
input_file = open(inname, "r")
output_file = open(outname, "w", 1)
lines = [] # give lines variable a type of list
for line in input_file:
lines.append(line)
lines.sort()
for line in lines:
output_file.write(line)
input_file.close()
output_file.close()
def generate_delete_commands(sortedfile, outname):
import csv
output_file = open(outname, "w", 1)
previous_checksum = "IMPOSSIBLE_CHECKSUM"
with open(sortedfile) as f:
reader = csv.reader(f, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader:
# print(row[0], row)
if previous_checksum == row[0]:
output_file.write("rm '" + row[2] + "'\n")
print("removing " + row[2])
os.remove(row[2])
previous_checksum = row[0]
f.close()
output_file.close()
# Main program follows
directory_name = ".."
unsorted_file_name = "filelist.csv"
sorted_file_name = "sortedfilelist.csv"
delete_command_file_name = "deletecommands.sh"
if __name__ == '__main__':
walk_all_subdirectories('..', unsorted_file_name)
sort_file(unsorted_file_name, sorted_file_name)
generate_delete_commands(sorted_file_name, delete_command_file_name)
|
Remove duplicates in all subdirectories - working raw version.
|
Remove duplicates in all subdirectories - working raw version.
|
Python
|
apache-2.0
|
alprab/utils
|
Remove duplicates in all subdirectories - working raw version.
|
import hashlib, csv, os
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def process_directory_csv(current_dir_fullpath, sub_dir_list, files, csvwriter):
for file in files:
full_name = current_dir_fullpath + '/' + file
# print(" " + full_name)
csvwriter.writerow([md5(full_name), str(os.path.getsize(full_name)), full_name])
def walk_all_subdirectories(path, output_file_name):
# count = 0
with open(output_file_name, "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for current_dir, sub_dirs, files in os.walk(path):
print(current_dir)
process_directory_csv(current_dir, sub_dirs, files, csvwriter)
csvfile.flush()
# DEBUG CODE - process only 5 directories
# count += 1
# if count >= 10:
# csvfile.close()
# break;
csvfile.close()
def sort_file(inname, outname):
input_file = open(inname, "r")
output_file = open(outname, "w", 1)
lines = [] # give lines variable a type of list
for line in input_file:
lines.append(line)
lines.sort()
for line in lines:
output_file.write(line)
input_file.close()
output_file.close()
def generate_delete_commands(sortedfile, outname):
import csv
output_file = open(outname, "w", 1)
previous_checksum = "IMPOSSIBLE_CHECKSUM"
with open(sortedfile) as f:
reader = csv.reader(f, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader:
# print(row[0], row)
if previous_checksum == row[0]:
output_file.write("rm '" + row[2] + "'\n")
print("removing " + row[2])
os.remove(row[2])
previous_checksum = row[0]
f.close()
output_file.close()
# Main program follows
directory_name = ".."
unsorted_file_name = "filelist.csv"
sorted_file_name = "sortedfilelist.csv"
delete_command_file_name = "deletecommands.sh"
if __name__ == '__main__':
walk_all_subdirectories('..', unsorted_file_name)
sort_file(unsorted_file_name, sorted_file_name)
generate_delete_commands(sorted_file_name, delete_command_file_name)
|
<commit_before><commit_msg>Remove duplicates in all subdirectories - working raw version.<commit_after>
|
import hashlib, csv, os
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def process_directory_csv(current_dir_fullpath, sub_dir_list, files, csvwriter):
for file in files:
full_name = current_dir_fullpath + '/' + file
# print(" " + full_name)
csvwriter.writerow([md5(full_name), str(os.path.getsize(full_name)), full_name])
def walk_all_subdirectories(path, output_file_name):
# count = 0
with open(output_file_name, "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for current_dir, sub_dirs, files in os.walk(path):
print(current_dir)
process_directory_csv(current_dir, sub_dirs, files, csvwriter)
csvfile.flush()
# DEBUG CODE - process only 5 directories
# count += 1
# if count >= 10:
# csvfile.close()
# break;
csvfile.close()
def sort_file(inname, outname):
input_file = open(inname, "r")
output_file = open(outname, "w", 1)
lines = [] # give lines variable a type of list
for line in input_file:
lines.append(line)
lines.sort()
for line in lines:
output_file.write(line)
input_file.close()
output_file.close()
def generate_delete_commands(sortedfile, outname):
import csv
output_file = open(outname, "w", 1)
previous_checksum = "IMPOSSIBLE_CHECKSUM"
with open(sortedfile) as f:
reader = csv.reader(f, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader:
# print(row[0], row)
if previous_checksum == row[0]:
output_file.write("rm '" + row[2] + "'\n")
print("removing " + row[2])
os.remove(row[2])
previous_checksum = row[0]
f.close()
output_file.close()
# Main program follows
directory_name = ".."
unsorted_file_name = "filelist.csv"
sorted_file_name = "sortedfilelist.csv"
delete_command_file_name = "deletecommands.sh"
if __name__ == '__main__':
walk_all_subdirectories('..', unsorted_file_name)
sort_file(unsorted_file_name, sorted_file_name)
generate_delete_commands(sorted_file_name, delete_command_file_name)
|
Remove duplicates in all subdirectories - working raw version.import hashlib, csv, os
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def process_directory_csv(current_dir_fullpath, sub_dir_list, files, csvwriter):
for file in files:
full_name = current_dir_fullpath + '/' + file
# print(" " + full_name)
csvwriter.writerow([md5(full_name), str(os.path.getsize(full_name)), full_name])
def walk_all_subdirectories(path, output_file_name):
# count = 0
with open(output_file_name, "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for current_dir, sub_dirs, files in os.walk(path):
print(current_dir)
process_directory_csv(current_dir, sub_dirs, files, csvwriter)
csvfile.flush()
# DEBUG CODE - process only 5 directories
# count += 1
# if count >= 10:
# csvfile.close()
# break;
csvfile.close()
def sort_file(inname, outname):
input_file = open(inname, "r")
output_file = open(outname, "w", 1)
lines = [] # give lines variable a type of list
for line in input_file:
lines.append(line)
lines.sort()
for line in lines:
output_file.write(line)
input_file.close()
output_file.close()
def generate_delete_commands(sortedfile, outname):
import csv
output_file = open(outname, "w", 1)
previous_checksum = "IMPOSSIBLE_CHECKSUM"
with open(sortedfile) as f:
reader = csv.reader(f, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader:
# print(row[0], row)
if previous_checksum == row[0]:
output_file.write("rm '" + row[2] + "'\n")
print("removing " + row[2])
os.remove(row[2])
previous_checksum = row[0]
f.close()
output_file.close()
# Main program follows
directory_name = ".."
unsorted_file_name = "filelist.csv"
sorted_file_name = "sortedfilelist.csv"
delete_command_file_name = "deletecommands.sh"
if __name__ == '__main__':
walk_all_subdirectories('..', unsorted_file_name)
sort_file(unsorted_file_name, sorted_file_name)
generate_delete_commands(sorted_file_name, delete_command_file_name)
|
<commit_before><commit_msg>Remove duplicates in all subdirectories - working raw version.<commit_after>import hashlib, csv, os
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def process_directory_csv(current_dir_fullpath, sub_dir_list, files, csvwriter):
for file in files:
full_name = current_dir_fullpath + '/' + file
# print(" " + full_name)
csvwriter.writerow([md5(full_name), str(os.path.getsize(full_name)), full_name])
def walk_all_subdirectories(path, output_file_name):
# count = 0
with open(output_file_name, "w") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for current_dir, sub_dirs, files in os.walk(path):
print(current_dir)
process_directory_csv(current_dir, sub_dirs, files, csvwriter)
csvfile.flush()
# DEBUG CODE - process only 5 directories
# count += 1
# if count >= 10:
# csvfile.close()
# break;
csvfile.close()
def sort_file(inname, outname):
input_file = open(inname, "r")
output_file = open(outname, "w", 1)
lines = [] # give lines variable a type of list
for line in input_file:
lines.append(line)
lines.sort()
for line in lines:
output_file.write(line)
input_file.close()
output_file.close()
def generate_delete_commands(sortedfile, outname):
import csv
output_file = open(outname, "w", 1)
previous_checksum = "IMPOSSIBLE_CHECKSUM"
with open(sortedfile) as f:
reader = csv.reader(f, delimiter=':', quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in reader:
# print(row[0], row)
if previous_checksum == row[0]:
output_file.write("rm '" + row[2] + "'\n")
print("removing " + row[2])
os.remove(row[2])
previous_checksum = row[0]
f.close()
output_file.close()
# Main program follows
directory_name = ".."
unsorted_file_name = "filelist.csv"
sorted_file_name = "sortedfilelist.csv"
delete_command_file_name = "deletecommands.sh"
if __name__ == '__main__':
walk_all_subdirectories('..', unsorted_file_name)
sort_file(unsorted_file_name, sorted_file_name)
generate_delete_commands(sorted_file_name, delete_command_file_name)
|
|
99395e345f74bbedd29fd45eebe0738a3b5f4729
|
ckanext/archiver/tests/test_api.py
|
ckanext/archiver/tests/test_api.py
|
import pytest
import tempfile
from ckan import model
from ckan import plugins
from ckan.tests import factories
import ckan.tests.helpers as helpers
from ckanext.archiver import model as archiver_model
from ckanext.archiver.tasks import update_package
@pytest.mark.usefixtures('with_plugins')
@pytest.mark.ckan_config("ckanext-archiver.cache_url_root", "http://localhost:50001/resources/")
@pytest.mark.ckan_config("ckanext-archiver.max_content_length", 1000000)
@pytest.mark.ckan_config("ckan.plugins", "archiver testipipe")
class TestApi(object):
@pytest.fixture(autouse=True)
@pytest.mark.usefixtures(u"clean_db")
def initial_data(cls, clean_db):
archiver_model.init_tables(model.meta.engine)
cls.temp_dir = tempfile.mkdtemp()
def test_package_show(self, client):
url = client + '/?status=200&content=test&content-type=csv'
testipipe = plugins.get_plugin('testipipe')
testipipe.reset()
pkg_dict = {
'name': 'test-package-api',
'resources': [
{
'url': url,
'format': 'TXT',
'description': 'Test'
}
]
}
pkg = factories.Dataset(**pkg_dict)
update_package(pkg['id'])
result = helpers.call_action(
"package_show",
id=pkg["id"]
)
print(result)
assert 'archiver' in result.keys()
|
Test api endpoint for package show
|
Test api endpoint for package show
|
Python
|
mit
|
ckan/ckanext-archiver,ckan/ckanext-archiver,ckan/ckanext-archiver
|
Test api endpoint for package show
|
import pytest
import tempfile
from ckan import model
from ckan import plugins
from ckan.tests import factories
import ckan.tests.helpers as helpers
from ckanext.archiver import model as archiver_model
from ckanext.archiver.tasks import update_package
@pytest.mark.usefixtures('with_plugins')
@pytest.mark.ckan_config("ckanext-archiver.cache_url_root", "http://localhost:50001/resources/")
@pytest.mark.ckan_config("ckanext-archiver.max_content_length", 1000000)
@pytest.mark.ckan_config("ckan.plugins", "archiver testipipe")
class TestApi(object):
@pytest.fixture(autouse=True)
@pytest.mark.usefixtures(u"clean_db")
def initial_data(cls, clean_db):
archiver_model.init_tables(model.meta.engine)
cls.temp_dir = tempfile.mkdtemp()
def test_package_show(self, client):
url = client + '/?status=200&content=test&content-type=csv'
testipipe = plugins.get_plugin('testipipe')
testipipe.reset()
pkg_dict = {
'name': 'test-package-api',
'resources': [
{
'url': url,
'format': 'TXT',
'description': 'Test'
}
]
}
pkg = factories.Dataset(**pkg_dict)
update_package(pkg['id'])
result = helpers.call_action(
"package_show",
id=pkg["id"]
)
print(result)
assert 'archiver' in result.keys()
|
<commit_before><commit_msg>Test api endpoint for package show<commit_after>
|
import pytest
import tempfile
from ckan import model
from ckan import plugins
from ckan.tests import factories
import ckan.tests.helpers as helpers
from ckanext.archiver import model as archiver_model
from ckanext.archiver.tasks import update_package
@pytest.mark.usefixtures('with_plugins')
@pytest.mark.ckan_config("ckanext-archiver.cache_url_root", "http://localhost:50001/resources/")
@pytest.mark.ckan_config("ckanext-archiver.max_content_length", 1000000)
@pytest.mark.ckan_config("ckan.plugins", "archiver testipipe")
class TestApi(object):
@pytest.fixture(autouse=True)
@pytest.mark.usefixtures(u"clean_db")
def initial_data(cls, clean_db):
archiver_model.init_tables(model.meta.engine)
cls.temp_dir = tempfile.mkdtemp()
def test_package_show(self, client):
url = client + '/?status=200&content=test&content-type=csv'
testipipe = plugins.get_plugin('testipipe')
testipipe.reset()
pkg_dict = {
'name': 'test-package-api',
'resources': [
{
'url': url,
'format': 'TXT',
'description': 'Test'
}
]
}
pkg = factories.Dataset(**pkg_dict)
update_package(pkg['id'])
result = helpers.call_action(
"package_show",
id=pkg["id"]
)
print(result)
assert 'archiver' in result.keys()
|
Test api endpoint for package showimport pytest
import tempfile
from ckan import model
from ckan import plugins
from ckan.tests import factories
import ckan.tests.helpers as helpers
from ckanext.archiver import model as archiver_model
from ckanext.archiver.tasks import update_package
@pytest.mark.usefixtures('with_plugins')
@pytest.mark.ckan_config("ckanext-archiver.cache_url_root", "http://localhost:50001/resources/")
@pytest.mark.ckan_config("ckanext-archiver.max_content_length", 1000000)
@pytest.mark.ckan_config("ckan.plugins", "archiver testipipe")
class TestApi(object):
@pytest.fixture(autouse=True)
@pytest.mark.usefixtures(u"clean_db")
def initial_data(cls, clean_db):
archiver_model.init_tables(model.meta.engine)
cls.temp_dir = tempfile.mkdtemp()
def test_package_show(self, client):
url = client + '/?status=200&content=test&content-type=csv'
testipipe = plugins.get_plugin('testipipe')
testipipe.reset()
pkg_dict = {
'name': 'test-package-api',
'resources': [
{
'url': url,
'format': 'TXT',
'description': 'Test'
}
]
}
pkg = factories.Dataset(**pkg_dict)
update_package(pkg['id'])
result = helpers.call_action(
"package_show",
id=pkg["id"]
)
print(result)
assert 'archiver' in result.keys()
|
<commit_before><commit_msg>Test api endpoint for package show<commit_after>import pytest
import tempfile
from ckan import model
from ckan import plugins
from ckan.tests import factories
import ckan.tests.helpers as helpers
from ckanext.archiver import model as archiver_model
from ckanext.archiver.tasks import update_package
@pytest.mark.usefixtures('with_plugins')
@pytest.mark.ckan_config("ckanext-archiver.cache_url_root", "http://localhost:50001/resources/")
@pytest.mark.ckan_config("ckanext-archiver.max_content_length", 1000000)
@pytest.mark.ckan_config("ckan.plugins", "archiver testipipe")
class TestApi(object):
@pytest.fixture(autouse=True)
@pytest.mark.usefixtures(u"clean_db")
def initial_data(cls, clean_db):
archiver_model.init_tables(model.meta.engine)
cls.temp_dir = tempfile.mkdtemp()
def test_package_show(self, client):
url = client + '/?status=200&content=test&content-type=csv'
testipipe = plugins.get_plugin('testipipe')
testipipe.reset()
pkg_dict = {
'name': 'test-package-api',
'resources': [
{
'url': url,
'format': 'TXT',
'description': 'Test'
}
]
}
pkg = factories.Dataset(**pkg_dict)
update_package(pkg['id'])
result = helpers.call_action(
"package_show",
id=pkg["id"]
)
print(result)
assert 'archiver' in result.keys()
|
|
797249c42c8c1c0d6eda05dbf9e9d16d2706b373
|
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater.py
|
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater.py
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
Add LeNet example with custom scoring and train_samples_per_iteration.
|
Add LeNet example with custom scoring and train_samples_per_iteration.
|
Python
|
apache-2.0
|
mathemage/h2o-3,h2oai/h2o-3,spennihana/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-dev,michalkurka/h2o-3,jangorecki/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,spennihana/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,michalkurka/h2o-3,mathemage/h2o-3,spennihana/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,mathemage/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,spennihana/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,h2oai/h2o-3
|
Add LeNet example with custom scoring and train_samples_per_iteration.
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
<commit_before><commit_msg>Add LeNet example with custom scoring and train_samples_per_iteration.<commit_after>
|
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
Add LeNet example with custom scoring and train_samples_per_iteration.from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
<commit_before><commit_msg>Add LeNet example with custom scoring and train_samples_per_iteration.<commit_after>from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
from h2o.estimators.deepwater import H2ODeepWaterEstimator
def deepwater_lenet():
print("Test checks if Deep Water works fine with a multiclass image dataset")
frame = h2o.import_file(pyunit_utils.locate("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv"))
print(frame.head(5))
model = H2ODeepWaterEstimator(epochs=100, rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)
model.train(x=[0],y=1, training_frame=frame)
model.show()
error = model.model_performance(train=True).mean_per_class_error()
assert error < 0.1, "mean classification error is too high : " + str(error)
if __name__ == "__main__":
pyunit_utils.standalone_test(deepwater_lenet)
else:
deepwater_lenet()
|
|
8cdbbbaf33cd09bc742761ce8cd5b79b185710cd
|
webtool/server/management/commands/timer_update.py
|
webtool/server/management/commands/timer_update.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import io
import datetime
from django.core.management.base import BaseCommand
from server.models import Instruction, Tour, Talk, Session, Season
from server.views.bulletin import Activities
from server.views.bulletin.translator import Translator
class Command(BaseCommand):
leave_locale_alone = True
help = 'Update activities regarding the current date'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
season = Season.objects.get(current=True)
canceled = season.state_list.get(name='Ausgefallen')
completed = season.state_list.get(name='Durchgeführt')
not_touch = (canceled.id, completed.id)
today = datetime.date.today()
for instruction in Instruction.objects.filter(topic__seasons=season).exclude(state_id__in=not_touch):
event = instruction.instruction
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
instruction.state = completed
instruction.save()
instruction.instruction.save()
for event in instruction.meeting_list.all():
event.save()
for tour in Tour.objects.filter(season=season).exclude(state_id__in=not_touch):
event = tour.tour
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
tour.state = completed
tour.save()
tour.deadline.save()
if tour.preliminary:
tour.preliminary.save()
tour.tour.save()
for talk in Talk.objects.filter(season=season).exclude(state_id__in=not_touch):
event = talk.talk
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
talk.state = completed
talk.save()
talk.talk.save()
for session in Session.objects.filter(collective__seasons=season).exclude(state_id__in=not_touch):
event = session.session
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
session.state = completed
session.save()
session.session.save()
|
Introduce a timer based update of activities
|
Introduce a timer based update of activities
|
Python
|
bsd-2-clause
|
wodo/WebTool3,wodo/WebTool3,wodo/WebTool3,wodo/WebTool3
|
Introduce a timer based update of activities
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import io
import datetime
from django.core.management.base import BaseCommand
from server.models import Instruction, Tour, Talk, Session, Season
from server.views.bulletin import Activities
from server.views.bulletin.translator import Translator
class Command(BaseCommand):
leave_locale_alone = True
help = 'Update activities regarding the current date'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
season = Season.objects.get(current=True)
canceled = season.state_list.get(name='Ausgefallen')
completed = season.state_list.get(name='Durchgeführt')
not_touch = (canceled.id, completed.id)
today = datetime.date.today()
for instruction in Instruction.objects.filter(topic__seasons=season).exclude(state_id__in=not_touch):
event = instruction.instruction
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
instruction.state = completed
instruction.save()
instruction.instruction.save()
for event in instruction.meeting_list.all():
event.save()
for tour in Tour.objects.filter(season=season).exclude(state_id__in=not_touch):
event = tour.tour
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
tour.state = completed
tour.save()
tour.deadline.save()
if tour.preliminary:
tour.preliminary.save()
tour.tour.save()
for talk in Talk.objects.filter(season=season).exclude(state_id__in=not_touch):
event = talk.talk
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
talk.state = completed
talk.save()
talk.talk.save()
for session in Session.objects.filter(collective__seasons=season).exclude(state_id__in=not_touch):
event = session.session
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
session.state = completed
session.save()
session.session.save()
|
<commit_before><commit_msg>Introduce a timer based update of activities<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import io
import datetime
from django.core.management.base import BaseCommand
from server.models import Instruction, Tour, Talk, Session, Season
from server.views.bulletin import Activities
from server.views.bulletin.translator import Translator
class Command(BaseCommand):
leave_locale_alone = True
help = 'Update activities regarding the current date'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
season = Season.objects.get(current=True)
canceled = season.state_list.get(name='Ausgefallen')
completed = season.state_list.get(name='Durchgeführt')
not_touch = (canceled.id, completed.id)
today = datetime.date.today()
for instruction in Instruction.objects.filter(topic__seasons=season).exclude(state_id__in=not_touch):
event = instruction.instruction
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
instruction.state = completed
instruction.save()
instruction.instruction.save()
for event in instruction.meeting_list.all():
event.save()
for tour in Tour.objects.filter(season=season).exclude(state_id__in=not_touch):
event = tour.tour
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
tour.state = completed
tour.save()
tour.deadline.save()
if tour.preliminary:
tour.preliminary.save()
tour.tour.save()
for talk in Talk.objects.filter(season=season).exclude(state_id__in=not_touch):
event = talk.talk
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
talk.state = completed
talk.save()
talk.talk.save()
for session in Session.objects.filter(collective__seasons=season).exclude(state_id__in=not_touch):
event = session.session
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
session.state = completed
session.save()
session.session.save()
|
Introduce a timer based update of activities# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import io
import datetime
from django.core.management.base import BaseCommand
from server.models import Instruction, Tour, Talk, Session, Season
from server.views.bulletin import Activities
from server.views.bulletin.translator import Translator
class Command(BaseCommand):
leave_locale_alone = True
help = 'Update activities regarding the current date'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
season = Season.objects.get(current=True)
canceled = season.state_list.get(name='Ausgefallen')
completed = season.state_list.get(name='Durchgeführt')
not_touch = (canceled.id, completed.id)
today = datetime.date.today()
for instruction in Instruction.objects.filter(topic__seasons=season).exclude(state_id__in=not_touch):
event = instruction.instruction
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
instruction.state = completed
instruction.save()
instruction.instruction.save()
for event in instruction.meeting_list.all():
event.save()
for tour in Tour.objects.filter(season=season).exclude(state_id__in=not_touch):
event = tour.tour
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
tour.state = completed
tour.save()
tour.deadline.save()
if tour.preliminary:
tour.preliminary.save()
tour.tour.save()
for talk in Talk.objects.filter(season=season).exclude(state_id__in=not_touch):
event = talk.talk
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
talk.state = completed
talk.save()
talk.talk.save()
for session in Session.objects.filter(collective__seasons=season).exclude(state_id__in=not_touch):
event = session.session
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
session.state = completed
session.save()
session.session.save()
|
<commit_before><commit_msg>Introduce a timer based update of activities<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import io
import datetime
from django.core.management.base import BaseCommand
from server.models import Instruction, Tour, Talk, Session, Season
from server.views.bulletin import Activities
from server.views.bulletin.translator import Translator
class Command(BaseCommand):
leave_locale_alone = True
help = 'Update activities regarding the current date'
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
season = Season.objects.get(current=True)
canceled = season.state_list.get(name='Ausgefallen')
completed = season.state_list.get(name='Durchgeführt')
not_touch = (canceled.id, completed.id)
today = datetime.date.today()
for instruction in Instruction.objects.filter(topic__seasons=season).exclude(state_id__in=not_touch):
event = instruction.instruction
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
instruction.state = completed
instruction.save()
instruction.instruction.save()
for event in instruction.meeting_list.all():
event.save()
for tour in Tour.objects.filter(season=season).exclude(state_id__in=not_touch):
event = tour.tour
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
tour.state = completed
tour.save()
tour.deadline.save()
if tour.preliminary:
tour.preliminary.save()
tour.tour.save()
for talk in Talk.objects.filter(season=season).exclude(state_id__in=not_touch):
event = talk.talk
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
talk.state = completed
talk.save()
talk.talk.save()
for session in Session.objects.filter(collective__seasons=season).exclude(state_id__in=not_touch):
event = session.session
event_done = ((event.end_date is None and event.start_date < today) or
(event.end_date and event.end_date < today))
if event_done:
session.state = completed
session.save()
session.session.save()
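The date comparison above is written out four times, once per activity type. A small helper along these lines could express it once (a sketch; the helper name is not part of the original module):

def is_event_done(event, today):
    # Finished when the end date has passed, or, for events with no end date, when the start date has passed.
    if event.end_date is None:
        return event.start_date < today
    return event.end_date < today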
|
|
6693172856655329d99f038d54b1d8819fc1a9b6
|
scripts/examples/02-Board-Control/native_emitters.py
|
scripts/examples/02-Board-Control/native_emitters.py
|
import time
@micropython.asm_thumb
def asm():
movw(r0, 42)
@micropython.viper
def viper(a, b):
return a + b
@micropython.native
def native(a, b):
return a + b
print(asm())
print(viper(1, 2))
print(native(1, 2))
|
Add native code emitters example.
|
Add native code emitters example.
|
Python
|
mit
|
kwagyeman/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,iabdalkader/openmv,kwagyeman/openmv,kwagyeman/openmv,iabdalkader/openmv,kwagyeman/openmv
|
Add native code emitters example.
|
import time
@micropython.asm_thumb
def asm():
movw(r0, 42)
@micropython.viper
def viper(a, b):
return a + b
@micropython.native
def native(a, b):
return a + b
print(asm())
print(viper(1, 2))
print(native(1, 2))
|
<commit_before><commit_msg>Add native code emitters example.<commit_after>
|
import time
@micropython.asm_thumb
def asm():
movw(r0, 42)
@micropython.viper
def viper(a, b):
return a + b
@micropython.native
def native(a, b):
return a + b
print(asm())
print(viper(1, 2))
print(native(1, 2))
|
Add native code emitters example.import time
@micropython.asm_thumb
def asm():
movw(r0, 42)
@micropython.viper
def viper(a, b):
return a + b
@micropython.native
def native(a, b):
return a + b
print(asm())
print(viper(1, 2))
print(native(1, 2))
|
<commit_before><commit_msg>Add native code emitters example.<commit_after>import time
@micropython.asm_thumb
def asm():
movw(r0, 42)
@micropython.viper
def viper(a, b):
return a + b
@micropython.native
def native(a, b):
return a + b
print(asm())
print(viper(1, 2))
print(native(1, 2))
|
|
fa6060a21767a0b5b2b3a10e4301e0c1a30134cb
|
i8c/tests/test_opt_lit0_cmp_before_bra.py
|
i8c/tests/test_opt_lit0_cmp_before_bra.py
|
from i8c.tests import TestCase
SOURCE1 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
beq return_the_null
deref ptr
return
return_the_null:
"""
SOURCE2 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
bne dereference
return
dereference:
deref ptr
"""
class TestOptimizeLit0CmpBeforeBra(TestCase):
def test_optimize_lit0_cmp_before_bra(self):
"""Check that lit0,cmp before bra is eliminated."""
for source in SOURCE1, SOURCE2:
tree, output = self.compile(source)
self.assertEqual(["dup", "bra", "skip", "deref"], output.opnames)
|
Test the lit0,cmp before bra eliminator
|
Test the lit0,cmp before bra eliminator
|
Python
|
lgpl-2.1
|
gbenson/i8c
|
Test the lit0,cmp before bra eliminator
|
from i8c.tests import TestCase
SOURCE1 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
beq return_the_null
deref ptr
return
return_the_null:
"""
SOURCE2 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
bne dereference
return
dereference:
deref ptr
"""
class TestOptimizeLit0CmpBeforeBra(TestCase):
def test_optimize_lit0_cmp_before_bra(self):
"""Check that lit0,cmp before bra is eliminated."""
for source in SOURCE1, SOURCE2:
tree, output = self.compile(source)
self.assertEqual(["dup", "bra", "skip", "deref"], output.opnames)
|
<commit_before><commit_msg>Test the lit0,cmp before bra eliminator<commit_after>
|
from i8c.tests import TestCase
SOURCE1 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
beq return_the_null
deref ptr
return
return_the_null:
"""
SOURCE2 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
bne dereference
return
dereference:
deref ptr
"""
class TestOptimizeLit0CmpBeforeBra(TestCase):
def test_optimize_lit0_cmp_before_bra(self):
"""Check that lit0,cmp before bra is eliminated."""
for source in SOURCE1, SOURCE2:
tree, output = self.compile(source)
self.assertEqual(["dup", "bra", "skip", "deref"], output.opnames)
|
Test the lit0,cmp before bra eliminatorfrom i8c.tests import TestCase
SOURCE1 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
beq return_the_null
deref ptr
return
return_the_null:
"""
SOURCE2 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
bne dereference
return
dereference:
deref ptr
"""
class TestOptimizeLit0CmpBeforeBra(TestCase):
def test_optimize_lit0_cmp_before_bra(self):
"""Check that lit0,cmp before bra is eliminated."""
for source in SOURCE1, SOURCE2:
tree, output = self.compile(source)
self.assertEqual(["dup", "bra", "skip", "deref"], output.opnames)
|
<commit_before><commit_msg>Test the lit0,cmp before bra eliminator<commit_after>from i8c.tests import TestCase
SOURCE1 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
beq return_the_null
deref ptr
return
return_the_null:
"""
SOURCE2 = """\
define test::optimize_cmp_bra_const_const returns ptr
argument ptr x
dup
load NULL
bne dereference
return
dereference:
deref ptr
"""
class TestOptimizeLit0CmpBeforeBra(TestCase):
def test_optimize_lit0_cmp_before_bra(self):
"""Check that lit0,cmp before bra is eliminated."""
for source in SOURCE1, SOURCE2:
tree, output = self.compile(source)
self.assertEqual(["dup", "bra", "skip", "deref"], output.opnames)
|
|
a3a48824b36ef62edaf128379f1baec5482166e7
|
src/nodeconductor_saltstack/migrations/0005_resource_error_message.py
|
src/nodeconductor_saltstack/migrations/0005_resource_error_message.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),
]
operations = [
migrations.AddField(
model_name='domain',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='site',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
Save error_message for resources (SAAS-982)
|
Save error_message for resources (SAAS-982)
|
Python
|
mit
|
opennode/nodeconductor-saltstack
|
Save error_message for resources (SAAS-982)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),
]
operations = [
migrations.AddField(
model_name='domain',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='site',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Save error_message for resources (SAAS-982)<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),
]
operations = [
migrations.AddField(
model_name='domain',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='site',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
Save error_message for resources (SAAS-982)# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),
]
operations = [
migrations.AddField(
model_name='domain',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='site',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Save error_message for resources (SAAS-982)<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),
]
operations = [
migrations.AddField(
model_name='domain',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='site',
name='error_message',
field=models.TextField(blank=True),
preserve_default=True,
),
]
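In model terms, the migration above amounts to declaring a blank-allowed TextField on each of the two models, roughly as follows (a sketch using standard Django fields, not code taken from the record; the Site model would get the same field):

from django.db import models

class Domain(models.Model):
    # Matches the field added by the migration; blank=True permits an empty error message.
    error_message = models.TextField(blank=True)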
|
|
779fb015913a17fcb8fb290515845e6b47c3ae50
|
latex2markdown.py
|
latex2markdown.py
|
"""
A very simple tool to convert LaTeX documents to Markdown documents
"""
import re
span_substitutions = [
(r'\\emph\{(.+)\}', r'*\1*'),
(r'\\textbf\{(.+)\}', r'**\1**'),
(r'\\verb;(.+);', r'`\1`'),
(r'\\includegraphics\{(.+)\}', r''),
]
def convert_span_elements(line):
""" Converts all recognizable span elements into markdown
"""
for (f, r) in span_substitutions:
p = re.compile(f)
line = p.sub(r, line)
return line
# This next bit is to test the conversion as it builds
from sys import stdin
if __name__=="__main__":
for line in stdin:
print(convert_span_elements(line),end='')
|
Create the converter (with span-conversion functionality)
|
Create the converter (with span-conversion functionality)
|
Python
|
mit
|
jladan/latex2markdown
|
Create the converter (with span-conversion functionality)
|
"""
A very simple tool to convert LaTeX documents to Markdown documents
"""
import re
span_substitutions = [
(r'\\emph\{(.+)\}', r'*\1*'),
(r'\\textbf\{(.+)\}', r'**\1**'),
(r'\\verb;(.+);', r'`\1`'),
(r'\\includegraphics\{(.+)\}', r''),
]
def convert_span_elements(line):
""" Converts all recognizable span elements into markdown
"""
for (f, r) in span_substitutions:
p = re.compile(f)
line = p.sub(r, line)
return line
# This next bit is to test the conversion as it builds
from sys import stdin
if __name__=="__main__":
for line in stdin:
print(convert_span_elements(line),end='')
|
<commit_before><commit_msg>Create the converter (with span-conversion functionality)<commit_after>
|
"""
A very simple tool to convert LaTeX documents to Markdown documents
"""
import re
span_substitutions = [
(r'\\emph\{(.+)\}', r'*\1*'),
(r'\\textbf\{(.+)\}', r'**\1**'),
(r'\\verb;(.+);', r'`\1`'),
(r'\\includegraphics\{(.+)\}', r''),
]
def convert_span_elements(line):
""" Converts all recognizable span elements into markdown
"""
for (f, r) in span_substitutions:
p = re.compile(f)
line = p.sub(r, line)
return line
# This next bit is to test the conversion as it builds
from sys import stdin
if __name__=="__main__":
for line in stdin:
print(convert_span_elements(line),end='')
|
Create the converter (with span-conversion functionality)"""
A very simple tool to convert LaTeX documents to Markdown documents
"""
import re
span_substitutions = [
(r'\\emph\{(.+)\}', r'*\1*'),
(r'\\textbf\{(.+)\}', r'**\1**'),
(r'\\verb;(.+);', r'`\1`'),
(r'\\includegraphics\{(.+)\}', r''),
]
def convert_span_elements(line):
""" Converts all recognizable span elements into markdown
"""
for (f, r) in span_substitutions:
p = re.compile(f)
line = p.sub(r, line)
return line
# This next bit is to test the conversion as it builds
from sys import stdin
if __name__=="__main__":
for line in stdin:
print(convert_span_elements(line),end='')
|
<commit_before><commit_msg>Create the converter (with span-conversion functionality)<commit_after>"""
A very simple tool to convert LaTeX documents to Markdown documents
"""
import re
span_substitutions = [
(r'\\emph\{(.+)\}', r'*\1*'),
(r'\\textbf\{(.+)\}', r'**\1**'),
(r'\\verb;(.+);', r'`\1`'),
(r'\\includegraphics\{(.+)\}', r''),
]
def convert_span_elements(line):
""" Converts all recognizable span elements into markdown
"""
for (f, r) in span_substitutions:
p = re.compile(f)
line = p.sub(r, line)
return line
# This next bit is to test the conversion as it builds
from sys import stdin
if __name__=="__main__":
for line in stdin:
print(convert_span_elements(line),end='')
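For illustration, the converter behaves as follows on a single-span line (the input string is made up). Because each pattern uses a greedy (.+) group, a line holding two spans of the same kind, or nested braces, may not convert as intended.

print(convert_span_elements(r"This point is \emph{important}."))
# prints: This point is *important*.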
|
|
6014dab06ed2275c5703ab9f9e63272656733c69
|
moj_utils/rest.py
|
moj_utils/rest.py
|
from django.conf import settings
def retrieve_all_pages(api_endpoint, **kwargs):
"""
    Some MTP APIs are paginated; this method loads all pages into a single results list
:param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
:param kwargs: additional arguments to pass into api callable
"""
loaded_results = []
offset = 0
while True:
response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,
**kwargs)
count = response.get('count', 0)
loaded_results += response.get('results', [])
if len(loaded_results) >= count:
break
offset += settings.REQUEST_PAGE_SIZE
return loaded_results
|
Add retrieve_all_pages util method from mtp-cashbook
|
Add retrieve_all_pages util method from mtp-cashbook
This method is duplicated in functionality in multiple
front end apps.
|
Python
|
mit
|
ministryofjustice/django-utils,ministryofjustice/django-utils
|
Add retrieve_all_pages util method from mtp-cashbook
This method is duplicated in functionality in multiple
front end apps.
|
from django.conf import settings
def retrieve_all_pages(api_endpoint, **kwargs):
"""
    Some MTP APIs are paginated; this method loads all pages into a single results list
:param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
:param kwargs: additional arguments to pass into api callable
"""
loaded_results = []
offset = 0
while True:
response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,
**kwargs)
count = response.get('count', 0)
loaded_results += response.get('results', [])
if len(loaded_results) >= count:
break
offset += settings.REQUEST_PAGE_SIZE
return loaded_results
|
<commit_before><commit_msg>Add retrieve_all_pages util method from mtp-cashbook
This method is duplicated in functionality in multiple
front end apps.<commit_after>
|
from django.conf import settings
def retrieve_all_pages(api_endpoint, **kwargs):
"""
    Some MTP APIs are paginated; this method loads all pages into a single results list
:param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
:param kwargs: additional arguments to pass into api callable
"""
loaded_results = []
offset = 0
while True:
response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,
**kwargs)
count = response.get('count', 0)
loaded_results += response.get('results', [])
if len(loaded_results) >= count:
break
offset += settings.REQUEST_PAGE_SIZE
return loaded_results
|
Add retrieve_all_pages util method from mtp-cashbook
This method is duplicated in functionality in multiple
front end apps.from django.conf import settings
def retrieve_all_pages(api_endpoint, **kwargs):
"""
    Some MTP APIs are paginated; this method loads all pages into a single results list
:param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
:param kwargs: additional arguments to pass into api callable
"""
loaded_results = []
offset = 0
while True:
response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,
**kwargs)
count = response.get('count', 0)
loaded_results += response.get('results', [])
if len(loaded_results) >= count:
break
offset += settings.REQUEST_PAGE_SIZE
return loaded_results
|
<commit_before><commit_msg>Add retrieve_all_pages util method from mtp-cashbook
This method is duplicated in functionality in multiple
front end apps.<commit_after>from django.conf import settings
def retrieve_all_pages(api_endpoint, **kwargs):
"""
    Some MTP APIs are paginated; this method loads all pages into a single results list
:param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
:param kwargs: additional arguments to pass into api callable
"""
loaded_results = []
offset = 0
while True:
response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,
**kwargs)
count = response.get('count', 0)
loaded_results += response.get('results', [])
if len(loaded_results) >= count:
break
offset += settings.REQUEST_PAGE_SIZE
return loaded_results
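A hypothetical call site, using the endpoint named in the docstring (illustrative only; it also assumes settings.REQUEST_PAGE_SIZE is defined):

# Collects every page of locked transactions into one list.
locked = retrieve_all_pages(api_client.cashbook.transactions.locked.get)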
|
|
c675fe2a82733ef210bf287df277f8ae956a4295
|
rarbg-get.py
|
rarbg-get.py
|
#!/usr/bin/env python3
import sys
import urllib.parse
import urllib.request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
|
Add beginning of main script
|
Add beginning of main script
|
Python
|
mit
|
jadams/rarbg-get
|
Add beginning of main script
|
#!/usr/bin/env python3
import sys
import urllib.parse
import urllib.request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add beginning of main script<commit_after>
|
#!/usr/bin/env python3
import sys
import urllib.parse
import urllib.request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
|
Add beginning of main script#!/usr/bin/env python3
import sys
import urllib.parse
import urllib.request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add beginning of main script<commit_after>#!/usr/bin/env python3
import sys
import urllib.parse
import urllib.request
def main():
search = sys.argv[1]
url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='
url = url + search
print(url)
req = urllib.request.Request(url, headers={'User-Agent' : "Magic Browser"})
resp = urllib.request.urlopen(req)
respData = resp.read()
if __name__ == '__main__':
main()
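urllib.parse is imported but not yet used, and respData is read without being processed, consistent with the "beginning of main script" subject. Percent-encoding the search term would keep multi-word queries valid, along these lines (a sketch):

url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search=' + urllib.parse.quote(search)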
|
|
d2bcba204d36a8ffd1e6a1ed79b89fcb6f1c88c5
|
ideas/test_kmc.py
|
ideas/test_kmc.py
|
# This code will test out the idea of using kmc to
# 1. quickly enumerate the k-mers
# 2. intersect these with the training database, output as fasta
# 3. use that reduced fasta of intersecting kmers as the query to CMash
####################################################################
# First, I will need to dump the training database to a fasta file
from CMash import MinHash as MH
import os
import blist
training_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'
training_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'
training_file_names = "/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt"
file_names = []
with open(training_file_names, 'r') as fid:
iter = 0
for line in fid.readlines():
line = line.strip()
file_names.append(os.path.basename(line))
iter += 1
if iter > 1000:
break
all_kmers = blist.blist()
for file_name in file_names:
sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
all_kmers += sketch._kmers
all_kmers_set = set(all_kmers)
with open(training_out_file, 'w') as fid:
iter = 0
for kmer in all_kmers_set:
fid.write(">seq_%d\n" % iter)
fid.write("%s\n" % kmer)
iter += 1
##########################################################################
|
Add file to test out kmc approach. Dump training k-mers to fasta file
|
Add file to test out kmc approach. Dump training k-mers to fasta file
|
Python
|
bsd-3-clause
|
dkoslicki/CMash,dkoslicki/CMash
|
Add file to test out kmc approach. Dump training k-mers to fasta file
|
# This code will test out the idea of using kmc to
# 1. quickly enumerate the k-mers
# 2. intersect these with the training database, output as fasta
# 3. use that reduced fasta of intersecting kmers as the query to CMash
####################################################################
# First, I will need to dump the training database to a fasta file
from CMash import MinHash as MH
import os
import blist
training_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'
training_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'
training_file_names = "/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt"
file_names = []
with open(training_file_names, 'r') as fid:
iter = 0
for line in fid.readlines():
line = line.strip()
file_names.append(os.path.basename(line))
iter += 1
if iter > 1000:
break
all_kmers = blist.blist()
for file_name in file_names:
sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
all_kmers += sketch._kmers
all_kmers_set = set(all_kmers)
with open(training_out_file, 'w') as fid:
iter = 0
for kmer in all_kmers_set:
fid.write(">seq_%d\n" % iter)
fid.write("%s\n" % kmer)
iter += 1
##########################################################################
|
<commit_before><commit_msg>Add file to test out kmc approach. Dump training k-mers to fasta file<commit_after>
|
# This code will test out the idea of using kmc to
# 1. quickly enumerate the k-mers
# 2. intersect these with the training database, output as fasta
# 3. use that reduced fasta of intersecting kmers as the query to CMash
####################################################################
# First, I will need to dump the training database to a fasta file
from CMash import MinHash as MH
import os
import blist
training_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'
training_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'
training_file_names = "/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt"
file_names = []
with open(training_file_names, 'r') as fid:
iter = 0
for line in fid.readlines():
line = line.strip()
file_names.append(os.path.basename(line))
iter += 1
if iter > 1000:
break
all_kmers = blist.blist()
for file_name in file_names:
sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
all_kmers += sketch._kmers
all_kmers_set = set(all_kmers)
with open(training_out_file, 'w') as fid:
iter = 0
for kmer in all_kmers_set:
fid.write(">seq_%d\n" % iter)
fid.write("%s\n" % kmer)
iter += 1
##########################################################################
|
Add file to test out kmc approach. Dump training k-mers to fasta file# This code will test out the idea of using kmc to
# 1. quickly enumerate the k-mers
# 2. intersect these with the training database, output as fasta
# 3. use that reduced fasta of intersecting kmers as the query to CMash
####################################################################
# First, I will need to dump the training database to a fasta file
from CMash import MinHash as MH
import os
import blist
training_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'
training_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'
training_file_names = "/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt"
file_names = []
with open(training_file_names, 'r') as fid:
iter = 0
for line in fid.readlines():
line = line.strip()
file_names.append(os.path.basename(line))
iter += 1
if iter > 1000:
break
all_kmers = blist.blist()
for file_name in file_names:
sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
all_kmers += sketch._kmers
all_kmers_set = set(all_kmers)
with open(training_out_file, 'w') as fid:
iter = 0
for kmer in all_kmers_set:
fid.write(">seq_%d\n" % iter)
fid.write("%s\n" % kmer)
iter += 1
##########################################################################
|
<commit_before><commit_msg>Add file to test out kmc approach. Dump training k-mers to fasta file<commit_after># This code will test out the idea of using kmc to
# 1. quickly enumerate the k-mers
# 2. intersect these with the training database, output as fasta
# 3. use that reduced fasta of intersecting kmers as the query to CMash
####################################################################
# First, I will need to dump the training database to a fasta file
from CMash import MinHash as MH
import os
import blist
training_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'
training_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'
training_file_names = "/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt"
file_names = []
with open(training_file_names, 'r') as fid:
iter = 0
for line in fid.readlines():
line = line.strip()
file_names.append(os.path.basename(line))
iter += 1
if iter > 1000:
break
all_kmers = blist.blist()
for file_name in file_names:
sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
all_kmers += sketch._kmers
all_kmers_set = set(all_kmers)
with open(training_out_file, 'w') as fid:
iter = 0
for kmer in all_kmers_set:
fid.write(">seq_%d\n" % iter)
fid.write("%s\n" % kmer)
iter += 1
##########################################################################
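The blist-backed list only serves as an intermediate before the set() call; accumulating straight into a set would drop that dependency and the duplicated storage (a sketch reusing the same calls as above):

all_kmers_set = set()
for file_name in file_names:
    sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
    all_kmers_set.update(sketch._kmers)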
|
|
02ad029840b2e770bc802fd7f8504498cb0f756d
|
lib/ansible/plugins/test/mathstuff.py
|
lib/ansible/plugins/test/mathstuff.py
|
# (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
class TestModule(object):
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'issuperset': issuperset,
}
|
Add `issubset` and `issuperset` tests
|
Add `issubset` and `issuperset` tests
|
Python
|
mit
|
thaim/ansible,thaim/ansible
|
Add `issubset` and `issuperset` tests
|
# (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
class TestModule(object):
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'issuperset': issuperset,
}
|
<commit_before><commit_msg>Add `issubset` and `issuperset` tests<commit_after>
|
# (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
class TestModule(object):
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'issuperset': issuperset,
}
|
Add `issubset` and `issuperset` tests# (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
class TestModule(object):
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'issuperset': issuperset,
}
|
<commit_before><commit_msg>Add `issubset` and `issuperset` tests<commit_after># (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__metaclass__ = type
def issubset(a, b):
return set(a) <= set(b)
def issuperset(a, b):
return set(a) >= set(b)
class TestModule(object):
''' Ansible math jinja2 tests '''
def tests(self):
return {
# set theory
'issubset': issubset,
'issuperset': issuperset,
}
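As plain functions the two tests behave like Python's set comparisons; in a playbook they would appear as Jinja2 tests, e.g. {{ small_list is issubset(big_list) }}. For example (illustrative values):

print(issubset([1, 2], [1, 2, 3]))    # True
print(issuperset([1, 2, 3], [2, 4]))  # False, since 4 is missing from the first list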
|
|
bf86584829f56f91b363f251d77f3157f952db0f
|
tests/test_cyprep.py
|
tests/test_cyprep.py
|
import unittest
import numpy as np
import yatsm._cyprep
class TestCyPrep(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Test data
n_band = 7
n_mask = 50
n_images = 1000
cls.data = np.random.randint(
0, 10000, size=(n_band, n_images)).astype(np.int32)
for b in range(n_band):
cls.data[b, np.random.choice(np.arange(0, n_images),
size=n_mask, replace=False)] = 16000
cls.mins = np.repeat(0, n_band).astype(np.int16)
cls.maxes = np.repeat(10000, n_band).astype(np.int16)
def test_get_valid_mask(self):
truth = np.all([((b > _min) & (b < _max)) for b, _min, _max in
zip(np.rollaxis(self.data, 0),
self.mins,
self.maxes)], axis=0)
np.testing.assert_equal(
truth,
yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))
if __name__ == '__main__':
unittest.main()
|
Add tests for masking of data based on being within a range of values
|
Add tests for masking of data based on being within a range of values
|
Python
|
mit
|
ceholden/yatsm,c11/yatsm,jmorton/yatsm,valpasq/yatsm,ceholden/yatsm,valpasq/yatsm,jmorton/yatsm,c11/yatsm,jmorton/yatsm
|
Add tests for masking of data based on being within a range of values
|
import unittest
import numpy as np
import yatsm._cyprep
class TestCyPrep(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Test data
n_band = 7
n_mask = 50
n_images = 1000
cls.data = np.random.randint(
0, 10000, size=(n_band, n_images)).astype(np.int32)
for b in range(n_band):
cls.data[b, np.random.choice(np.arange(0, n_images),
size=n_mask, replace=False)] = 16000
cls.mins = np.repeat(0, n_band).astype(np.int16)
cls.maxes = np.repeat(10000, n_band).astype(np.int16)
def test_get_valid_mask(self):
truth = np.all([((b > _min) & (b < _max)) for b, _min, _max in
zip(np.rollaxis(self.data, 0),
self.mins,
self.maxes)], axis=0)
np.testing.assert_equal(
truth,
yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for masking of data based on being within a range of values<commit_after>
|
import unittest
import numpy as np
import yatsm._cyprep
class TestCyPrep(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Test data
n_band = 7
n_mask = 50
n_images = 1000
cls.data = np.random.randint(
0, 10000, size=(n_band, n_images)).astype(np.int32)
for b in range(n_band):
cls.data[b, np.random.choice(np.arange(0, n_images),
size=n_mask, replace=False)] = 16000
cls.mins = np.repeat(0, n_band).astype(np.int16)
cls.maxes = np.repeat(10000, n_band).astype(np.int16)
def test_get_valid_mask(self):
truth = np.all([((b > _min) & (b < _max)) for b, _min, _max in
zip(np.rollaxis(self.data, 0),
self.mins,
self.maxes)], axis=0)
np.testing.assert_equal(
truth,
yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))
if __name__ == '__main__':
unittest.main()
|
Add tests for masking of data based on being within a range of valuesimport unittest
import numpy as np
import yatsm._cyprep
class TestCyPrep(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Test data
n_band = 7
n_mask = 50
n_images = 1000
cls.data = np.random.randint(
0, 10000, size=(n_band, n_images)).astype(np.int32)
for b in range(n_band):
cls.data[b, np.random.choice(np.arange(0, n_images),
size=n_mask, replace=False)] = 16000
cls.mins = np.repeat(0, n_band).astype(np.int16)
cls.maxes = np.repeat(10000, n_band).astype(np.int16)
def test_get_valid_mask(self):
truth = np.all([((b > _min) & (b < _max)) for b, _min, _max in
zip(np.rollaxis(self.data, 0),
self.mins,
self.maxes)], axis=0)
np.testing.assert_equal(
truth,
yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for masking of data based on being within a range of values<commit_after>import unittest
import numpy as np
import yatsm._cyprep
class TestCyPrep(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Test data
n_band = 7
n_mask = 50
n_images = 1000
cls.data = np.random.randint(
0, 10000, size=(n_band, n_images)).astype(np.int32)
for b in range(n_band):
cls.data[b, np.random.choice(np.arange(0, n_images),
size=n_mask, replace=False)] = 16000
cls.mins = np.repeat(0, n_band).astype(np.int16)
cls.maxes = np.repeat(10000, n_band).astype(np.int16)
def test_get_valid_mask(self):
truth = np.all([((b > _min) & (b < _max)) for b, _min, _max in
zip(np.rollaxis(self.data, 0),
self.mins,
self.maxes)], axis=0)
np.testing.assert_equal(
truth,
yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))
if __name__ == '__main__':
unittest.main()
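The fixture draws unseeded random data, so a failing run would be hard to reproduce. Seeding NumPy at the top of setUpClass is one way to pin it down (a sketch):

np.random.seed(42)  # placed before the np.random.randint / np.random.choice calls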
|
|
aa78a2670766b0a5e093a1876cb402ed513573bd
|
openfisca_france/scripts/parameters/explore_parameters_unit.py
|
openfisca_france/scripts/parameters/explore_parameters_unit.py
|
# -*- coding: utf-8 -*-
from openfisca_core.parameters import ParameterNode, Scale
from openfisca_france import FranceTaxBenefitSystem
tax_benefit_system = FranceTaxBenefitSystem()
parameters = tax_benefit_system.parameters
def get_parameters_by_unit(parameter, parameters_by_unit = None):
if parameters_by_unit is None:
parameters_by_unit = dict(
scale = list(),
none = list(),
currency = list(),
rate = list(),
year = list(),
)
for name, sub_parameter in parameter.children.items():
if isinstance(sub_parameter, ParameterNode):
get_parameters_by_unit(sub_parameter, parameters_by_unit)
else:
if isinstance(sub_parameter, Scale):
parameters_by_unit['scale'].append(sub_parameter)
elif sub_parameter.unit is None:
parameters_by_unit['none'].append(sub_parameter)
elif sub_parameter.unit == "/1":
parameters_by_unit['rate'].append(sub_parameter)
elif sub_parameter.unit == "currency":
parameters_by_unit['currency'].append(sub_parameter)
elif sub_parameter.unit == "year":
parameters_by_unit['year'].append(sub_parameter)
else:
                raise ValueError("Parameter {} has a strange unit {}".format(
sub_parameter.name, sub_parameter.unit))
return parameters_by_unit
if __name__ == '__main__':
parameters_by_unit = get_parameters_by_unit(parameters)
print('Distribution of parameters types:')
for type_, sub_parameters in parameters_by_unit.items():
print(type_, len(parameters_by_unit[type_]))
print('\n')
print('List of parameters with no units')
for param in parameters_by_unit['none']:
print (param.name)
|
Add script to explore parameters units
|
Add script to explore parameters units
|
Python
|
agpl-3.0
|
antoinearnoud/openfisca-france,antoinearnoud/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france
|
Add script to explore parameters units
|
# -*- coding: utf-8 -*-
from openfisca_core.parameters import ParameterNode, Scale
from openfisca_france import FranceTaxBenefitSystem
tax_benefit_system = FranceTaxBenefitSystem()
parameters = tax_benefit_system.parameters
def get_parameters_by_unit(parameter, parameters_by_unit = None):
if parameters_by_unit is None:
parameters_by_unit = dict(
scale = list(),
none = list(),
currency = list(),
rate = list(),
year = list(),
)
for name, sub_parameter in parameter.children.items():
if isinstance(sub_parameter, ParameterNode):
get_parameters_by_unit(sub_parameter, parameters_by_unit)
else:
if isinstance(sub_parameter, Scale):
parameters_by_unit['scale'].append(sub_parameter)
elif sub_parameter.unit is None:
parameters_by_unit['none'].append(sub_parameter)
elif sub_parameter.unit == "/1":
parameters_by_unit['rate'].append(sub_parameter)
elif sub_parameter.unit == "currency":
parameters_by_unit['currency'].append(sub_parameter)
elif sub_parameter.unit == "year":
parameters_by_unit['year'].append(sub_parameter)
else:
                raise ValueError("Parameter {} has a strange unit {}".format(
sub_parameter.name, sub_parameter.unit))
return parameters_by_unit
if __name__ == '__main__':
parameters_by_unit = get_parameters_by_unit(parameters)
print('Distribution of parameters types:')
for type_, sub_parameters in parameters_by_unit.items():
print(type_, len(parameters_by_unit[type_]))
print('\n')
print('List of parameters with no units')
for param in parameters_by_unit['none']:
print (param.name)
|
<commit_before><commit_msg>Add script to explore parameters units<commit_after>
|
# -*- coding: utf-8 -*-
from openfisca_core.parameters import ParameterNode, Scale
from openfisca_france import FranceTaxBenefitSystem
tax_benefit_system = FranceTaxBenefitSystem()
parameters = tax_benefit_system.parameters
def get_parameters_by_unit(parameter, parameters_by_unit = None):
if parameters_by_unit is None:
parameters_by_unit = dict(
scale = list(),
none = list(),
currency = list(),
rate = list(),
year = list(),
)
for name, sub_parameter in parameter.children.items():
if isinstance(sub_parameter, ParameterNode):
get_parameters_by_unit(sub_parameter, parameters_by_unit)
else:
if isinstance(sub_parameter, Scale):
parameters_by_unit['scale'].append(sub_parameter)
elif sub_parameter.unit is None:
parameters_by_unit['none'].append(sub_parameter)
elif sub_parameter.unit == "/1":
parameters_by_unit['rate'].append(sub_parameter)
elif sub_parameter.unit == "currency":
parameters_by_unit['currency'].append(sub_parameter)
elif sub_parameter.unit == "year":
parameters_by_unit['year'].append(sub_parameter)
else:
                raise ValueError("Parameter {} has a strange unit {}".format(
sub_parameter.name, sub_parameter.unit))
return parameters_by_unit
if __name__ == '__main__':
parameters_by_unit = get_parameters_by_unit(parameters)
print('Distribution of parameters types:')
for type_, sub_parameters in parameters_by_unit.items():
print(type_, len(parameters_by_unit[type_]))
print('\n')
print('List of parameters with no units')
for param in parameters_by_unit['none']:
print (param.name)
|
Add script to explore parameters units# -*- coding: utf-8 -*-
from openfisca_core.parameters import ParameterNode, Scale
from openfisca_france import FranceTaxBenefitSystem
tax_benefit_system = FranceTaxBenefitSystem()
parameters = tax_benefit_system.parameters
def get_parameters_by_unit(parameter, parameters_by_unit = None):
if parameters_by_unit is None:
parameters_by_unit = dict(
scale = list(),
none = list(),
currency = list(),
rate = list(),
year = list(),
)
for name, sub_parameter in parameter.children.items():
if isinstance(sub_parameter, ParameterNode):
get_parameters_by_unit(sub_parameter, parameters_by_unit)
else:
if isinstance(sub_parameter, Scale):
parameters_by_unit['scale'].append(sub_parameter)
elif sub_parameter.unit is None:
parameters_by_unit['none'].append(sub_parameter)
elif sub_parameter.unit == "/1":
parameters_by_unit['rate'].append(sub_parameter)
elif sub_parameter.unit == "currency":
parameters_by_unit['currency'].append(sub_parameter)
elif sub_parameter.unit == "year":
parameters_by_unit['year'].append(sub_parameter)
else:
                raise ValueError("Parameter {} has a strange unit {}".format(
sub_parameter.name, sub_parameter.unit))
return parameters_by_unit
if __name__ == '__main__':
parameters_by_unit = get_parameters_by_unit(parameters)
print('Distribution of parameters types:')
for type_, sub_parameters in parameters_by_unit.items():
print(type_, len(parameters_by_unit[type_]))
print('\n')
print('List of parameters with no units')
for param in parameters_by_unit['none']:
print (param.name)
|
<commit_before><commit_msg>Add script to explore parameters units<commit_after># -*- coding: utf-8 -*-
from openfisca_core.parameters import ParameterNode, Scale
from openfisca_france import FranceTaxBenefitSystem
tax_benefit_system = FranceTaxBenefitSystem()
parameters = tax_benefit_system.parameters
def get_parameters_by_unit(parameter, parameters_by_unit = None):
if parameters_by_unit is None:
parameters_by_unit = dict(
scale = list(),
none = list(),
currency = list(),
rate = list(),
year = list(),
)
for name, sub_parameter in parameter.children.items():
if isinstance(sub_parameter, ParameterNode):
get_parameters_by_unit(sub_parameter, parameters_by_unit)
else:
if isinstance(sub_parameter, Scale):
parameters_by_unit['scale'].append(sub_parameter)
elif sub_parameter.unit is None:
parameters_by_unit['none'].append(sub_parameter)
elif sub_parameter.unit == "/1":
parameters_by_unit['rate'].append(sub_parameter)
elif sub_parameter.unit == "currency":
parameters_by_unit['currency'].append(sub_parameter)
elif sub_parameter.unit == "year":
parameters_by_unit['year'].append(sub_parameter)
else:
                raise ValueError("Parameter {} has a strange unit {}".format(
sub_parameter.name, sub_parameter.unit))
return parameters_by_unit
if __name__ == '__main__':
parameters_by_unit = get_parameters_by_unit(parameters)
print('Distribution of parameters types:')
for type_, sub_parameters in parameters_by_unit.items():
print(type_, len(parameters_by_unit[type_]))
print('\n')
print('List of parameters with no units')
for param in parameters_by_unit['none']:
print (param.name)
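The accumulator could also be a collections.defaultdict(list), which avoids pre-declaring every unit key; behaviour is otherwise unchanged (a sketch):

from collections import defaultdict
parameters_by_unit = defaultdict(list)  # keys such as 'scale' or 'rate' are created on first use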
|
|
26ab37868e67b5b815cf8df67cc04876ff44c148
|
tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py
|
tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
from .grammar import *
class NongrammarEntitiesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for Nongrammar entities tests
|
Add file for Nongrammar entities tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for Nongrammar entities tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
from .grammar import *
class NongrammarEntitiesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for Nongrammar entities tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
from .grammar import *
class NongrammarEntitiesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
Add file for Nongrammar entities tests#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
from .grammar import *
class NongrammarEntitiesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for Nongrammar entities tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import main, TestCase
from grammpy import Rule
from .grammar import *
class NongrammarEntitiesTest(TestCase):
pass
if __name__ == '__main__':
main()
|
|
fa3a02e6660ce556defc2f2c6008c6eb24eb71c1
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py
|
import time
import wave
import pygame
import numpy
import Axon
from Axon.SchedulingComponent import SchedulingComponent
class WavVoice(SchedulingComponent):
bufferSize = 1024
def __init__(self, fileName, **argd):
super(WavVoice, self).__init__(**argd)
self.on = False
self.wavFile = wave.open(fileName)
self.sampleRate = self.wavFile.getframerate()
self.period = float(self.bufferSize)/self.sampleRate
self.frame = 0
self.lastSendTime = time.time()
self.scheduleAbs("Send", self.lastSendTime + self.period)
def main(self):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
self.on = True
self.wavFile.rewind()
self.frame = 0
if address == "Off":
self.on = False
if self.dataReady("event"):
self.recv("event")
if self.on:
if self.frame < self.wavFile.getnframes():
sample = self.wavFile.readframes(self.bufferSize)
sample = numpy.frombuffer(sample, dtype="int16")
self.frame += len(sample)
if len(sample) < self.bufferSize:
# Pad with zeroes
padSize = self.bufferSize - len(sample)
sample = numpy.append(sample, numpy.zeros(padSize))
# Convert to float
sample = sample.astype("float64")
# Scale to -1 - 1
sample /= 2**(8 * self.wavFile.getsampwidth() - 1)
else:
sample = numpy.zeros(self.bufferSize)
else:
sample = numpy.zeros(self.bufferSize)
self.send(sample, "outbox")
self.lastSendTime += self.period
self.scheduleAbs("Send", self.lastSendTime + self.period)
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter
from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer
from Kamaelia.Apps.Jam.Audio.Synth import Synth
from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter
files = ["Ride", "HH", "Snare", "Kick"]
files = ["/home/joe/Desktop/%s.wav"%fileName for fileName in files]
def voiceGenerator():
for i in range(4):
yield WavVoice(files[i])
Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type="int16"), AOAudioPlaybackAdaptor()).run()
|
Add a simple sampler for playing wav files triggered by note on messages
|
Add a simple sampler for playing wav files triggered by note on messages
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Add a simple sampler for playing wav files triggered by note on messages
|
import time
import wave
import pygame
import numpy
import Axon
from Axon.SchedulingComponent import SchedulingComponent
class WavVoice(SchedulingComponent):
bufferSize = 1024
def __init__(self, fileName, **argd):
super(WavVoice, self).__init__(**argd)
self.on = False
self.wavFile = wave.open(fileName)
self.sampleRate = self.wavFile.getframerate()
self.period = float(self.bufferSize)/self.sampleRate
self.frame = 0
self.lastSendTime = time.time()
self.scheduleAbs("Send", self.lastSendTime + self.period)
def main(self):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
self.on = True
self.wavFile.rewind()
self.frame = 0
if address == "Off":
self.on = False
if self.dataReady("event"):
self.recv("event")
if self.on:
if self.frame < self.wavFile.getnframes():
sample = self.wavFile.readframes(self.bufferSize)
sample = numpy.frombuffer(sample, dtype="int16")
self.frame += len(sample)
if len(sample) < self.bufferSize:
# Pad with zeroes
padSize = self.bufferSize - len(sample)
sample = numpy.append(sample, numpy.zeros(padSize))
# Convert to float
sample = sample.astype("float64")
# Scale to -1 - 1
sample /= 2**(8 * self.wavFile.getsampwidth() - 1)
else:
sample = numpy.zeros(self.bufferSize)
else:
sample = numpy.zeros(self.bufferSize)
self.send(sample, "outbox")
self.lastSendTime += self.period
self.scheduleAbs("Send", self.lastSendTime + self.period)
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter
from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer
from Kamaelia.Apps.Jam.Audio.Synth import Synth
from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter
files = ["Ride", "HH", "Snare", "Kick"]
files = ["/home/joe/Desktop/%s.wav"%fileName for fileName in files]
def voiceGenerator():
for i in range(4):
yield WavVoice(files[i])
Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type="int16"), AOAudioPlaybackAdaptor()).run()
|
<commit_before><commit_msg>Add a simple sampler for playing wav files triggered by note on messages<commit_after>
|
import time
import wave
import pygame
import numpy
import Axon
from Axon.SchedulingComponent import SchedulingComponent
class WavVoice(SchedulingComponent):
bufferSize = 1024
def __init__(self, fileName, **argd):
super(WavVoice, self).__init__(**argd)
self.on = False
self.wavFile = wave.open(fileName)
self.sampleRate = self.wavFile.getframerate()
self.period = float(self.bufferSize)/self.sampleRate
self.frame = 0
self.lastSendTime = time.time()
self.scheduleAbs("Send", self.lastSendTime + self.period)
def main(self):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
self.on = True
self.wavFile.rewind()
self.frame = 0
if address == "Off":
self.on = False
if self.dataReady("event"):
self.recv("event")
if self.on:
if self.frame < self.wavFile.getnframes():
sample = self.wavFile.readframes(self.bufferSize)
sample = numpy.frombuffer(sample, dtype="int16")
self.frame += len(sample)
if len(sample) < self.bufferSize:
# Pad with zeroes
padSize = self.bufferSize - len(sample)
sample = numpy.append(sample, numpy.zeros(padSize))
# Convert to float
sample = sample.astype("float64")
# Scale to -1 - 1
sample /= 2**(8 * self.wavFile.getsampwidth() - 1)
else:
sample = numpy.zeros(self.bufferSize)
else:
sample = numpy.zeros(self.bufferSize)
self.send(sample, "outbox")
self.lastSendTime += self.period
self.scheduleAbs("Send", self.lastSendTime + self.period)
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter
from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer
from Kamaelia.Apps.Jam.Audio.Synth import Synth
from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter
files = ["Ride", "HH", "Snare", "Kick"]
files = ["/home/joe/Desktop/%s.wav"%fileName for fileName in files]
def voiceGenerator():
for i in range(4):
yield WavVoice(files[i])
Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type="int16"), AOAudioPlaybackAdaptor()).run()
|
Add a simple sampler for playing wav files triggered by note on messagesimport time
import wave
import pygame
import numpy
import Axon
from Axon.SchedulingComponent import SchedulingComponent
class WavVoice(SchedulingComponent):
bufferSize = 1024
def __init__(self, fileName, **argd):
super(WavVoice, self).__init__(**argd)
self.on = False
self.wavFile = wave.open(fileName)
self.sampleRate = self.wavFile.getframerate()
self.period = float(self.bufferSize)/self.sampleRate
self.frame = 0
self.lastSendTime = time.time()
self.scheduleAbs("Send", self.lastSendTime + self.period)
def main(self):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
self.on = True
self.wavFile.rewind()
self.frame = 0
if address == "Off":
self.on = False
if self.dataReady("event"):
self.recv("event")
if self.on:
if self.frame < self.wavFile.getnframes():
sample = self.wavFile.readframes(self.bufferSize)
sample = numpy.frombuffer(sample, dtype="int16")
self.frame += len(sample)
if len(sample) < self.bufferSize:
# Pad with zeroes
padSize = self.bufferSize - len(sample)
sample = numpy.append(sample, numpy.zeros(padSize))
# Convert to float
sample = sample.astype("float64")
# Scale to -1 - 1
sample /= 2**(8 * self.wavFile.getsampwidth() - 1)
else:
sample = numpy.zeros(self.bufferSize)
else:
sample = numpy.zeros(self.bufferSize)
self.send(sample, "outbox")
self.lastSendTime += self.period
self.scheduleAbs("Send", self.lastSendTime + self.period)
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter
from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer
from Kamaelia.Apps.Jam.Audio.Synth import Synth
from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter
files = ["Ride", "HH", "Snare", "Kick"]
files = ["/home/joe/Desktop/%s.wav"%fileName for fileName in files]
def voiceGenerator():
for i in range(4):
yield WavVoice(files[i])
Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type="int16"), AOAudioPlaybackAdaptor()).run()
|
<commit_before><commit_msg>Add a simple sampler for playing wav files triggered by note on messages<commit_after>import time
import wave
import pygame
import numpy
import Axon
from Axon.SchedulingComponent import SchedulingComponent
class WavVoice(SchedulingComponent):
bufferSize = 1024
def __init__(self, fileName, **argd):
super(WavVoice, self).__init__(**argd)
self.on = False
self.wavFile = wave.open(fileName)
self.sampleRate = self.wavFile.getframerate()
self.period = float(self.bufferSize)/self.sampleRate
self.frame = 0
self.lastSendTime = time.time()
self.scheduleAbs("Send", self.lastSendTime + self.period)
def main(self):
while 1:
if self.dataReady("inbox"):
address, arguments = self.recv("inbox")
address = address.split("/")[-1]
if address == "On":
self.on = True
self.wavFile.rewind()
self.frame = 0
if address == "Off":
self.on = False
if self.dataReady("event"):
self.recv("event")
if self.on:
if self.frame < self.wavFile.getnframes():
sample = self.wavFile.readframes(self.bufferSize)
sample = numpy.frombuffer(sample, dtype="int16")
self.frame += len(sample)
if len(sample) < self.bufferSize:
# Pad with zeroes
padSize = self.bufferSize - len(sample)
sample = numpy.append(sample, numpy.zeros(padSize))
# Convert to float
sample = sample.astype("float64")
# Scale to -1 - 1
sample /= 2**(8 * self.wavFile.getsampwidth() - 1)
else:
sample = numpy.zeros(self.bufferSize)
else:
sample = numpy.zeros(self.bufferSize)
self.send(sample, "outbox")
self.lastSendTime += self.period
self.scheduleAbs("Send", self.lastSendTime + self.period)
if not self.anyReady():
self.pause()
if __name__ == "__main__":
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter
from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor
from Kamaelia.Util.PureTransformer import PureTransformer
from Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer
from Kamaelia.Apps.Jam.Audio.Synth import Synth
from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter
files = ["Ride", "HH", "Snare", "Kick"]
files = ["/home/joe/Desktop/%s.wav"%fileName for fileName in files]
def voiceGenerator():
for i in range(4):
yield WavVoice(files[i])
Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type="int16"), AOAudioPlaybackAdaptor()).run()
|
|
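For reference, the int16-to-float scaling that WavVoice applies to each block can be exercised on its own. A minimal sketch, assuming a 16-bit PCM wav file is available (the path below is only illustrative):

import wave
import numpy
def read_normalised_block(path, block_size=1024):
    """Read one block of frames and scale int16 PCM samples into the range -1.0..1.0."""
    wav = wave.open(path)
    raw = wav.readframes(block_size)
    samples = numpy.frombuffer(raw, dtype="int16").astype("float64")
    # Same scaling as WavVoice: 2 ** (8 * sampwidth - 1) is 32768 for 16-bit audio.
    samples /= 2 ** (8 * wav.getsampwidth() - 1)
    wav.close()
    return samples
if __name__ == "__main__":
    block = read_normalised_block("/tmp/example.wav")  # hypothetical test file
    print(len(block), block.min(), block.max())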
4e3644234fab9cb14a3d511b24bce3ed8a1446e0
|
tests/scales/test_minor.py
|
tests/scales/test_minor.py
|
# Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone
def take(it, count):
for _ in range(count):
yield next(it)
SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']
def test_scale_acending_iteratation():
cs = NaturalMinorScale(Tone(100)) # Bb4
series = list(take(cs.acending(), 8))
assert [x._tone_name() for x in series] == SCALE
def test_scale_decending_iteratation():
cs = NaturalMinorScale(Tone(1300)) # Bb5
series = list(take(cs.decending(), 8))
assert [x._tone_name() for x in series] == list(reversed(SCALE))
|
Add in a minor testcase.
|
Add in a minor testcase.
|
Python
|
mit
|
paultag/python-muse
|
Add in a minor testcase.
|
# Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone
def take(it, count):
for _ in range(count):
yield next(it)
SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']
def test_scale_acending_iteratation():
cs = NaturalMinorScale(Tone(100)) # Bb4
series = list(take(cs.acending(), 8))
assert [x._tone_name() for x in series] == SCALE
def test_scale_decending_iteratation():
cs = NaturalMinorScale(Tone(1300)) # Bb5
series = list(take(cs.decending(), 8))
assert [x._tone_name() for x in series] == list(reversed(SCALE))
|
<commit_before><commit_msg>Add in a minor testcase.<commit_after>
|
# Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone
def take(it, count):
for _ in range(count):
yield next(it)
SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']
def test_scale_acending_iteratation():
cs = NaturalMinorScale(Tone(100)) # Bb4
series = list(take(cs.acending(), 8))
assert [x._tone_name() for x in series] == SCALE
def test_scale_decending_iteratation():
cs = NaturalMinorScale(Tone(1300)) # Bb5
series = list(take(cs.decending(), 8))
assert [x._tone_name() for x in series] == list(reversed(SCALE))
|
Add in a minor testcase.# Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone
def take(it, count):
for _ in range(count):
yield next(it)
SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']
def test_scale_acending_iteratation():
cs = NaturalMinorScale(Tone(100)) # Bb4
series = list(take(cs.acending(), 8))
assert [x._tone_name() for x in series] == SCALE
def test_scale_decending_iteratation():
cs = NaturalMinorScale(Tone(1300)) # Bb5
series = list(take(cs.decending(), 8))
assert [x._tone_name() for x in series] == list(reversed(SCALE))
|
<commit_before><commit_msg>Add in a minor testcase.<commit_after># Copyright (c) Paul R. Tagliamonte <tag@pault.ag>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone
def take(it, count):
for _ in range(count):
yield next(it)
SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']
def test_scale_acending_iteratation():
cs = NaturalMinorScale(Tone(100)) # Bb4
series = list(take(cs.acending(), 8))
assert [x._tone_name() for x in series] == SCALE
def test_scale_decending_iteratation():
cs = NaturalMinorScale(Tone(1300)) # Bb5
series = list(take(cs.decending(), 8))
assert [x._tone_name() for x in series] == list(reversed(SCALE))
|
|
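The SCALE list used by these tests follows the natural-minor interval pattern (whole, half, whole, whole, half, whole, whole). A small sketch of that pattern in plain Python, independent of the muse API:

# Semitone steps of a natural minor scale, tonic to tonic.
NATURAL_MINOR_STEPS = [2, 1, 2, 2, 1, 2, 2]
def natural_minor_offsets():
    """Yield cumulative semitone offsets (0..12) for one octave of a natural minor scale."""
    offset = 0
    yield offset
    for step in NATURAL_MINOR_STEPS:
        offset += step
        yield offset
# B-flat 4 up to B-flat 5 in the expected SCALE matches these offsets exactly.
assert list(natural_minor_offsets()) == [0, 2, 3, 5, 7, 8, 10, 12]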
6f8c64ed6f99493811cab54137a1eed44d851260
|
scripts/GetGroupAndModuleFromClassName.py
|
scripts/GetGroupAndModuleFromClassName.py
|
#!/usr/bin/env python
""" Given the path to the ITK Source Dir
print group and module of a given class
for instance, try:
./GetGroupAndModuleFromClassName /path/to/ITK Image
"""
import sys
import os
itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )
if not os.path.exists( cmakefile ):
print 'Error: wrong path'
else:
class_name = sys.argv[2]
path = ''
for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
for f in files:
if f == 'itk' + class_name + '.h':
path = root
if len( path ) != 0:
# let's extract the Group
temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
temp = temp.strip( 'include' )
GroupModule = temp.split( '/' )
print 'Group: ' + GroupModule[ 0 ]
print 'Module: ' + GroupModule[ 1 ]
else:
print 'Error: this class is not part of itk'
|
Add python script to get group and module given a class name
|
Add python script to get group and module given a class name
|
Python
|
apache-2.0
|
InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples,InsightSoftwareConsortium/ITKExamples
|
Add python script to get group and module given a class name
|
#!/usr/bin/env python
""" Given the path to the ITK Source Dir
print group and module of a given class
for instance, try:
./GetGroupAndModuleFromClassName /path/to/ITK Image
"""
import sys
import os
itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )
if not os.path.exists( cmakefile ):
print 'Error: wrong path'
else:
class_name = sys.argv[2]
path = ''
for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
for f in files:
if f == 'itk' + class_name + '.h':
path = root
if len( path ) != 0:
# let's extract the Group
temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
temp = temp.strip( 'include' )
GroupModule = temp.split( '/' )
print 'Group: ' + GroupModule[ 0 ]
print 'Module: ' + GroupModule[ 1 ]
else:
print 'Error: this class is not part of itk'
|
<commit_before><commit_msg>Add python script to get group and module given a class name<commit_after>
|
#!/usr/bin/env python
""" Given the path to the ITK Source Dir
print group and module of a given class
for instance, try:
./GetGroupAndModuleFromClassName /path/to/ITK Image
"""
import sys
import os
itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )
if not os.path.exists( cmakefile ):
print 'Error: wrong path'
else:
class_name = sys.argv[2]
path = ''
for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
for f in files:
if f == 'itk' + class_name + '.h':
path = root
if len( path ) != 0:
# let's extract the Group
temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
temp = temp.strip( 'include' )
GroupModule = temp.split( '/' )
print 'Group: ' + GroupModule[ 0 ]
print 'Module: ' + GroupModule[ 1 ]
else:
print 'Error: this class is not part of itk'
|
Add python script to get group and module given a class name#!/usr/bin/env python
""" Given the path to the ITK Source Dir
print group and module of a given class
for instance, try:
./GetGroupAndModuleFromClassName /path/to/ITK Image
"""
import sys
import os
itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )
if not os.path.exists( cmakefile ):
print 'Error: wrong path'
else:
class_name = sys.argv[2]
path = ''
for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
for f in files:
if f == 'itk' + class_name + '.h':
path = root
if len( path ) != 0:
# let's extract the Group
temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
temp = temp.strip( 'include' )
GroupModule = temp.split( '/' )
print 'Group: ' + GroupModule[ 0 ]
print 'Module: ' + GroupModule[ 1 ]
else:
print 'Error: this class is not part of itk'
|
<commit_before><commit_msg>Add python script to get group and module given a class name<commit_after>#!/usr/bin/env python
""" Given the path to the ITK Source Dir
print group and module of a given class
for instance, try:
./GetGroupAndModuleFromClassName /path/to/ITK Image
"""
import sys
import os
itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )
if not os.path.exists( cmakefile ):
print 'Error: wrong path'
else:
class_name = sys.argv[2]
path = ''
for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
for f in files:
if f == 'itk' + class_name + '.h':
path = root
if len( path ) != 0:
# let's extract the Group
temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
temp = temp.strip( 'include' )
GroupModule = temp.split( '/' )
print 'Group: ' + GroupModule[ 0 ]
print 'Module: ' + GroupModule[ 1 ]
else:
print 'Error: this class is not part of itk'
|
|
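One caveat worth noting about the script above: str.strip() removes a set of characters rather than a leading path prefix, so it can also eat letters from the start of the group name. A hedged sketch of a more defensive variant using os.path.relpath, assuming the usual Modules/<Group>/<Module>/include layout (the entry point mirrors the original script):

import os
import sys
def find_group_and_module(itk_dir, class_name):
    """Walk Modules/ and return (group, module) for itk<ClassName>.h, or None if absent."""
    modules_dir = os.path.join(itk_dir, 'Modules')
    header = 'itk' + class_name + '.h'
    for root, dirs, files in os.walk(modules_dir):
        if header in files:
            # e.g. Core/Common/include relative to Modules/ -> ('Core', 'Common')
            parts = os.path.relpath(root, modules_dir).split(os.sep)
            return parts[0], parts[1]
    return None
if __name__ == '__main__':
    found = find_group_and_module(sys.argv[1], sys.argv[2])
    if found:
        print('Group: ' + found[0])
        print('Module: ' + found[1])
    else:
        print('Error: this class is not part of itk')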
acc5c52011db4c8edc615ae3e0cad9cea4fe58b8
|
spreadflow_observer_fs/test/test_source.py
|
spreadflow_observer_fs/test/test_source.py
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
"""
Integration tests for spreadflow filesystem observer source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from bson import BSON
from datetime import datetime
from twisted.internet import defer
from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest
from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation
from spreadflow_observer_fs.source import FilesystemObserverSource
def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn process method signature.
"""
class SpreadflowSourceIntegrationTestCase(TestCase):
"""
Integration tests for spreadflow filesystem observer source.
"""
@run_test_with(AsynchronousDeferredRunTest)
@defer.inlineCallbacks
def test_source_process(self):
source = FilesystemObserverSource('*.txt', '/some/directory')
reactor = Mock()
reactor.spawnProcess = Mock(spec=_spawnProcess)
scheduler = Mock()
scheduler.send = Mock(spec=Scheduler.send)
# Attach source to the scheduler.
yield source.attach(scheduler, reactor)
self.assertEquals(reactor.spawnProcess.call_count, 1)
# Simulate a message directed to the source.
msg = {
'port': 'default',
'item': {
'type': 'delta',
'date': datetime(2010, 10, 20, 20, 10),
'inserts': ['abcdefg'],
'deletes': ['hiklmno'],
'data': {
'abcdefg': {
'path': '/some/directory/xyz.txt'
}
}
}
}
matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
source.peer.dataReceived(BSON.encode(msg))
self.assertEquals(scheduler.send.call_count, 1)
self.assertThat(scheduler.send.call_args, matches)
|
Add basic test for filesystem observer source
|
Add basic test for filesystem observer source
|
Python
|
mit
|
znerol/spreadflow-observer-fs,spreadflow/spreadflow-observer-fs
|
Add basic test for filesystem observer source
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
"""
Integration tests for spreadflow filesystem observer source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from bson import BSON
from datetime import datetime
from twisted.internet import defer
from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest
from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation
from spreadflow_observer_fs.source import FilesystemObserverSource
def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn process method signature.
"""
class SpreadflowSourceIntegrationTestCase(TestCase):
"""
Integration tests for spreadflow filesystem observer source.
"""
@run_test_with(AsynchronousDeferredRunTest)
@defer.inlineCallbacks
def test_source_process(self):
source = FilesystemObserverSource('*.txt', '/some/directory')
reactor = Mock()
reactor.spawnProcess = Mock(spec=_spawnProcess)
scheduler = Mock()
scheduler.send = Mock(spec=Scheduler.send)
# Attach source to the scheduler.
yield source.attach(scheduler, reactor)
self.assertEquals(reactor.spawnProcess.call_count, 1)
# Simulate a message directed to the source.
msg = {
'port': 'default',
'item': {
'type': 'delta',
'date': datetime(2010, 10, 20, 20, 10),
'inserts': ['abcdefg'],
'deletes': ['hiklmno'],
'data': {
'abcdefg': {
'path': '/some/directory/xyz.txt'
}
}
}
}
matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
source.peer.dataReceived(BSON.encode(msg))
self.assertEquals(scheduler.send.call_count, 1)
self.assertThat(scheduler.send.call_args, matches)
|
<commit_before><commit_msg>Add basic test for filesystem observer source<commit_after>
|
# -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
"""
Integration tests for spreadflow filesystem observer source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from bson import BSON
from datetime import datetime
from twisted.internet import defer
from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest
from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation
from spreadflow_observer_fs.source import FilesystemObserverSource
def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn process method signature.
"""
class SpreadflowSourceIntegrationTestCase(TestCase):
"""
Integration tests for spreadflow filesystem observer source.
"""
@run_test_with(AsynchronousDeferredRunTest)
@defer.inlineCallbacks
def test_source_process(self):
source = FilesystemObserverSource('*.txt', '/some/directory')
reactor = Mock()
reactor.spawnProcess = Mock(spec=_spawnProcess)
scheduler = Mock()
scheduler.send = Mock(spec=Scheduler.send)
# Attach source to the scheduler.
yield source.attach(scheduler, reactor)
self.assertEquals(reactor.spawnProcess.call_count, 1)
# Simulate a message directed to the source.
msg = {
'port': 'default',
'item': {
'type': 'delta',
'date': datetime(2010, 10, 20, 20, 10),
'inserts': ['abcdefg'],
'deletes': ['hiklmno'],
'data': {
'abcdefg': {
'path': '/some/directory/xyz.txt'
}
}
}
}
matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
source.peer.dataReceived(BSON.encode(msg))
self.assertEquals(scheduler.send.call_count, 1)
self.assertThat(scheduler.send.call_args, matches)
|
Add basic test for filesystem observer source# -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
"""
Integration tests for spreadflow filesystem observer source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from bson import BSON
from datetime import datetime
from twisted.internet import defer
from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest
from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation
from spreadflow_observer_fs.source import FilesystemObserverSource
def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn process method signature.
"""
class SpreadflowSourceIntegrationTestCase(TestCase):
"""
Integration tests for spreadflow filesystem observer source.
"""
@run_test_with(AsynchronousDeferredRunTest)
@defer.inlineCallbacks
def test_source_process(self):
source = FilesystemObserverSource('*.txt', '/some/directory')
reactor = Mock()
reactor.spawnProcess = Mock(spec=_spawnProcess)
scheduler = Mock()
scheduler.send = Mock(spec=Scheduler.send)
# Attach source to the scheduler.
yield source.attach(scheduler, reactor)
self.assertEquals(reactor.spawnProcess.call_count, 1)
# Simulate a message directed to the source.
msg = {
'port': 'default',
'item': {
'type': 'delta',
'date': datetime(2010, 10, 20, 20, 10),
'inserts': ['abcdefg'],
'deletes': ['hiklmno'],
'data': {
'abcdefg': {
'path': '/some/directory/xyz.txt'
}
}
}
}
matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
source.peer.dataReceived(BSON.encode(msg))
self.assertEquals(scheduler.send.call_count, 1)
self.assertThat(scheduler.send.call_args, matches)
|
<commit_before><commit_msg>Add basic test for filesystem observer source<commit_after># -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods
"""
Integration tests for spreadflow filesystem observer source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import copy
from bson import BSON
from datetime import datetime
from twisted.internet import defer
from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest
from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation
from spreadflow_observer_fs.source import FilesystemObserverSource
def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn process method signature.
"""
class SpreadflowSourceIntegrationTestCase(TestCase):
"""
Integration tests for spreadflow filesystem observer source.
"""
@run_test_with(AsynchronousDeferredRunTest)
@defer.inlineCallbacks
def test_source_process(self):
source = FilesystemObserverSource('*.txt', '/some/directory')
reactor = Mock()
reactor.spawnProcess = Mock(spec=_spawnProcess)
scheduler = Mock()
scheduler.send = Mock(spec=Scheduler.send)
# Attach source to the scheduler.
yield source.attach(scheduler, reactor)
self.assertEquals(reactor.spawnProcess.call_count, 1)
# Simulate a message directed to the source.
msg = {
'port': 'default',
'item': {
'type': 'delta',
'date': datetime(2010, 10, 20, 20, 10),
'inserts': ['abcdefg'],
'deletes': ['hiklmno'],
'data': {
'abcdefg': {
'path': '/some/directory/xyz.txt'
}
}
}
}
matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
source.peer.dataReceived(BSON.encode(msg))
self.assertEquals(scheduler.send.call_count, 1)
self.assertThat(scheduler.send.call_args, matches)
|
|
5273a97ab1da4b809573617d3fc01705c322992f
|
thecut/authorship/tests/test_forms.py
|
thecut/authorship/tests/test_forms.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django import forms
from mock import patch
from test_app.models import AuthorshipModel
from thecut.authorship.factories import UserFactory
from thecut.authorship.forms import AuthorshipMixin
class AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):
class Meta:
model = AuthorshipModel
fields = []
class DummyUser(object):
pass
class TestAuthorshipMixin(TestCase):
def test_requires_an_extra_argument_on_creating_an_instance(self):
self.assertRaises(TypeError, AuthorshipModelForm)
def test_sets_user_attribute(self):
dummy_user = DummyUser()
form = AuthorshipModelForm(user=dummy_user)
self.assertEqual(dummy_user, form.user)
class DummyUnsavedModel(object):
def __init__(self):
self.pk = None
class TestAuthorshipMixinSave(TestCase):
@patch('django.forms.ModelForm.save')
def test_calls_super_class_save_method(self, superclass_save):
form = AuthorshipModelForm(user=UserFactory())
form.instance = DummyUnsavedModel()
form.save()
self.assertTrue(superclass_save.called)
@patch('django.forms.ModelForm.save')
def test_sets_updated_by_to_given_user(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.updated_by)
@patch('django.forms.ModelForm.save')
def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.created_by)
@patch('django.forms.ModelForm.save')
def test_does_not_set_created_by_if_instance_is_saved(self,
superclass_save):
class DummySavedModel(object):
def __init__(self):
self.pk = 'arbitrary-value'
self.created_by = 'arbitrary-value'
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummySavedModel()
form.cleaned_data = {}
form.save()
self.assertNotEqual(user, form.instance.created_by)
|
Add tests for form mixin.
|
Add tests for form mixin.
|
Python
|
apache-2.0
|
thecut/thecut-authorship
|
Add tests for form mixin.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django import forms
from mock import patch
from test_app.models import AuthorshipModel
from thecut.authorship.factories import UserFactory
from thecut.authorship.forms import AuthorshipMixin
class AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):
class Meta:
model = AuthorshipModel
fields = []
class DummyUser(object):
pass
class TestAuthorshipMixin(TestCase):
def test_requires_an_extra_argument_on_creating_an_instance(self):
self.assertRaises(TypeError, AuthorshipModelForm)
def test_sets_user_attribute(self):
dummy_user = DummyUser()
form = AuthorshipModelForm(user=dummy_user)
self.assertEqual(dummy_user, form.user)
class DummyUnsavedModel(object):
def __init__(self):
self.pk = None
class TestAuthorshipMixinSave(TestCase):
@patch('django.forms.ModelForm.save')
def test_calls_super_class_save_method(self, superclass_save):
form = AuthorshipModelForm(user=UserFactory())
form.instance = DummyUnsavedModel()
form.save()
self.assertTrue(superclass_save.called)
@patch('django.forms.ModelForm.save')
def test_sets_updated_by_to_given_user(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.updated_by)
@patch('django.forms.ModelForm.save')
def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.created_by)
@patch('django.forms.ModelForm.save')
def test_does_not_set_created_by_if_instance_is_saved(self,
superclass_save):
class DummySavedModel(object):
def __init__(self):
self.pk = 'arbitrary-value'
self.created_by = 'arbitrary-value'
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummySavedModel()
form.cleaned_data = {}
form.save()
self.assertNotEqual(user, form.instance.created_by)
|
<commit_before><commit_msg>Add tests for form mixin.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django import forms
from mock import patch
from test_app.models import AuthorshipModel
from thecut.authorship.factories import UserFactory
from thecut.authorship.forms import AuthorshipMixin
class AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):
class Meta:
model = AuthorshipModel
fields = []
class DummyUser(object):
pass
class TestAuthorshipMixin(TestCase):
def test_requires_an_extra_argument_on_creating_an_instance(self):
self.assertRaises(TypeError, AuthorshipModelForm)
def test_sets_user_attribute(self):
dummy_user = DummyUser()
form = AuthorshipModelForm(user=dummy_user)
self.assertEqual(dummy_user, form.user)
class DummyUnsavedModel(object):
def __init__(self):
self.pk = None
class TestAuthorshipMixinSave(TestCase):
@patch('django.forms.ModelForm.save')
def test_calls_super_class_save_method(self, superclass_save):
form = AuthorshipModelForm(user=UserFactory())
form.instance = DummyUnsavedModel()
form.save()
self.assertTrue(superclass_save.called)
@patch('django.forms.ModelForm.save')
def test_sets_updated_by_to_given_user(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.updated_by)
@patch('django.forms.ModelForm.save')
def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.created_by)
@patch('django.forms.ModelForm.save')
def test_does_not_set_created_by_if_instance_is_saved(self,
superclass_save):
class DummySavedModel(object):
def __init__(self):
self.pk = 'arbitrary-value'
self.created_by = 'arbitrary-value'
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummySavedModel()
form.cleaned_data = {}
form.save()
self.assertNotEqual(user, form.instance.created_by)
|
Add tests for form mixin.# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django import forms
from mock import patch
from test_app.models import AuthorshipModel
from thecut.authorship.factories import UserFactory
from thecut.authorship.forms import AuthorshipMixin
class AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):
class Meta:
model = AuthorshipModel
fields = []
class DummyUser(object):
pass
class TestAuthorshipMixin(TestCase):
def test_requires_an_extra_argument_on_creating_an_instance(self):
self.assertRaises(TypeError, AuthorshipModelForm)
def test_sets_user_attribute(self):
dummy_user = DummyUser()
form = AuthorshipModelForm(user=dummy_user)
self.assertEqual(dummy_user, form.user)
class DummyUnsavedModel(object):
def __init__(self):
self.pk = None
class TestAuthorshipMixinSave(TestCase):
@patch('django.forms.ModelForm.save')
def test_calls_super_class_save_method(self, superclass_save):
form = AuthorshipModelForm(user=UserFactory())
form.instance = DummyUnsavedModel()
form.save()
self.assertTrue(superclass_save.called)
@patch('django.forms.ModelForm.save')
def test_sets_updated_by_to_given_user(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.updated_by)
@patch('django.forms.ModelForm.save')
def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.created_by)
@patch('django.forms.ModelForm.save')
def test_does_not_set_created_by_if_instance_is_saved(self,
superclass_save):
class DummySavedModel(object):
def __init__(self):
self.pk = 'arbitrary-value'
self.created_by = 'arbitrary-value'
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummySavedModel()
form.cleaned_data = {}
form.save()
self.assertNotEqual(user, form.instance.created_by)
|
<commit_before><commit_msg>Add tests for form mixin.<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from django import forms
from mock import patch
from test_app.models import AuthorshipModel
from thecut.authorship.factories import UserFactory
from thecut.authorship.forms import AuthorshipMixin
class AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):
class Meta:
model = AuthorshipModel
fields = []
class DummyUser(object):
pass
class TestAuthorshipMixin(TestCase):
def test_requires_an_extra_argument_on_creating_an_instance(self):
self.assertRaises(TypeError, AuthorshipModelForm)
def test_sets_user_attribute(self):
dummy_user = DummyUser()
form = AuthorshipModelForm(user=dummy_user)
self.assertEqual(dummy_user, form.user)
class DummyUnsavedModel(object):
def __init__(self):
self.pk = None
class TestAuthorshipMixinSave(TestCase):
@patch('django.forms.ModelForm.save')
def test_calls_super_class_save_method(self, superclass_save):
form = AuthorshipModelForm(user=UserFactory())
form.instance = DummyUnsavedModel()
form.save()
self.assertTrue(superclass_save.called)
@patch('django.forms.ModelForm.save')
def test_sets_updated_by_to_given_user(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.updated_by)
@patch('django.forms.ModelForm.save')
def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummyUnsavedModel()
form.cleaned_data = {}
form.save()
self.assertEqual(user, form.instance.created_by)
@patch('django.forms.ModelForm.save')
def test_does_not_set_created_by_if_instance_is_saved(self,
superclass_save):
class DummySavedModel(object):
def __init__(self):
self.pk = 'arbitrary-value'
self.created_by = 'arbitrary-value'
user = DummyUser()
form = AuthorshipModelForm(user=user)
form.instance = DummySavedModel()
form.cleaned_data = {}
form.save()
self.assertNotEqual(user, form.instance.created_by)
|
|
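The tests above pin down the behaviour expected of the form mixin without showing it. A minimal sketch of one mixin that would satisfy them, reconstructed from the tests alone rather than taken from the thecut.authorship source (the class name is hypothetical, and it is meant to be mixed into a ModelForm):

class HypotheticalAuthorshipMixin(object):
    """Take a user argument on construction and stamp authorship fields on save."""
    def __init__(self, user, *args, **kwargs):
        # Omitting user raises TypeError, matching the first test above.
        super(HypotheticalAuthorshipMixin, self).__init__(*args, **kwargs)
        self.user = user
    def save(self, *args, **kwargs):
        # Only stamp created_by on unsaved instances; always refresh updated_by.
        if not self.instance.pk:
            self.instance.created_by = self.user
        self.instance.updated_by = self.user
        return super(HypotheticalAuthorshipMixin, self).save(*args, **kwargs)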
e838b6d53f131badfbb7b51b4eb268ebb5d7c450
|
spacy/tests/matcher/test_entity_id.py
|
spacy/tests/matcher/test_entity_id.py
|
from __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
import pytest
@pytest.fixture
def en_vocab():
return spacy.get_lang_class('en').Defaults.create_vocab()
def test_init_matcher(en_vocab):
matcher = Matcher(en_vocab)
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Some', u'words'])) == []
def test_add_empty_entity(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
def test_get_entity_attrs(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
entity = matcher.get_entity('TestEntity')
assert entity == {}
matcher.add_entity('TestEntity2', attrs={'Hello': 'World'})
entity = matcher.get_entity('TestEntity2')
assert entity == {'Hello': 'World'}
assert matcher.get_entity('TestEntity') == {}
def test_get_entity_via_match(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity', attrs={u'Hello': u'World'})
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
matcher.add_pattern(u'TestEntity', [{ORTH: u'Test'}, {ORTH: u'Entity'}])
assert matcher.n_patterns == 1
matches = matcher(Doc(en_vocab, words=[u'Test', u'Entity']))
assert len(matches) == 1
assert len(matches[0]) == 4
ent_id, label, start, end = matches[0]
assert ent_id == matcher.vocab.strings[u'TestEntity']
assert label == 0
assert start == 0
assert end == 2
attrs = matcher.get_entity(ent_id)
assert attrs == {u'Hello': u'World'}
|
Add tests for using the new Entity ID tracking in the rule matcher
|
Add tests for using the new Entity ID tracking in the rule matcher
|
Python
|
mit
|
banglakit/spaCy,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,raphael0202/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,banglakit/spaCy,raphael0202/spaCy,honnibal/spaCy,banglakit/spaCy,raphael0202/spaCy,recognai/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,raphael0202/spaCy,banglakit/spaCy,banglakit/spaCy,aikramer2/spaCy,honnibal/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,spacy-io/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,banglakit/spaCy,aikramer2/spaCy,spacy-io/spaCy,oroszgy/spaCy.hu,recognai/spaCy,explosion/spaCy,raphael0202/spaCy,recognai/spaCy,oroszgy/spaCy.hu,honnibal/spaCy,explosion/spaCy,explosion/spaCy,explosion/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,Gregory-Howard/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,recognai/spaCy,Gregory-Howard/spaCy
|
Add tests for using the new Entity ID tracking in the rule matcher
|
from __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
import pytest
@pytest.fixture
def en_vocab():
return spacy.get_lang_class('en').Defaults.create_vocab()
def test_init_matcher(en_vocab):
matcher = Matcher(en_vocab)
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Some', u'words'])) == []
def test_add_empty_entity(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
def test_get_entity_attrs(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
entity = matcher.get_entity('TestEntity')
assert entity == {}
matcher.add_entity('TestEntity2', attrs={'Hello': 'World'})
entity = matcher.get_entity('TestEntity2')
assert entity == {'Hello': 'World'}
assert matcher.get_entity('TestEntity') == {}
def test_get_entity_via_match(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity', attrs={u'Hello': u'World'})
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
matcher.add_pattern(u'TestEntity', [{ORTH: u'Test'}, {ORTH: u'Entity'}])
assert matcher.n_patterns == 1
matches = matcher(Doc(en_vocab, words=[u'Test', u'Entity']))
assert len(matches) == 1
assert len(matches[0]) == 4
ent_id, label, start, end = matches[0]
assert ent_id == matcher.vocab.strings[u'TestEntity']
assert label == 0
assert start == 0
assert end == 2
attrs = matcher.get_entity(ent_id)
assert attrs == {u'Hello': u'World'}
|
<commit_before><commit_msg>Add tests for using the new Entity ID tracking in the rule matcher<commit_after>
|
from __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
import pytest
@pytest.fixture
def en_vocab():
return spacy.get_lang_class('en').Defaults.create_vocab()
def test_init_matcher(en_vocab):
matcher = Matcher(en_vocab)
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Some', u'words'])) == []
def test_add_empty_entity(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
def test_get_entity_attrs(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
entity = matcher.get_entity('TestEntity')
assert entity == {}
matcher.add_entity('TestEntity2', attrs={'Hello': 'World'})
entity = matcher.get_entity('TestEntity2')
assert entity == {'Hello': 'World'}
assert matcher.get_entity('TestEntity') == {}
def test_get_entity_via_match(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity', attrs={u'Hello': u'World'})
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
matcher.add_pattern(u'TestEntity', [{ORTH: u'Test'}, {ORTH: u'Entity'}])
assert matcher.n_patterns == 1
matches = matcher(Doc(en_vocab, words=[u'Test', u'Entity']))
assert len(matches) == 1
assert len(matches[0]) == 4
ent_id, label, start, end = matches[0]
assert ent_id == matcher.vocab.strings[u'TestEntity']
assert label == 0
assert start == 0
assert end == 2
attrs = matcher.get_entity(ent_id)
assert attrs == {u'Hello': u'World'}
|
Add tests for using the new Entity ID tracking in the rule matcherfrom __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
import pytest
@pytest.fixture
def en_vocab():
return spacy.get_lang_class('en').Defaults.create_vocab()
def test_init_matcher(en_vocab):
matcher = Matcher(en_vocab)
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Some', u'words'])) == []
def test_add_empty_entity(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
def test_get_entity_attrs(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
entity = matcher.get_entity('TestEntity')
assert entity == {}
matcher.add_entity('TestEntity2', attrs={'Hello': 'World'})
entity = matcher.get_entity('TestEntity2')
assert entity == {'Hello': 'World'}
assert matcher.get_entity('TestEntity') == {}
def test_get_entity_via_match(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity', attrs={u'Hello': u'World'})
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
matcher.add_pattern(u'TestEntity', [{ORTH: u'Test'}, {ORTH: u'Entity'}])
assert matcher.n_patterns == 1
matches = matcher(Doc(en_vocab, words=[u'Test', u'Entity']))
assert len(matches) == 1
assert len(matches[0]) == 4
ent_id, label, start, end = matches[0]
assert ent_id == matcher.vocab.strings[u'TestEntity']
assert label == 0
assert start == 0
assert end == 2
attrs = matcher.get_entity(ent_id)
assert attrs == {u'Hello': u'World'}
|
<commit_before><commit_msg>Add tests for using the new Entity ID tracking in the rule matcher<commit_after>from __future__ import unicode_literals
import spacy
from spacy.vocab import Vocab
from spacy.matcher import Matcher
from spacy.tokens.doc import Doc
from spacy.attrs import *
import pytest
@pytest.fixture
def en_vocab():
return spacy.get_lang_class('en').Defaults.create_vocab()
def test_init_matcher(en_vocab):
matcher = Matcher(en_vocab)
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Some', u'words'])) == []
def test_add_empty_entity(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
def test_get_entity_attrs(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity')
entity = matcher.get_entity('TestEntity')
assert entity == {}
matcher.add_entity('TestEntity2', attrs={'Hello': 'World'})
entity = matcher.get_entity('TestEntity2')
assert entity == {'Hello': 'World'}
assert matcher.get_entity('TestEntity') == {}
def test_get_entity_via_match(en_vocab):
matcher = Matcher(en_vocab)
matcher.add_entity('TestEntity', attrs={u'Hello': u'World'})
assert matcher.n_patterns == 0
assert matcher(Doc(en_vocab, words=[u'Test', u'Entity'])) == []
matcher.add_pattern(u'TestEntity', [{ORTH: u'Test'}, {ORTH: u'Entity'}])
assert matcher.n_patterns == 1
matches = matcher(Doc(en_vocab, words=[u'Test', u'Entity']))
assert len(matches) == 1
assert len(matches[0]) == 4
ent_id, label, start, end = matches[0]
assert ent_id == matcher.vocab.strings[u'TestEntity']
assert label == 0
assert start == 0
assert end == 2
attrs = matcher.get_entity(ent_id)
assert attrs == {u'Hello': u'World'}
|
|
9a6ca54f7cca0bd5f21f0bc590a034e7e3e05b6e
|
src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py
|
src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
def create_user_profiles_for_existing_users(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserProfile = apps.get_model('user', 'UserProfile')
for user in User.objects.all():
UserProfile.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial')
]
operations = [
migrations.RunPython(create_user_profiles_for_existing_users)
]
|
Add migration to add userprofiles to existing users
|
Add migration to add userprofiles to existing users
All existing users on staging/production will be users of the Pollination Mapper app.
Refs #290
|
Python
|
apache-2.0
|
project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app
|
Add migration to add userprofiles to existing users
All existing users on staging/production will be users of the Pollination Mapper app.
Refs #290
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
def create_user_profiles_for_existing_users(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserProfile = apps.get_model('user', 'UserProfile')
for user in User.objects.all():
UserProfile.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial')
]
operations = [
migrations.RunPython(create_user_profiles_for_existing_users)
]
|
<commit_before><commit_msg>Add migration to add userprofiles to existing users
All existing users on staging/production will be users of the Pollination Mapper app.
Refs #290<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
def create_user_profiles_for_existing_users(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserProfile = apps.get_model('user', 'UserProfile')
for user in User.objects.all():
UserProfile.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial')
]
operations = [
migrations.RunPython(create_user_profiles_for_existing_users)
]
|
Add migration to add userprofiles to existing users
All existing users on staging/production will be users of the Pollination Mapper app.
Refs #290# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
def create_user_profiles_for_existing_users(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserProfile = apps.get_model('user', 'UserProfile')
for user in User.objects.all():
UserProfile.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial')
]
operations = [
migrations.RunPython(create_user_profiles_for_existing_users)
]
|
<commit_before><commit_msg>Add migration to add userprofiles to existing users
All existing users on staging/production will be users of the Pollination Mapper app.
Refs #290<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
from django.conf import settings
def create_user_profiles_for_existing_users(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserProfile = apps.get_model('user', 'UserProfile')
for user in User.objects.all():
UserProfile.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial')
]
operations = [
migrations.RunPython(create_user_profiles_for_existing_users)
]
|
|
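A common refinement for data migrations like the one above is to give RunPython a reverse function so the migration can be unapplied cleanly. A sketch, assuming the same create_user_profiles_for_existing_users function and Django 1.8 or later inside the same Migration class:

    operations = [
        # RunPython.noop lets `migrate user zero` roll back past this data migration.
        migrations.RunPython(create_user_profiles_for_existing_users,
                             migrations.RunPython.noop),
    ]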
1bb1ececfcd548d52a28b713f4ee7eb4e710da85
|
keras_tf_multigpu/examples/fchollet_inception3_multigpu.py
|
keras_tf_multigpu/examples/fchollet_inception3_multigpu.py
|
import tensorflow as tf
from keras.applications import InceptionV3
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
gpu_count = 2
# Instantiate the base model
# (here, we do it on CPU, which is optional).
with tf.device('/cpu:0' if gpu_count > 1 else '/gpu:0'):
model = InceptionV3(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# Replicates the model on N GPUs.
# This assumes that your machine has N available GPUs.
if gpu_count > 1:
parallel_model = multi_gpu_model(model, gpus=gpu_count)
else:
parallel_model = model
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# Generate dummy data.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on N GPUs.
# Since the batch size is N*32, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=32 * gpu_count)
|
Add an example of using fchollet multi_gpu_model on InceptionV3.
|
Add an example of using fchollet multi_gpu_model on InceptionV3.
Adapted from Keras example - parameterized number of GPUs, fixed imports,
different model class, fixed device placement for single GPU.
|
Python
|
mit
|
rossumai/keras-multi-gpu,rossumai/keras-multi-gpu
|
Add an example of using fchollet multi_gpu_model on InceptionV3.
Adapted from Keras example - parameterized number of GPUs, fixed imports,
different model class, fixed device placement for single GPU.
|
import tensorflow as tf
from keras.applications import InceptionV3
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
gpu_count = 2
# Instantiate the base model
# (here, we do it on CPU, which is optional).
with tf.device('/cpu:0' if gpu_count > 1 else '/gpu:0'):
model = InceptionV3(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# Replicates the model on N GPUs.
# This assumes that your machine has N available GPUs.
if gpu_count > 1:
parallel_model = multi_gpu_model(model, gpus=gpu_count)
else:
parallel_model = model
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# Generate dummy data.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on N GPUs.
# Since the batch size is N*32, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=32 * gpu_count)
|
<commit_before><commit_msg>Add an example of using fchollet multi_gpu_model on InceptionV3.
Adapted from Keras example - parameterized number of GPUs, fixed imports,
different model class, fixed device placement for single GPU.<commit_after>
|
import tensorflow as tf
from keras.applications import InceptionV3
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
gpu_count = 2
# Instantiate the base model
# (here, we do it on CPU, which is optional).
with tf.device('/cpu:0' if gpu_count > 1 else '/gpu:0'):
model = InceptionV3(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# Replicates the model on N GPUs.
# This assumes that your machine has N available GPUs.
if gpu_count > 1:
parallel_model = multi_gpu_model(model, gpus=gpu_count)
else:
parallel_model = model
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# Generate dummy data.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on N GPUs.
# Since the batch size is N*32, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=32 * gpu_count)
|
Add an example of using fchollet multi_gpu_model on InceptionV3.
Adapted from Keras example - parameterized number of GPUs, fixed imports,
different model class, fixed device placement for single GPU.import tensorflow as tf
from keras.applications import InceptionV3
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
gpu_count = 2
# Instantiate the base model
# (here, we do it on CPU, which is optional).
with tf.device('/cpu:0' if gpu_count > 1 else '/gpu:0'):
model = InceptionV3(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# Replicates the model on N GPUs.
# This assumes that your machine has N available GPUs.
if gpu_count > 1:
parallel_model = multi_gpu_model(model, gpus=gpu_count)
else:
parallel_model = model
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# Generate dummy data.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on N GPUs.
# Since the batch size is N*32, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=32 * gpu_count)
|
<commit_before><commit_msg>Add an example of using fchollet multi_gpu_model on InceptionV3.
Adapted from Keras example - parameterized number of GPUs, fixed imports,
different model class, fixed device placement for single GPU.<commit_after>import tensorflow as tf
from keras.applications import InceptionV3
from keras.utils import multi_gpu_model
import numpy as np
num_samples = 1000
height = 224
width = 224
num_classes = 1000
gpu_count = 2
# Instantiate the base model
# (here, we do it on CPU, which is optional).
with tf.device('/cpu:0' if gpu_count > 1 else '/gpu:0'):
model = InceptionV3(weights=None,
input_shape=(height, width, 3),
classes=num_classes)
# Replicates the model on N GPUs.
# This assumes that your machine has N available GPUs.
if gpu_count > 1:
parallel_model = multi_gpu_model(model, gpus=gpu_count)
else:
parallel_model = model
parallel_model.compile(loss='categorical_crossentropy',
optimizer='rmsprop')
# Generate dummy data.
x = np.random.random((num_samples, height, width, 3))
y = np.random.random((num_samples, num_classes))
# This `fit` call will be distributed on N GPUs.
# Since the batch size is N*32, each GPU will process 32 samples.
parallel_model.fit(x, y, epochs=20, batch_size=32 * gpu_count)
|
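A practical caveat that pairs with the example above: when a model is wrapped with multi_gpu_model, weight checkpoints are usually saved from the template model rather than the parallel wrapper, so they stay loadable on a single device. The sketch below illustrates that pattern under the same assumptions as the record (a Keras version where keras.utils.multi_gpu_model exists); the filename is arbitrary and nothing here is part of the original commit.
# Minimal sketch: checkpoint via the template model, not the wrapper.
from keras.applications import InceptionV3
from keras.utils import multi_gpu_model

template = InceptionV3(weights=None, input_shape=(224, 224, 3), classes=1000)
parallel = multi_gpu_model(template, gpus=2)
parallel.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# ... train with parallel.fit(...) exactly as in the example above ...
template.save_weights('inception_v3.h5')  # loads cleanly without multi_gpu_model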
|
8c7fc2382db0ec9c901f6c2c2b00971f3ee7c3cc
|
logintokens/tests/test_backends.py
|
logintokens/tests/test_backends.py
|
"""logintokens app unittests for backends
"""
from time import sleep
from django.test import TestCase, Client
from django.contrib.auth import get_user_model, authenticate
from logintokens.tokens import default_token_generator
USER = get_user_model()
class EmailOnlyAuthenticationBackendTest(TestCase):
"""Tests for email only authentication backend
"""
def setUp(self):
self.client = Client()
self.generator = default_token_generator
self.new_username = 'newvisitor'
self.existing_user = USER._default_manager.create_user('existinguser')
def test_different_tokens_usable(self):
"""Two differing tokens should both be usabe to authenticate.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.assertEqual(authenticate(token=token1), self.existing_user)
self.assertEqual(authenticate(token=token2), self.existing_user)
def test_login_invalidates_tokens(self):
"""Tokens generated before a successful login should become invalid.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.client.force_login(self.existing_user)
self.assertIsNone(authenticate(token=token1))
self.assertIsNone(authenticate(token=token2))
def test_new_visitor_creates_user(self):
"""Using a token from a new visitor should create their user object.
"""
token = self.generator.make_token(self.new_username)
user = authenticate(token=token)
self.assertIsInstance(user, USER)
|
Add tests for custom authentication backend
|
Add tests for custom authentication backend
|
Python
|
mit
|
randomic/aniauth-tdd,randomic/aniauth-tdd
|
Add tests for custom authentication backend
|
"""logintokens app unittests for backends
"""
from time import sleep
from django.test import TestCase, Client
from django.contrib.auth import get_user_model, authenticate
from logintokens.tokens import default_token_generator
USER = get_user_model()
class EmailOnlyAuthenticationBackendTest(TestCase):
"""Tests for email only authentication backend
"""
def setUp(self):
self.client = Client()
self.generator = default_token_generator
self.new_username = 'newvisitor'
self.existing_user = USER._default_manager.create_user('existinguser')
def test_different_tokens_usable(self):
"""Two differing tokens should both be usabe to authenticate.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.assertEqual(authenticate(token=token1), self.existing_user)
self.assertEqual(authenticate(token=token2), self.existing_user)
def test_login_invalidates_tokens(self):
"""Tokens generated before a successful login should become invalid.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.client.force_login(self.existing_user)
self.assertIsNone(authenticate(token=token1))
self.assertIsNone(authenticate(token=token2))
def test_new_visitor_creates_user(self):
"""Using a token from a new visitor should create their user object.
"""
token = self.generator.make_token(self.new_username)
user = authenticate(token=token)
self.assertIsInstance(user, USER)
|
<commit_before><commit_msg>Add tests for custom authentication backend<commit_after>
|
"""logintokens app unittests for backends
"""
from time import sleep
from django.test import TestCase, Client
from django.contrib.auth import get_user_model, authenticate
from logintokens.tokens import default_token_generator
USER = get_user_model()
class EmailOnlyAuthenticationBackendTest(TestCase):
"""Tests for email only authentication backend
"""
def setUp(self):
self.client = Client()
self.generator = default_token_generator
self.new_username = 'newvisitor'
self.existing_user = USER._default_manager.create_user('existinguser')
def test_different_tokens_usable(self):
"""Two differing tokens should both be usabe to authenticate.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.assertEqual(authenticate(token=token1), self.existing_user)
self.assertEqual(authenticate(token=token2), self.existing_user)
def test_login_invalidates_tokens(self):
"""Tokens generated before a successful login should become invalid.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.client.force_login(self.existing_user)
self.assertIsNone(authenticate(token=token1))
self.assertIsNone(authenticate(token=token2))
def test_new_visitor_creates_user(self):
"""Using a token from a new visitor should create their user object.
"""
token = self.generator.make_token(self.new_username)
user = authenticate(token=token)
self.assertIsInstance(user, USER)
|
Add tests for custom authentication backend"""logintokens app unittests for backends
"""
from time import sleep
from django.test import TestCase, Client
from django.contrib.auth import get_user_model, authenticate
from logintokens.tokens import default_token_generator
USER = get_user_model()
class EmailOnlyAuthenticationBackendTest(TestCase):
"""Tests for email only authentication backend
"""
def setUp(self):
self.client = Client()
self.generator = default_token_generator
self.new_username = 'newvisitor'
self.existing_user = USER._default_manager.create_user('existinguser')
def test_different_tokens_usable(self):
"""Two differing tokens should both be usabe to authenticate.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.assertEqual(authenticate(token=token1), self.existing_user)
self.assertEqual(authenticate(token=token2), self.existing_user)
def test_login_invalidates_tokens(self):
"""Tokens generated before a successful login should become invalid.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.client.force_login(self.existing_user)
self.assertIsNone(authenticate(token=token1))
self.assertIsNone(authenticate(token=token2))
def test_new_visitor_creates_user(self):
"""Using a token from a new visitor should create their user object.
"""
token = self.generator.make_token(self.new_username)
user = authenticate(token=token)
self.assertIsInstance(user, USER)
|
<commit_before><commit_msg>Add tests for custom authentication backend<commit_after>"""logintokens app unittests for backends
"""
from time import sleep
from django.test import TestCase, Client
from django.contrib.auth import get_user_model, authenticate
from logintokens.tokens import default_token_generator
USER = get_user_model()
class EmailOnlyAuthenticationBackendTest(TestCase):
"""Tests for email only authentication backend
"""
def setUp(self):
self.client = Client()
self.generator = default_token_generator
self.new_username = 'newvisitor'
self.existing_user = USER._default_manager.create_user('existinguser')
def test_different_tokens_usable(self):
"""Two differing tokens should both be usabe to authenticate.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.assertEqual(authenticate(token=token1), self.existing_user)
self.assertEqual(authenticate(token=token2), self.existing_user)
def test_login_invalidates_tokens(self):
"""Tokens generated before a successful login should become invalid.
"""
username = self.existing_user.get_username()
token1 = self.generator.make_token(username)
sleep(1)
token2 = self.generator.make_token(username)
self.assertNotEqual(token1, token2)
self.client.force_login(self.existing_user)
self.assertIsNone(authenticate(token=token1))
self.assertIsNone(authenticate(token=token2))
def test_new_visitor_creates_user(self):
"""Using a token from a new visitor should create their user object.
"""
token = self.generator.make_token(self.new_username)
user = authenticate(token=token)
self.assertIsInstance(user, USER)
|
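The tests above target a token-login backend whose implementation is not included in this record. Purely as an illustration of the behaviour being asserted, a backend of roughly this shape would satisfy the token-acceptance and new-visitor cases; the class name and the consume_token call are assumptions for the sketch, not the project's actual API.
# Hypothetical sketch only -- the real backend lives elsewhere in the app.
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from logintokens.tokens import default_token_generator

class EmailOnlyAuthenticationBackend(ModelBackend):
    def authenticate(self, request=None, token=None, **kwargs):
        username = default_token_generator.consume_token(token)  # assumed method name
        if not username:
            return None
        user_model = get_user_model()
        user, _ = user_model._default_manager.get_or_create(username=username)
        return user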
|
54ca48a2b8cbd53cd6506fdbce47d16f03a28a7d
|
tests/test_sorting_and_searching/test_bubble_sort.py
|
tests/test_sorting_and_searching/test_bubble_sort.py
|
import unittest
from aids.sorting_and_searching.bubble_sort import bubble_sort
class BubbleSortTestCase(unittest.TestCase):
'''
Unit tests for bubble sort
'''
def setUp(self):
self.example_1 = [2, 5, 4, 3, 1]
def test_bubble_sort(self):
bubble_sort(self.example_1)
self.assertEqual(self.example_1,[1,2,3,4,5])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for bubble sort
|
Add unit tests for bubble sort
|
Python
|
mit
|
ueg1990/aids
|
Add unit tests for bubble sort
|
import unittest
from aids.sorting_and_searching.bubble_sort import bubble_sort
class BubbleSortTestCase(unittest.TestCase):
'''
Unit tests for bubble sort
'''
def setUp(self):
self.example_1 = [2, 5, 4, 3, 1]
def test_bubble_sort(self):
bubble_sort(self.example_1)
self.assertEqual(self.example_1,[1,2,3,4,5])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for bubble sort<commit_after>
|
import unittest
from aids.sorting_and_searching.bubble_sort import bubble_sort
class BubbleSortTestCase(unittest.TestCase):
'''
Unit tests for bubble sort
'''
def setUp(self):
self.example_1 = [2, 5, 4, 3, 1]
def test_bubble_sort(self):
bubble_sort(self.example_1)
self.assertEqual(self.example_1,[1,2,3,4,5])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
Add unit tests for bubble sortimport unittest
from aids.sorting_and_searching.bubble_sort import bubble_sort
class BubbleSortTestCase(unittest.TestCase):
'''
Unit tests for bubble sort
'''
def setUp(self):
self.example_1 = [2, 5, 4, 3, 1]
def test_bubble_sort(self):
bubble_sort(self.example_1)
self.assertEqual(self.example_1,[1,2,3,4,5])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit tests for bubble sort<commit_after>import unittest
from aids.sorting_and_searching.bubble_sort import bubble_sort
class BubbleSortTestCase(unittest.TestCase):
'''
Unit tests for bubble sort
'''
def setUp(self):
self.example_1 = [2, 5, 4, 3, 1]
def test_bubble_sort(self):
bubble_sort(self.example_1)
self.assertEqual(self.example_1,[1,2,3,4,5])
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
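For reference, the behaviour the test relies on (bubble_sort mutates its argument in place and returns nothing) can be sketched as below; the project's actual aids.sorting_and_searching.bubble_sort may differ in details such as the early-exit optimisation.
# Illustrative in-place bubble sort consistent with the assertion above.
def bubble_sort(items):
    n = len(items)
    for i in range(n - 1):
        swapped = False
        for j in range(n - 1 - i):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:  # already sorted, stop early
            break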
|
67596d081059a004e5f7ab15f7972773fdf2f15e
|
tests/syft/grid/messages/setup_msg_test.py
|
tests/syft/grid/messages/setup_msg_test.py
|
# syft absolute
import syft as sy
from syft.core.io.address import Address
from syft.grid.messages.setup_messages import CreateInitialSetUpMessage
from syft.grid.messages.setup_messages import CreateInitialSetUpResponse
from syft.grid.messages.setup_messages import GetSetUpMessage
from syft.grid.messages.setup_messages import GetSetUpResponse
def test_create_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = CreateInitialSetUpMessage(
address=target,
content= request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_initial_setup_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Initial setup registered successfully!"}
msg = CreateInitialSetUpResponse(
address=target,
success=True,
content= request_content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {}
msg = GetSetUpMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_worker_response_serde() -> None:
target = Address(name="Alice")
content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = GetSetUpResponse(
success=True,
address=target,
content=content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
|
ADD PyGrid SetupService message tests
|
ADD PyGrid SetupService message tests
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
ADD PyGrid SetupService message tests
|
# syft absolute
import syft as sy
from syft.core.io.address import Address
from syft.grid.messages.setup_messages import CreateInitialSetUpMessage
from syft.grid.messages.setup_messages import CreateInitialSetUpResponse
from syft.grid.messages.setup_messages import GetSetUpMessage
from syft.grid.messages.setup_messages import GetSetUpResponse
def test_create_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = CreateInitialSetUpMessage(
address=target,
content= request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_initial_setup_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Initial setup registered successfully!"}
msg = CreateInitialSetUpResponse(
address=target,
success=True,
content= request_content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {}
msg = GetSetUpMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_worker_response_serde() -> None:
target = Address(name="Alice")
content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = GetSetUpResponse(
success=True,
address=target,
content=content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
|
<commit_before><commit_msg>ADD PyGrid SetupService message tests<commit_after>
|
# syft absolute
import syft as sy
from syft.core.io.address import Address
from syft.grid.messages.setup_messages import CreateInitialSetUpMessage
from syft.grid.messages.setup_messages import CreateInitialSetUpResponse
from syft.grid.messages.setup_messages import GetSetUpMessage
from syft.grid.messages.setup_messages import GetSetUpResponse
def test_create_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = CreateInitialSetUpMessage(
address=target,
content= request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_initial_setup_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Initial setup registered successfully!"}
msg = CreateInitialSetUpResponse(
address=target,
success=True,
content= request_content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {}
msg = GetSetUpMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_worker_response_serde() -> None:
target = Address(name="Alice")
content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = GetSetUpResponse(
success=True,
address=target,
content=content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
|
ADD PyGrid SetupService message tests# syft absolute
import syft as sy
from syft.core.io.address import Address
from syft.grid.messages.setup_messages import CreateInitialSetUpMessage
from syft.grid.messages.setup_messages import CreateInitialSetUpResponse
from syft.grid.messages.setup_messages import GetSetUpMessage
from syft.grid.messages.setup_messages import GetSetUpResponse
def test_create_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = CreateInitialSetUpMessage(
address=target,
content= request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_initial_setup_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Initial setup registered successfully!"}
msg = CreateInitialSetUpResponse(
address=target,
success=True,
content= request_content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {}
msg = GetSetUpMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_worker_response_serde() -> None:
target = Address(name="Alice")
content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = GetSetUpResponse(
success=True,
address=target,
content=content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
|
<commit_before><commit_msg>ADD PyGrid SetupService message tests<commit_after># syft absolute
import syft as sy
from syft.core.io.address import Address
from syft.grid.messages.setup_messages import CreateInitialSetUpMessage
from syft.grid.messages.setup_messages import CreateInitialSetUpResponse
from syft.grid.messages.setup_messages import GetSetUpMessage
from syft.grid.messages.setup_messages import GetSetUpResponse
def test_create_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = CreateInitialSetUpMessage(
address=target,
content= request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_create_initial_setup_response_serde() -> None:
target = Address(name="Alice")
request_content = {"msg": "Initial setup registered successfully!"}
msg = CreateInitialSetUpResponse(
address=target,
success=True,
content= request_content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_get_initial_setup_message_serde() -> None:
bob_vm = sy.VirtualMachine(name="Bob")
target = Address(name="Alice")
request_content = {}
msg = GetSetUpMessage(
address=target,
content=request_content,
reply_to=bob_vm.address,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
def test_delete_worker_response_serde() -> None:
target = Address(name="Alice")
content = {
"settings": {
"cloud-admin-token" : "d84we35ad3a1d59a84sd9",
"cloud-credentials": "<cloud-credentials.pem>",
"infra": {
"autoscaling": True,
"triggers": {
"memory": "50",
"vCPU": "80"
}
},
}
}
msg = GetSetUpResponse(
success=True,
address=target,
content=content,
)
blob = msg.serialize()
msg2 = sy.deserialize(blob=blob)
assert msg.id == msg2.id
assert msg.address == target
assert msg.content == msg2.content
assert msg == msg2
|
|
478072e8350d03655364ea9147bbe21bafabbcce
|
geotrek/feedback/tests/test_template_tags.py
|
geotrek/feedback/tests/test_template_tags.py
|
from datetime import datetime
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory, UserProfileFactory
from geotrek.feedback.templatetags.feedback_tags import (
predefined_emails, resolved_intervention_info, status_ids_and_colors)
from geotrek.feedback.tests.factories import (PredefinedEmailFactory,
ReportStatusFactory)
from geotrek.maintenance.tests.factories import ReportInterventionFactory
class TestFeedbackTemplateTags(TestCase):
@classmethod
def setUpTestData(cls):
cls.user1 = UserFactory(username="CCCC")
UserProfileFactory.create(user=cls.user1, extended_username="Communauté des Communes des Communautés Communataires")
cls.user2 = UserFactory(username="Kurt")
UserProfileFactory.create(user=cls.user2)
solved_status = ReportStatusFactory(identifier='solved_intervention', color="#448654")
cls.intervention_solved_1 = ReportInterventionFactory(date=datetime(year=1997, month=4, day=4).date())
cls.report_1 = cls.intervention_solved_1.target
cls.report_1.status = solved_status
cls.report_1.assigned_user = cls.user1
cls.report_1.save()
cls.intervention_solved_2 = ReportInterventionFactory(date=datetime(year=1997, month=5, day=4).date())
cls.report_2 = cls.intervention_solved_2.target
cls.report_2.status = solved_status
cls.report_2.assigned_user = cls.user2
cls.report_2.save()
cls.email1 = PredefinedEmailFactory()
cls.email2 = PredefinedEmailFactory()
def test_resolved_intervention_username(self):
self.assertEqual(
"{\"date\": \"04/04/1997\", \"username\": \"Communaut\\u00e9 des Communes des Communaut\\u00e9s Communataires\"}",
resolved_intervention_info(self.report_1)
)
self.assertEqual(
"{\"date\": \"04/05/1997\", \"username\": \"Kurt\"}",
resolved_intervention_info(self.report_2)
)
def test_status_ids_and_colors(self):
self.assertEqual(
"{\"1\": {\"id\": \"solved_intervention\", \"color\": \"#448654\"}, \"2\": {\"id\": \"ID 1\", \"color\": \"#444444\"}, \"3\": {\"id\": \"ID 2\", \"color\": \"#444444\"}}",
status_ids_and_colors()
)
def test_predefined_emails(self):
self.assertEqual(
"{\"1\": {\"label\": \"Predefined Email 0\", \"text\": \"Some email body content 0\"}, \"2\": {\"label\": \"Predefined Email 1\", \"text\": \"Some email body content 1\"}}",
predefined_emails()
)
|
Add tests for template tags
|
Add tests for template tags
|
Python
|
bsd-2-clause
|
makinacorpus/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek
|
Add tests for template tags
|
from datetime import datetime
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory, UserProfileFactory
from geotrek.feedback.templatetags.feedback_tags import (
predefined_emails, resolved_intervention_info, status_ids_and_colors)
from geotrek.feedback.tests.factories import (PredefinedEmailFactory,
ReportStatusFactory)
from geotrek.maintenance.tests.factories import ReportInterventionFactory
class TestFeedbackTemplateTags(TestCase):
@classmethod
def setUpTestData(cls):
cls.user1 = UserFactory(username="CCCC")
UserProfileFactory.create(user=cls.user1, extended_username="Communauté des Communes des Communautés Communataires")
cls.user2 = UserFactory(username="Kurt")
UserProfileFactory.create(user=cls.user2)
solved_status = ReportStatusFactory(identifier='solved_intervention', color="#448654")
cls.intervention_solved_1 = ReportInterventionFactory(date=datetime(year=1997, month=4, day=4).date())
cls.report_1 = cls.intervention_solved_1.target
cls.report_1.status = solved_status
cls.report_1.assigned_user = cls.user1
cls.report_1.save()
cls.intervention_solved_2 = ReportInterventionFactory(date=datetime(year=1997, month=5, day=4).date())
cls.report_2 = cls.intervention_solved_2.target
cls.report_2.status = solved_status
cls.report_2.assigned_user = cls.user2
cls.report_2.save()
cls.email1 = PredefinedEmailFactory()
cls.email2 = PredefinedEmailFactory()
def test_resolved_intervention_username(self):
self.assertEqual(
"{\"date\": \"04/04/1997\", \"username\": \"Communaut\\u00e9 des Communes des Communaut\\u00e9s Communataires\"}",
resolved_intervention_info(self.report_1)
)
self.assertEqual(
"{\"date\": \"04/05/1997\", \"username\": \"Kurt\"}",
resolved_intervention_info(self.report_2)
)
def test_status_ids_and_colors(self):
self.assertEqual(
"{\"1\": {\"id\": \"solved_intervention\", \"color\": \"#448654\"}, \"2\": {\"id\": \"ID 1\", \"color\": \"#444444\"}, \"3\": {\"id\": \"ID 2\", \"color\": \"#444444\"}}",
status_ids_and_colors()
)
def test_predefined_emails(self):
self.assertEqual(
"{\"1\": {\"label\": \"Predefined Email 0\", \"text\": \"Some email body content 0\"}, \"2\": {\"label\": \"Predefined Email 1\", \"text\": \"Some email body content 1\"}}",
predefined_emails()
)
|
<commit_before><commit_msg>Add tests for template tags<commit_after>
|
from datetime import datetime
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory, UserProfileFactory
from geotrek.feedback.templatetags.feedback_tags import (
predefined_emails, resolved_intervention_info, status_ids_and_colors)
from geotrek.feedback.tests.factories import (PredefinedEmailFactory,
ReportStatusFactory)
from geotrek.maintenance.tests.factories import ReportInterventionFactory
class TestFeedbackTemplateTags(TestCase):
@classmethod
def setUpTestData(cls):
cls.user1 = UserFactory(username="CCCC")
UserProfileFactory.create(user=cls.user1, extended_username="Communauté des Communes des Communautés Communataires")
cls.user2 = UserFactory(username="Kurt")
UserProfileFactory.create(user=cls.user2)
solved_status = ReportStatusFactory(identifier='solved_intervention', color="#448654")
cls.intervention_solved_1 = ReportInterventionFactory(date=datetime(year=1997, month=4, day=4).date())
cls.report_1 = cls.intervention_solved_1.target
cls.report_1.status = solved_status
cls.report_1.assigned_user = cls.user1
cls.report_1.save()
cls.intervention_solved_2 = ReportInterventionFactory(date=datetime(year=1997, month=5, day=4).date())
cls.report_2 = cls.intervention_solved_2.target
cls.report_2.status = solved_status
cls.report_2.assigned_user = cls.user2
cls.report_2.save()
cls.email1 = PredefinedEmailFactory()
cls.email2 = PredefinedEmailFactory()
def test_resolved_intervention_username(self):
self.assertEqual(
"{\"date\": \"04/04/1997\", \"username\": \"Communaut\\u00e9 des Communes des Communaut\\u00e9s Communataires\"}",
resolved_intervention_info(self.report_1)
)
self.assertEqual(
"{\"date\": \"04/05/1997\", \"username\": \"Kurt\"}",
resolved_intervention_info(self.report_2)
)
def test_status_ids_and_colors(self):
self.assertEqual(
"{\"1\": {\"id\": \"solved_intervention\", \"color\": \"#448654\"}, \"2\": {\"id\": \"ID 1\", \"color\": \"#444444\"}, \"3\": {\"id\": \"ID 2\", \"color\": \"#444444\"}}",
status_ids_and_colors()
)
def test_predefined_emails(self):
self.assertEqual(
"{\"1\": {\"label\": \"Predefined Email 0\", \"text\": \"Some email body content 0\"}, \"2\": {\"label\": \"Predefined Email 1\", \"text\": \"Some email body content 1\"}}",
predefined_emails()
)
|
Add tests for template tagsfrom datetime import datetime
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory, UserProfileFactory
from geotrek.feedback.templatetags.feedback_tags import (
predefined_emails, resolved_intervention_info, status_ids_and_colors)
from geotrek.feedback.tests.factories import (PredefinedEmailFactory,
ReportStatusFactory)
from geotrek.maintenance.tests.factories import ReportInterventionFactory
class TestFeedbackTemplateTags(TestCase):
@classmethod
def setUpTestData(cls):
cls.user1 = UserFactory(username="CCCC")
UserProfileFactory.create(user=cls.user1, extended_username="Communauté des Communes des Communautés Communataires")
cls.user2 = UserFactory(username="Kurt")
UserProfileFactory.create(user=cls.user2)
solved_status = ReportStatusFactory(identifier='solved_intervention', color="#448654")
cls.intervention_solved_1 = ReportInterventionFactory(date=datetime(year=1997, month=4, day=4).date())
cls.report_1 = cls.intervention_solved_1.target
cls.report_1.status = solved_status
cls.report_1.assigned_user = cls.user1
cls.report_1.save()
cls.intervention_solved_2 = ReportInterventionFactory(date=datetime(year=1997, month=5, day=4).date())
cls.report_2 = cls.intervention_solved_2.target
cls.report_2.status = solved_status
cls.report_2.assigned_user = cls.user2
cls.report_2.save()
cls.email1 = PredefinedEmailFactory()
cls.email2 = PredefinedEmailFactory()
def test_resolved_intervention_username(self):
self.assertEqual(
"{\"date\": \"04/04/1997\", \"username\": \"Communaut\\u00e9 des Communes des Communaut\\u00e9s Communataires\"}",
resolved_intervention_info(self.report_1)
)
self.assertEqual(
"{\"date\": \"04/05/1997\", \"username\": \"Kurt\"}",
resolved_intervention_info(self.report_2)
)
def test_status_ids_and_colors(self):
self.assertEqual(
"{\"1\": {\"id\": \"solved_intervention\", \"color\": \"#448654\"}, \"2\": {\"id\": \"ID 1\", \"color\": \"#444444\"}, \"3\": {\"id\": \"ID 2\", \"color\": \"#444444\"}}",
status_ids_and_colors()
)
def test_predefined_emails(self):
self.assertEqual(
"{\"1\": {\"label\": \"Predefined Email 0\", \"text\": \"Some email body content 0\"}, \"2\": {\"label\": \"Predefined Email 1\", \"text\": \"Some email body content 1\"}}",
predefined_emails()
)
|
<commit_before><commit_msg>Add tests for template tags<commit_after>from datetime import datetime
from django.test import TestCase
from geotrek.authent.tests.factories import UserFactory, UserProfileFactory
from geotrek.feedback.templatetags.feedback_tags import (
predefined_emails, resolved_intervention_info, status_ids_and_colors)
from geotrek.feedback.tests.factories import (PredefinedEmailFactory,
ReportStatusFactory)
from geotrek.maintenance.tests.factories import ReportInterventionFactory
class TestFeedbackTemplateTags(TestCase):
@classmethod
def setUpTestData(cls):
cls.user1 = UserFactory(username="CCCC")
UserProfileFactory.create(user=cls.user1, extended_username="Communauté des Communes des Communautés Communataires")
cls.user2 = UserFactory(username="Kurt")
UserProfileFactory.create(user=cls.user2)
solved_status = ReportStatusFactory(identifier='solved_intervention', color="#448654")
cls.intervention_solved_1 = ReportInterventionFactory(date=datetime(year=1997, month=4, day=4).date())
cls.report_1 = cls.intervention_solved_1.target
cls.report_1.status = solved_status
cls.report_1.assigned_user = cls.user1
cls.report_1.save()
cls.intervention_solved_2 = ReportInterventionFactory(date=datetime(year=1997, month=5, day=4).date())
cls.report_2 = cls.intervention_solved_2.target
cls.report_2.status = solved_status
cls.report_2.assigned_user = cls.user2
cls.report_2.save()
cls.email1 = PredefinedEmailFactory()
cls.email2 = PredefinedEmailFactory()
def test_resolved_intervention_username(self):
self.assertEqual(
"{\"date\": \"04/04/1997\", \"username\": \"Communaut\\u00e9 des Communes des Communaut\\u00e9s Communataires\"}",
resolved_intervention_info(self.report_1)
)
self.assertEqual(
"{\"date\": \"04/05/1997\", \"username\": \"Kurt\"}",
resolved_intervention_info(self.report_2)
)
def test_status_ids_and_colors(self):
self.assertEqual(
"{\"1\": {\"id\": \"solved_intervention\", \"color\": \"#448654\"}, \"2\": {\"id\": \"ID 1\", \"color\": \"#444444\"}, \"3\": {\"id\": \"ID 2\", \"color\": \"#444444\"}}",
status_ids_and_colors()
)
def test_predefined_emails(self):
self.assertEqual(
"{\"1\": {\"label\": \"Predefined Email 0\", \"text\": \"Some email body content 0\"}, \"2\": {\"label\": \"Predefined Email 1\", \"text\": \"Some email body content 1\"}}",
predefined_emails()
)
|
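The assertions above pin down the JSON emitted by the feedback template tags: dates rendered as %d/%m/%Y and the profile's extended_username preferred over the plain username. As a rough illustration of that shape only, a tag could look like the sketch below; the accessors on the report object are assumptions, and the real implementation lives in geotrek.feedback.templatetags.feedback_tags.
# Hypothetical sketch inferred from the expected output, not the real tag.
import json
from django import template

register = template.Library()

@register.simple_tag
def resolved_intervention_info(report):
    intervention = report.interventions.first()   # assumed accessor
    profile = report.assigned_user.profile        # assumed accessor
    return json.dumps({
        "date": intervention.date.strftime("%d/%m/%Y"),
        "username": profile.extended_username or report.assigned_user.username,
    })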
|
8355cb358d14589a194926d37beeb5af7af2a591
|
falmer/events/migrations/0012_auto_20170905_1208.py
|
falmer/events/migrations/0012_auto_20170905_1208.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-05 11:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0011_auto_20170905_1028'),
]
operations = [
migrations.AlterField(
model_name='mslevent',
name='image_url',
field=models.URLField(max_length=2000),
),
migrations.AlterField(
model_name='mslevent',
name='url',
field=models.URLField(max_length=2000),
),
]
|
Increase event image url limit from 200
|
Increase event image url limit from 200
|
Python
|
mit
|
sussexstudent/falmer,sussexstudent/falmer,sussexstudent/falmer,sussexstudent/falmer
|
Increase event image url limit from 200
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-05 11:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0011_auto_20170905_1028'),
]
operations = [
migrations.AlterField(
model_name='mslevent',
name='image_url',
field=models.URLField(max_length=2000),
),
migrations.AlterField(
model_name='mslevent',
name='url',
field=models.URLField(max_length=2000),
),
]
|
<commit_before><commit_msg>Increase event image url limit from 200<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-05 11:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0011_auto_20170905_1028'),
]
operations = [
migrations.AlterField(
model_name='mslevent',
name='image_url',
field=models.URLField(max_length=2000),
),
migrations.AlterField(
model_name='mslevent',
name='url',
field=models.URLField(max_length=2000),
),
]
|
Increase event image url limit from 200# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-05 11:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0011_auto_20170905_1028'),
]
operations = [
migrations.AlterField(
model_name='mslevent',
name='image_url',
field=models.URLField(max_length=2000),
),
migrations.AlterField(
model_name='mslevent',
name='url',
field=models.URLField(max_length=2000),
),
]
|
<commit_before><commit_msg>Increase event image url limit from 200<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-05 11:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0011_auto_20170905_1028'),
]
operations = [
migrations.AlterField(
model_name='mslevent',
name='image_url',
field=models.URLField(max_length=2000),
),
migrations.AlterField(
model_name='mslevent',
name='url',
field=models.URLField(max_length=2000),
),
]
|
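For context, the migration above corresponds to widening two URL fields on the MSLEvent model past Django's 200-character URLField default. A model state of roughly this shape would generate it; the field names come from the migration itself, everything else is assumed.
# Sketch of the post-migration model state (not the actual falmer source).
from django.db import models

class MSLEvent(models.Model):
    url = models.URLField(max_length=2000)        # was the 200-char default
    image_url = models.URLField(max_length=2000)  # was the 200-char default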
|
a16b4401f37f08d8cb5e1f9ec1b7d4a3221360ab
|
test/test_regular_extrusion.py
|
test/test_regular_extrusion.py
|
# -*- coding: utf-8 -*-
"""Creates regular cube mesh by extrusion.
"""
import pygmsh
from helpers import compute_volume
def test():
x = 5
y = 4
z = 3
x_layers = 10
y_layers = 5
z_layers = 3
geom = pygmsh.built_in.Geometry()
p = geom.add_point([0, 0, 0], 1)
_, l, _ = geom.extrude(p, [x, 0, 0], num_layers=x_layers)
_, s, _ = geom.extrude(l, [0, y, 0], num_layers=y_layers)
geom.extrude(s, [0, 0, z], num_layers=z_layers)
points, cells, _, _, _ = pygmsh.generate_mesh(geom)
ref_vol = x * y * z
assert abs(compute_volume(points, cells) - ref_vol) < 1.0e-2 * ref_vol
# Each grid-cell from layered extrusion will result in 6 tetrahedrons.
ref_tetras = 6 * x_layers * y_layers * z_layers
assert len(cells["tetra"]) == ref_tetras
return points, cells
if __name__ == "__main__":
import meshio
meshio.write_points_cells("cube.vtu", *test())
|
Add test case for regular extrusion
|
Add test case for regular extrusion
|
Python
|
bsd-3-clause
|
nschloe/python4gmsh
|
Add test case for regular extrusion
|
# -*- coding: utf-8 -*-
"""Creates regular cube mesh by extrusion.
"""
import pygmsh
from helpers import compute_volume
def test():
x = 5
y = 4
z = 3
x_layers = 10
y_layers = 5
z_layers = 3
geom = pygmsh.built_in.Geometry()
p = geom.add_point([0, 0, 0], 1)
_, l, _ = geom.extrude(p, [x, 0, 0], num_layers=x_layers)
_, s, _ = geom.extrude(l, [0, y, 0], num_layers=y_layers)
geom.extrude(s, [0, 0, z], num_layers=z_layers)
points, cells, _, _, _ = pygmsh.generate_mesh(geom)
ref_vol = x * y * z
assert abs(compute_volume(points, cells) - ref_vol) < 1.0e-2 * ref_vol
# Each grid-cell from layered extrusion will result in 6 tetrahedrons.
ref_tetras = 6 * x_layers * y_layers * z_layers
assert len(cells["tetra"]) == ref_tetras
return points, cells
if __name__ == "__main__":
import meshio
meshio.write_points_cells("cube.vtu", *test())
|
<commit_before><commit_msg>Add test case for regular extrusion<commit_after>
|
# -*- coding: utf-8 -*-
"""Creates regular cube mesh by extrusion.
"""
import pygmsh
from helpers import compute_volume
def test():
x = 5
y = 4
z = 3
x_layers = 10
y_layers = 5
z_layers = 3
geom = pygmsh.built_in.Geometry()
p = geom.add_point([0, 0, 0], 1)
_, l, _ = geom.extrude(p, [x, 0, 0], num_layers=x_layers)
_, s, _ = geom.extrude(l, [0, y, 0], num_layers=y_layers)
geom.extrude(s, [0, 0, z], num_layers=z_layers)
points, cells, _, _, _ = pygmsh.generate_mesh(geom)
ref_vol = x * y * z
assert abs(compute_volume(points, cells) - ref_vol) < 1.0e-2 * ref_vol
# Each grid-cell from layered extrusion will result in 6 tetrahedrons.
ref_tetras = 6 * x_layers * y_layers * z_layers
assert len(cells["tetra"]) == ref_tetras
return points, cells
if __name__ == "__main__":
import meshio
meshio.write_points_cells("cube.vtu", *test())
|
Add test case for regular extrusion# -*- coding: utf-8 -*-
"""Creates regular cube mesh by extrusion.
"""
import pygmsh
from helpers import compute_volume
def test():
x = 5
y = 4
z = 3
x_layers = 10
y_layers = 5
z_layers = 3
geom = pygmsh.built_in.Geometry()
p = geom.add_point([0, 0, 0], 1)
_, l, _ = geom.extrude(p, [x, 0, 0], num_layers=x_layers)
_, s, _ = geom.extrude(l, [0, y, 0], num_layers=y_layers)
geom.extrude(s, [0, 0, z], num_layers=z_layers)
points, cells, _, _, _ = pygmsh.generate_mesh(geom)
ref_vol = x * y * z
assert abs(compute_volume(points, cells) - ref_vol) < 1.0e-2 * ref_vol
# Each grid-cell from layered extrusion will result in 6 tetrahedrons.
ref_tetras = 6 * x_layers * y_layers * z_layers
assert len(cells["tetra"]) == ref_tetras
return points, cells
if __name__ == "__main__":
import meshio
meshio.write_points_cells("cube.vtu", *test())
|
<commit_before><commit_msg>Add test case for regular extrusion<commit_after># -*- coding: utf-8 -*-
"""Creates regular cube mesh by extrusion.
"""
import pygmsh
from helpers import compute_volume
def test():
x = 5
y = 4
z = 3
x_layers = 10
y_layers = 5
z_layers = 3
geom = pygmsh.built_in.Geometry()
p = geom.add_point([0, 0, 0], 1)
_, l, _ = geom.extrude(p, [x, 0, 0], num_layers=x_layers)
_, s, _ = geom.extrude(l, [0, y, 0], num_layers=y_layers)
geom.extrude(s, [0, 0, z], num_layers=z_layers)
points, cells, _, _, _ = pygmsh.generate_mesh(geom)
ref_vol = x * y * z
assert abs(compute_volume(points, cells) - ref_vol) < 1.0e-2 * ref_vol
# Each grid-cell from layered extrusion will result in 6 tetrahedrons.
ref_tetras = 6 * x_layers * y_layers * z_layers
assert len(cells["tetra"]) == ref_tetras
return points, cells
if __name__ == "__main__":
import meshio
meshio.write_points_cells("cube.vtu", *test())
|
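The test above leans on a compute_volume helper that is not part of this record. For a tetrahedra-only mesh like the one generated here, the check amounts to summing unsigned tetrahedron volumes; a stand-in along these lines would behave equivalently, though the suite's real helper may support more cell types.
# Illustrative stand-in for helpers.compute_volume, tetrahedra only.
import numpy as np

def compute_volume(points, cells):
    total = 0.0
    for tet in cells["tetra"]:
        a, b, c, d = (np.asarray(points[i], dtype=float) for i in tet)
        total += abs(np.dot(np.cross(b - a, c - a), d - a)) / 6.0
    return total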
|
7bd6f3e7751deecfc3cd555fc071d722c856802c
|
chips/compiler/builtins.py
|
chips/compiler/builtins.py
|
#!/usr/bin/env python
"""Support Library for builtin Functionality"""
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2013, Jonathan P Dawson"
__version__ = "0.1"
builtins="""
unsigned unsigned_divide_xxxx(unsigned dividend, unsigned divisor){
unsigned denom = divisor;
unsigned bit = 1;
unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
int divide_xxxx(int dividend, int divisor){
unsigned udividend, udivisor, uquotient;
unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x8000u;
divisor_sign = divisor & 0x8000u;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
long unsigned long_unsigned_divide_xxxx(long unsigned dividend, long unsigned divisor){
long unsigned denom = divisor;
long unsigned bit = 1;
long unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
long int long_divide_xxxx(long int dividend, long int divisor){
long unsigned udividend, udivisor, uquotient;
long unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x80000000ul;
divisor_sign = divisor & 0x80000000ul;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = long_unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
"""
|
Implement division using built in library function
|
Implement division using built in library function
|
Python
|
mit
|
dawsonjon/Chips-2.0,dawsonjon/Chips-2.0,dawsonjon/Chips-2.0,dawsonjon/Chips-2.0,dawsonjon/Chips-2.0
|
Implement division using built in library function
|
#!/usr/bin/env python
"""Support Library for builtin Functionality"""
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2013, Jonathan P Dawson"
__version__ = "0.1"
builtins="""
unsigned unsigned_divide_xxxx(unsigned dividend, unsigned divisor){
unsigned denom = divisor;
unsigned bit = 1;
unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
int divide_xxxx(int dividend, int divisor){
unsigned udividend, udivisor, uquotient;
unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x8000u;
divisor_sign = divisor & 0x8000u;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
long unsigned long_unsigned_divide_xxxx(long unsigned dividend, long unsigned divisor){
long unsigned denom = divisor;
long unsigned bit = 1;
long unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
long int long_divide_xxxx(long int dividend, long int divisor){
long unsigned udividend, udivisor, uquotient;
long unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x80000000ul;
divisor_sign = divisor & 0x80000000ul;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = long_unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
"""
|
<commit_before><commit_msg>Implement division using built in library function<commit_after>
|
#!/usr/bin/env python
"""Support Library for builtin Functionality"""
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2013, Jonathan P Dawson"
__version__ = "0.1"
builtins="""
unsigned unsigned_divide_xxxx(unsigned dividend, unsigned divisor){
unsigned denom = divisor;
unsigned bit = 1;
unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
int divide_xxxx(int dividend, int divisor){
unsigned udividend, udivisor, uquotient;
unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x8000u;
divisor_sign = divisor & 0x8000u;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
long unsigned long_unsigned_divide_xxxx(long unsigned dividend, long unsigned divisor){
long unsigned denom = divisor;
long unsigned bit = 1;
long unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
long int long_divide_xxxx(long int dividend, long int divisor){
long unsigned udividend, udivisor, uquotient;
long unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x80000000ul;
divisor_sign = divisor & 0x80000000ul;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = long_unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
"""
|
Implement division using built in library function#!/usr/bin/env python
"""Support Library for builtin Functionality"""
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2013, Jonathan P Dawson"
__version__ = "0.1"
builtins="""
unsigned unsigned_divide_xxxx(unsigned dividend, unsigned divisor){
unsigned denom = divisor;
unsigned bit = 1;
unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
int divide_xxxx(int dividend, int divisor){
unsigned udividend, udivisor, uquotient;
unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x8000u;
divisor_sign = divisor & 0x8000u;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
long unsigned long_unsigned_divide_xxxx(long unsigned dividend, long unsigned divisor){
long unsigned denom = divisor;
long unsigned bit = 1;
long unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
long int long_divide_xxxx(long int dividend, long int divisor){
long unsigned udividend, udivisor, uquotient;
long unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x80000000ul;
divisor_sign = divisor & 0x80000000ul;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = long_unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
"""
|
<commit_before><commit_msg>Implement division using built in library function<commit_after>#!/usr/bin/env python
"""Support Library for builtin Functionality"""
__author__ = "Jon Dawson"
__copyright__ = "Copyright (C) 2013, Jonathan P Dawson"
__version__ = "0.1"
builtins="""
unsigned unsigned_divide_xxxx(unsigned dividend, unsigned divisor){
unsigned denom = divisor;
unsigned bit = 1;
unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
int divide_xxxx(int dividend, int divisor){
unsigned udividend, udivisor, uquotient;
unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x8000u;
divisor_sign = divisor & 0x8000u;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
long unsigned long_unsigned_divide_xxxx(long unsigned dividend, long unsigned divisor){
long unsigned denom = divisor;
long unsigned bit = 1;
long unsigned quotient = 0;
if( denom > dividend ) return 0;
if( denom == dividend ) return 1;
while(denom <= dividend){
denom <<= 1;
bit <<= 1;
}
denom >>= 1;
bit >>= 1;
while(bit){
if(dividend >= denom){
dividend -= denom;
quotient |= bit;
}
bit >>= 1;
denom >>= 1;
}
return quotient;
}
long int long_divide_xxxx(long int dividend, long int divisor){
long unsigned udividend, udivisor, uquotient;
long unsigned dividend_sign, divisor_sign, quotient_sign;
dividend_sign = dividend & 0x80000000ul;
divisor_sign = divisor & 0x80000000ul;
quotient_sign = dividend_sign ^ divisor_sign;
udividend = dividend_sign ? -dividend : dividend;
udivisor = divisor_sign ? -divisor : divisor;
uquotient = long_unsigned_divide_xxxx(udividend, udivisor);
return quotient_sign ? -uquotient : uquotient;
}
"""
|
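The C routines above implement classic restoring (shift-and-subtract) division: the divisor is shifted left until it exceeds the dividend, then subtracted back out one bit position at a time while the quotient bits are set. A direct Python rendering, handy for spot-checking the logic against // on arbitrary inputs, might look like this; it is an illustration only and ignores the fixed 16/32-bit widths of the target.
# Python mirror of unsigned_divide_xxxx for sanity-checking the algorithm.
def unsigned_divide(dividend, divisor):
    denom, bit, quotient = divisor, 1, 0
    if denom > dividend:
        return 0
    if denom == dividend:
        return 1
    while denom <= dividend:      # scale the divisor up to the dividend's magnitude
        denom <<= 1
        bit <<= 1
    denom >>= 1
    bit >>= 1
    while bit:                    # subtract back out, most significant bit first
        if dividend >= denom:
            dividend -= denom
            quotient |= bit
        bit >>= 1
        denom >>= 1
    return quotient

assert all(unsigned_divide(a, b) == a // b for a in range(1, 200) for b in range(1, 50))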
|
9316bc07c77e2f51332a40bf430cef117f4d89e1
|
util/check_dockerfile_coverage.py
|
util/check_dockerfile_coverage.py
|
import yaml
import os
import pathlib2
import itertools
import argparse
import logging
import sys
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def check_coverage(containers):
# open config file containing container weights
config_file_path = pathlib2.Path(CONFIG_FILE_PATH)
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.load(file)
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
# get container weights
weights = config.get("weights")
# convert all containers in config file to a list of tuples (<container>, <weight>)
weights_list = [x.items() for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted containers and input containers
used_containers = [x for x in weights_list if x[0] in containers]
# determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile
# available to be built is non-empty
uncovered = set(containers) - set([x[0] for x in used_containers])
# exit with error code if uncovered Dockerfiles exist
if uncovered:
LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.md"))
sys.exit(1)
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, '
'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights '
'in parsefiles_config.yml.')
parser.add_argument('containers', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked")
return parser.parse_args()
if __name__ == '__main__':
args = arg_parse()
# configure logging
logging.basicConfig()
containers = []
for word in args.containers.split():
containers.append(word)
check_coverage(containers)
|
Add script to check for Dockerfile coverage
|
Add script to check for Dockerfile coverage
|
Python
|
agpl-3.0
|
rue89-tech/configuration,stvstnfrd/configuration,michaelsteiner19/open-edx-configuration,armaan/edx-configuration,hastexo/edx-configuration,armaan/edx-configuration,stvstnfrd/configuration,gsehub/configuration,stvstnfrd/configuration,hks-epod/configuration,open-craft/configuration,edx/configuration,open-craft/configuration,armaan/edx-configuration,proversity-org/configuration,stvstnfrd/configuration,hks-epod/configuration,michaelsteiner19/open-edx-configuration,edx/configuration,gsehub/configuration,edx/configuration,Stanford-Online/configuration,hastexo/edx-configuration,gsehub/configuration,hastexo/edx-configuration,open-craft/configuration,appsembler/configuration,rue89-tech/configuration,arbrandes/edx-configuration,michaelsteiner19/open-edx-configuration,open-craft/configuration,EDUlib/configuration,nunpa/configuration,nunpa/configuration,arbrandes/edx-configuration,mitodl/configuration,mitodl/configuration,Stanford-Online/configuration,rue89-tech/configuration,hks-epod/configuration,Stanford-Online/configuration,jorgeomarmh/configuration,appsembler/configuration,jorgeomarmh/configuration,EDUlib/configuration,armaan/edx-configuration,EDUlib/configuration,nunpa/configuration,stvstnfrd/configuration,hastexo/edx-configuration,gsehub/configuration,proversity-org/configuration,michaelsteiner19/open-edx-configuration,proversity-org/configuration,arbrandes/edx-configuration,arbrandes/edx-configuration,Stanford-Online/configuration,edx/configuration,hks-epod/configuration,jorgeomarmh/configuration,proversity-org/configuration,appsembler/configuration,appsembler/configuration,nunpa/configuration,mitodl/configuration,EDUlib/configuration,mitodl/configuration,jorgeomarmh/configuration,rue89-tech/configuration,rue89-tech/configuration,EDUlib/configuration,hks-epod/configuration,Stanford-Online/configuration,proversity-org/configuration,gsehub/configuration,hastexo/edx-configuration,arbrandes/edx-configuration
|
Add script to check for Dockerfile coverage
|
import yaml
import os
import pathlib2
import itertools
import argparse
import logging
import sys
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def check_coverage(containers):
# open config file containing container weights
config_file_path = pathlib2.Path(CONFIG_FILE_PATH)
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.load(file)
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
# get container weights
weights = config.get("weights")
# convert all containers in config file to a list of tuples (<container>, <weight>)
weights_list = [x.items() for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted containers and input containers
used_containers = [x for x in weights_list if x[0] in containers]
# determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile
# available to be built is non-empty
uncovered = set(containers) - set([x[0] for x in used_containers])
# exit with error code if uncovered Dockerfiles exist
if uncovered:
LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.md"))
sys.exit(1)
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, '
'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights '
'in parsefiles_config.yml.')
parser.add_argument('containers', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked")
return parser.parse_args()
if __name__ == '__main__':
args = arg_parse()
# configure logging
logging.basicConfig()
containers = []
for word in args.containers.split():
containers.append(word)
check_coverage(containers)
|
<commit_before><commit_msg>Add script to check for Dockerfile coverage<commit_after>
|
import yaml
import os
import pathlib2
import itertools
import argparse
import logging
import sys
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def check_coverage(containers):
# open config file containing container weights
config_file_path = pathlib2.Path(CONFIG_FILE_PATH)
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.load(file)
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
# get container weights
weights = config.get("weights")
# convert all containers in config file to a list of tuples (<container>, <weight>)
weights_list = [x.items() for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted containers and input containers
used_containers = [x for x in weights_list if x[0] in containers]
# determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile
# available to be built is non-empty
uncovered = set(containers) - set([x[0] for x in used_containers])
# exit with error code if uncovered Dockerfiles exist
if uncovered:
LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.md"))
sys.exit(1)
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, '
'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights '
'in parsefiles_config.yml.')
parser.add_argument('containers', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked")
return parser.parse_args()
if __name__ == '__main__':
args = arg_parse()
# configure logging
logging.basicConfig()
containers = []
for word in args.containers.split():
containers.append(word)
check_coverage(containers)
|
Add script to check for Dockerfile coverageimport yaml
import os
import pathlib2
import itertools
import argparse
import logging
import sys
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def check_coverage(containers):
# open config file containing container weights
config_file_path = pathlib2.Path(CONFIG_FILE_PATH)
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.load(file)
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
# get container weights
weights = config.get("weights")
# convert all containers in config file to a list of tuples (<container>, <weight>)
weights_list = [x.items() for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted containers and input containers
used_containers = [x for x in weights_list if x[0] in containers]
# determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile
# available to be built is non-empty
uncovered = set(containers) - set([x[0] for x in used_containers])
# exit with error code if uncovered Dockerfiles exist
if uncovered:
LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.md"))
sys.exit(1)
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, '
'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights '
'in parsefiles_config.yml.')
parser.add_argument('containers', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked")
return parser.parse_args()
if __name__ == '__main__':
args = arg_parse()
# configure logging
logging.basicConfig()
containers = []
for word in args.containers.split():
containers.append(word)
check_coverage(containers)
|
<commit_before><commit_msg>Add script to check for Dockerfile coverage<commit_after>import yaml
import os
import pathlib2
import itertools
import argparse
import logging
import sys
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
CONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, "util", "parsefiles_config.yml")
LOGGER = logging.getLogger(__name__)
def check_coverage(containers):
# open config file containing container weights
config_file_path = pathlib2.Path(CONFIG_FILE_PATH)
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.load(file)
except yaml.YAMLError, exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
# get container weights
weights = config.get("weights")
# convert all containers in config file to a list of tuples (<container>, <weight>)
weights_list = [x.items() for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted containers and input containers
used_containers = [x for x in weights_list if x[0] in containers]
# determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile
# available to be built is non-empty
uncovered = set(containers) - set([x[0] for x in used_containers])
# exit with error code if uncovered Dockerfiles exist
if uncovered:
LOGGER.error("The following Dockerfiles are not described in the parsefiles_config.yml file: {}. Please see the following documentation on how to add Dockerfile ranks to the configuration file: {}".format(uncovered, "https://github.com/edx/configuration/blob/master/util/README.md"))
sys.exit(1)
def arg_parse():
parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, '
'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights '
'in parsefiles_config.yml.')
parser.add_argument('containers', help = "the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked")
return parser.parse_args()
if __name__ == '__main__':
args = arg_parse()
# configure logging
logging.basicConfig()
containers = []
for word in args.containers.split():
containers.append(word)
check_coverage(containers)
|
|
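A minimal standalone sketch of the set-difference check performed by the coverage script above; the container names and weights below are invented for illustration and do not come from parsefiles_config.yml:

# weights, as loaded from YAML, is a list of single-entry dicts.
weights = [{"edxapp": 10}, {"forum": 4}, {"xqueue": 2}]
containers = ["edxapp", "analytics_api"]
# Flatten the config into (container, weight) pairs.
weights_list = [pair for entry in weights for pair in entry.items()]
# A requested container with no configured weight is "uncovered".
uncovered = set(containers) - {name for name, _ in weights_list}
print(uncovered)  # -> {'analytics_api'}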
805708048f493ca538a9e0b8d9d40ae1d4baf2c3
|
keepalive-race/keep-alive-race.py
|
keepalive-race/keep-alive-race.py
|
#!/usr/bin/python3
"""
This script demonstrates a race condition with HTTP/1.1 keepalive
"""
import decimal
import json
import subprocess
import time
import threading
import requests
requests.packages.urllib3.disable_warnings()
CREDS = json.loads(subprocess.check_output(
"openstack --os-cloud devstack token issue -f json".split(),
).decode())
URL = 'https://10.0.1.44:8774/v2/%s/servers/detail' % (CREDS['project_id'])
def decimal_range(x, y, jump):
x = decimal.Decimal(x)
y = decimal.Decimal(y)
jump = decimal.Decimal(jump)
while x < y:
yield float(x)
x += jump
def get(exit):
for delay in decimal_range(4.95, 4.96, 0.005):
session = requests.Session()
if exit.is_set():
return
for i in range(10):
if exit.is_set():
return
time.sleep(delay)
headers = {
'User-Agent': 'timeout-race/%s' % i,
'X-Auth-Token': CREDS['id']
}
try:
session.get(URL, verify=False, headers=headers)
except Exception as e:
print(e)
exit.set()
threads = []
exit = threading.Event()
for i in range(50):
threads.append(threading.Thread(target=get,args=(exit,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
|
Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
Based on https://github.com/mikem23/keepalive-race
Change-Id: I11f66bf39c6cc2609ee0dbff97ac7b104767ac2b
|
Python
|
apache-2.0
|
JordanP/openstack-snippets,JordanP/openstack-snippets
|
Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
Based on https://github.com/mikem23/keepalive-race
Change-Id: I11f66bf39c6cc2609ee0dbff97ac7b104767ac2b
|
#!/usr/bin/python3
"""
This script demonstrates a race condition with HTTP/1.1 keepalive
"""
import decimal
import json
import subprocess
import time
import threading
import requests
requests.packages.urllib3.disable_warnings()
CREDS = json.loads(subprocess.check_output(
"openstack --os-cloud devstack token issue -f json".split(),
).decode())
URL = 'https://10.0.1.44:8774/v2/%s/servers/detail' % (CREDS['project_id'])
def decimal_range(x, y, jump):
x = decimal.Decimal(x)
y = decimal.Decimal(y)
jump = decimal.Decimal(jump)
while x < y:
yield float(x)
x += jump
def get(exit):
for delay in decimal_range(4.95, 4.96, 0.005):
session = requests.Session()
if exit.is_set():
return
for i in range(10):
if exit.is_set():
return
time.sleep(delay)
headers = {
'User-Agent': 'timeout-race/%s' % i,
'X-Auth-Token': CREDS['id']
}
try:
session.get(URL, verify=False, headers=headers)
except Exception as e:
print(e)
exit.set()
threads = []
exit = threading.Event()
for i in range(50):
threads.append(threading.Thread(target=get,args=(exit,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
<commit_before><commit_msg>Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
Based on https://github.com/mikem23/keepalive-race
Change-Id: I11f66bf39c6cc2609ee0dbff97ac7b104767ac2b<commit_after>
|
#!/usr/bin/python3
"""
This script demonstrates a race condition with HTTP/1.1 keepalive
"""
import decimal
import json
import subprocess
import time
import threading
import requests
requests.packages.urllib3.disable_warnings()
CREDS = json.loads(subprocess.check_output(
"openstack --os-cloud devstack token issue -f json".split(),
).decode())
URL = 'https://10.0.1.44:8774/v2/%s/servers/detail' % (CREDS['project_id'])
def decimal_range(x, y, jump):
x = decimal.Decimal(x)
y = decimal.Decimal(y)
jump = decimal.Decimal(jump)
while x < y:
yield float(x)
x += jump
def get(exit):
for delay in decimal_range(4.95, 4.96, 0.005):
session = requests.Session()
if exit.is_set():
return
for i in range(10):
if exit.is_set():
return
time.sleep(delay)
headers = {
'User-Agent': 'timeout-race/%s' % i,
'X-Auth-Token': CREDS['id']
}
try:
session.get(URL, verify=False, headers=headers)
except Exception as e:
print(e)
exit.set()
threads = []
exit = threading.Event()
for i in range(50):
threads.append(threading.Thread(target=get,args=(exit,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
Based on https://github.com/mikem23/keepalive-race
Change-Id: I11f66bf39c6cc2609ee0dbff97ac7b104767ac2b#!/usr/bin/python3
"""
This script demonstrates a race condition with HTTP/1.1 keepalive
"""
import decimal
import json
import subprocess
import time
import threading
import requests
requests.packages.urllib3.disable_warnings()
CREDS = json.loads(subprocess.check_output(
"openstack --os-cloud devstack token issue -f json".split(),
).decode())
URL = 'https://10.0.1.44:8774/v2/%s/servers/detail' % (CREDS['project_id'])
def decimal_range(x, y, jump):
x = decimal.Decimal(x)
y = decimal.Decimal(y)
jump = decimal.Decimal(jump)
while x < y:
yield float(x)
x += jump
def get(exit):
for delay in decimal_range(4.95, 4.96, 0.005):
session = requests.Session()
if exit.is_set():
return
for i in range(10):
if exit.is_set():
return
time.sleep(delay)
headers = {
'User-Agent': 'timeout-race/%s' % i,
'X-Auth-Token': CREDS['id']
}
try:
session.get(URL, verify=False, headers=headers)
except Exception as e:
print(e)
exit.set()
threads = []
exit = threading.Event()
for i in range(50):
threads.append(threading.Thread(target=get,args=(exit,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
<commit_before><commit_msg>Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
Based on https://github.com/mikem23/keepalive-race
Change-Id: I11f66bf39c6cc2609ee0dbff97ac7b104767ac2b<commit_after>#!/usr/bin/python3
"""
This script demonstrates a race condition with HTTP/1.1 keepalive
"""
import decimal
import json
import subprocess
import time
import threading
import requests
requests.packages.urllib3.disable_warnings()
CREDS = json.loads(subprocess.check_output(
"openstack --os-cloud devstack token issue -f json".split(),
).decode())
URL = 'https://10.0.1.44:8774/v2/%s/servers/detail' % (CREDS['project_id'])
def decimal_range(x, y, jump):
x = decimal.Decimal(x)
y = decimal.Decimal(y)
jump = decimal.Decimal(jump)
while x < y:
yield float(x)
x += jump
def get(exit):
for delay in decimal_range(4.95, 4.96, 0.005):
session = requests.Session()
if exit.is_set():
return
for i in range(10):
if exit.is_set():
return
time.sleep(delay)
headers = {
'User-Agent': 'timeout-race/%s' % i,
'X-Auth-Token': CREDS['id']
}
try:
session.get(URL, verify=False, headers=headers)
except Exception as e:
print(e)
exit.set()
threads = []
exit = threading.Event()
for i in range(50):
threads.append(threading.Thread(target=get,args=(exit,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
|
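The script above deliberately times its requests to land on the server's keep-alive timeout, so the server may close the pooled socket at the same moment the client reuses it. A minimal sketch of one common client-side mitigation (retrying idempotent requests on connection errors); the URL and token below are placeholders, not values from the original script:

import requests
from requests.exceptions import ConnectionError

def get_with_retry(session, url, headers, attempts=2):
    for attempt in range(attempts):
        try:
            return session.get(url, headers=headers, verify=False)
        except ConnectionError:
            # The pooled socket was closed by the server mid-reuse;
            # retrying opens a fresh connection.
            if attempt == attempts - 1:
                raise

session = requests.Session()
resp = get_with_retry(session, "https://203.0.113.1:8774/v2/servers/detail",
                      headers={"X-Auth-Token": "placeholder-token"})
# Alternatively, send "Connection: close" to opt out of keep-alive entirely.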
48da7ceb86387d3cb6fd53f50110232813123ecc
|
tests/pytests/unit/roster/test_ansible.py
|
tests/pytests/unit/roster/test_ansible.py
|
import pytest
import salt.roster.ansible as ansible
from tests.support.mock import patch
@pytest.mark.xfail
@pytest.mark.parametrize(
"which_value",
[False, None],
)
def test_virtual_returns_False_if_ansible_inventory_doesnt_exist(which_value):
with patch("salt.utils.path.which", autospec=True, return_value=which_value):
assert ansible.__virtual__() == (False, "Install `ansible` to use inventory")
|
Add tests for ansible roster virtual
|
Add tests for ansible roster virtual
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add tests for ansible roster virtual
|
import pytest
import salt.roster.ansible as ansible
from tests.support.mock import patch
@pytest.mark.xfail
@pytest.mark.parametrize(
"which_value",
[False, None],
)
def test_virtual_returns_False_if_ansible_inventory_doesnt_exist(which_value):
with patch("salt.utils.path.which", autospec=True, return_value=which_value):
assert ansible.__virtual__() == (False, "Install `ansible` to use inventory")
|
<commit_before><commit_msg>Add tests for ansible roster virtual<commit_after>
|
import pytest
import salt.roster.ansible as ansible
from tests.support.mock import patch
@pytest.mark.xfail
@pytest.mark.parametrize(
"which_value",
[False, None],
)
def test_virtual_returns_False_if_ansible_inventory_doesnt_exist(which_value):
with patch("salt.utils.path.which", autospec=True, return_value=which_value):
assert ansible.__virtual__() == (False, "Install `ansible` to use inventory")
|
Add tests for ansible roster virtualimport pytest
import salt.roster.ansible as ansible
from tests.support.mock import patch
@pytest.mark.xfail
@pytest.mark.parametrize(
"which_value",
[False, None],
)
def test_virtual_returns_False_if_ansible_inventory_doesnt_exist(which_value):
with patch("salt.utils.path.which", autospec=True, return_value=which_value):
assert ansible.__virtual__() == (False, "Install `ansible` to use inventory")
|
<commit_before><commit_msg>Add tests for ansible roster virtual<commit_after>import pytest
import salt.roster.ansible as ansible
from tests.support.mock import patch
@pytest.mark.xfail
@pytest.mark.parametrize(
"which_value",
[False, None],
)
def test_virtual_returns_False_if_ansible_inventory_doesnt_exist(which_value):
with patch("salt.utils.path.which", autospec=True, return_value=which_value):
assert ansible.__virtual__() == (False, "Install `ansible` to use inventory")
|
|
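An aside on the mocking pattern the test above relies on: patch(..., autospec=True) swaps the target callable for a spec'd mock for the duration of the with-block, so the module under test sees the forced return value. Illustrated here with a standard-library target rather than Salt's own helper:

from unittest.mock import patch
import shutil

with patch("shutil.which", autospec=True, return_value=None) as which:
    assert shutil.which("ansible-inventory") is None   # the mock answers the call
    which.assert_called_once_with("ansible-inventory")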
59e546ae5afe22aab967e5376c8799e29ccbd86a
|
directoryFileContentCmp.py
|
directoryFileContentCmp.py
|
#! /usr/env/python
import os
import hashlib
import sys
bufsize = 65536
# Path1 = '/Users/kirkchambers/Desktop'
# Path2 = '/Users/kirkchambers/DataSets'
def generate_file_digests_for(path):
path_set = set()
for item in os.walk(path):
(directory, _subdirectories, files) = item
for file in files:
if (file[0] == '.'):
continue
else:
fqFilename = os.path.join(directory, file)
path_set.add(generate_file_digest(fqFilename, file))
return path_set
def generate_file_digest(fqFilename, shortFilename):
hasher = hashlib.md5()
with open(fqFilename, 'rb') as filestream:
fileBuffer = filestream.read(bufsize)
while len(fileBuffer) > 0:
hasher.update(fileBuffer)
fileBuffer = filestream.read(bufsize)
# return "Filename:{file}\nHash:{hash}\nSize:{size}\n".format(file=fqFilename, hash=hasher.hexdigest(), size=os.path.getsize(fqFilename))
return (hasher.hexdigest(), fqFilename, os.path.getsize(fqFilename))
def usage():
print "file_list.py directory1 directory2"
print "Prints out the files present in directory1 which are NOT present in directory2"
if __name__ == "__main__":
try:
(_command, Path1, Path2) = sys.argv
except:
usage()
exit(1)
path_set_1 = generate_file_digests_for(Path1)
path_set_2 = generate_file_digests_for(Path2)
# union = path_set_1 | path_set_2
set_1_exclusives = path_set_1 - path_set_2
# set_2_exclusives = path_set_2 - path_set_1
# print "length of 1: {}".format(len(path_set_1))
# print "length of 2: {}".format(len(path_set_2))
# print "length of union: {}".format(len(union))
# print "length of set1 uniqs: {}".format(len(set_1_exclusives))
# print "length of set2 uniqs: {}".format(len(set_2_exclusives))
print "Files present in {path1} and not in {path2}:".format(path1=Path1, path2=Path2)
for item in set_1_exclusives:
print item[1]
|
Add the basic version of my file comparison script
|
Add the basic version of my file comparison script
|
Python
|
mit
|
kirkchambe/random_scripts
|
Add the basic version of my file comparison script
|
#! /usr/env/python
import os
import hashlib
import sys
bufsize = 65536
# Path1 = '/Users/kirkchambers/Desktop'
# Path2 = '/Users/kirkchambers/DataSets'
def generate_file_digests_for(path):
path_set = set()
for item in os.walk(path):
(directory, _subdirectories, files) = item
for file in files:
if (file[0] == '.'):
continue
else:
fqFilename = os.path.join(directory, file)
path_set.add(generate_file_digest(fqFilename, file))
return path_set
def generate_file_digest(fqFilename, shortFilename):
hasher = hashlib.md5()
with open(fqFilename, 'rb') as filestream:
fileBuffer = filestream.read(bufsize)
while len(fileBuffer) > 0:
hasher.update(fileBuffer)
fileBuffer = filestream.read(bufsize)
# return "Filename:{file}\nHash:{hash}\nSize:{size}\n".format(file=fqFilename, hash=hasher.hexdigest(), size=os.path.getsize(fqFilename))
return (hasher.hexdigest(), fqFilename, os.path.getsize(fqFilename))
def usage():
print "file_list.py directory1 directory2"
print "Prints out the files present in directory1 which are NOT present in directory2"
if __name__ == "__main__":
try:
(_command, Path1, Path2) = sys.argv
except:
usage()
exit(1)
path_set_1 = generate_file_digests_for(Path1)
path_set_2 = generate_file_digests_for(Path2)
# union = path_set_1 | path_set_2
set_1_exclusives = path_set_1 - path_set_2
# set_2_exclusives = path_set_2 - path_set_1
# print "length of 1: {}".format(len(path_set_1))
# print "length of 2: {}".format(len(path_set_2))
# print "length of union: {}".format(len(union))
# print "length of set1 uniqs: {}".format(len(set_1_exclusives))
# print "length of set2 uniqs: {}".format(len(set_2_exclusives))
print "Files present in {path1} and not in {path2}:".format(path1=Path1, path2=Path2)
for item in set_1_exclusives:
print item[1]
|
<commit_before><commit_msg>Add the basic version of my file comparison script<commit_after>
|
#! /usr/env/python
import os
import hashlib
import sys
bufsize = 65536
# Path1 = '/Users/kirkchambers/Desktop'
# Path2 = '/Users/kirkchambers/DataSets'
def generate_file_digests_for(path):
path_set = set()
for item in os.walk(path):
(directory, _subdirectories, files) = item
for file in files:
if (file[0] == '.'):
continue
else:
fqFilename = os.path.join(directory, file)
path_set.add(generate_file_digest(fqFilename, file))
return path_set
def generate_file_digest(fqFilename, shortFilename):
hasher = hashlib.md5()
with open(fqFilename, 'rb') as filestream:
fileBuffer = filestream.read(bufsize)
while len(fileBuffer) > 0:
hasher.update(fileBuffer)
fileBuffer = filestream.read(bufsize)
# return "Filename:{file}\nHash:{hash}\nSize:{size}\n".format(file=fqFilename, hash=hasher.hexdigest(), size=os.path.getsize(fqFilename))
return (hasher.hexdigest(), fqFilename, os.path.getsize(fqFilename))
def usage():
print "file_list.py directory1 directory2"
print "Prints out the files present in directory1 which are NOT present in directory2"
if __name__ == "__main__":
try:
(_command, Path1, Path2) = sys.argv
except:
usage()
exit(1)
path_set_1 = generate_file_digests_for(Path1)
path_set_2 = generate_file_digests_for(Path2)
# union = path_set_1 | path_set_2
set_1_exclusives = path_set_1 - path_set_2
# set_2_exclusives = path_set_2 - path_set_1
# print "length of 1: {}".format(len(path_set_1))
# print "length of 2: {}".format(len(path_set_2))
# print "length of union: {}".format(len(union))
# print "length of set1 uniqs: {}".format(len(set_1_exclusives))
# print "length of set2 uniqs: {}".format(len(set_2_exclusives))
print "Files present in {path1} and not in {path2}:".format(path1=Path1, path2=Path2)
for item in set_1_exclusives:
print item[1]
|
Add the basic version of my file comparison script#! /usr/env/python
import os
import hashlib
import sys
bufsize = 65536
# Path1 = '/Users/kirkchambers/Desktop'
# Path2 = '/Users/kirkchambers/DataSets'
def generate_file_digests_for(path):
path_set = set()
for item in os.walk(path):
(directory, _subdirectories, files) = item
for file in files:
if (file[0] == '.'):
continue
else:
fqFilename = os.path.join(directory, file)
path_set.add(generate_file_digest(fqFilename, file))
return path_set
def generate_file_digest(fqFilename, shortFilename):
hasher = hashlib.md5()
with open(fqFilename, 'rb') as filestream:
fileBuffer = filestream.read(bufsize)
while len(fileBuffer) > 0:
hasher.update(fileBuffer)
fileBuffer = filestream.read(bufsize)
# return "Filename:{file}\nHash:{hash}\nSize:{size}\n".format(file=fqFilename, hash=hasher.hexdigest(), size=os.path.getsize(fqFilename))
return (hasher.hexdigest(), fqFilename, os.path.getsize(fqFilename))
def usage():
print "file_list.py directory1 directory2"
print "Prints out the files present in directory1 which are NOT present in directory2"
if __name__ == "__main__":
try:
(_command, Path1, Path2) = sys.argv
except:
usage()
exit(1)
path_set_1 = generate_file_digests_for(Path1)
path_set_2 = generate_file_digests_for(Path2)
# union = path_set_1 | path_set_2
set_1_exclusives = path_set_1 - path_set_2
# set_2_exclusives = path_set_2 - path_set_1
# print "length of 1: {}".format(len(path_set_1))
# print "length of 2: {}".format(len(path_set_2))
# print "length of union: {}".format(len(union))
# print "length of set1 uniqs: {}".format(len(set_1_exclusives))
# print "length of set2 uniqs: {}".format(len(set_2_exclusives))
print "Files present in {path1} and not in {path2}:".format(path1=Path1, path2=Path2)
for item in set_1_exclusives:
print item[1]
|
<commit_before><commit_msg>Add the basic version of my file comparison script<commit_after>#! /usr/env/python
import os
import hashlib
import sys
bufsize = 65536
# Path1 = '/Users/kirkchambers/Desktop'
# Path2 = '/Users/kirkchambers/DataSets'
def generate_file_digests_for(path):
path_set = set()
for item in os.walk(path):
(directory, _subdirectories, files) = item
for file in files:
if (file[0] == '.'):
continue
else:
fqFilename = os.path.join(directory, file)
path_set.add(generate_file_digest(fqFilename, file))
return path_set
def generate_file_digest(fqFilename, shortFilename):
hasher = hashlib.md5()
with open(fqFilename, 'rb') as filestream:
fileBuffer = filestream.read(bufsize)
while len(fileBuffer) > 0:
hasher.update(fileBuffer)
fileBuffer = filestream.read(bufsize)
# return "Filename:{file}\nHash:{hash}\nSize:{size}\n".format(file=fqFilename, hash=hasher.hexdigest(), size=os.path.getsize(fqFilename))
return (hasher.hexdigest(), fqFilename, os.path.getsize(fqFilename))
def usage():
print "file_list.py directory1 directory2"
print "Prints out the files present in directory1 which are NOT present in directory2"
if __name__ == "__main__":
try:
(_command, Path1, Path2) = sys.argv
except:
usage()
exit(1)
path_set_1 = generate_file_digests_for(Path1)
path_set_2 = generate_file_digests_for(Path2)
# union = path_set_1 | path_set_2
set_1_exclusives = path_set_1 - path_set_2
# set_2_exclusives = path_set_2 - path_set_1
# print "length of 1: {}".format(len(path_set_1))
# print "length of 2: {}".format(len(path_set_2))
# print "length of union: {}".format(len(union))
# print "length of set1 uniqs: {}".format(len(set_1_exclusives))
# print "length of set2 uniqs: {}".format(len(set_2_exclusives))
print "Files present in {path1} and not in {path2}:".format(path1=Path1, path2=Path2)
for item in set_1_exclusives:
print item[1]
|
|
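The script above targets Python 2 (print statements). A minimal Python 3 sketch of the same chunked-MD5 digest idea; the example path is a placeholder:

import hashlib
from pathlib import Path

def file_digest(path, bufsize=65536):
    hasher = hashlib.md5()
    with open(path, 'rb') as stream:
        for chunk in iter(lambda: stream.read(bufsize), b''):
            hasher.update(chunk)
    return hasher.hexdigest(), str(path), path.stat().st_size

print(file_digest(Path("/tmp/example.txt")))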
c3afc6c28530c3dfc3bd57d9a1841a60bf92ba4f
|
tools/perf/benchmarks/netsim_top25.py
|
tools/perf/benchmarks/netsim_top25.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
test.clear_cache_before_each_run = True
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
def __init__(self):
super(NetsimTop25, self).__init__()
self.test.clear_cache_before_each_run = True
|
Fix bug which caused page cyclers to always clear cache before load.
|
[Telemetry] Fix bug which caused page cyclers to always clear cache before load.
Previously, the cache clearing bit would apply when the netsim benchmark was
imported. This fixes it so that it only applies when it is used.
BUG=256492
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/18550003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209708 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,jaruba/chromium.src,Chilledheart/chromium,littlstar/chromium.src,fujunwei/chromium-crosswalk,jaruba/chromium.src,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,Just-D/chromium-1,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,anirudhSK/chromium,ltilve/chromium,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,markYoungH/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,Chilledheart/chromium,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,patrickm/chromium.src,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,ltilve/chromium,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,dushu1203/chromium.src,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,littlstar/chromium.src,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,bright-sparks/chromium-spacewalk,ltilve/chromium,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,M4sse/chromium.src,M4sse/chromium.src,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,patrickm/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,ondra-novak/chromium.src,littlstar/chromium.src,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,M4sse/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,fujunwei/chromium-crosswalk,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,anirudhSK/chromium,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,anirudhSK/chromium,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,patrickm/chromium.src,axinging/chromium-crosswalk,ondra-novak/chromium.src,dednal/chromium.src,markYoungH/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,Just-D/chromium-1,ltilve/chromium,M4sse/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,chuan9/chromium-crosswalk,Chilledheart/chromium,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,jaruba/chromium.src,anirudhSK/chromium,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,patrickm/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,Just-D/chromium-1,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,littlstar/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,dushu1203/chromium.src,Chilledheart/chromium,littlstar/chromium.src,dednal/chromium.src,littlstar/chromium.src,krieger-od/nwjs_chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,dushu1203/chromium.src,ChromiumWebApps/chromium,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,anirudhSK/chromium,Jonekee/chromium.src,jaruba/chromium.src,M4sse/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk,jaruba/chromium.src,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,markYoungH/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,jaruba/chromium.src,ChromiumWebApps/chromium,axinging/chromium-crosswalk,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,jaruba/chromium.src,dednal/chromium.src,dushu1203/chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,anirudhSK/chromium,ltilve/chromium,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,ChromiumWebApps/chromium,dednal/chromium.src,Fireblend/chromium-crosswalk,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,anirudhSK/chromium,dushu1203/chromium.src,patrickm/chromium.src,mogoweb/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,ChromiumWebApps/chromium,M4sse/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,mogoweb/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,M4sse/chromium.src,ondra-novak/chromium.src,anirudhSK/chromium,dednal/chromium.src,Just-D/chromium-1,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
test.clear_cache_before_each_run = True
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
[Telemetry] Fix bug which caused page cyclers to always clear cache before load.
Previously, the cache clearing bit would apply when the netsim benchmark was
imported. This fixes it so that it only applies when it is used.
BUG=256492
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/18550003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209708 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
def __init__(self):
super(NetsimTop25, self).__init__()
self.test.clear_cache_before_each_run = True
|
<commit_before># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
test.clear_cache_before_each_run = True
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
<commit_msg>[Telemetry] Fix bug which caused page cyclers to always clear cache before load.
Previously, the cache clearing bit would apply when the netsim benchmark was
imported. This fixes it so that it only applies when it is used.
BUG=256492
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/18550003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209708 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
def __init__(self):
super(NetsimTop25, self).__init__()
self.test.clear_cache_before_each_run = True
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
test.clear_cache_before_each_run = True
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
[Telemetry] Fix bug which caused page cyclers to always clear cache before load.
Previously, the cache clearing bit would apply when the netsim benchmark was
imported. This fixes it so that it only applies when it is used.
BUG=256492
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/18550003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209708 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
def __init__(self):
super(NetsimTop25, self).__init__()
self.test.clear_cache_before_each_run = True
|
<commit_before># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
test.clear_cache_before_each_run = True
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
<commit_msg>[Telemetry] Fix bug which caused page cyclers to always clear cache before load.
Previously, the cache clearing bit would apply when the netsim benchmark was
imported. This fixes it so that it only applies when it is used.
BUG=256492
NOTRY=True
TBR=dtu@chromium.org
Review URL: https://codereview.chromium.org/18550003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@209708 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from perf_tools import page_cycler
class NetsimTop25(test.Test):
"""Measures load time of the top 25 sites under simulated cable network."""
test = page_cycler.PageCycler
page_set = 'tools/perf/page_sets/top_25.json'
options = {
'extra_wpr_args': [
'--shaping_type=proxy',
'--net=cable'
],
'pageset_repeat': '5',
}
def __init__(self):
super(NetsimTop25, self).__init__()
self.test.clear_cache_before_each_run = True
|
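Why the original assignment applied on import (illustrative only, not Chromium code): a statement in a class body runs at class-definition time, so mutating a shared class object there affects every benchmark as soon as the module is imported, whereas the fix defers the mutation to __init__, i.e. to when the benchmark is actually used:

class Cycler(object):
    clear_cache_before_each_run = False

class BenchmarkA(object):
    test = Cycler
    test.clear_cache_before_each_run = True   # executes at import time

class BenchmarkB(object):
    test = Cycler                              # already sees the mutated flag

print(Cycler.clear_cache_before_each_run)      # True for everyone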
fe145fd87db777d9eeb361688d502b1b3ec4b2e1
|
Transformation.py
|
Transformation.py
|
# -*- coding:utf-8 -*-
# ***************************************************************************
# Transformation.py
# -------------------
# update : 2013-11-13
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#
# External dependencies
#
from numpy import *
|
Add a new Model-View-Projection matrix tool.
|
Add a new Model-View-Projection matrix tool.
|
Python
|
mit
|
microy/MeshToolkit,microy/PyMeshToolkit,microy/MeshToolkit,microy/PyMeshToolkit
|
Add a new Model-View-Projection matrix tool.
|
# -*- coding:utf-8 -*-
# ***************************************************************************
# Transformation.py
# -------------------
# update : 2013-11-13
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#
# External dependencies
#
from numpy import *
|
<commit_before><commit_msg>Add a new Model-View-Projection matrix tool.<commit_after>
|
# -*- coding:utf-8 -*-
# ***************************************************************************
# Transformation.py
# -------------------
# update : 2013-11-13
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#
# External dependencies
#
from numpy import *
|
Add a new Model-View-Projection matrix tool.# -*- coding:utf-8 -*-
# ***************************************************************************
# Transformation.py
# -------------------
# update : 2013-11-13
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#
# External dependencies
#
from numpy import *
|
<commit_before><commit_msg>Add a new Model-View-Projection matrix tool.<commit_after># -*- coding:utf-8 -*-
# ***************************************************************************
# Transformation.py
# -------------------
# update : 2013-11-13
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#
# External dependencies
#
from numpy import *
|
|
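The new file above carries only the header and the numpy import; the matrix helpers themselves are not shown. As a rough sketch of the kind of helper a Model-View-Projection tool typically provides (not the author's implementation), an OpenGL-style perspective matrix in numpy:

import numpy as np

def perspective(fovy_deg, aspect, near, far):
    # Same layout as gluPerspective: column 3 carries the depth mapping.
    f = 1.0 / np.tan(np.radians(fovy_deg) / 2.0)
    m = np.zeros((4, 4), dtype=np.float32)
    m[0, 0] = f / aspect
    m[1, 1] = f
    m[2, 2] = (far + near) / (near - far)
    m[2, 3] = (2.0 * far * near) / (near - far)
    m[3, 2] = -1.0
    return m

print(perspective(45.0, 16.0 / 9.0, 0.1, 100.0))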
68dbfedf90fb9e6c922971deaeccad148a258a70
|
tests/test_dynamic_ecore_extension.py
|
tests/test_dynamic_ecore_extension.py
|
import pytest
from pyecore.ecore import *
import pyecore.ecore as ecore
from ordered_set import OrderedSet
def test__EModelElement_extension():
A = EClass('A', superclass=(EModelElement.eClass))
a = A()
assert a.eAnnotations == OrderedSet()
annotation = EAnnotation(source='testAnnot')
annotation.details['test'] = 'value'
a.eAnnotations.append(annotation)
assert len(a.eAnnotations) == 1
assert a.getEAnnotation('testAnnot') is annotation
assert a.getEAnnotation('testAnnot').details['test'] == 'value'
def test__EClass_extension():
SuperEClass = EClass('SuperEClass', superclass=(EClass.eClass,))
A = SuperEClass(name='A')
assert isinstance(A, EClass)
a = A()
assert isinstance(a, EObject)
assert a.eClass is A
def test__EClass_modification():
EClass.new_feature = EAttribute('new_feature', EInt)
A = EClass('A')
assert A.new_feature == 0
A.new_feature = 5
assert A.new_feature == 5
with pytest.raises(BadValueError):
A.new_feature = 'a'
|
Add tests for PyEcore extension (EClass/EModelElement tests)
|
Add tests for PyEcore extension (EClass/EModelElement tests)
|
Python
|
bsd-3-clause
|
aranega/pyecore,pyecore/pyecore
|
Add tests for PyEcore extension (EClass/EModelElement tests)
|
import pytest
from pyecore.ecore import *
import pyecore.ecore as ecore
from ordered_set import OrderedSet
def test__EModelElement_extension():
A = EClass('A', superclass=(EModelElement.eClass))
a = A()
assert a.eAnnotations == OrderedSet()
annotation = EAnnotation(source='testAnnot')
annotation.details['test'] = 'value'
a.eAnnotations.append(annotation)
assert len(a.eAnnotations) == 1
assert a.getEAnnotation('testAnnot') is annotation
assert a.getEAnnotation('testAnnot').details['test'] == 'value'
def test__EClass_extension():
SuperEClass = EClass('SuperEClass', superclass=(EClass.eClass,))
A = SuperEClass(name='A')
assert isinstance(A, EClass)
a = A()
assert isinstance(a, EObject)
assert a.eClass is A
def test__EClass_modification():
EClass.new_feature = EAttribute('new_feature', EInt)
A = EClass('A')
assert A.new_feature == 0
A.new_feature = 5
assert A.new_feature == 5
with pytest.raises(BadValueError):
A.new_feature = 'a'
|
<commit_before><commit_msg>Add tests for PyEcore extension (EClass/EModelElement tests)<commit_after>
|
import pytest
from pyecore.ecore import *
import pyecore.ecore as ecore
from ordered_set import OrderedSet
def test__EModelElement_extension():
A = EClass('A', superclass=(EModelElement.eClass))
a = A()
assert a.eAnnotations == OrderedSet()
annotation = EAnnotation(source='testAnnot')
annotation.details['test'] = 'value'
a.eAnnotations.append(annotation)
assert len(a.eAnnotations) == 1
assert a.getEAnnotation('testAnnot') is annotation
assert a.getEAnnotation('testAnnot').details['test'] == 'value'
def test__EClass_extension():
SuperEClass = EClass('SuperEClass', superclass=(EClass.eClass,))
A = SuperEClass(name='A')
assert isinstance(A, EClass)
a = A()
assert isinstance(a, EObject)
assert a.eClass is A
def test__EClass_modification():
EClass.new_feature = EAttribute('new_feature', EInt)
A = EClass('A')
assert A.new_feature == 0
A.new_feature = 5
assert A.new_feature == 5
with pytest.raises(BadValueError):
A.new_feature = 'a'
|
Add tests for PyEcore extension (EClass/EModelElement tests)import pytest
from pyecore.ecore import *
import pyecore.ecore as ecore
from ordered_set import OrderedSet
def test__EModelElement_extension():
A = EClass('A', superclass=(EModelElement.eClass))
a = A()
assert a.eAnnotations == OrderedSet()
annotation = EAnnotation(source='testAnnot')
annotation.details['test'] = 'value'
a.eAnnotations.append(annotation)
assert len(a.eAnnotations) == 1
assert a.getEAnnotation('testAnnot') is annotation
assert a.getEAnnotation('testAnnot').details['test'] == 'value'
def test__EClass_extension():
SuperEClass = EClass('SuperEClass', superclass=(EClass.eClass,))
A = SuperEClass(name='A')
assert isinstance(A, EClass)
a = A()
assert isinstance(a, EObject)
assert a.eClass is A
def test__EClass_modification():
EClass.new_feature = EAttribute('new_feature', EInt)
A = EClass('A')
assert A.new_feature == 0
A.new_feature = 5
assert A.new_feature == 5
with pytest.raises(BadValueError):
A.new_feature = 'a'
|
<commit_before><commit_msg>Add tests for PyEcore extension (EClass/EModelElement tests)<commit_after>import pytest
from pyecore.ecore import *
import pyecore.ecore as ecore
from ordered_set import OrderedSet
def test__EModelElement_extension():
A = EClass('A', superclass=(EModelElement.eClass))
a = A()
assert a.eAnnotations == OrderedSet()
annotation = EAnnotation(source='testAnnot')
annotation.details['test'] = 'value'
a.eAnnotations.append(annotation)
assert len(a.eAnnotations) == 1
assert a.getEAnnotation('testAnnot') is annotation
assert a.getEAnnotation('testAnnot').details['test'] == 'value'
def test__EClass_extension():
SuperEClass = EClass('SuperEClass', superclass=(EClass.eClass,))
A = SuperEClass(name='A')
assert isinstance(A, EClass)
a = A()
assert isinstance(a, EObject)
assert a.eClass is A
def test__EClass_modification():
EClass.new_feature = EAttribute('new_feature', EInt)
A = EClass('A')
assert A.new_feature == 0
A.new_feature = 5
assert A.new_feature == 5
with pytest.raises(BadValueError):
A.new_feature = 'a'
|
|
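Background for the tests above, as a minimal sketch of pyecore's dynamic API (assuming the usual EClass/EAttribute workflow): a metaclass built at runtime is itself callable and produces instances.

from pyecore.ecore import EClass, EAttribute, EString

Person = EClass('Person')
Person.eStructuralFeatures.append(EAttribute('name', EString))

p = Person()                  # the dynamic EClass acts as a factory
p.name = 'Ada'
print(p.name, p.eClass.name)  # -> Ada Person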
40431228c8535f325b005bb52485cae87a8be714
|
tests/unit/modules/test_napalm_acl.py
|
tests/unit/modules/test_napalm_acl.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
import tests.support.napalm as napalm_test_support
import salt.modules.napalm_acl as napalm_acl # NOQA
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NapalmAclModuleTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
module_globals = {
'__salt__': {
'config.option': MagicMock(return_value={
'test': {
'driver': 'test',
'key': '2orgk34kgk34g'
}
}),
'file.file_exists': napalm_test_support.true,
'file.join': napalm_test_support.join,
'file.get_managed': napalm_test_support.get_managed_file,
'random.hash': napalm_test_support.random_hash
}
}
return {napalm_acl: module_globals}
def test_load_term_config(self):
ret = napalm_acl.load_term_config("test_filter", "test_term")
assert ret['out'] is napalm_test_support.TEST_TERM_CONFIG
|
Add test module for napalm_acl
|
Add test module for napalm_acl
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add test module for napalm_acl
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
import tests.support.napalm as napalm_test_support
import salt.modules.napalm_acl as napalm_acl # NOQA
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NapalmAclModuleTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
module_globals = {
'__salt__': {
'config.option': MagicMock(return_value={
'test': {
'driver': 'test',
'key': '2orgk34kgk34g'
}
}),
'file.file_exists': napalm_test_support.true,
'file.join': napalm_test_support.join,
'file.get_managed': napalm_test_support.get_managed_file,
'random.hash': napalm_test_support.random_hash
}
}
return {napalm_acl: module_globals}
def test_load_term_config(self):
ret = napalm_acl.load_term_config("test_filter", "test_term")
assert ret['out'] is napalm_test_support.TEST_TERM_CONFIG
|
<commit_before><commit_msg>Add test module for napalm_acl<commit_after>
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
import tests.support.napalm as napalm_test_support
import salt.modules.napalm_acl as napalm_acl # NOQA
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NapalmAclModuleTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
module_globals = {
'__salt__': {
'config.option': MagicMock(return_value={
'test': {
'driver': 'test',
'key': '2orgk34kgk34g'
}
}),
'file.file_exists': napalm_test_support.true,
'file.join': napalm_test_support.join,
'file.get_managed': napalm_test_support.get_managed_file,
'random.hash': napalm_test_support.random_hash
}
}
return {napalm_acl: module_globals}
def test_load_term_config(self):
ret = napalm_acl.load_term_config("test_filter", "test_term")
assert ret['out'] is napalm_test_support.TEST_TERM_CONFIG
|
Add test module for napalm_acl# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
import tests.support.napalm as napalm_test_support
import salt.modules.napalm_acl as napalm_acl # NOQA
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NapalmAclModuleTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
module_globals = {
'__salt__': {
'config.option': MagicMock(return_value={
'test': {
'driver': 'test',
'key': '2orgk34kgk34g'
}
}),
'file.file_exists': napalm_test_support.true,
'file.join': napalm_test_support.join,
'file.get_managed': napalm_test_support.get_managed_file,
'random.hash': napalm_test_support.random_hash
}
}
return {napalm_acl: module_globals}
def test_load_term_config(self):
ret = napalm_acl.load_term_config("test_filter", "test_term")
assert ret['out'] is napalm_test_support.TEST_TERM_CONFIG
|
<commit_before><commit_msg>Add test module for napalm_acl<commit_after># -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Anthony Shaw <anthonyshaw@apache.org>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON
)
import tests.support.napalm as napalm_test_support
import salt.modules.napalm_acl as napalm_acl # NOQA
@skipIf(NO_MOCK, NO_MOCK_REASON)
class NapalmAclModuleTestCase(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
module_globals = {
'__salt__': {
'config.option': MagicMock(return_value={
'test': {
'driver': 'test',
'key': '2orgk34kgk34g'
}
}),
'file.file_exists': napalm_test_support.true,
'file.join': napalm_test_support.join,
'file.get_managed': napalm_test_support.get_managed_file,
'random.hash': napalm_test_support.random_hash
}
}
return {napalm_acl: module_globals}
def test_load_term_config(self):
ret = napalm_acl.load_term_config("test_filter", "test_term")
assert ret['out'] is napalm_test_support.TEST_TERM_CONFIG
|
|
841fb156fff3d257d39afdc9d3d4e587427fe2cf
|
Source/Scm/wb_scm_project_place_holder.py
|
Source/Scm/wb_scm_project_place_holder.py
|
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_scm_project_place_holder.py
'''
import pathlib
#
# ScmProjectPlaceholder is used when the project cannot be loaded
#
class ScmProjectPlaceholder:
def __init__( self, app, prefs_project ):
self.app = app
self.prefs_project = prefs_project
self.tree = ScmProjectPlaceholderTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )
def scmType( self ):
return self.prefs_project.scm_type
def isNotEqual( self, other ):
return self.projectName() != other.projectName()
def getBranchName( self ):
return ''
def projectName( self ):
return self.prefs_project.name
def projectPath( self ):
return pathlib.Path( self.prefs_project.path )
def updateState( self ):
pass
class ScmProjectPlaceholderTreeNode:
def __init__( self, project, name, path ):
self.project = project
self.name = name
self.__path = path
def __repr__( self ):
return '<ScmProjectPlaceholderTreeNode: project %r, path %s>' % (self.project, self.__path)
def isNotEqual( self, other ):
return (self.relativePath() != other.relativePath()
or self.project.isNotEqual( other.project ))
def __lt__( self, other ):
return self.name < other.name
def relativePath( self ):
return self.__path
def absolutePath( self ):
return self.project.projectPath() / self.__path
def getAllFolderNodes( self ):
return []
def getAllFolderNames( self ):
return []
def getAllFileNames( self ):
return []
def isByPath( self ):
return False
|
Add new file missed in earlier commit place holder for projects that do not load for some reason
|
Add new file missed in earlier commit
place holder for projects that do not load for some reason
|
Python
|
apache-2.0
|
barry-scott/git-workbench,barry-scott/scm-workbench,barry-scott/scm-workbench,barry-scott/git-workbench,barry-scott/scm-workbench
|
Add new file missed in earlier commit
place holder for projects that do not load for some reason
|
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_scm_project_place_holder.py
'''
import pathlib
#
# ScmProjectPlaceholder is used when the project cannot be loaded
#
class ScmProjectPlaceholder:
def __init__( self, app, prefs_project ):
self.app = app
self.prefs_project = prefs_project
self.tree = ScmProjectPlaceholderTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )
def scmType( self ):
return self.prefs_project.scm_type
def isNotEqual( self, other ):
return self.projectName() != other.projectName()
def getBranchName( self ):
return ''
def projectName( self ):
return self.prefs_project.name
def projectPath( self ):
return pathlib.Path( self.prefs_project.path )
def updateState( self ):
pass
class ScmProjectPlaceholderTreeNode:
def __init__( self, project, name, path ):
self.project = project
self.name = name
self.__path = path
def __repr__( self ):
return '<ScmProjectPlaceholderTreeNode: project %r, path %s>' % (self.project, self.__path)
def isNotEqual( self, other ):
return (self.relativePath() != other.relativePath()
or self.project.isNotEqual( other.project ))
def __lt__( self, other ):
return self.name < other.name
def relativePath( self ):
return self.__path
def absolutePath( self ):
return self.project.projectPath() / self.__path
def getAllFolderNodes( self ):
return []
def getAllFolderNames( self ):
return []
def getAllFileNames( self ):
return []
def isByPath( self ):
return False
|
<commit_before><commit_msg>Add new file missed in earlier commit
place holder for projects that do not load for some reason<commit_after>
|
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_scm_project_place_holder.py
'''
import pathlib
#
# ScmProjectPlaceholder is used when the project cannot be loaded
#
class ScmProjectPlaceholder:
def __init__( self, app, prefs_project ):
self.app = app
self.prefs_project = prefs_project
self.tree = ScmProjectPlaceholderTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )
def scmType( self ):
return self.prefs_project.scm_type
def isNotEqual( self, other ):
return self.projectName() != other.projectName()
def getBranchName( self ):
return ''
def projectName( self ):
return self.prefs_project.name
def projectPath( self ):
return pathlib.Path( self.prefs_project.path )
def updateState( self ):
pass
class ScmProjectPlaceholderTreeNode:
def __init__( self, project, name, path ):
self.project = project
self.name = name
self.__path = path
def __repr__( self ):
return '<ScmProjectPlaceholderTreeNode: project %r, path %s>' % (self.project, self.__path)
def isNotEqual( self, other ):
return (self.relativePath() != other.relativePath()
or self.project.isNotEqual( other.project ))
def __lt__( self, other ):
return self.name < other.name
def relativePath( self ):
return self.__path
def absolutePath( self ):
return self.project.projectPath() / self.__path
def getAllFolderNodes( self ):
return []
def getAllFolderNames( self ):
return []
def getAllFileNames( self ):
return []
def isByPath( self ):
return False
|
Add new file missed in earlier commit
place holder for projects that do not load for some reason'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_scm_project_place_holder.py
'''
import pathlib
#
# ScmProjectPlaceholder is used when the project cannot be loaded
#
class ScmProjectPlaceholder:
def __init__( self, app, prefs_project ):
self.app = app
self.prefs_project = prefs_project
self.tree = ScmProjectPlaceholderTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )
def scmType( self ):
return self.prefs_project.scm_type
def isNotEqual( self, other ):
return self.projectName() != other.projectName()
def getBranchName( self ):
return ''
def projectName( self ):
return self.prefs_project.name
def projectPath( self ):
return pathlib.Path( self.prefs_project.path )
def updateState( self ):
pass
class ScmProjectPlaceholderTreeNode:
def __init__( self, project, name, path ):
self.project = project
self.name = name
self.__path = path
def __repr__( self ):
return '<ScmProjectPlaceholderTreeNode: project %r, path %s>' % (self.project, self.__path)
def isNotEqual( self, other ):
return (self.relativePath() != other.relativePath()
or self.project.isNotEqual( other.project ))
def __lt__( self, other ):
return self.name < other.name
def relativePath( self ):
return self.__path
def absolutePath( self ):
return self.project.projectPath() / self.__path
def getAllFolderNodes( self ):
return []
def getAllFolderNames( self ):
return []
def getAllFileNames( self ):
return []
def isByPath( self ):
return False
|
<commit_before><commit_msg>Add new file missed in earlier commit
place holder for projects that do not load for some reason<commit_after>'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_scm_project_place_holder.py
'''
import pathlib
#
# ScmProjectPlaceholder is used when the project cannot be loaded
#
class ScmProjectPlaceholder:
def __init__( self, app, prefs_project ):
self.app = app
self.prefs_project = prefs_project
self.tree = ScmProjectPlaceholderTreeNode( self, prefs_project.name, pathlib.Path( '.' ) )
def scmType( self ):
return self.prefs_project.scm_type
def isNotEqual( self, other ):
return self.projectName() != other.projectName()
def getBranchName( self ):
return ''
def projectName( self ):
return self.prefs_project.name
def projectPath( self ):
return pathlib.Path( self.prefs_project.path )
def updateState( self ):
pass
class ScmProjectPlaceholderTreeNode:
def __init__( self, project, name, path ):
self.project = project
self.name = name
self.__path = path
def __repr__( self ):
return '<ScmProjectPlaceholderTreeNode: project %r, path %s>' % (self.project, self.__path)
def isNotEqual( self, other ):
return (self.relativePath() != other.relativePath()
or self.project.isNotEqual( other.project ))
def __lt__( self, other ):
return self.name < other.name
def relativePath( self ):
return self.__path
def absolutePath( self ):
return self.project.projectPath() / self.__path
def getAllFolderNodes( self ):
return []
def getAllFolderNames( self ):
return []
def getAllFileNames( self ):
return []
def isByPath( self ):
return False
|
|
5114f177741b105f33819b98415702e53b52eb01
|
corehq/apps/hqadmin/management/commands/update_site_setup.py
|
corehq/apps/hqadmin/management/commands/update_site_setup.py
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.conf import settings
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'site_address',
help="the new site address that should be used. This would get set in the site objects name "
"and domain."
)
parser.add_argument(
'--skip-checks',
action='store_true',
default=False,
help="If you are sure of what you are doing and want to skip checks to ensure safe update."
)
def handle(self, site_address, *args, **options):
if not options['skip_checks']:
if settings.SITE_ID != 1:
raise CommandError("SITE ID under settings expected to have value 1 since only one object is expected")
sites_count = Site.objects.count()
if sites_count != 1:
raise CommandError("Expected to have only one object added by Site during setup but currently its %s "%
Site.objects.count())
site_object = Site.objects.first()
if site_object.name != "example.com" and site_object.domain != "example.com":
raise CommandError(
"""
Expected the present site object to have dummy example values.
They were probably modified and needs to be rechecked.
Current Values, name -> {name}, domain -> {domain}
""".format(name=site_object.name, domain=site_object.domain
))
site_object = Site.objects.first()
site_object.name = site_address
site_object.domain = site_address
site_object.save()
Site.objects.clear_cache()
site_object = Site.objects.first()
print('Updated!')
print('Site object now is name -> {name}, domain -> {domain}'.format(
name=site_object.name,
domain=site_object.domain
))
|
Add script to update site setup which is used at places like password reset email
|
Add script to update site setup which is used at places like password reset email [skip ci]
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add script to update site setup which is used at places like password reset email [skip ci]
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.conf import settings
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'site_address',
help="the new site address that should be used. This would get set in the site objects name "
"and domain."
)
parser.add_argument(
'--skip-checks',
action='store_true',
default=False,
help="If you are sure of what you are doing and want to skip checks to ensure safe update."
)
def handle(self, site_address, *args, **options):
if not options['skip_checks']:
if settings.SITE_ID != 1:
raise CommandError("SITE ID under settings expected to have value 1 since only one object is expected")
sites_count = Site.objects.count()
if sites_count != 1:
raise CommandError("Expected to have only one object added by Site during setup but currently its %s "%
Site.objects.count())
site_object = Site.objects.first()
if site_object.name != "example.com" and site_object.domain != "example.com":
raise CommandError(
"""
Expected the present site object to have dummy example values.
They were probably modified and needs to be rechecked.
Current Values, name -> {name}, domain -> {domain}
""".format(name=site_object.name, domain=site_object.domain
))
site_object = Site.objects.first()
site_object.name = site_address
site_object.domain = site_address
site_object.save()
Site.objects.clear_cache()
site_object = Site.objects.first()
print('Updated!')
print('Site object now is name -> {name}, domain -> {domain}'.format(
name=site_object.name,
domain=site_object.domain
))
|
<commit_before><commit_msg>Add script to update site setup which is used at places like password reset email [skip ci]<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.conf import settings
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'site_address',
help="the new site address that should be used. This would get set in the site objects name "
"and domain."
)
parser.add_argument(
'--skip-checks',
action='store_true',
default=False,
help="If you are sure of what you are doing and want to skip checks to ensure safe update."
)
def handle(self, site_address, *args, **options):
if not options['skip_checks']:
if settings.SITE_ID != 1:
raise CommandError("SITE ID under settings expected to have value 1 since only one object is expected")
sites_count = Site.objects.count()
if sites_count != 1:
raise CommandError("Expected to have only one object added by Site during setup but currently its %s "%
Site.objects.count())
site_object = Site.objects.first()
if site_object.name != "example.com" and site_object.domain != "example.com":
raise CommandError(
"""
Expected the present site object to have dummy example values.
They were probably modified and needs to be rechecked.
Current Values, name -> {name}, domain -> {domain}
""".format(name=site_object.name, domain=site_object.domain
))
site_object = Site.objects.first()
site_object.name = site_address
site_object.domain = site_address
site_object.save()
Site.objects.clear_cache()
site_object = Site.objects.first()
print('Updated!')
print('Site object now is name -> {name}, domain -> {domain}'.format(
name=site_object.name,
domain=site_object.domain
))
|
Add script to update site setup which is used at places like password reset email [skip ci]from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.conf import settings
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'site_address',
help="the new site address that should be used. This would get set in the site objects name "
"and domain."
)
parser.add_argument(
'--skip-checks',
action='store_true',
default=False,
help="If you are sure of what you are doing and want to skip checks to ensure safe update."
)
def handle(self, site_address, *args, **options):
if not options['skip_checks']:
if settings.SITE_ID != 1:
raise CommandError("SITE ID under settings expected to have value 1 since only one object is expected")
sites_count = Site.objects.count()
if sites_count != 1:
raise CommandError("Expected to have only one object added by Site during setup but currently its %s "%
Site.objects.count())
site_object = Site.objects.first()
if site_object.name != "example.com" and site_object.domain != "example.com":
raise CommandError(
"""
Expected the present site object to have dummy example values.
They were probably modified and needs to be rechecked.
Current Values, name -> {name}, domain -> {domain}
""".format(name=site_object.name, domain=site_object.domain
))
site_object = Site.objects.first()
site_object.name = site_address
site_object.domain = site_address
site_object.save()
Site.objects.clear_cache()
site_object = Site.objects.first()
print('Updated!')
print('Site object now is name -> {name}, domain -> {domain}'.format(
name=site_object.name,
domain=site_object.domain
))
|
<commit_before><commit_msg>Add script to update site setup which is used at places like password reset email [skip ci]<commit_after>from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
from django.conf import settings
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'site_address',
help="the new site address that should be used. This would get set in the site objects name "
"and domain."
)
parser.add_argument(
'--skip-checks',
action='store_true',
default=False,
help="If you are sure of what you are doing and want to skip checks to ensure safe update."
)
def handle(self, site_address, *args, **options):
if not options['skip_checks']:
if settings.SITE_ID != 1:
raise CommandError("SITE ID under settings expected to have value 1 since only one object is expected")
sites_count = Site.objects.count()
if sites_count != 1:
raise CommandError("Expected to have only one object added by Site during setup but currently its %s "%
Site.objects.count())
site_object = Site.objects.first()
if site_object.name != "example.com" and site_object.domain != "example.com":
raise CommandError(
"""
Expected the present site object to have dummy example values.
They were probably modified and needs to be rechecked.
Current Values, name -> {name}, domain -> {domain}
""".format(name=site_object.name, domain=site_object.domain
))
site_object = Site.objects.first()
site_object.name = site_address
site_object.domain = site_address
site_object.save()
Site.objects.clear_cache()
site_object = Site.objects.first()
print('Updated!')
print('Site object now is name -> {name}, domain -> {domain}'.format(
name=site_object.name,
domain=site_object.domain
))
|
|
8b4c34e84d306b5f9021de47bc3ae9050e2fc2b3
|
compare_clouds.py
|
compare_clouds.py
|
#!/usr/bin/env python3
from pathlib import Path
"""Code for comparing point clouds"""
cloud1Path = Path("./data/reconstructions/2016_10_24__17_43_17/reference.ply")
cloud2Path = Path("./data/reconstructions/2016_10_24__17_43_17/high_quality.ply")
from load_ply import load_ply
cloud1PointData = load_ply(cloud1Path)[0][:,:3].copy()
cloud2PointData = load_ply(cloud2Path)[0][:,:3].copy()
#if __name__=='__main__':
#pass
|
Fix loading of ply files exported by meshlab
|
Fix loading of ply files exported by meshlab
|
Python
|
mit
|
drewm1980/multi_view_stereo_benchmark,drewm1980/multi_view_stereo_benchmark,drewm1980/multi_view_stereo_benchmark
|
Fix loading of ply files exported by meshlab
|
#!/usr/bin/env python3
from pathlib import Path
"""Code for comparing point clouds"""
cloud1Path = Path("./data/reconstructions/2016_10_24__17_43_17/reference.ply")
cloud2Path = Path("./data/reconstructions/2016_10_24__17_43_17/high_quality.ply")
from load_ply import load_ply
cloud1PointData = load_ply(cloud1Path)[0][:,:3].copy()
cloud2PointData = load_ply(cloud2Path)[0][:,:3].copy()
#if __name__=='__main__':
#pass
|
<commit_before><commit_msg>Fix loading of ply files exported by meshlab<commit_after>
|
#!/usr/bin/env python3
from pathlib import Path
"""Code for comparing point clouds"""
cloud1Path = Path("./data/reconstructions/2016_10_24__17_43_17/reference.ply")
cloud2Path = Path("./data/reconstructions/2016_10_24__17_43_17/high_quality.ply")
from load_ply import load_ply
cloud1PointData = load_ply(cloud1Path)[0][:,:3].copy()
cloud2PointData = load_ply(cloud2Path)[0][:,:3].copy()
#if __name__=='__main__':
#pass
|
Fix loading of ply files exported by meshlab#!/usr/bin/env python3
from pathlib import Path
"""Code for comparing point clouds"""
cloud1Path = Path("./data/reconstructions/2016_10_24__17_43_17/reference.ply")
cloud2Path = Path("./data/reconstructions/2016_10_24__17_43_17/high_quality.ply")
from load_ply import load_ply
cloud1PointData = load_ply(cloud1Path)[0][:,:3].copy()
cloud2PointData = load_ply(cloud2Path)[0][:,:3].copy()
#if __name__=='__main__':
#pass
|
<commit_before><commit_msg>Fix loading of ply files exported by meshlab<commit_after>#!/usr/bin/env python3
from pathlib import Path
"""Code for comparing point clouds"""
cloud1Path = Path("./data/reconstructions/2016_10_24__17_43_17/reference.ply")
cloud2Path = Path("./data/reconstructions/2016_10_24__17_43_17/high_quality.ply")
from load_ply import load_ply
cloud1PointData = load_ply(cloud1Path)[0][:,:3].copy()
cloud2PointData = load_ply(cloud2Path)[0][:,:3].copy()
#if __name__=='__main__':
#pass
|
|
0bf7d9fb20a3d2588ffc0e8341ec2af3df5fe300
|
depot/tests/test_depot_index.py
|
depot/tests/test_depot_index.py
|
from django.test import TestCase, Client
from depot.models import Depot
def create_depot(name, state):
return Depot.objects.create(name=name, active=state)
class DepotIndexTestCase(TestCase):
def test_depot_index_template(self):
response = self.client.get('/depots/')
self.assertTemplateUsed(
response,
template_name='depot/index.html'
)
def test_depot_index_with_no_depots(self):
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available :(')
def test_depot_index_with_active_depot(self):
depot = create_depot('active depot', True)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, depot.name)
def test_depot_index_with_archived_depot(self):
depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available')
self.assertNotContains(response, depot.name)
def test_depot_index_with_active_and_archived_depot(self):
active_depot = create_depot('active depot', True)
archived_depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, active_depot.name)
self.assertNotContains(response, archived_depot.name)
|
Add test for depot index page
|
Add test for depot index page
|
Python
|
agpl-3.0
|
verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool
|
Add test for depot index page
|
from django.test import TestCase, Client
from depot.models import Depot
def create_depot(name, state):
return Depot.objects.create(name=name, active=state)
class DepotIndexTestCase(TestCase):
def test_depot_index_template(self):
response = self.client.get('/depots/')
self.assertTemplateUsed(
response,
template_name='depot/index.html'
)
def test_depot_index_with_no_depots(self):
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available :(')
def test_depot_index_with_active_depot(self):
depot = create_depot('active depot', True)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, depot.name)
def test_depot_index_with_archived_depot(self):
depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available')
self.assertNotContains(response, depot.name)
def test_depot_index_with_active_and_archived_depot(self):
active_depot = create_depot('active depot', True)
archived_depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, active_depot.name)
self.assertNotContains(response, archived_depot.name)
|
<commit_before><commit_msg>Add test for depot index page<commit_after>
|
from django.test import TestCase, Client
from depot.models import Depot
def create_depot(name, state):
return Depot.objects.create(name=name, active=state)
class DepotIndexTestCase(TestCase):
def test_depot_index_template(self):
response = self.client.get('/depots/')
self.assertTemplateUsed(
response,
template_name='depot/index.html'
)
def test_depot_index_with_no_depots(self):
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available :(')
def test_depot_index_with_active_depot(self):
depot = create_depot('active depot', True)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, depot.name)
def test_depot_index_with_archived_depot(self):
depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available')
self.assertNotContains(response, depot.name)
def test_depot_index_with_active_and_archived_depot(self):
active_depot = create_depot('active depot', True)
archived_depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, active_depot.name)
self.assertNotContains(response, archived_depot.name)
|
Add test for depot index pagefrom django.test import TestCase, Client
from depot.models import Depot
def create_depot(name, state):
return Depot.objects.create(name=name, active=state)
class DepotIndexTestCase(TestCase):
def test_depot_index_template(self):
response = self.client.get('/depots/')
self.assertTemplateUsed(
response,
template_name='depot/index.html'
)
def test_depot_index_with_no_depots(self):
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available :(')
def test_depot_index_with_active_depot(self):
depot = create_depot('active depot', True)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, depot.name)
def test_depot_index_with_archived_depot(self):
depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available')
self.assertNotContains(response, depot.name)
def test_depot_index_with_active_and_archived_depot(self):
active_depot = create_depot('active depot', True)
archived_depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, active_depot.name)
self.assertNotContains(response, archived_depot.name)
|
<commit_before><commit_msg>Add test for depot index page<commit_after>from django.test import TestCase, Client
from depot.models import Depot
def create_depot(name, state):
return Depot.objects.create(name=name, active=state)
class DepotIndexTestCase(TestCase):
def test_depot_index_template(self):
response = self.client.get('/depots/')
self.assertTemplateUsed(
response,
template_name='depot/index.html'
)
def test_depot_index_with_no_depots(self):
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available :(')
def test_depot_index_with_active_depot(self):
depot = create_depot('active depot', True)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, depot.name)
def test_depot_index_with_archived_depot(self):
depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['depot_list'], [])
self.assertContains(response, 'No depots available')
self.assertNotContains(response, depot.name)
def test_depot_index_with_active_and_archived_depot(self):
active_depot = create_depot('active depot', True)
archived_depot = create_depot('archived depot', False)
response = self.client.get('/depots/')
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['depot_list'], ['<Depot: Depot active depot>']
)
self.assertContains(response, active_depot.name)
self.assertNotContains(response, archived_depot.name)
|
|
cc79ee252e09ade17961d03265c61a87e270bd88
|
nototools/map_pua_emoji.py
|
nototools/map_pua_emoji.py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify an emoji font to map legacy PUA characters to standard ligatures."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
from fontTools import ttLib
from nototools import add_emoji_gsub
from nototools import font_data
def get_glyph_name_from_gsub(char_seq, font):
"""Find the glyph name for ligature of a given character sequence from GSUB.
"""
cmap = font_data.get_cmap(font)
# FIXME: So many assumptions are made here.
try:
first_glyph = cmap[char_seq[0]]
rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
except KeyError:
return None
for lookup in font['GSUB'].table.LookupList.Lookup:
ligatures = lookup.SubTable[0].ligatures
try:
for ligature in ligatures[first_glyph]:
if ligature.Component == rest_of_glyphs:
return ligature.LigGlyph
except KeyError:
continue
return None
def add_pua_cmap(source_file, target_file):
"""Add PUA characters to the cmap of the first font and save as second."""
font = ttLib.TTFont(source_file)
cmap = font_data.get_cmap(font)
for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()
+ add_emoji_gsub.EMOJI_FLAGS.items()):
if pua not in cmap:
glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
if glyph_name is not None:
cmap[pua] = glyph_name
font.save(target_file)
def main(argv):
"""Save the first font given to the second font."""
add_pua_cmap(argv[1], argv[2])
if __name__ == '__main__':
main(sys.argv)
|
Make color emoji use character sequences instead of PUA.
|
Make color emoji use character sequences instead of PUA.
The bitmap emoji tools are extended to create GSUB rules for character
sequences. The images are renamed to code the character sequence in their
filenames. New images are created for ASCII digits and number sign. A new
script is added to add cmaps to the files for backward compatibility.
|
Python
|
apache-2.0
|
davelab6/nototools,anthrotype/nototools,pathumego/nototools,googlei18n/nototools,davelab6/nototools,pahans/nototools,moyogo/nototools,googlefonts/nototools,namemealrady/nototools,googlei18n/nototools,moyogo/nototools,dougfelt/nototools,googlefonts/nototools,pahans/nototools,googlei18n/nototools,googlefonts/nototools,davelab6/nototools,namemealrady/nototools,dougfelt/nototools,pathumego/nototools,anthrotype/nototools,dougfelt/nototools,googlefonts/nototools,anthrotype/nototools,pahans/nototools,namemealrady/nototools,pathumego/nototools,googlefonts/nototools,moyogo/nototools
|
Make color emoji use character sequences instead of PUA.
The bitmap emoji tools are extended to create GSUB rules for character
sequences. The images are renamed to code the character sequence in their
filenames. New images are created for ASCII digits and number sign. A new
script is added to add cmaps to the files for backward compatibility.
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify an emoji font to map legacy PUA characters to standard ligatures."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
from fontTools import ttLib
from nototools import add_emoji_gsub
from nototools import font_data
def get_glyph_name_from_gsub(char_seq, font):
"""Find the glyph name for ligature of a given character sequence from GSUB.
"""
cmap = font_data.get_cmap(font)
# FIXME: So many assumptions are made here.
try:
first_glyph = cmap[char_seq[0]]
rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
except KeyError:
return None
for lookup in font['GSUB'].table.LookupList.Lookup:
ligatures = lookup.SubTable[0].ligatures
try:
for ligature in ligatures[first_glyph]:
if ligature.Component == rest_of_glyphs:
return ligature.LigGlyph
except KeyError:
continue
return None
def add_pua_cmap(source_file, target_file):
"""Add PUA characters to the cmap of the first font and save as second."""
font = ttLib.TTFont(source_file)
cmap = font_data.get_cmap(font)
for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()
+ add_emoji_gsub.EMOJI_FLAGS.items()):
if pua not in cmap:
glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
if glyph_name is not None:
cmap[pua] = glyph_name
font.save(target_file)
def main(argv):
"""Save the first font given to the second font."""
add_pua_cmap(argv[1], argv[2])
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Make color emoji use character sequences instead of PUA.
The bitmap emoji tools are extended to create GSUB rules for character
sequences. The images are renamed to code the character sequence in their
filenames. New images are created for ASCII digits and number sign. A new
script is added to add cmaps to the files for backward compatibility.<commit_after>
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify an emoji font to map legacy PUA characters to standard ligatures."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
from fontTools import ttLib
from nototools import add_emoji_gsub
from nototools import font_data
def get_glyph_name_from_gsub(char_seq, font):
"""Find the glyph name for ligature of a given character sequence from GSUB.
"""
cmap = font_data.get_cmap(font)
# FIXME: So many assumptions are made here.
try:
first_glyph = cmap[char_seq[0]]
rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
except KeyError:
return None
for lookup in font['GSUB'].table.LookupList.Lookup:
ligatures = lookup.SubTable[0].ligatures
try:
for ligature in ligatures[first_glyph]:
if ligature.Component == rest_of_glyphs:
return ligature.LigGlyph
except KeyError:
continue
return None
def add_pua_cmap(source_file, target_file):
"""Add PUA characters to the cmap of the first font and save as second."""
font = ttLib.TTFont(source_file)
cmap = font_data.get_cmap(font)
for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()
+ add_emoji_gsub.EMOJI_FLAGS.items()):
if pua not in cmap:
glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
if glyph_name is not None:
cmap[pua] = glyph_name
font.save(target_file)
def main(argv):
"""Save the first font given to the second font."""
add_pua_cmap(argv[1], argv[2])
if __name__ == '__main__':
main(sys.argv)
|
Make color emoji use character sequences instead of PUA.
The bitmap emoji tools are extended to create GSUB rules for character
sequences. The images are renamed to code the character sequence in their
filenames. New images are created for ASCII digits and number sign. A new
script is added to add cmaps to the files for backward compatibility.#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify an emoji font to map legacy PUA characters to standard ligatures."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
from fontTools import ttLib
from nototools import add_emoji_gsub
from nototools import font_data
def get_glyph_name_from_gsub(char_seq, font):
"""Find the glyph name for ligature of a given character sequence from GSUB.
"""
cmap = font_data.get_cmap(font)
# FIXME: So many assumptions are made here.
try:
first_glyph = cmap[char_seq[0]]
rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
except KeyError:
return None
for lookup in font['GSUB'].table.LookupList.Lookup:
ligatures = lookup.SubTable[0].ligatures
try:
for ligature in ligatures[first_glyph]:
if ligature.Component == rest_of_glyphs:
return ligature.LigGlyph
except KeyError:
continue
return None
def add_pua_cmap(source_file, target_file):
"""Add PUA characters to the cmap of the first font and save as second."""
font = ttLib.TTFont(source_file)
cmap = font_data.get_cmap(font)
for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()
+ add_emoji_gsub.EMOJI_FLAGS.items()):
if pua not in cmap:
glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
if glyph_name is not None:
cmap[pua] = glyph_name
font.save(target_file)
def main(argv):
"""Save the first font given to the second font."""
add_pua_cmap(argv[1], argv[2])
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Make color emoji use character sequences instead of PUA.
The bitmap emoji tools are extended to create GSUB rules for character
sequences. The images are renamed to code the character sequence in their
filenames. New images are created for ASCII digits and number sign. A new
script is added to add cmaps to the files for backward compatibility.<commit_after>#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modify an emoji font to map legacy PUA characters to standard ligatures."""
__author__ = 'roozbeh@google.com (Roozbeh Pournader)'
import sys
from fontTools import ttLib
from nototools import add_emoji_gsub
from nototools import font_data
def get_glyph_name_from_gsub(char_seq, font):
"""Find the glyph name for ligature of a given character sequence from GSUB.
"""
cmap = font_data.get_cmap(font)
# FIXME: So many assumptions are made here.
try:
first_glyph = cmap[char_seq[0]]
rest_of_glyphs = [cmap[ch] for ch in char_seq[1:]]
except KeyError:
return None
for lookup in font['GSUB'].table.LookupList.Lookup:
ligatures = lookup.SubTable[0].ligatures
try:
for ligature in ligatures[first_glyph]:
if ligature.Component == rest_of_glyphs:
return ligature.LigGlyph
except KeyError:
continue
return None
def add_pua_cmap(source_file, target_file):
"""Add PUA characters to the cmap of the first font and save as second."""
font = ttLib.TTFont(source_file)
cmap = font_data.get_cmap(font)
for pua, (ch1, ch2) in (add_emoji_gsub.EMOJI_KEYCAPS.items()
+ add_emoji_gsub.EMOJI_FLAGS.items()):
if pua not in cmap:
glyph_name = get_glyph_name_from_gsub([ch1, ch2], font)
if glyph_name is not None:
cmap[pua] = glyph_name
font.save(target_file)
def main(argv):
"""Save the first font given to the second font."""
add_pua_cmap(argv[1], argv[2])
if __name__ == '__main__':
main(sys.argv)
|
|
3adcefcad4fc3ecb85aa4a22e8b3c4bf5ca4e6f5
|
test/integration/ggrc/converters/test_import_update.py
|
test/integration/ggrc/converters/test_import_update.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for bulk updates with CSV import."""
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 1)
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 2)
|
Add tests for revision updates via import
|
Add tests for revision updates via import
This tests checks if new revisions were added for object updated via CSV
import.
|
Python
|
apache-2.0
|
edofic/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
Add tests for revision updates via import
This test checks if new revisions were added for objects updated via CSV
import.
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for bulk updates with CSV import."""
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 1)
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 2)
|
<commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
<commit_msg>Add tests for revision updates via import
This test checks if new revisions were added for objects updated via CSV
import.<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for bulk updates with CSV import."""
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 1)
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 2)
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
Add tests for revision updates via import
This test checks if new revisions were added for objects updated via CSV
import.# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for bulk updates with CSV import."""
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 1)
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 2)
|
<commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
<commit_msg>Add tests for revision updates via import
This test checks if new revisions were added for objects updated via CSV
import.<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for bulk updates with CSV import."""
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 1)
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
revision_count = models.Revision.query.filter(
models.Revision.resource_type == "Policy",
models.Revision.resource_id == policy.id
).count()
self.assertEqual(revision_count, 2)
|
e48caa4bb61cce466ad5eb9bffbfba8e33312474
|
python/example_code/ec2/terminate_instances.py
|
python/example_code/ec2/terminate_instances.py
|
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[terminate_instances.py demonstrates how to terminate an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon EC2]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-11]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
def terminate_instances(instance_ids):
"""Terminate one or more Amazon EC2 instances
:param instance_ids: List of string IDs of EC2 instances to terminate
:return: List of state information for each instance specified in instance_ids.
If error, return None.
"""
# Terminate each instance in the argument list
ec2 = boto3.client('ec2')
try:
states = ec2.terminate_instances(InstanceIds=instance_ids)
except ClientError as e:
logging.error(e)
return None
return states['TerminatingInstances']
def main():
"""Exercise terminate_instances()"""
# Assign these values before running the program
ec2_instance_ids = ['EC2_INSTANCE_ID']
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
# Terminate the EC2 instance(s)
states = terminate_instances(ec2_instance_ids)
if states is not None:
logging.debug('Terminating the following EC2 instances')
for state in states:
logging.debug(f'ID: {state["InstanceId"]}')
logging.debug(f' Current state: Code {state["CurrentState"]["Code"]}, '
f'{state["CurrentState"]["Name"]}')
logging.debug(f' Previous state: Code {state["PreviousState"]["Code"]}, '
f'{state["PreviousState"]["Name"]}')
if __name__ == '__main__':
main()
|
Add Python EC2 TerminateInstances example
|
Add Python EC2 TerminateInstances example
Merge pull request #504 from awsdocs/scalwas_ec2
|
Python
|
apache-2.0
|
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
|
Add Python EC2 TerminateInstances example
Merge pull request #504 from awsdocs/scalwas_ec2
|
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[terminate_instances.py demonstrates how to terminate an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon EC2]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-11]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
def terminate_instances(instance_ids):
"""Terminate one or more Amazon EC2 instances
:param instance_ids: List of string IDs of EC2 instances to terminate
:return: List of state information for each instance specified in instance_ids.
If error, return None.
"""
# Terminate each instance in the argument list
ec2 = boto3.client('ec2')
try:
states = ec2.terminate_instances(InstanceIds=instance_ids)
except ClientError as e:
logging.error(e)
return None
return states['TerminatingInstances']
def main():
"""Exercise terminate_instances()"""
# Assign these values before running the program
ec2_instance_ids = ['EC2_INSTANCE_ID']
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
# Terminate the EC2 instance(s)
states = terminate_instances(ec2_instance_ids)
if states is not None:
logging.debug('Terminating the following EC2 instances')
for state in states:
logging.debug(f'ID: {state["InstanceId"]}')
logging.debug(f' Current state: Code {state["CurrentState"]["Code"]}, '
f'{state["CurrentState"]["Name"]}')
logging.debug(f' Previous state: Code {state["PreviousState"]["Code"]}, '
f'{state["PreviousState"]["Name"]}')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Python EC2 TerminateInstances example
Merge pull request #504 from awsdocs/scalwas_ec2<commit_after>
|
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[terminate_instances.py demonstrates how to terminate an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon EC2]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-11]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
def terminate_instances(instance_ids):
"""Terminate one or more Amazon EC2 instances
:param instance_ids: List of string IDs of EC2 instances to terminate
:return: List of state information for each instance specified in instance_ids.
If error, return None.
"""
# Terminate each instance in the argument list
ec2 = boto3.client('ec2')
try:
states = ec2.terminate_instances(InstanceIds=instance_ids)
except ClientError as e:
logging.error(e)
return None
return states['TerminatingInstances']
def main():
"""Exercise terminate_instances()"""
# Assign these values before running the program
ec2_instance_ids = ['EC2_INSTANCE_ID']
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
# Terminate the EC2 instance(s)
states = terminate_instances(ec2_instance_ids)
if states is not None:
logging.debug('Terminating the following EC2 instances')
for state in states:
logging.debug(f'ID: {state["InstanceId"]}')
logging.debug(f' Current state: Code {state["CurrentState"]["Code"]}, '
f'{state["CurrentState"]["Name"]}')
logging.debug(f' Previous state: Code {state["PreviousState"]["Code"]}, '
f'{state["PreviousState"]["Name"]}')
if __name__ == '__main__':
main()
|
Add Python EC2 TerminateInstances example
Merge pull request #504 from awsdocs/scalwas_ec2# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[terminate_instances.py demonstrates how to terminate an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon EC2]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-11]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
def terminate_instances(instance_ids):
"""Terminate one or more Amazon EC2 instances
:param instance_ids: List of string IDs of EC2 instances to terminate
:return: List of state information for each instance specified in instance_ids.
If error, return None.
"""
# Terminate each instance in the argument list
ec2 = boto3.client('ec2')
try:
states = ec2.terminate_instances(InstanceIds=instance_ids)
except ClientError as e:
logging.error(e)
return None
return states['TerminatingInstances']
def main():
"""Exercise terminate_instances()"""
# Assign these values before running the program
ec2_instance_ids = ['EC2_INSTANCE_ID']
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
# Terminate the EC2 instance(s)
states = terminate_instances(ec2_instance_ids)
if states is not None:
logging.debug('Terminating the following EC2 instances')
for state in states:
logging.debug(f'ID: {state["InstanceId"]}')
logging.debug(f' Current state: Code {state["CurrentState"]["Code"]}, '
f'{state["CurrentState"]["Name"]}')
logging.debug(f' Previous state: Code {state["PreviousState"]["Code"]}, '
f'{state["PreviousState"]["Name"]}')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add Python EC2 TerminateInstances example
Merge pull request #504 from awsdocs/scalwas_ec2<commit_after># snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[terminate_instances.py demonstrates how to terminate an Amazon EC2 instance.]
# snippet-service:[ec2]
# snippet-keyword:[Amazon EC2]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2019-2-11]
# snippet-sourceauthor:[AWS]
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import logging
import boto3
from botocore.exceptions import ClientError
def terminate_instances(instance_ids):
"""Terminate one or more Amazon EC2 instances
:param instance_ids: List of string IDs of EC2 instances to terminate
:return: List of state information for each instance specified in instance_ids.
If error, return None.
"""
# Terminate each instance in the argument list
ec2 = boto3.client('ec2')
try:
states = ec2.terminate_instances(InstanceIds=instance_ids)
except ClientError as e:
logging.error(e)
return None
return states['TerminatingInstances']
def main():
"""Exercise terminate_instances()"""
# Assign these values before running the program
ec2_instance_ids = ['EC2_INSTANCE_ID']
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
# Terminate the EC2 instance(s)
states = terminate_instances(ec2_instance_ids)
if states is not None:
logging.debug('Terminating the following EC2 instances')
for state in states:
logging.debug(f'ID: {state["InstanceId"]}')
logging.debug(f' Current state: Code {state["CurrentState"]["Code"]}, '
f'{state["CurrentState"]["Name"]}')
logging.debug(f' Previous state: Code {state["PreviousState"]["Code"]}, '
f'{state["PreviousState"]["Name"]}')
if __name__ == '__main__':
main()
|
|
58fee826ab5298f7de036bf320bbc109b853eec8
|
tendrl/commons/manager/__init__.py
|
tendrl/commons/manager/__init__.py
|
import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
self._sds_sync_thread.join()
self._central_store_thread.join()
|
import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
if self._sds_sync_thread:
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
if self._sds_sync_thread:
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
if self._sds_sync_thread:
self._sds_sync_thread.join()
self._central_store_thread.join()
|
Add null check for sds sync thread which can be optional
|
Add null check for sds sync thread which can be optional
Signed-off-by: anmolbabu <3d38fb1e9c5ff2662fc415254efcdfedb95b84d5@gmail.com>
|
Python
|
lgpl-2.1
|
Tendrl/commons,rishubhjain/commons,r0h4n/commons
|
import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
self._sds_sync_thread.join()
self._central_store_thread.join()
Add null check for sds sync thread which can be optional
Signed-off-by: anmolbabu <3d38fb1e9c5ff2662fc415254efcdfedb95b84d5@gmail.com>
|
import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
if self._sds_sync_thread:
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
if self._sds_sync_thread:
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
if self._sds_sync_thread:
self._sds_sync_thread.join()
self._central_store_thread.join()
|
<commit_before>import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
self._sds_sync_thread.join()
self._central_store_thread.join()
<commit_msg>Add null check for sds sync thread which can be optional
Signed-off-by: anmolbabu <3d38fb1e9c5ff2662fc415254efcdfedb95b84d5@gmail.com><commit_after>
|
import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
if self._sds_sync_thread:
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
if self._sds_sync_thread:
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
if self._sds_sync_thread:
self._sds_sync_thread.join()
self._central_store_thread.join()
|
import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
self._sds_sync_thread.join()
self._central_store_thread.join()
Add null check for sds sync thread which can be optional
Signed-off-by: anmolbabu <3d38fb1e9c5ff2662fc415254efcdfedb95b84d5@gmail.com>import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
if self._sds_sync_thread:
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
if self._sds_sync_thread:
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
if self._sds_sync_thread:
self._sds_sync_thread.join()
self._central_store_thread.join()
|
<commit_before>import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
self._sds_sync_thread.join()
self._central_store_thread.join()
<commit_msg>Add null check for sds sync thread which can be optional
Signed-off-by: anmolbabu <3d38fb1e9c5ff2662fc415254efcdfedb95b84d5@gmail.com><commit_after>import abc
import logging
import six
from tendrl.commons import jobs
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Manager(object):
def __init__(
self,
sds_sync_thread,
central_store_thread,
):
self._central_store_thread = central_store_thread
self._sds_sync_thread = sds_sync_thread
self._job_consumer_thread = jobs.JobConsumerThread()
def stop(self):
LOG.info("%s stopping" % self.__class__.__name__)
self._job_consumer_thread.stop()
if self._sds_sync_thread:
self._sds_sync_thread.stop()
self._central_store_thread.stop()
def start(self):
LOG.info("%s starting" % self.__class__.__name__)
self._central_store_thread.start()
if self._sds_sync_thread:
self._sds_sync_thread.start()
self._job_consumer_thread.start()
def join(self):
LOG.info("%s joining" % self.__class__.__name__)
self._job_consumer_thread.join()
if self._sds_sync_thread:
self._sds_sync_thread.join()
self._central_store_thread.join()
|
e5a39d4e17a0555cb242731b34f0ee480367b4fe
|
foireminder/foireminder/reminders/tasks.py
|
foireminder/foireminder/reminders/tasks.py
|
from django.utils import timezone
from .models import ReminderRequest, EmailReminder
def send_todays_notifications(self):
today = timezone.now()
reminders = ReminderRequest.objects.filter(
start__year=today.year,
start__month=today.month,
start__day=today.day
)
for reminder in reminders:
for subscriber in EmailReminder.objects.filter(rule=reminder.rule):
subscriber.send_notification()
|
Add task that sends out notifications
|
Add task that sends out notifications
|
Python
|
mit
|
stefanw/foireminder,stefanw/foireminder
|
Add task that sends out notifications
|
from django.utils import timezone
from .models import ReminderRequest, EmailReminder
def send_todays_notifications(self):
today = timezone.now()
reminders = ReminderRequest.objects.filter(
start__year=today.year,
start__month=today.month,
start__day=today.day
)
for reminder in reminders:
for subscriber in EmailReminder.objects.filter(rule=reminder.rule):
subscriber.send_notification()
|
<commit_before><commit_msg>Add task that sends out notifications<commit_after>
|
from django.utils import timezone
from .models import ReminderRequest, EmailReminder
def send_todays_notifications(self):
today = timezone.now()
reminders = ReminderRequest.objects.filter(
start__year=today.year,
start__month=today.month,
start__day=today.day
)
for reminder in reminders:
for subscriber in EmailReminder.objects.filter(rule=reminder.rule):
subscriber.send_notification()
|
Add task that sends out notificationsfrom django.utils import timezone
from .models import ReminderRequest, EmailReminder
def send_todays_notifications(self):
today = timezone.now()
reminders = ReminderRequest.objects.filter(
start__year=today.year,
start__month=today.month,
start__day=today.day
)
for reminder in reminders:
for subscriber in EmailReminder.objects.filter(rule=reminder.rule):
subscriber.send_notification()
|
<commit_before><commit_msg>Add task that sends out notifications<commit_after>from django.utils import timezone
from .models import ReminderRequest, EmailReminder
def send_todays_notifications(self):
today = timezone.now()
reminders = ReminderRequest.objects.filter(
start__year=today.year,
start__month=today.month,
start__day=today.day
)
for reminder in reminders:
for subscriber in EmailReminder.objects.filter(rule=reminder.rule):
subscriber.send_notification()
|
|
c58c58d5bf1394e04e30f5eeb298818558be027f
|
tests/rules_tests/clearAfterNonTermRemove/__init__.py
|
tests/rules_tests/clearAfterNonTermRemove/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 22:06
:Licence GNUv3
Part of grammpy
"""
|
Add directory for tests of rules removing
|
Add directory for tests of rules removing
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add directory for tests of rules removing
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 22:06
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Add directory for tests of rules removin<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 22:06
:Licence GNUv3
Part of grammpy
"""
|
Add directory for tests of rules removing#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 22:06
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Add directory for tests of rules removing<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 17.08.2017 22:06
:Licence GNUv3
Part of grammpy
"""
|
|
d40fb122d7083b9735728df15120ed682431be79
|
scripts/make_fhes_seeds.py
|
scripts/make_fhes_seeds.py
|
import yaml
import sys
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
from fermipy.catalog import *
from fermipy.utils import *
def get_coord(name,tab):
row = tab[tab['Source_Name'] == name]
return SkyCoord(float(row['RAJ2000']), float(row['DEJ2000']),unit='deg')
def avg_coords(coords):
xyz = np.zeros(3)
for t in coords:
xyz += t.cartesian.xyz
xyz /= np.sum(xyz**2)**0.5
c = SkyCoord(xyz[0], xyz[1], xyz[2],representation='cartesian')
c.representation='spherical'
return c
tab = Table.read(sys.argv[1])
src_names = []
m = np.abs(tab['glat']) < 0.
#m |= (tab['fit_ext_gauss_ts_ext'] > 9.0)
#m |= (tab['fit_ext_disk_ts_ext'] > 9.0)
m |= (tab['fit_halo_ts'] > 16.0)
#m |= (tab['ts'] > 20000.0)
for row in tab[m]:
src_names += [row['codename']]
src_names = sorted(list(set(src_names)))
o = {}
for name in src_names:
#coords = [get_coord(t,cat.table) for t in names]
#c0 = avg_coords(coords)
print(name)
#print(create_source_name(c0))
names = [name]
row = tab[tab['codename'] == names[0].lower().replace(' ','_')]
c0 = SkyCoord(row['ra'],row['dec'],unit='deg')
name = create_source_name(c0).replace('PS','FHES') + 'e'
#print(c0.ra.deg,c0.dec.deg)
#print(names[0])
#print(row['codename'])
src = {'name' : name,
'ra' : float(c0.ra.deg), 'dec' : float(c0.dec.deg),
'SpectrumType' : 'PowerLaw', 'SpatialModel' : 'RadialGaussian',
'SpatialWidth' : float(row['fit_halo_r68']),
'Index' : float(row['fit_halo_index'])}
o[name.lower().replace(' ','_')] = {'selection' : {'target' : name},
'model' : {'sources' : [src]} }
yaml.dump(o,open('out.yaml','w'))
|
Create script for generating analysis seeds.
|
Create script for generating analysis seeds.
|
Python
|
bsd-3-clause
|
woodmd/haloanalysis,woodmd/haloanalysis
|
Create script for generating analysis seeds.
|
import yaml
import sys
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
from fermipy.catalog import *
from fermipy.utils import *
def get_coord(name,tab):
row = tab[tab['Source_Name'] == name]
return SkyCoord(float(row['RAJ2000']), float(row['DEJ2000']),unit='deg')
def avg_coords(coords):
xyz = np.zeros(3)
for t in coords:
xyz += t.cartesian.xyz
xyz /= np.sum(xyz**2)**0.5
c = SkyCoord(xyz[0], xyz[1], xyz[2],representation='cartesian')
c.representation='spherical'
return c
tab = Table.read(sys.argv[1])
src_names = []
m = np.abs(tab['glat']) < 0.
#m |= (tab['fit_ext_gauss_ts_ext'] > 9.0)
#m |= (tab['fit_ext_disk_ts_ext'] > 9.0)
m |= (tab['fit_halo_ts'] > 16.0)
#m |= (tab['ts'] > 20000.0)
for row in tab[m]:
src_names += [row['codename']]
src_names = sorted(list(set(src_names)))
o = {}
for name in src_names:
#coords = [get_coord(t,cat.table) for t in names]
#c0 = avg_coords(coords)
print(name)
#print(create_source_name(c0))
names = [name]
row = tab[tab['codename'] == names[0].lower().replace(' ','_')]
c0 = SkyCoord(row['ra'],row['dec'],unit='deg')
name = create_source_name(c0).replace('PS','FHES') + 'e'
#print(c0.ra.deg,c0.dec.deg)
#print(names[0])
#print(row['codename'])
src = {'name' : name,
'ra' : float(c0.ra.deg), 'dec' : float(c0.dec.deg),
'SpectrumType' : 'PowerLaw', 'SpatialModel' : 'RadialGaussian',
'SpatialWidth' : float(row['fit_halo_r68']),
'Index' : float(row['fit_halo_index'])}
o[name.lower().replace(' ','_')] = {'selection' : {'target' : name},
'model' : {'sources' : [src]} }
yaml.dump(o,open('out.yaml','w'))
|
<commit_before><commit_msg>Create script for generating analysis seeds.<commit_after>
|
import yaml
import sys
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
from fermipy.catalog import *
from fermipy.utils import *
def get_coord(name,tab):
row = tab[tab['Source_Name'] == name]
return SkyCoord(float(row['RAJ2000']), float(row['DEJ2000']),unit='deg')
def avg_coords(coords):
xyz = np.zeros(3)
for t in coords:
xyz += t.cartesian.xyz
xyz /= np.sum(xyz**2)**0.5
c = SkyCoord(xyz[0], xyz[1], xyz[2],representation='cartesian')
c.representation='spherical'
return c
tab = Table.read(sys.argv[1])
src_names = []
m = np.abs(tab['glat']) < 0.
#m |= (tab['fit_ext_gauss_ts_ext'] > 9.0)
#m |= (tab['fit_ext_disk_ts_ext'] > 9.0)
m |= (tab['fit_halo_ts'] > 16.0)
#m |= (tab['ts'] > 20000.0)
for row in tab[m]:
src_names += [row['codename']]
src_names = sorted(list(set(src_names)))
o = {}
for name in src_names:
#coords = [get_coord(t,cat.table) for t in names]
#c0 = avg_coords(coords)
print(name)
#print(create_source_name(c0))
names = [name]
row = tab[tab['codename'] == names[0].lower().replace(' ','_')]
c0 = SkyCoord(row['ra'],row['dec'],unit='deg')
name = create_source_name(c0).replace('PS','FHES') + 'e'
#print(c0.ra.deg,c0.dec.deg)
#print(names[0])
#print(row['codename'])
src = {'name' : name,
'ra' : float(c0.ra.deg), 'dec' : float(c0.dec.deg),
'SpectrumType' : 'PowerLaw', 'SpatialModel' : 'RadialGaussian',
'SpatialWidth' : float(row['fit_halo_r68']),
'Index' : float(row['fit_halo_index'])}
o[name.lower().replace(' ','_')] = {'selection' : {'target' : name},
'model' : {'sources' : [src]} }
yaml.dump(o,open('out.yaml','w'))
|
Create script for generating analysis seeds.import yaml
import sys
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
from fermipy.catalog import *
from fermipy.utils import *
def get_coord(name,tab):
row = tab[tab['Source_Name'] == name]
return SkyCoord(float(row['RAJ2000']), float(row['DEJ2000']),unit='deg')
def avg_coords(coords):
xyz = np.zeros(3)
for t in coords:
xyz += t.cartesian.xyz
xyz /= np.sum(xyz**2)**0.5
c = SkyCoord(xyz[0], xyz[1], xyz[2],representation='cartesian')
c.representation='spherical'
return c
tab = Table.read(sys.argv[1])
src_names = []
m = np.abs(tab['glat']) < 0.
#m |= (tab['fit_ext_gauss_ts_ext'] > 9.0)
#m |= (tab['fit_ext_disk_ts_ext'] > 9.0)
m |= (tab['fit_halo_ts'] > 16.0)
#m |= (tab['ts'] > 20000.0)
for row in tab[m]:
src_names += [row['codename']]
src_names = sorted(list(set(src_names)))
o = {}
for name in src_names:
#coords = [get_coord(t,cat.table) for t in names]
#c0 = avg_coords(coords)
print(name)
#print(create_source_name(c0))
names = [name]
row = tab[tab['codename'] == names[0].lower().replace(' ','_')]
c0 = SkyCoord(row['ra'],row['dec'],unit='deg')
name = create_source_name(c0).replace('PS','FHES') + 'e'
#print(c0.ra.deg,c0.dec.deg)
#print(names[0])
#print(row['codename'])
src = {'name' : name,
'ra' : float(c0.ra.deg), 'dec' : float(c0.dec.deg),
'SpectrumType' : 'PowerLaw', 'SpatialModel' : 'RadialGaussian',
'SpatialWidth' : float(row['fit_halo_r68']),
'Index' : float(row['fit_halo_index'])}
o[name.lower().replace(' ','_')] = {'selection' : {'target' : name},
'model' : {'sources' : [src]} }
yaml.dump(o,open('out.yaml','w'))
|
<commit_before><commit_msg>Create script for generating analysis seeds.<commit_after>import yaml
import sys
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
from fermipy.catalog import *
from fermipy.utils import *
def get_coord(name,tab):
row = tab[tab['Source_Name'] == name]
return SkyCoord(float(row['RAJ2000']), float(row['DEJ2000']),unit='deg')
def avg_coords(coords):
xyz = np.zeros(3)
for t in coords:
xyz += t.cartesian.xyz
xyz /= np.sum(xyz**2)**0.5
c = SkyCoord(xyz[0], xyz[1], xyz[2],representation='cartesian')
c.representation='spherical'
return c
tab = Table.read(sys.argv[1])
src_names = []
m = np.abs(tab['glat']) < 0.
#m |= (tab['fit_ext_gauss_ts_ext'] > 9.0)
#m |= (tab['fit_ext_disk_ts_ext'] > 9.0)
m |= (tab['fit_halo_ts'] > 16.0)
#m |= (tab['ts'] > 20000.0)
for row in tab[m]:
src_names += [row['codename']]
src_names = sorted(list(set(src_names)))
o = {}
for name in src_names:
#coords = [get_coord(t,cat.table) for t in names]
#c0 = avg_coords(coords)
print(name)
#print(create_source_name(c0))
names = [name]
row = tab[tab['codename'] == names[0].lower().replace(' ','_')]
c0 = SkyCoord(row['ra'],row['dec'],unit='deg')
name = create_source_name(c0).replace('PS','FHES') + 'e'
#print(c0.ra.deg,c0.dec.deg)
#print(names[0])
#print(row['codename'])
src = {'name' : name,
'ra' : float(c0.ra.deg), 'dec' : float(c0.dec.deg),
'SpectrumType' : 'PowerLaw', 'SpatialModel' : 'RadialGaussian',
'SpatialWidth' : float(row['fit_halo_r68']),
'Index' : float(row['fit_halo_index'])}
o[name.lower().replace(' ','_')] = {'selection' : {'target' : name},
'model' : {'sources' : [src]} }
yaml.dump(o,open('out.yaml','w'))
|
|
d1ee86414d45c571571d75434b8c2256b0120732
|
py/binary-tree-tilt.py
|
py/binary-tree-tilt.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.do_findTilt(root)[1]
def do_findTilt(self, cur):
if cur is None:
return (0, 0)
lsum, ltiltsum = self.do_findTilt(cur.left)
rsum, rtiltsum = self.do_findTilt(cur.right)
tilt = abs(lsum - rsum)
return lsum + rsum + cur.val, ltiltsum + rtiltsum + tilt
|
Add py solution for 563. Binary Tree Tilt
|
Add py solution for 563. Binary Tree Tilt
563. Binary Tree Tilt: https://leetcode.com/problems/binary-tree-tilt/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 563. Binary Tree Tilt
563. Binary Tree Tilt: https://leetcode.com/problems/binary-tree-tilt/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.do_findTilt(root)[1]
def do_findTilt(self, cur):
if cur is None:
return (0, 0)
lsum, ltiltsum = self.do_findTilt(cur.left)
rsum, rtiltsum = self.do_findTilt(cur.right)
tilt = abs(lsum - rsum)
return lsum + rsum + cur.val, ltiltsum + rtiltsum + tilt
|
<commit_before><commit_msg>Add py solution for 563. Binary Tree Tilt
563. Binary Tree Tilt: https://leetcode.com/problems/binary-tree-tilt/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.do_findTilt(root)[1]
def do_findTilt(self, cur):
if cur is None:
return (0, 0)
lsum, ltiltsum = self.do_findTilt(cur.left)
rsum, rtiltsum = self.do_findTilt(cur.right)
tilt = abs(lsum - rsum)
return lsum + rsum + cur.val, ltiltsum + rtiltsum + tilt
|
Add py solution for 563. Binary Tree Tilt
563. Binary Tree Tilt: https://leetcode.com/problems/binary-tree-tilt/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.do_findTilt(root)[1]
def do_findTilt(self, cur):
if cur is None:
return (0, 0)
lsum, ltiltsum = self.do_findTilt(cur.left)
rsum, rtiltsum = self.do_findTilt(cur.right)
tilt = abs(lsum - rsum)
return lsum + rsum + cur.val, ltiltsum + rtiltsum + tilt
|
<commit_before><commit_msg>Add py solution for 563. Binary Tree Tilt
563. Binary Tree Tilt: https://leetcode.com/problems/binary-tree-tilt/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findTilt(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.do_findTilt(root)[1]
def do_findTilt(self, cur):
if cur is None:
return (0, 0)
lsum, ltiltsum = self.do_findTilt(cur.left)
rsum, rtiltsum = self.do_findTilt(cur.right)
tilt = abs(lsum - rsum)
return lsum + rsum + cur.val, ltiltsum + rtiltsum + tilt
|
|
925ff38344b5058ce196877e1fdcf79a1d1f6719
|
ue4/tests/test_messaging.py
|
ue4/tests/test_messaging.py
|
import pytest
from m2u.ue4 import connection
def test_send_message_size():
"""Send a big message, larger than buffer size, so the server has to
read multiple chunks.
"""
message = "TestMessageSize " + ("abcdefg" * 5000)
connection.connect()
result = connection.send_message(message)
assert result == str(len(message))
connection.disconnect()
|
Add basic test for checking messages are received correctly
|
Add basic test for checking messages are received correctly
|
Python
|
mit
|
m2u/m2u
|
Add basic test for checking messages are received correctly
|
import pytest
from m2u.ue4 import connection
def test_send_message_size():
"""Send a big message, larger than buffer size, so the server has to
read multiple chunks.
"""
message = "TestMessageSize " + ("abcdefg" * 5000)
connection.connect()
result = connection.send_message(message)
assert result == str(len(message))
connection.disconnect()
|
<commit_before><commit_msg>Add basic test for checking messages are received correctly<commit_after>
|
import pytest
from m2u.ue4 import connection
def test_send_message_size():
"""Send a big message, larger than buffer size, so the server has to
read multiple chunks.
"""
message = "TestMessageSize " + ("abcdefg" * 5000)
connection.connect()
result = connection.send_message(message)
assert result == str(len(message))
connection.disconnect()
|
Add basic test for checking messages are received correctlyimport pytest
from m2u.ue4 import connection
def test_send_message_size():
"""Send a big message, larger than buffer size, so the server has to
read multiple chunks.
"""
message = "TestMessageSize " + ("abcdefg" * 5000)
connection.connect()
result = connection.send_message(message)
assert result == str(len(message))
connection.disconnect()
|
<commit_before><commit_msg>Add basic test for checking messages are received correctly<commit_after>import pytest
from m2u.ue4 import connection
def test_send_message_size():
"""Send a big message, larger than buffer size, so the server has to
read multiple chunks.
"""
message = "TestMessageSize " + ("abcdefg" * 5000)
connection.connect()
result = connection.send_message(message)
assert result == str(len(message))
connection.disconnect()
|
|
ff700e5d6fc5e0c5062f687110563d7f0312a3f0
|
server/tests/test_admin.py
|
server/tests/test_admin.py
|
"""General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {
'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem',
'updatehistory', 'userprofile'}
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='test')
def test_no_access(self):
"""Test that unauthenticated requests redirected to login."""
for path in self.admin_endpoints:
response = self.client.get('/admin/server/{}'.format(path))
# Redirect to login page.
self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)
def test_ro_access(self):
"""Test that ro requests are rejected.
RO users should not have access to the admin site (unless they have
`is_staff = True`).
"""
self.user.user_profile = 'RO'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path),
msg=msg)
def test_ga_access(self):
"""Ensure GA userprofile grants admin page access."""
self.user.user_profile = 'GA'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url, follow=True)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
|
Set up test suite to ensure server admin routes are added.
|
Set up test suite to ensure server admin routes are added.
|
Python
|
apache-2.0
|
sheagcraig/sal,salopensource/sal,salopensource/sal,sheagcraig/sal,salopensource/sal,sheagcraig/sal,sheagcraig/sal,salopensource/sal
|
Set up test suite to ensure server admin routes are added.
|
"""General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {
'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem',
'updatehistory', 'userprofile'}
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='test')
def test_no_access(self):
"""Test that unauthenticated requests redirected to login."""
for path in self.admin_endpoints:
response = self.client.get('/admin/server/{}'.format(path))
# Redirect to login page.
self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)
def test_ro_access(self):
"""Test that ro requests are rejected.
RO users should not have access to the admin site (unless they have
`is_staff = True`).
"""
self.user.user_profile = 'RO'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path),
msg=msg)
def test_ga_access(self):
"""Ensure GA userprofile grants admin page access."""
self.user.user_profile = 'GA'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url, follow=True)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
|
<commit_before><commit_msg>Set up test suite to ensure server admin routes are added.<commit_after>
|
"""General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {
'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem',
'updatehistory', 'userprofile'}
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='test')
def test_no_access(self):
"""Test that unauthenticated requests redirected to login."""
for path in self.admin_endpoints:
response = self.client.get('/admin/server/{}'.format(path))
# Redirect to login page.
self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)
def test_ro_access(self):
"""Test that ro requests are rejected.
RO users should not have access to the admin site (unless they have
`is_staff = True`).
"""
self.user.user_profile = 'RO'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path),
msg=msg)
def test_ga_access(self):
"""Ensure GA userprofile grants admin page access."""
self.user.user_profile = 'GA'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url, follow=True)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
|
Set up test suite to ensure server admin routes are added."""General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {
'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem',
'updatehistory', 'userprofile'}
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='test')
def test_no_access(self):
"""Test that unauthenticated requests redirected to login."""
for path in self.admin_endpoints:
response = self.client.get('/admin/server/{}'.format(path))
# Redirect to login page.
self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)
def test_ro_access(self):
"""Test that ro requests are rejected.
RO users should not have access to the admin site (unless they have
`is_staff = True`).
"""
self.user.user_profile = 'RO'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path),
msg=msg)
def test_ga_access(self):
"""Ensure GA userprofile grants admin page access."""
self.user.user_profile = 'GA'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url, follow=True)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
|
<commit_before><commit_msg>Set up test suite to ensure server admin routes are added.<commit_after>"""General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {
'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem',
'updatehistory', 'userprofile'}
def setUp(self):
self.client = Client()
self.user = User.objects.create(username='test')
def test_no_access(self):
"""Test that unauthenticated requests redirected to login."""
for path in self.admin_endpoints:
response = self.client.get('/admin/server/{}'.format(path))
# Redirect to login page.
self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)
def test_ro_access(self):
"""Test that ro requests are rejected.
RO users should not have access to the admin site (unless they have
`is_staff = True`).
"""
self.user.user_profile = 'RO'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path),
msg=msg)
def test_ga_access(self):
"""Ensure GA userprofile grants admin page access."""
self.user.user_profile = 'GA'
self.user.save()
self.client.force_login(self.user)
for path in self.admin_endpoints:
url = '/admin/server/{}/'.format(path)
response = self.client.get(url, follow=True)
msg = 'Failed for path: "{}"'.format(path)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
|
|
0c29b431a0f5ce9115d7acdcaaabbd27546949c6
|
chmvh_website/contact/tests/views/test_success_view.py
|
chmvh_website/contact/tests/views/test_success_view.py
|
from django.test import RequestFactory
from django.urls import reverse
from contact.views import SuccessView
class TestSuccessView(object):
"""Test cases for the success view"""
url = reverse('contact:success')
def test_get(self, rf: RequestFactory):
"""Test sending a GET request to the view.
Sending a GET request to the view should render the success
page.
"""
request = rf.get(self.url)
response = SuccessView.as_view()(request)
assert response.status_code == 200
assert 'contact/success.html' in response.template_name
|
Add test for contact success view.
|
Add test for contact success view.
|
Python
|
mit
|
cdriehuys/chmvh-website,cdriehuys/chmvh-website,cdriehuys/chmvh-website
|
Add test for contact success view.
|
from django.test import RequestFactory
from django.urls import reverse
from contact.views import SuccessView
class TestSuccessView(object):
"""Test cases for the success view"""
url = reverse('contact:success')
def test_get(self, rf: RequestFactory):
"""Test sending a GET request to the view.
Sending a GET request to the view should render the success
page.
"""
request = rf.get(self.url)
response = SuccessView.as_view()(request)
assert response.status_code == 200
assert 'contact/success.html' in response.template_name
|
<commit_before><commit_msg>Add test for contact success view.<commit_after>
|
from django.test import RequestFactory
from django.urls import reverse
from contact.views import SuccessView
class TestSuccessView(object):
"""Test cases for the success view"""
url = reverse('contact:success')
def test_get(self, rf: RequestFactory):
"""Test sending a GET request to the view.
Sending a GET request to the view should render the success
page.
"""
request = rf.get(self.url)
response = SuccessView.as_view()(request)
assert response.status_code == 200
assert 'contact/success.html' in response.template_name
|
Add test for contact success view.from django.test import RequestFactory
from django.urls import reverse
from contact.views import SuccessView
class TestSuccessView(object):
"""Test cases for the success view"""
url = reverse('contact:success')
def test_get(self, rf: RequestFactory):
"""Test sending a GET request to the view.
Sending a GET request to the view should render the success
page.
"""
request = rf.get(self.url)
response = SuccessView.as_view()(request)
assert response.status_code == 200
assert 'contact/success.html' in response.template_name
|
<commit_before><commit_msg>Add test for contact success view.<commit_after>from django.test import RequestFactory
from django.urls import reverse
from contact.views import SuccessView
class TestSuccessView(object):
"""Test cases for the success view"""
url = reverse('contact:success')
def test_get(self, rf: RequestFactory):
"""Test sending a GET request to the view.
Sending a GET request to the view should render the success
page.
"""
request = rf.get(self.url)
response = SuccessView.as_view()(request)
assert response.status_code == 200
assert 'contact/success.html' in response.template_name
|
|
c650d64247d63d2af7a8168795e7edae5c9ef6ef
|
realtime-plot.py
|
realtime-plot.py
|
import time, random
import math
from collections import deque
start = time.time()
class RealtimePlot:
def __init__(self, axes, max_entries = 100):
self.axis_x = deque(maxlen=max_entries)
self.axis_y = deque(maxlen=max_entries)
self.axes = axes
self.max_entries = max_entries
self.lineplot, = axes.plot([], [], "ro-")
self.axes.set_autoscaley_on(True)
def add(self, x, y):
self.axis_x.append(x)
self.axis_y.append(y)
self.lineplot.set_data(self.axis_x, self.axis_y)
self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
def animate(self, figure, callback, interval = 50):
import matplotlib.animation as animation
def wrapper(frame_index):
self.add(*callback(frame_index))
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
return self.lineplot
animation.FuncAnimation(figure, wrapper, interval=interval)
def main():
from matplotlib import pyplot as plt
fig, axes = plt.subplots()
display = RealtimePlot(axes)
display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
plt.show()
fig, axes = plt.subplots()
display = RealtimePlot(axes)
while True:
display.add(time.time() - start, random.random() * 100)
plt.pause(0.001)
if __name__ == "__main__": main()
|
Add realtime chart plotting example
|
Add realtime chart plotting example
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add realtime chart plotting example
|
import time, random
import math
from collections import deque
start = time.time()
class RealtimePlot:
def __init__(self, axes, max_entries = 100):
self.axis_x = deque(maxlen=max_entries)
self.axis_y = deque(maxlen=max_entries)
self.axes = axes
self.max_entries = max_entries
self.lineplot, = axes.plot([], [], "ro-")
self.axes.set_autoscaley_on(True)
def add(self, x, y):
self.axis_x.append(x)
self.axis_y.append(y)
self.lineplot.set_data(self.axis_x, self.axis_y)
self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
def animate(self, figure, callback, interval = 50):
import matplotlib.animation as animation
def wrapper(frame_index):
self.add(*callback(frame_index))
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
return self.lineplot
animation.FuncAnimation(figure, wrapper, interval=interval)
def main():
from matplotlib import pyplot as plt
fig, axes = plt.subplots()
display = RealtimePlot(axes)
display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
plt.show()
fig, axes = plt.subplots()
display = RealtimePlot(axes)
while True:
display.add(time.time() - start, random.random() * 100)
plt.pause(0.001)
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add realtime chart plotting example<commit_after>
|
import time, random
import math
from collections import deque
start = time.time()
class RealtimePlot:
def __init__(self, axes, max_entries = 100):
self.axis_x = deque(maxlen=max_entries)
self.axis_y = deque(maxlen=max_entries)
self.axes = axes
self.max_entries = max_entries
self.lineplot, = axes.plot([], [], "ro-")
self.axes.set_autoscaley_on(True)
def add(self, x, y):
self.axis_x.append(x)
self.axis_y.append(y)
self.lineplot.set_data(self.axis_x, self.axis_y)
self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
def animate(self, figure, callback, interval = 50):
import matplotlib.animation as animation
def wrapper(frame_index):
self.add(*callback(frame_index))
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
return self.lineplot
animation.FuncAnimation(figure, wrapper, interval=interval)
def main():
from matplotlib import pyplot as plt
fig, axes = plt.subplots()
display = RealtimePlot(axes)
display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
plt.show()
fig, axes = plt.subplots()
display = RealtimePlot(axes)
while True:
display.add(time.time() - start, random.random() * 100)
plt.pause(0.001)
if __name__ == "__main__": main()
|
Add realtime chart plotting exampleimport time, random
import math
from collections import deque
start = time.time()
class RealtimePlot:
def __init__(self, axes, max_entries = 100):
self.axis_x = deque(maxlen=max_entries)
self.axis_y = deque(maxlen=max_entries)
self.axes = axes
self.max_entries = max_entries
self.lineplot, = axes.plot([], [], "ro-")
self.axes.set_autoscaley_on(True)
def add(self, x, y):
self.axis_x.append(x)
self.axis_y.append(y)
self.lineplot.set_data(self.axis_x, self.axis_y)
self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
def animate(self, figure, callback, interval = 50):
import matplotlib.animation as animation
def wrapper(frame_index):
self.add(*callback(frame_index))
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
return self.lineplot
animation.FuncAnimation(figure, wrapper, interval=interval)
def main():
from matplotlib import pyplot as plt
fig, axes = plt.subplots()
display = RealtimePlot(axes)
display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
plt.show()
fig, axes = plt.subplots()
display = RealtimePlot(axes)
while True:
display.add(time.time() - start, random.random() * 100)
plt.pause(0.001)
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add realtime chart plotting example<commit_after>import time, random
import math
from collections import deque
start = time.time()
class RealtimePlot:
def __init__(self, axes, max_entries = 100):
self.axis_x = deque(maxlen=max_entries)
self.axis_y = deque(maxlen=max_entries)
self.axes = axes
self.max_entries = max_entries
self.lineplot, = axes.plot([], [], "ro-")
self.axes.set_autoscaley_on(True)
def add(self, x, y):
self.axis_x.append(x)
self.axis_y.append(y)
self.lineplot.set_data(self.axis_x, self.axis_y)
self.axes.set_xlim(self.axis_x[0], self.axis_x[-1] + 1e-15)
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
def animate(self, figure, callback, interval = 50):
import matplotlib.animation as animation
def wrapper(frame_index):
self.add(*callback(frame_index))
self.axes.relim(); self.axes.autoscale_view() # rescale the y-axis
return self.lineplot
animation.FuncAnimation(figure, wrapper, interval=interval)
def main():
from matplotlib import pyplot as plt
fig, axes = plt.subplots()
display = RealtimePlot(axes)
display.animate(fig, lambda frame_index: (time.time() - start, random.random() * 100))
plt.show()
fig, axes = plt.subplots()
display = RealtimePlot(axes)
while True:
display.add(time.time() - start, random.random() * 100)
plt.pause(0.001)
if __name__ == "__main__": main()
|
|
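A note on the realtime-plot record above: matplotlib requires the caller to keep a reference to the FuncAnimation object, so the animate() method as written can produce a figure that never updates once its local animation object is garbage collected. The sketch below is a minimal, self-contained variant that retains that reference; the 100-sample window and 50 ms interval are illustrative choices, not taken from the commit.

import time, random
from collections import deque
import matplotlib.pyplot as plt
import matplotlib.animation as animation

start = time.time()
xs, ys = deque(maxlen=100), deque(maxlen=100)
fig, axes = plt.subplots()
line, = axes.plot([], [], "ro-")

def update(frame_index):
    # Append one new sample, then rescale both axes to the visible window.
    xs.append(time.time() - start)
    ys.append(random.random() * 100)
    line.set_data(xs, ys)
    axes.relim()
    axes.autoscale_view()
    return line,

# Keeping the animation object in a variable prevents it from being
# garbage collected before the event loop starts drawing frames.
anim = animation.FuncAnimation(fig, update, interval=50)
plt.show()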
f7e4ca11c7bfc35bf0fd6becd2a5d5fdd2ca5ed5
|
src/main/python/partition_data.py
|
src/main/python/partition_data.py
|
import csv;
import random;
import sys;
in_file = str(sys.argv[1])
out_file = str(sys.argv[2])
num_partitions = int(sys.argv[3])
header = [];
partitions = [];
for i in range(num_partitions):
partitions.append([])
# Load all the training rows
row_num = 0;
with open(in_file) as file:
reader = csv.reader(file);
header = reader.next();
for row in reader:
partitions[row_num % num_partitions].append(row);
row_num += 1;
# Write test and train files for k partitions
for i in range(num_partitions):
train_rows = []
test_rows = partitions[i];
for j in range(num_partitions):
if i != j:
for row in partitions[j]:
train_rows.append(row);
with open(out_file+'_k'+str(i+1)+'_train.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in train_rows:
writer.writerow(row)
with open(out_file+'_k'+str(i+1)+'_test.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in test_rows:
writer.writerow(row)
|
Add a script to split data with partitions.
|
Add a script to split data with partitions.
|
Python
|
mit
|
juckele/ddr-grader,juckele/ddr-grader
|
Add a script to split data with partitions.
|
import csv;
import random;
import sys;
in_file = str(sys.argv[1])
out_file = str(sys.argv[2])
num_partitions = int(sys.argv[3])
header = [];
partitions = [];
for i in range(num_partitions):
partitions.append([])
# Load all the training rows
row_num = 0;
with open(in_file) as file:
reader = csv.reader(file);
header = reader.next();
for row in reader:
partitions[row_num % num_partitions].append(row);
row_num += 1;
# Write test and train files for k partitions
for i in range(num_partitions):
train_rows = []
test_rows = partitions[i];
for j in range(num_partitions):
if i != j:
for row in partitions[j]:
train_rows.append(row);
with open(out_file+'_k'+str(i+1)+'_train.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in train_rows:
writer.writerow(row)
with open(out_file+'_k'+str(i+1)+'_test.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in test_rows:
writer.writerow(row)
|
<commit_before><commit_msg>Add a script to split data with partitions.<commit_after>
|
import csv;
import random;
import sys;
in_file = str(sys.argv[1])
out_file = str(sys.argv[2])
num_partitions = int(sys.argv[3])
header = [];
partitions = [];
for i in range(num_partitions):
partitions.append([])
# Load all the training rows
row_num = 0;
with open(in_file) as file:
reader = csv.reader(file);
header = reader.next();
for row in reader:
partitions[row_num % num_partitions].append(row);
row_num += 1;
# Write test and train files for k partitions
for i in range(num_partitions):
train_rows = []
test_rows = partitions[i];
for j in range(num_partitions):
if i != j:
for row in partitions[j]:
train_rows.append(row);
with open(out_file+'_k'+str(i+1)+'_train.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in train_rows:
writer.writerow(row)
with open(out_file+'_k'+str(i+1)+'_test.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in test_rows:
writer.writerow(row)
|
Add a script to split data with partitions.import csv;
import random;
import sys;
in_file = str(sys.argv[1])
out_file = str(sys.argv[2])
num_partitions = int(sys.argv[3])
header = [];
partitions = [];
for i in range(num_partitions):
partitions.append([])
# Load all the training rows
row_num = 0;
with open(in_file) as file:
reader = csv.reader(file);
header = reader.next();
for row in reader:
partitions[row_num % num_partitions].append(row);
row_num += 1;
# Write test and train files for k partitions
for i in range(num_partitions):
train_rows = []
test_rows = partitions[i];
for j in range(num_partitions):
if i != j:
for row in partitions[j]:
train_rows.append(row);
with open(out_file+'_k'+str(i+1)+'_train.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in train_rows:
writer.writerow(row)
with open(out_file+'_k'+str(i+1)+'_test.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in test_rows:
writer.writerow(row)
|
<commit_before><commit_msg>Add a script to split data with partitions.<commit_after>import csv;
import random;
import sys;
in_file = str(sys.argv[1])
out_file = str(sys.argv[2])
num_partitions = int(sys.argv[3])
header = [];
partitions = [];
for i in range(num_partitions):
partitions.append([])
# Load all the training rows
row_num = 0;
with open(in_file) as file:
reader = csv.reader(file);
header = reader.next();
for row in reader:
partitions[row_num % num_partitions].append(row);
row_num += 1;
# Write test and train files for k partitions
for i in range(num_partitions):
train_rows = []
test_rows = partitions[i];
for j in range(num_partitions):
if i != j:
for row in partitions[j]:
train_rows.append(row);
with open(out_file+'_k'+str(i+1)+'_train.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in train_rows:
writer.writerow(row)
with open(out_file+'_k'+str(i+1)+'_test.csv', 'wb') as ofile:
writer = csv.writer(ofile)
writer.writerow(header)
for row in test_rows:
writer.writerow(row)
|
|
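The partition_data.py record above is a round-robin k-fold splitter: row i goes to partition i % k, and each partition in turn becomes the test file while the remaining rows become the matching train file. It is written for Python 2 (reader.next(), files opened in 'wb' mode). A minimal Python 3 sketch of the same idea follows; the file names and k=5 are placeholders, not taken from the commit.

import csv

def kfold_split(in_file, out_prefix, k=5):
    # Read the header, then distribute data rows round-robin over k partitions.
    with open(in_file, newline="") as f:
        reader = csv.reader(f)
        header = next(reader)
        partitions = [[] for _ in range(k)]
        for i, row in enumerate(reader):
            partitions[i % k].append(row)
    # Partition i is the test fold; every other partition joins the train fold.
    for i in range(k):
        train = [row for j in range(k) if j != i for row in partitions[j]]
        for suffix, rows in (("train", train), ("test", partitions[i])):
            with open(f"{out_prefix}_k{i + 1}_{suffix}.csv", "w", newline="") as out:
                writer = csv.writer(out)
                writer.writerow(header)
                writer.writerows(rows)

# Example invocation (placeholder file name):
# kfold_split("ratings.csv", "ratings", k=5)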
645507ed9ec43b354880673fbc75afe169ef6697
|
tests/unit/test_handlers.py
|
tests/unit/test_handlers.py
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
Add test capturing bad implementation of contains handler.
|
Add test capturing bad implementation of contains handler.
|
Python
|
bsd-3-clause
|
jawilson/pmxbot,jawilson/pmxbot
|
Add test capturing bad implementation of contains handler.
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
<commit_before><commit_msg>Add test capturing bad implementation of contains handler.<commit_after>
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
Add test capturing bad implementation of contains handler.from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
<commit_before><commit_msg>Add test capturing bad implementation of contains handler.<commit_after>from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
|
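The pmxbot record above adds a one-assertion regression test for ContainsHandler.match. The toy class below is not pmxbot's implementation; it is only a sketch of the behaviour the test pins down, namely that a contains-style handler with no rate configured matches any message containing its trigger string.

import random

class ToyContainsHandler:
    def __init__(self, name, func, rate=None):
        self.name = name
        self.func = func
        self.rate = rate  # None means "always fire when the trigger appears"

    def match(self, message, channel):
        if self.name not in message:
            return False
        # No rate configured: always match. Otherwise fire probabilistically.
        return True if self.rate is None else random.random() < self.rate

handler = ToyContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')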
b3e9075e819402f93f7dc2e29b61e3e621ab7355
|
impy/imputations/tests/test_averaging_imputations.py
|
impy/imputations/tests/test_averaging_imputations.py
|
"""test_averaging_imputations.py"""
import unittest
import numpy as np
from impy.imputations import mean_imputation
from impy.imputations import mode_imputation
from impy.imputations import median_imputation
from impy.datasets import random_int
class TestAveraging(unittest.TestCase):
""" Tests for Averaging """
def setUp(self):
self.data = random_int(missingness="complete")
def test_mean_return_type(self):
"""Mean Imputation Return Type"""
self.assertEqual(str(type(mean_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mode_return_type(self):
"""Mode Imputation Return Type"""
self.assertEqual(str(type(mode_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_median_return_type(self):
"""Median Imputation Return Type"""
self.assertEqual(str(type(median_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mean_fill(self):
""" Mean Imputation Fill Complete Data(nothing should happen)"""
actual = mean_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_mode_fill(self):
""" Mode Imputation Fill Complete Data(nothing should happen)"""
actual = mode_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_median_fill(self):
""" Median Imputation Fill Complete Data(nothing should happen)"""
actual = median_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
if __name__ == "__main__":
unittest.main()
|
Add unit tests for avging imputations
|
Add unit tests for avging imputations
|
Python
|
mit
|
eltonlaw/impyute
|
Add unit tests for avging imputations
|
"""test_averaging_imputations.py"""
import unittest
import numpy as np
from impy.imputations import mean_imputation
from impy.imputations import mode_imputation
from impy.imputations import median_imputation
from impy.datasets import random_int
class TestAveraging(unittest.TestCase):
""" Tests for Averaging """
def setUp(self):
self.data = random_int(missingness="complete")
def test_mean_return_type(self):
"""Mean Imputation Return Type"""
self.assertEqual(str(type(mean_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mode_return_type(self):
"""Mode Imputation Return Type"""
self.assertEqual(str(type(mode_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_median_return_type(self):
"""Median Imputation Return Type"""
self.assertEqual(str(type(median_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mean_fill(self):
""" Mean Imputation Fill Complete Data(nothing should happen)"""
actual = mean_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_mode_fill(self):
""" Mode Imputation Fill Complete Data(nothing should happen)"""
actual = mode_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_median_fill(self):
""" Median Imputation Fill Complete Data(nothing should happen)"""
actual = median_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for avging imputations<commit_after>
|
"""test_averaging_imputations.py"""
import unittest
import numpy as np
from impy.imputations import mean_imputation
from impy.imputations import mode_imputation
from impy.imputations import median_imputation
from impy.datasets import random_int
class TestAveraging(unittest.TestCase):
""" Tests for Averaging """
def setUp(self):
self.data = random_int(missingness="complete")
def test_mean_return_type(self):
"""Mean Imputation Return Type"""
self.assertEqual(str(type(mean_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mode_return_type(self):
"""Mode Imputation Return Type"""
self.assertEqual(str(type(mode_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_median_return_type(self):
"""Median Imputation Return Type"""
self.assertEqual(str(type(median_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mean_fill(self):
""" Mean Imputation Fill Complete Data(nothing should happen)"""
actual = mean_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_mode_fill(self):
""" Mode Imputation Fill Complete Data(nothing should happen)"""
actual = mode_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_median_fill(self):
""" Median Imputation Fill Complete Data(nothing should happen)"""
actual = median_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
if __name__ == "__main__":
unittest.main()
|
Add unit tests for avging imputations"""test_averaging_imputations.py"""
import unittest
import numpy as np
from impy.imputations import mean_imputation
from impy.imputations import mode_imputation
from impy.imputations import median_imputation
from impy.datasets import random_int
class TestAveraging(unittest.TestCase):
""" Tests for Averaging """
def setUp(self):
self.data = random_int(missingness="complete")
def test_mean_return_type(self):
"""Mean Imputation Return Type"""
self.assertEqual(str(type(mean_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mode_return_type(self):
"""Mode Imputation Return Type"""
self.assertEqual(str(type(mode_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_median_return_type(self):
"""Median Imputation Return Type"""
self.assertEqual(str(type(median_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mean_fill(self):
""" Mean Imputation Fill Complete Data(nothing should happen)"""
actual = mean_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_mode_fill(self):
""" Mode Imputation Fill Complete Data(nothing should happen)"""
actual = mode_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_median_fill(self):
""" Median Imputation Fill Complete Data(nothing should happen)"""
actual = median_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit tests for avging imputations<commit_after>"""test_averaging_imputations.py"""
import unittest
import numpy as np
from impy.imputations import mean_imputation
from impy.imputations import mode_imputation
from impy.imputations import median_imputation
from impy.datasets import random_int
class TestAveraging(unittest.TestCase):
""" Tests for Averaging """
def setUp(self):
self.data = random_int(missingness="complete")
def test_mean_return_type(self):
"""Mean Imputation Return Type"""
self.assertEqual(str(type(mean_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mode_return_type(self):
"""Mode Imputation Return Type"""
self.assertEqual(str(type(mode_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_median_return_type(self):
"""Median Imputation Return Type"""
self.assertEqual(str(type(median_imputation(self.data))),
"<class 'numpy.ndarray'>")
def test_mean_fill(self):
""" Mean Imputation Fill Complete Data(nothing should happen)"""
actual = mean_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_mode_fill(self):
""" Mode Imputation Fill Complete Data(nothing should happen)"""
actual = mode_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
def test_median_fill(self):
""" Median Imputation Fill Complete Data(nothing should happen)"""
actual = median_imputation(self.data)
self.assertTrue(np.array_equal(actual, self.data))
if __name__ == "__main__":
unittest.main()
|
|
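The impy record above only checks return types and that complete data passes through the averaging imputations unchanged. As a companion illustration, the numpy-only sketch below shows what column-wise mean imputation does to a small array with missing values; it does not use impy's internals and is not taken from the commit.

import numpy as np

def mean_impute(data):
    # Replace each nan with the mean of its column, computed over present values.
    filled = data.astype(float).copy()
    col_means = np.nanmean(filled, axis=0)
    rows, cols = np.where(np.isnan(filled))
    filled[rows, cols] = np.take(col_means, cols)
    return filled

data = np.array([[1.0, 2.0],
                 [np.nan, 4.0],
                 [3.0, np.nan]])
print(mean_impute(data))  # the nan in column 0 becomes 2.0, in column 1 becomes 3.0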
17ae9e25663d029af11236584b4c759c895ae830
|
util/fileIngredients.py
|
util/fileIngredients.py
|
#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import re
def fileContains(f, s, isRegex):
if isRegex:
return fileContainsRegex(f, re.compile(s, re.MULTILINE))
else:
return fileContainsStr(f, s), s
def fileContainsStr(f, s):
found = False
with open(f, 'rb') as g:
for line in g:
if line.find(s) != -1:
print line.rstrip()
found = True
return found
def fileContainsRegex(f, regex):
# e.g. ~/fuzzing/lithium/lithium.py crashesat --timeout=30
# --regex '^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*' ./js --ion -n 735957.js
# Note that putting "^" and "$" together is unlikely to work.
matchedStr = ''
found = False
with open(f, 'rb') as g:
foundRegex = regex.search(g.read())
if foundRegex:
matchedStr = foundRegex.group()
print matchedStr
found = True
return found, matchedStr
|
Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse
|
Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse
|
Python
|
mpl-2.0
|
nth10sd/funfuzz,nth10sd/funfuzz,MozillaSecurity/funfuzz,MozillaSecurity/funfuzz,nth10sd/funfuzz,MozillaSecurity/funfuzz
|
Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse
|
#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import re
def fileContains(f, s, isRegex):
if isRegex:
return fileContainsRegex(f, re.compile(s, re.MULTILINE))
else:
return fileContainsStr(f, s), s
def fileContainsStr(f, s):
found = False
with open(f, 'rb') as g:
for line in g:
if line.find(s) != -1:
print line.rstrip()
found = True
return found
def fileContainsRegex(f, regex):
# e.g. ~/fuzzing/lithium/lithium.py crashesat --timeout=30
# --regex '^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*' ./js --ion -n 735957.js
# Note that putting "^" and "$" together is unlikely to work.
matchedStr = ''
found = False
with open(f, 'rb') as g:
foundRegex = regex.search(g.read())
if foundRegex:
matchedStr = foundRegex.group()
print matchedStr
found = True
return found, matchedStr
|
<commit_before><commit_msg>Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse<commit_after>
|
#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import re
def fileContains(f, s, isRegex):
if isRegex:
return fileContainsRegex(f, re.compile(s, re.MULTILINE))
else:
return fileContainsStr(f, s), s
def fileContainsStr(f, s):
found = False
with open(f, 'rb') as g:
for line in g:
if line.find(s) != -1:
print line.rstrip()
found = True
return found
def fileContainsRegex(f, regex):
# e.g. ~/fuzzing/lithium/lithium.py crashesat --timeout=30
# --regex '^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*' ./js --ion -n 735957.js
# Note that putting "^" and "$" together is unlikely to work.
matchedStr = ''
found = False
with open(f, 'rb') as g:
foundRegex = regex.search(g.read())
if foundRegex:
matchedStr = foundRegex.group()
print matchedStr
found = True
return found, matchedStr
|
Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import re
def fileContains(f, s, isRegex):
if isRegex:
return fileContainsRegex(f, re.compile(s, re.MULTILINE))
else:
return fileContainsStr(f, s), s
def fileContainsStr(f, s):
found = False
with open(f, 'rb') as g:
for line in g:
if line.find(s) != -1:
print line.rstrip()
found = True
return found
def fileContainsRegex(f, regex):
# e.g. ~/fuzzing/lithium/lithium.py crashesat --timeout=30
# --regex '^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*' ./js --ion -n 735957.js
# Note that putting "^" and "$" together is unlikely to work.
matchedStr = ''
found = False
with open(f, 'rb') as g:
foundRegex = regex.search(g.read())
if foundRegex:
matchedStr = foundRegex.group()
print matchedStr
found = True
return found, matchedStr
|
<commit_before><commit_msg>Improve and consolidate condition scripts of Lithium to support timeouts and regex via optparse. r=Jesse<commit_after>#!/usr/bin/env python
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
import re
def fileContains(f, s, isRegex):
if isRegex:
return fileContainsRegex(f, re.compile(s, re.MULTILINE))
else:
return fileContainsStr(f, s), s
def fileContainsStr(f, s):
found = False
with open(f, 'rb') as g:
for line in g:
if line.find(s) != -1:
print line.rstrip()
found = True
return found
def fileContainsRegex(f, regex):
# e.g. ~/fuzzing/lithium/lithium.py crashesat --timeout=30
# --regex '^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*' ./js --ion -n 735957.js
# Note that putting "^" and "$" together is unlikely to work.
matchedStr = ''
found = False
with open(f, 'rb') as g:
foundRegex = regex.search(g.read())
if foundRegex:
matchedStr = foundRegex.group()
print matchedStr
found = True
return found, matchedStr
|
|
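fileIngredients.py above is Python 2 code (bare print statements), and both branches of fileContains hand back a (found, matched_string) pair. The self-contained Python 3 sketch below shows the same pattern of searching a whole file for a multi-line crash signature; the path and regex are illustrative only.

import re

def file_contains_regex(path, pattern):
    # Read the whole file at once so the regex can span line boundaries.
    regex = re.compile(pattern, re.MULTILINE)
    with open(path, 'rb') as f:
        match = regex.search(f.read().decode('utf-8', errors='replace'))
    return (match is not None, match.group() if match else '')

# Example (placeholder path):
# found, matched = file_contains_regex('crash.log', r'^#0\s*0x.* in\s*.*(?:\n|\r\n?)#1\s*')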
05aa314ac9b5d38bb7a30e30aced9b27b2797888
|
python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_syntax.py
|
python/ql/test/experimental/dataflow/tainttracking/defaultAdditionalTaintStep/test_syntax.py
|
# Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
class Context:
def __enter__(self):
return TAINTED_STRING
def __exit__(self, exc_type, exc, tb):
pass
def test_with():
ctx = Context()
taint(ctx)
with ctx as tainted:
ensure_tainted(tainted) # $ tainted
class Iter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def test_for():
iter = Iter()
taint(iter)
for tainted in iter:
ensure_tainted(tainted) # $ tainted
# Make tests runnable
test_with()
test_for()
|
Add tests for non-async constructs
|
Python: Add tests for non-async constructs
|
Python
|
mit
|
github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql
|
Python: Add tests for non-async constructs
|
# Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
class Context:
def __enter__(self):
return TAINTED_STRING
def __exit__(self, exc_type, exc, tb):
pass
def test_with():
ctx = Context()
taint(ctx)
with ctx as tainted:
ensure_tainted(tainted) # $ tainted
class Iter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def test_for():
iter = Iter()
taint(iter)
for tainted in iter:
ensure_tainted(tainted) # $ tainted
# Make tests runable
test_with()
test_for()
|
<commit_before><commit_msg>Python: Add tests for non-async constructs<commit_after>
|
# Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
class Context:
def __enter__(self):
return TAINTED_STRING
def __exit__(self, exc_type, exc, tb):
pass
def test_with():
ctx = Context()
taint(ctx)
with ctx as tainted:
ensure_tainted(tainted) # $ tainted
class Iter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def test_for():
iter = Iter()
taint(iter)
for tainted in iter:
ensure_tainted(tainted) # $ tainted
# Make tests runnable
test_with()
test_for()
|
Python: Add tests for non-async constructs# Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
class Context:
def __enter__(self):
return TAINTED_STRING
def __exit__(self, exc_type, exc, tb):
pass
def test_with():
ctx = Context()
taint(ctx)
with ctx as tainted:
ensure_tainted(tainted) # $ tainted
class Iter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def test_for():
iter = Iter()
taint(iter)
for tainted in iter:
ensure_tainted(tainted) # $ tainted
# Make tests runnable
test_with()
test_for()
|
<commit_before><commit_msg>Python: Add tests for non-async constructs<commit_after># Add taintlib to PATH so it can be imported during runtime without any hassle
import sys; import os; sys.path.append(os.path.dirname(os.path.dirname((__file__))))
from taintlib import *
# This has no runtime impact, but allows autocomplete to work
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ..taintlib import *
# Actual tests
class Context:
def __enter__(self):
return TAINTED_STRING
def __exit__(self, exc_type, exc, tb):
pass
def test_with():
ctx = Context()
taint(ctx)
with ctx as tainted:
ensure_tainted(tainted) # $ tainted
class Iter:
def __iter__(self):
return self
def __next__(self):
raise StopIteration
def test_for():
iter = Iter()
taint(iter)
for tainted in iter:
ensure_tainted(tainted) # $ tainted
# Make tests runable
test_with()
test_for()
|
|
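The CodeQL record above exercises taint flow through with and for statements, using # $ tainted inline annotations that the analysis harness checks while the module also runs as plain Python. The fragment below sketches what one more case in the same style could look like, for tuple unpacking; it is not part of the commit, and the stub helpers exist only so the snippet runs on its own (in the real test file they come from taintlib).

# Stub helpers so this fragment is self-contained; the real test imports them.
TAINTED_STRING = "tainted"
def ensure_tainted(*args):
    pass

def test_unpacking():
    a, b = TAINTED_STRING, TAINTED_STRING
    ensure_tainted(a)  # $ tainted
    ensure_tainted(b)  # $ tainted

test_unpacking()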
c1fcf54b63de95c85a9505d83062d8b320b1cbdf
|
python/example_code/cloudfront/update_distribution_certificate.py
|
python/example_code/cloudfront/update_distribution_certificate.py
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
#support for python 2 and 3 input types
def read(output):
if sys.version_info[0] < 3:
return(raw_input(output))
else:
return(input(output))
# Create CloudFront client
cf = boto3.client('cloudfront')
# List distributions with the pagination interface
print("\nAvailable CloudFront Distributions:\n")
paginator = cf.get_paginator('list_distributions')
for distributionlist in paginator.paginate():
for distribution in distributionlist['DistributionList']['Items']:
print("Domain: " + distribution['DomainName'])
print("Distribution Id: " + distribution['Id'])
print("Certificate Source: " + distribution['ViewerCertificate']['CertificateSource'])
if (distribution['ViewerCertificate']['CertificateSource'] == "acm"):
print("Certificate ARN: " + distribution['ViewerCertificate']['Certificate'])
print("")
print('Enter the Distribution Id of the CloudFront Distribution whose ACM Certificate you would like to replace. ')
distribution_id = read('Note that certificate source must be ACM - DistributionId: ')
distribution_config_response=cf.get_distribution_config(Id=distribution_id)
distribution_config=distribution_config_response['DistributionConfig']
distribution_etag=distribution_config_response['ETag']
if (distribution_config['ViewerCertificate']['CertificateSource'] != "acm"):
print("\nThe DistributionId you have entered is not currently using an ACM Certificate, exiting...\n")
exit()
old_cert_arn=distribution_config['ViewerCertificate']['ACMCertificateArn']
new_cert_arn=read("Please enter the ARN of the new ACM Certificate you would like to attach to Distribution " + distribution_id + ": ")
print("Replacing: " + old_cert_arn + "\nwith: " + new_cert_arn + "\n")
distribution_config['ViewerCertificate']['ACMCertificateArn']=new_cert_arn
distribution_config['ViewerCertificate']['Certificate']=new_cert_arn
cf.update_distribution(DistributionConfig=distribution_config,Id=distribution_id,IfMatch=distribution_etag)
|
Add python cloudfront update_distribution example to replace ACM Certificate
|
Add python cloudfront update_distribution example to replace ACM Certificate
Add python cloudfront update_distribution example to replace ACM Certificate
|
Python
|
apache-2.0
|
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
|
Add python cloudfront update_distribution example to replace ACM Certificate
Add python cloudfront update_distribution example to replace ACM Certificate
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
#support for python 2 and 3 input types
def read(output):
if sys.version_info[0] < 3:
return(raw_input(output))
else:
return(input(output))
# Create CloudFront client
cf = boto3.client('cloudfront')
# List distributions with the pagination interface
print("\nAvailable CloudFront Distributions:\n")
paginator = cf.get_paginator('list_distributions')
for distributionlist in paginator.paginate():
for distribution in distributionlist['DistributionList']['Items']:
print("Domain: " + distribution['DomainName'])
print("Distribution Id: " + distribution['Id'])
print("Certificate Source: " + distribution['ViewerCertificate']['CertificateSource'])
if (distribution['ViewerCertificate']['CertificateSource'] == "acm"):
print("Certificate ARN: " + distribution['ViewerCertificate']['Certificate'])
print("")
print('Enter the Distribution Id of the CloudFront Distribution whose ACM Certificate you would like to replace. ')
distribution_id = read('Note that certificate source must be ACM - DistributionId: ')
distribution_config_response=cf.get_distribution_config(Id=distribution_id)
distribution_config=distribution_config_response['DistributionConfig']
distribution_etag=distribution_config_response['ETag']
if (distribution_config['ViewerCertificate']['CertificateSource'] != "acm"):
print("\nThe DistributionId you have entered is not currently using an ACM Certificate, exiting...\n")
exit()
old_cert_arn=distribution_config['ViewerCertificate']['ACMCertificateArn']
new_cert_arn=read("Please enter the ARN of the new ACM Certificate you would like to attach to Distribution " + distribution_id + ": ")
print("Replacing: " + old_cert_arn + "\nwith: " + new_cert_arn + "\n")
distribution_config['ViewerCertificate']['ACMCertificateArn']=new_cert_arn
distribution_config['ViewerCertificate']['Certificate']=new_cert_arn
cf.update_distribution(DistributionConfig=distribution_config,Id=distribution_id,IfMatch=distribution_etag)
|
<commit_before><commit_msg>Add python cloudfront update_distribution example to replace ACM Certificate
Add python cloudfront update_distribution example to replace ACM Certificate<commit_after>
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
#support for python 2 and 3 input types
def read(output):
if sys.version_info[0] < 3:
return(raw_input(output))
else:
return(input(output))
# Create CloudFront client
cf = boto3.client('cloudfront')
# List distributions with the pagination interface
print("\nAvailable CloudFront Distributions:\n")
paginator = cf.get_paginator('list_distributions')
for distributionlist in paginator.paginate():
for distribution in distributionlist['DistributionList']['Items']:
print("Domain: " + distribution['DomainName'])
print("Distribution Id: " + distribution['Id'])
print("Certificate Source: " + distribution['ViewerCertificate']['CertificateSource'])
if (distribution['ViewerCertificate']['CertificateSource'] == "acm"):
print("Certificate ARN: " + distribution['ViewerCertificate']['Certificate'])
print("")
print('Enter the Distribution Id of the CloudFront Distribution whose ACM Certificate you would like to replace. ')
distribution_id = read('Note that certificate source must be ACM - DistributionId: ')
distribution_config_response=cf.get_distribution_config(Id=distribution_id)
distribution_config=distribution_config_response['DistributionConfig']
distribution_etag=distribution_config_response['ETag']
if (distribution_config['ViewerCertificate']['CertificateSource'] != "acm"):
print("\nThe DistributionId you have entered is not currently using an ACM Certificate, exiting...\n")
exit()
old_cert_arn=distribution_config['ViewerCertificate']['ACMCertificateArn']
new_cert_arn=read("Please enter the ARN of the new ACM Certificate you would like to attach to Distribution " + distribution_id + ": ")
print("Replacing: " + old_cert_arn + "\nwith: " + new_cert_arn + "\n")
distribution_config['ViewerCertificate']['ACMCertificateArn']=new_cert_arn
distribution_config['ViewerCertificate']['Certificate']=new_cert_arn
cf.update_distribution(DistributionConfig=distribution_config,Id=distribution_id,IfMatch=distribution_etag)
|
Add python cloudfront update_distribution example to replace ACM Certificate
Add python cloudfront update_distribution example to replace ACM Certificate# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
#support for python 2 and 3 input types
def read(output):
if sys.version_info[0] < 3:
return(raw_input(output))
else:
return(input(output))
# Create CloudFront client
cf = boto3.client('cloudfront')
# List distributions with the pagination interface
print("\nAvailable CloudFront Distributions:\n")
paginator = cf.get_paginator('list_distributions')
for distributionlist in paginator.paginate():
for distribution in distributionlist['DistributionList']['Items']:
print("Domain: " + distribution['DomainName'])
print("Distribution Id: " + distribution['Id'])
print("Certificate Source: " + distribution['ViewerCertificate']['CertificateSource'])
if (distribution['ViewerCertificate']['CertificateSource'] == "acm"):
print("Certificate ARN: " + distribution['ViewerCertificate']['Certificate'])
print("")
print('Enter the Distribution Id of the CloudFront Distribution whose ACM Certificate you would like to replace. ')
distribution_id = read('Note that certificate source must be ACM - DistributionId: ')
distribution_config_response=cf.get_distribution_config(Id=distribution_id)
distribution_config=distribution_config_response['DistributionConfig']
distribution_etag=distribution_config_response['ETag']
if (distribution_config['ViewerCertificate']['CertificateSource'] != "acm"):
print("\nThe DistributionId you have entered is not currently using an ACM Certificate, exiting...\n")
exit()
old_cert_arn=distribution_config['ViewerCertificate']['ACMCertificateArn']
new_cert_arn=read("Please enter the ARN of the new ACM Certificate you would like to attach to Distribution " + distribution_id + ": ")
print("Replacing: " + old_cert_arn + "\nwith: " + new_cert_arn + "\n")
distribution_config['ViewerCertificate']['ACMCertificateArn']=new_cert_arn
distribution_config['ViewerCertificate']['Certificate']=new_cert_arn
cf.update_distribution(DistributionConfig=distribution_config,Id=distribution_id,IfMatch=distribution_etag)
|
<commit_before><commit_msg>Add python cloudfront update_distribution example to replace ACM Certificate
Add python cloudfront update_distribution example to replace ACM Certificate<commit_after># Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
#support for python 2 and 3 input types
def read(output):
if sys.version_info[0] < 3:
return(raw_input(output))
else:
return(input(output))
# Create CloudFront client
cf = boto3.client('cloudfront')
# List distributions with the pagination interface
print("\nAvailable CloudFront Distributions:\n")
paginator = cf.get_paginator('list_distributions')
for distributionlist in paginator.paginate():
for distribution in distributionlist['DistributionList']['Items']:
print("Domain: " + distribution['DomainName'])
print("Distribution Id: " + distribution['Id'])
print("Certificate Source: " + distribution['ViewerCertificate']['CertificateSource'])
if (distribution['ViewerCertificate']['CertificateSource'] == "acm"):
print("Certificate ARN: " + distribution['ViewerCertificate']['Certificate'])
print("")
print('Enter the Distribution Id of the CloudFront Distribution whose ACM Certificate you would like to replace. ')
distribution_id = read('Note that certificate source must be ACM - DistributionId: ')
distribution_config_response=cf.get_distribution_config(Id=distribution_id)
distribution_config=distribution_config_response['DistributionConfig']
distribution_etag=distribution_config_response['ETag']
if (distribution_config['ViewerCertificate']['CertificateSource'] != "acm"):
print("\nThe DistributionId you have entered is not currently using an ACM Certificate, exiting...\n")
exit()
old_cert_arn=distribution_config['ViewerCertificate']['ACMCertificateArn']
new_cert_arn=read("Please enter the ARN of the new ACM Certificate you would like to attach to Distribution " + distribution_id + ": ")
print("Replacing: " + old_cert_arn + "\nwith: " + new_cert_arn + "\n")
distribution_config['ViewerCertificate']['ACMCertificateArn']=new_cert_arn
distribution_config['ViewerCertificate']['Certificate']=new_cert_arn
cf.update_distribution(DistributionConfig=distribution_config,Id=distribution_id,IfMatch=distribution_etag)
|
|
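The CloudFront record above drives the certificate swap interactively with print/input prompts. A non-interactive sketch of the same three boto3 steps (get_distribution_config, mutate the ViewerCertificate block, update_distribution with the ETag) is shown below; the distribution id and certificate ARN are placeholders, and it assumes the distribution already uses an ACM certificate.

import boto3

def replace_acm_certificate(distribution_id, new_cert_arn):
    cf = boto3.client('cloudfront')
    # Fetch the current config together with the ETag required by the update call.
    response = cf.get_distribution_config(Id=distribution_id)
    config = response['DistributionConfig']
    etag = response['ETag']
    if config['ViewerCertificate'].get('CertificateSource') != 'acm':
        raise ValueError('Distribution is not using an ACM certificate')
    config['ViewerCertificate']['ACMCertificateArn'] = new_cert_arn
    config['ViewerCertificate']['Certificate'] = new_cert_arn
    cf.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=etag)

# Example (placeholder values):
# replace_acm_certificate('E1EXAMPLE12345', 'arn:aws:acm:us-east-1:111122223333:certificate/example')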
75a882bf38c88d73e38d13fbb8b1499ff4ae4ea6
|
scripts/remove_after_use/set_meetings_users_fullnames_to_guids.py
|
scripts/remove_after_use/set_meetings_users_fullnames_to_guids.py
|
import sys
import logging
import django
from django.db import transaction
django.setup()
from osf.models import OSFUser
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
with transaction.atomic():
users = OSFUser.objects.filter(fullname__regex=r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$', tags__name='osf4m')
logger.info('{} users found added by OSF 4 Meetings with emails for fullnames'.format(users.count()))
for user in users:
user.fullname = user._id
if not dry_run:
user.save()
if __name__ == '__main__':
main()
|
Add migration for changing users added by OSF for meetings with emails for fullnames to their guid
|
Add migration for changing users added by OSF for meetings with emails for fullnames to their guid
|
Python
|
apache-2.0
|
felliott/osf.io,adlius/osf.io,felliott/osf.io,adlius/osf.io,saradbowman/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,adlius/osf.io,felliott/osf.io,mattclark/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,felliott/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,cslzchen/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,aaxelb/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,caseyrollins/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,aaxelb/osf.io,baylee-d/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,mattclark/osf.io,mfraezz/osf.io,baylee-d/osf.io,pattisdr/osf.io
|
Add migration for changing users added by OSF for meetings with emails for fullnames to their guid
|
import sys
import logging
import django
from django.db import transaction
django.setup()
from osf.models import OSFUser
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
with transaction.atomic():
users = OSFUser.objects.filter(fullname__regex=r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$', tags__name='osf4m')
logger.info('{} users found added by OSF 4 Meetings with emails for fullnames'.format(users.count()))
for user in users:
user.fullname = user._id
if not dry_run:
user.save()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration for changing users added by OSF for meetings with emails for fullnames to their guid<commit_after>
|
import sys
import logging
import django
from django.db import transaction
django.setup()
from osf.models import OSFUser
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
with transaction.atomic():
users = OSFUser.objects.filter(fullname__regex=r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$', tags__name='osf4m')
logger.info('{} users found added by OSF 4 Meetings with emails for fullnames'.format(users.count()))
for user in users:
user.fullname = user._id
if not dry_run:
user.save()
if __name__ == '__main__':
main()
|
Add migration for changing users added by OSF for meetings with emails for fullnames to their guidimport sys
import logging
import django
from django.db import transaction
django.setup()
from osf.models import OSFUser
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
with transaction.atomic():
users = OSFUser.objects.filter(fullname__regex=r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$', tags__name='osf4m')
logger.info('{} users found added by OSF 4 Meetings with emails for fullnames'.format(users.count()))
for user in users:
user.fullname = user._id
if not dry_run:
user.save()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add migration for changing users added by OSF for meetings with emails for fullnames to their guid<commit_after>import sys
import logging
import django
from django.db import transaction
django.setup()
from osf.models import OSFUser
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def main():
dry_run = '--dry' in sys.argv
with transaction.atomic():
users = OSFUser.objects.filter(fullname__regex=r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$', tags__name='osf4m')
logger.info('{} users found added by OSF 4 Meetings with emails for fullnames'.format(users.count()))
for user in users:
user.fullname = user._id
if not dry_run:
user.save()
if __name__ == '__main__':
main()
|
|
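The OSF record above selects meeting users whose fullname looks like an email address and rewrites it to the user's guid, saving only when --dry is absent. The standalone snippet below just illustrates the email-shaped regex used in the queryset filter; the sample strings are made up.

import re

EMAIL_LIKE = re.compile(r'^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\._-]+\.[a-zA-Z]*$')

for fullname in ['[email protected]', 'Jane Doe', 'osf4m-user']:
    # Only the first value matches and would be replaced by the user's guid.
    print(fullname, bool(EMAIL_LIKE.match(fullname)))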
a6b35a9a94b2e4b32c2236258812b44e81184515
|
corehq/apps/users/management/commands/fix_location_user_data.py
|
corehq/apps/users/management/commands/fix_location_user_data.py
|
from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = "domain"
help = "Fix location user data for mobile workers."
def process_user(self, user):
if user.location_id:
user.set_location(Location.get(user.location_id))
else:
user.unset_location()
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Usage: python manage.py fix_location_user_data %s" % self.args)
domain = args[0]
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print "Error processing user %s: %s" % (user._id, e)
|
Add management command for resyncing mobile worker location user data
|
Add management command for resyncing mobile worker location user data
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
Add management command for resyncing mobile worker location user data
|
from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = "domain"
help = "Fix location user data for mobile workers."
def process_user(self, user):
if user.location_id:
user.set_location(Location.get(user.location_id))
else:
user.unset_location()
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Usage: python manage.py fix_location_user_data %s" % self.args)
domain = args[0]
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print "Error processing user %s: %s" % (user._id, e)
|
<commit_before><commit_msg>Add management command for resyncing mobile worker location user data<commit_after>
|
from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = "domain"
help = "Fix location user data for mobile workers."
def process_user(self, user):
if user.location_id:
user.set_location(Location.get(user.location_id))
else:
user.unset_location()
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Usage: python manage.py fix_location_user_data %s" % self.args)
domain = args[0]
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print "Error processing user %s: %s" % (user._id, e)
|
Add management command for resyncing mobile worker location user datafrom corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = "domain"
help = "Fix location user data for mobile workers."
def process_user(self, user):
if user.location_id:
user.set_location(Location.get(user.location_id))
else:
user.unset_location()
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Usage: python manage.py fix_location_user_data %s" % self.args)
domain = args[0]
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print "Error processing user %s: %s" % (user._id, e)
|
<commit_before><commit_msg>Add management command for resyncing mobile worker location user data<commit_after>from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from dimagi.utils.couch.database import iter_docs
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
args = "domain"
help = "Fix location user data for mobile workers."
def process_user(self, user):
if user.location_id:
user.set_location(Location.get(user.location_id))
else:
user.unset_location()
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("Usage: python manage.py fix_location_user_data %s" % self.args)
domain = args[0]
ids = (
CommCareUser.ids_by_domain(domain, is_active=True) +
CommCareUser.ids_by_domain(domain, is_active=False)
)
for doc in iter_docs(CommCareUser.get_db(), ids):
user = CommCareUser.wrap(doc)
try:
self.process_user(user)
except Exception as e:
print "Error processing user %s: %s" % (user._id, e)
|
|
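The CommCare HQ record above is a Django management command, invoked with the domain as its single positional argument (python manage.py fix_location_user_data <domain>). A minimal sketch of triggering it from code, for example from a Django shell in the same project, is below; the domain name is a placeholder.

from django.core.management import call_command

# Equivalent to: python manage.py fix_location_user_data example-domain
call_command('fix_location_user_data', 'example-domain')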
415717bddb00ca650bef61a5c6054a7b47575b56
|
jaspyx/tests/visitor/test_break.py
|
jaspyx/tests/visitor/test_break.py
|
import ast
from jaspyx.ast_util import ast_store, ast_load
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestBreak(V8Helper):
def test_break(self):
assert self.run(
[
ast.Assign(
[ast_store('i')],
ast.Num(0),
),
ast.While(
ast.Compare(
ast_load('i'),
[ast.Lt()],
[ast.Num(10)]
),
[
ast.Break(),
],
[]
)
],
'i',
int
) == 0
|
Implement unit test for break.
|
Implement unit test for break.
|
Python
|
mit
|
iksteen/jaspyx,ztane/jaspyx
|
Implement unit test for break.
|
import ast
from jaspyx.ast_util import ast_store, ast_load
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestBreak(V8Helper):
def test_break(self):
assert self.run(
[
ast.Assign(
[ast_store('i')],
ast.Num(0),
),
ast.While(
ast.Compare(
ast_load('i'),
[ast.Lt()],
[ast.Num(10)]
),
[
ast.Break(),
],
[]
)
],
'i',
int
) == 0
|
<commit_before><commit_msg>Implement unit test for break.<commit_after>
|
import ast
from jaspyx.ast_util import ast_store, ast_load
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestBreak(V8Helper):
def test_break(self):
assert self.run(
[
ast.Assign(
[ast_store('i')],
ast.Num(0),
),
ast.While(
ast.Compare(
ast_load('i'),
[ast.Lt()],
[ast.Num(10)]
),
[
ast.Break(),
],
[]
)
],
'i',
int
) == 0
|
Implement unit test for break.import ast
from jaspyx.ast_util import ast_store, ast_load
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestBreak(V8Helper):
def test_break(self):
assert self.run(
[
ast.Assign(
[ast_store('i')],
ast.Num(0),
),
ast.While(
ast.Compare(
ast_load('i'),
[ast.Lt()],
[ast.Num(10)]
),
[
ast.Break(),
],
[]
)
],
'i',
int
) == 0
|
<commit_before><commit_msg>Implement unit test for break.<commit_after>import ast
from jaspyx.ast_util import ast_store, ast_load
from jaspyx.tests.visitor.v8_helper import V8Helper
class TestBreak(V8Helper):
def test_break(self):
assert self.run(
[
ast.Assign(
[ast_store('i')],
ast.Num(0),
),
ast.While(
ast.Compare(
ast_load('i'),
[ast.Lt()],
[ast.Num(10)]
),
[
ast.Break(),
],
[]
)
],
'i',
int
) == 0
|
|
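The jaspyx record above builds its test program as a hand-written ast tree. For readers who find the nested constructors hard to follow, the snippet below prints the AST of the equivalent Python source, which is what the handwritten tree encodes: i starts at 0 and the loop breaks on its first iteration, so i stays 0.

import ast

source = """
i = 0
while i < 10:
    break
"""
# indent= requires Python 3.9+; drop it on older interpreters.
print(ast.dump(ast.parse(source), indent=4))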
495da73f305a2a0e79a28d251b5b93caea06656d
|
mediagenerator/filters/uglifier.py
|
mediagenerator/filters/uglifier.py
|
from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class Uglifier(Filter):
def __init__(self, **kwargs):
super(Uglifier, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Uglifier only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
args = ['uglifyjs']
try:
args = args + settings.UGLIFIER_OPTIONS
except AttributeError:
pass
try:
cmd = Popen(args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run UglifyJs. "
"Please make sure you have Node.js and UglifyJS installed "
"and that it's in your PATH.\n"
"Error was: %s" % e)
|
Add UglifyJS as a filter.
|
Add UglifyJS as a filter.
|
Python
|
bsd-3-clause
|
Carlangueitor/django-mediagenerator,adieu/django-mediagenerator,Carlangueitor/django-mediagenerator,adieu/django-mediagenerator
|
Add UglifyJS as a filter.
|
from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class Uglifier(Filter):
def __init__(self, **kwargs):
super(Uglifier, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Uglifier only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
args = ['uglifyjs']
try:
args = args + settings.UGLIFIER_OPTIONS
except AttributeError:
pass
try:
cmd = Popen(args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run UglifyJs. "
"Please make sure you have Node.js and UglifyJS installed "
"and that it's in your PATH.\n"
"Error was: %s" % e)
|
<commit_before><commit_msg>Add UglifyJS as a filter.<commit_after>
|
from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class Uglifier(Filter):
def __init__(self, **kwargs):
super(Uglifier, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Uglifier only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
args = ['uglifyjs']
try:
args = args + settings.UGLIFIER_OPTIONS
except AttributeError:
pass
try:
cmd = Popen(args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run UglifyJs. "
"Please make sure you have Node.js and UglifyJS installed "
"and that it's in your PATH.\n"
"Error was: %s" % e)
|
Add UglifyJS as a filter.from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class Uglifier(Filter):
def __init__(self, **kwargs):
super(Uglifier, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Uglifier only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
args = ['uglifyjs']
try:
args = args + settings.UGLIFIER_OPTIONS
except AttributeError:
pass
try:
cmd = Popen(args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run UglifyJs. "
"Please make sure you have Node.js and UglifyJS installed "
"and that it's in your PATH.\n"
"Error was: %s" % e)
|
<commit_before><commit_msg>Add UglifyJS as a filter.<commit_after>from django.conf import settings
from django.utils.encoding import smart_str
from mediagenerator.generators.bundles.base import Filter
class Uglifier(Filter):
def __init__(self, **kwargs):
super(Uglifier, self).__init__(**kwargs)
assert self.filetype == 'js', (
'Uglifier only supports compilation to js. '
'The parent filter expects "%s".' % self.filetype)
def get_output(self, variation):
# We import this here, so App Engine Helper users don't get import
# errors.
from subprocess import Popen, PIPE
for input in self.get_input(variation):
args = ['uglifyjs']
try:
args = args + settings.UGLIFIER_OPTIONS
except AttributeError:
pass
try:
cmd = Popen(args,
stdin=PIPE, stdout=PIPE, stderr=PIPE,
universal_newlines=True)
output, error = cmd.communicate(smart_str(input))
assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error
yield output.decode('utf-8')
except Exception, e:
raise ValueError("Failed to run UglifyJs. "
"Please make sure you have Node.js and UglifyJS installed "
"and that it's in your PATH.\n"
"Error was: %s" % e)
|
|
96ed06f1f3dab3aa9d0f8150c41a5c1b943a86b0
|
frappe/tests/test_config.py
|
frappe/tests/test_config.py
|
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.config import get_modules_from_all_apps_for_user
class TestConfig(unittest.TestCase):
def test_get_modules(self):
frappe_modules = frappe.get_all("Module Def", filters={"app_name": "frappe"}, pluck="name")
all_modules_data = get_modules_from_all_apps_for_user()
first_module_entry = all_modules_data[0]
all_modules = [x["module_name"] for x in all_modules_data]
self.assertIn("links", first_module_entry)
self.assertIsInstance(all_modules_data, list)
self.assertFalse([x for x in frappe_modules if x not in all_modules])
|
Add test for config module
|
test: Add test for config module
|
Python
|
mit
|
StrellaGroup/frappe,frappe/frappe,StrellaGroup/frappe,frappe/frappe,StrellaGroup/frappe,frappe/frappe
|
test: Add test for config module
|
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.config import get_modules_from_all_apps_for_user
class TestConfig(unittest.TestCase):
def test_get_modules(self):
frappe_modules = frappe.get_all("Module Def", filters={"app_name": "frappe"}, pluck="name")
all_modules_data = get_modules_from_all_apps_for_user()
first_module_entry = all_modules_data[0]
all_modules = [x["module_name"] for x in all_modules_data]
self.assertIn("links", first_module_entry)
self.assertIsInstance(all_modules_data, list)
self.assertFalse([x for x in frappe_modules if x not in all_modules])
|
<commit_before><commit_msg>test: Add test for config module<commit_after>
|
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.config import get_modules_from_all_apps_for_user
class TestConfig(unittest.TestCase):
def test_get_modules(self):
frappe_modules = frappe.get_all("Module Def", filters={"app_name": "frappe"}, pluck="name")
all_modules_data = get_modules_from_all_apps_for_user()
first_module_entry = all_modules_data[0]
all_modules = [x["module_name"] for x in all_modules_data]
self.assertIn("links", first_module_entry)
self.assertIsInstance(all_modules_data, list)
self.assertFalse([x for x in frappe_modules if x not in all_modules])
|
test: Add test for config module# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.config import get_modules_from_all_apps_for_user
class TestConfig(unittest.TestCase):
def test_get_modules(self):
frappe_modules = frappe.get_all("Module Def", filters={"app_name": "frappe"}, pluck="name")
all_modules_data = get_modules_from_all_apps_for_user()
first_module_entry = all_modules_data[0]
all_modules = [x["module_name"] for x in all_modules_data]
self.assertIn("links", first_module_entry)
self.assertIsInstance(all_modules_data, list)
self.assertFalse([x for x in frappe_modules if x not in all_modules])
|
<commit_before><commit_msg>test: Add test for config module<commit_after># Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import unittest
import frappe
from frappe.config import get_modules_from_all_apps_for_user
class TestConfig(unittest.TestCase):
def test_get_modules(self):
frappe_modules = frappe.get_all("Module Def", filters={"app_name": "frappe"}, pluck="name")
all_modules_data = get_modules_from_all_apps_for_user()
first_module_entry = all_modules_data[0]
all_modules = [x["module_name"] for x in all_modules_data]
self.assertIn("links", first_module_entry)
self.assertIsInstance(all_modules_data, list)
self.assertFalse([x for x in frappe_modules if x not in all_modules])
|
|
f1ccab2168dea1b0827f4ca929f0036e84170a76
|
go/base/tests/test_views.py
|
go/base/tests/test_views.py
|
"""Test for go.base.utils."""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from go.base.tests.utils import VumiGoDjangoTestCase
class BaseViewsTestCase(VumiGoDjangoTestCase):
def cross_domain_xhr(self, url):
return self.client.post(reverse('cross_domain_xhr'), {'url': url})
@patch('requests.get')
def test_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': None})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://username:password@domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr_with_https_and_port(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr(
'https://username:password@domain.com:443/foo')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('https://domain.com:443/foo',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
|
Add tests for cross domain xhr view
|
Add tests for cross domain xhr view
|
Python
|
bsd-3-clause
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
Add tests for cross domain xhr view
|
"""Test for go.base.utils."""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from go.base.tests.utils import VumiGoDjangoTestCase
class BaseViewsTestCase(VumiGoDjangoTestCase):
def cross_domain_xhr(self, url):
return self.client.post(reverse('cross_domain_xhr'), {'url': url})
@patch('requests.get')
def test_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': None})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://username:password@domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr_with_https_and_port(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr(
'https://username:password@domain.com:443/foo')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('https://domain.com:443/foo',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Add tests for cross domain xhr view<commit_after>
|
"""Test for go.base.utils."""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from go.base.tests.utils import VumiGoDjangoTestCase
class BaseViewsTestCase(VumiGoDjangoTestCase):
def cross_domain_xhr(self, url):
return self.client.post(reverse('cross_domain_xhr'), {'url': url})
@patch('requests.get')
def test_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': None})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://username:password@domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr_with_https_and_port(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr(
'https://username:password@domain.com:443/foo')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('https://domain.com:443/foo',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
|
Add tests for cross domain xhr view"""Test for go.base.utils."""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from go.base.tests.utils import VumiGoDjangoTestCase
class BaseViewsTestCase(VumiGoDjangoTestCase):
def cross_domain_xhr(self, url):
return self.client.post(reverse('cross_domain_xhr'), {'url': url})
@patch('requests.get')
def test_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': None})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://username:password@domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr_with_https_and_port(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr(
'https://username:password@domain.com:443/foo')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('https://domain.com:443/foo',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Add tests for cross domain xhr view<commit_after>"""Test for go.base.utils."""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from go.base.tests.utils import VumiGoDjangoTestCase
class BaseViewsTestCase(VumiGoDjangoTestCase):
def cross_domain_xhr(self, url):
return self.client.post(reverse('cross_domain_xhr'), {'url': url})
@patch('requests.get')
def test_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': None})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr('http://username:password@domain.com')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('http://domain.com',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
@patch('requests.get')
def test_basic_auth_cross_domain_xhr_with_https_and_port(self, mocked_get):
mocked_get.return_value = Mock(content='foo', status_code=200)
response = self.cross_domain_xhr(
'https://username:password@domain.com:443/foo')
[call] = mocked_get.call_args_list
args, kwargs = call
self.assertEqual(args, ('https://domain.com:443/foo',))
self.assertEqual(kwargs, {'auth': ('username', 'password')})
self.assertTrue(mocked_get.called)
self.assertEqual(response.content, 'foo')
self.assertEqual(response.status_code, 200)
|
|
5578d11f45e9c41ab9c4311f2bed48b9c24d9bf5
|
tests/grammar_term-nonterm_test/NonterminalHaveTest.py
|
tests/grammar_term-nonterm_test/NonterminalHaveTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
Create file for Nonterminal have method
|
Create file for Nonterminal have method
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Create file for Nonterminal have method
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Create file for Nonterminal have method<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
Create file for Nonterminal have method#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
<commit_before><commit_msg>Create file for Nonterminal have method<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
|
|
913c9a10b2eb3b3d9de108a82a3251b2c0de0e10
|
cybox/test/objects/hostname_test.py
|
cybox/test/objects/hostname_test.py
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.hostname_object import Hostname
from cybox.test.objects import ObjectTestCase
class TestHostname(ObjectTestCase, unittest.TestCase):
object_type = "HostnameObjectType"
klass = Hostname
_full_dict = {
'is_domain_name': True,
'hostname_value': "www.example.com",
'naming_system': ["DNS", "NETBIOS"],
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
|
Add test for Hostname object
|
Add test for Hostname object
|
Python
|
bsd-3-clause
|
CybOXProject/python-cybox
|
Add test for Hostname object
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.hostname_object import Hostname
from cybox.test.objects import ObjectTestCase
class TestHostname(ObjectTestCase, unittest.TestCase):
object_type = "HostnameObjectType"
klass = Hostname
_full_dict = {
'is_domain_name': True,
'hostname_value': "www.example.com",
'naming_system': ["DNS", "NETBIOS"],
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for Hostname object<commit_after>
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.hostname_object import Hostname
from cybox.test.objects import ObjectTestCase
class TestHostname(ObjectTestCase, unittest.TestCase):
object_type = "HostnameObjectType"
klass = Hostname
_full_dict = {
'is_domain_name': True,
'hostname_value': "www.example.com",
'naming_system': ["DNS", "NETBIOS"],
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
|
Add test for Hostname object# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.hostname_object import Hostname
from cybox.test.objects import ObjectTestCase
class TestHostname(ObjectTestCase, unittest.TestCase):
object_type = "HostnameObjectType"
klass = Hostname
_full_dict = {
'is_domain_name': True,
'hostname_value': "www.example.com",
'naming_system': ["DNS", "NETBIOS"],
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for Hostname object<commit_after># Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from cybox.objects.hostname_object import Hostname
from cybox.test.objects import ObjectTestCase
class TestHostname(ObjectTestCase, unittest.TestCase):
object_type = "HostnameObjectType"
klass = Hostname
_full_dict = {
'is_domain_name': True,
'hostname_value': "www.example.com",
'naming_system': ["DNS", "NETBIOS"],
'xsi:type': object_type,
}
if __name__ == "__main__":
unittest.main()
|
|
514aca20c6f076a86819d7180f36c3b2e8bcc33b
|
tests/integration_tests/test_tensorflow_integration.py
|
tests/integration_tests/test_tensorflow_integration.py
|
from __future__ import print_function
import os
import tempfile
import pytest
import keras
from keras import layers
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
@pytest.mark.skipif(keras.backend.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_tf_optimizer():
import tensorflow as tf
num_hidden = 10
output_dim = 2
input_dim = 10
target = 0.8
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=1., rho=0.95, epsilon=1e-08)
(x_train, y_train), (x_test, y_test) = get_test_data(
num_train=1000, num_test=200,
input_shape=(input_dim,),
classification=True, num_classes=output_dim)
model = keras.Sequential()
model.add(layers.Dense(num_hidden,
activation='relu',
input_shape=(input_dim,)))
model.add(layers.Dense(output_dim, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=8, batch_size=16,
validation_data=(x_test, y_test), verbose=2)
assert history.history['val_acc'][-1] >= target
# Test saving.
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
model = keras.models.load_model(fname)
assert len(model.weights) == 4
os.remove(fname)
if __name__ == '__main__':
pytest.main([__file__])
|
Add integration test checking compatibility of Keras models with TF optimizers.
|
Add integration test checking compatibility of Keras models with TF optimizers.
|
Python
|
apache-2.0
|
keras-team/keras,keras-team/keras
|
Add integration test checking compatibility of Keras models with TF optimizers.
|
from __future__ import print_function
import os
import tempfile
import pytest
import keras
from keras import layers
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
@pytest.mark.skipif(keras.backend.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_tf_optimizer():
import tensorflow as tf
num_hidden = 10
output_dim = 2
input_dim = 10
target = 0.8
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=1., rho=0.95, epsilon=1e-08)
(x_train, y_train), (x_test, y_test) = get_test_data(
num_train=1000, num_test=200,
input_shape=(input_dim,),
classification=True, num_classes=output_dim)
model = keras.Sequential()
model.add(layers.Dense(num_hidden,
activation='relu',
input_shape=(input_dim,)))
model.add(layers.Dense(output_dim, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=8, batch_size=16,
validation_data=(x_test, y_test), verbose=2)
assert history.history['val_acc'][-1] >= target
# Test saving.
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
model = keras.models.load_model(fname)
assert len(model.weights) == 4
os.remove(fname)
if __name__ == '__main__':
pytest.main([__file__])
|
<commit_before><commit_msg>Add integration test checking compatibility of Keras models with TF optimizers.<commit_after>
|
from __future__ import print_function
import os
import tempfile
import pytest
import keras
from keras import layers
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
@pytest.mark.skipif(keras.backend.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_tf_optimizer():
import tensorflow as tf
num_hidden = 10
output_dim = 2
input_dim = 10
target = 0.8
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=1., rho=0.95, epsilon=1e-08)
(x_train, y_train), (x_test, y_test) = get_test_data(
num_train=1000, num_test=200,
input_shape=(input_dim,),
classification=True, num_classes=output_dim)
model = keras.Sequential()
model.add(layers.Dense(num_hidden,
activation='relu',
input_shape=(input_dim,)))
model.add(layers.Dense(output_dim, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=8, batch_size=16,
validation_data=(x_test, y_test), verbose=2)
assert history.history['val_acc'][-1] >= target
# Test saving.
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
model = keras.models.load_model(fname)
assert len(model.weights) == 4
os.remove(fname)
if __name__ == '__main__':
pytest.main([__file__])
|
Add integration test checking compatibility of Keras models with TF optimizers.from __future__ import print_function
import os
import tempfile
import pytest
import keras
from keras import layers
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
@pytest.mark.skipif(keras.backend.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_tf_optimizer():
import tensorflow as tf
num_hidden = 10
output_dim = 2
input_dim = 10
target = 0.8
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=1., rho=0.95, epsilon=1e-08)
(x_train, y_train), (x_test, y_test) = get_test_data(
num_train=1000, num_test=200,
input_shape=(input_dim,),
classification=True, num_classes=output_dim)
model = keras.Sequential()
model.add(layers.Dense(num_hidden,
activation='relu',
input_shape=(input_dim,)))
model.add(layers.Dense(output_dim, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=8, batch_size=16,
validation_data=(x_test, y_test), verbose=2)
assert history.history['val_acc'][-1] >= target
# Test saving.
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
model = keras.models.load_model(fname)
assert len(model.weights) == 4
os.remove(fname)
if __name__ == '__main__':
pytest.main([__file__])
|
<commit_before><commit_msg>Add integration test checking compatibility of Keras models with TF optimizers.<commit_after>from __future__ import print_function
import os
import tempfile
import pytest
import keras
from keras import layers
from keras.utils.test_utils import get_test_data
from keras.utils.test_utils import keras_test
@pytest.mark.skipif(keras.backend.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_tf_optimizer():
import tensorflow as tf
num_hidden = 10
output_dim = 2
input_dim = 10
target = 0.8
optimizer = tf.train.AdadeltaOptimizer(
learning_rate=1., rho=0.95, epsilon=1e-08)
(x_train, y_train), (x_test, y_test) = get_test_data(
num_train=1000, num_test=200,
input_shape=(input_dim,),
classification=True, num_classes=output_dim)
model = keras.Sequential()
model.add(layers.Dense(num_hidden,
activation='relu',
input_shape=(input_dim,)))
model.add(layers.Dense(output_dim, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
history = model.fit(x_train, y_train, epochs=8, batch_size=16,
validation_data=(x_test, y_test), verbose=2)
assert history.history['val_acc'][-1] >= target
# Test saving.
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
model = keras.models.load_model(fname)
assert len(model.weights) == 4
os.remove(fname)
if __name__ == '__main__':
pytest.main([__file__])
|
|
24d742e444c84df99629d8a6aff7ca7e6c90f995
|
scheduler/misc/detect_stuck_active_invs.py
|
scheduler/misc/detect_stuck_active_invs.py
|
#!/usr/bin/env python
# Copyright 2018 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds jobs with old entries (>1d) in ActiveInvocations list.
Usage:
prpc login
./detect_stuck_active_invs.py luci-scheduler-dev.appspot.com
Requires the caller to be in 'administrators' group.
"""
import json
import subprocess
import sys
import time
def prpc(host, method, body):
p = subprocess.Popen(
['prpc', 'call', host, method],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(json.dumps(body))
if p.returncode:
raise Exception('Call to %s failed' % method)
return json.loads(out)
def check_job(host, job_ref):
print 'Checking %s/%s' % (job_ref['project'], job_ref['job'])
state = prpc(host, 'internal.admin.Admin.GetDebugJobState', job_ref)
active_invs = state.get('activeInvocations', [])
if not active_invs:
print ' No active invocations'
return []
stuck = []
for inv_id in active_invs:
print ' ...checking %s' % inv_id
inv = prpc(host, 'scheduler.Scheduler.GetInvocation', {
'jobRef': job_ref,
'invocationId': inv_id,
})
started = time.time() - int(inv['startedTs']) / 1000000.0
if started > 24 * 3600:
print ' it is stuck!'
stuck.append((job_ref, inv_id))
return stuck
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage: %s <host>' % sys.argv[0]
return 1
host = sys.argv[1]
stuck = []
for job in prpc(host, 'scheduler.Scheduler.GetJobs', {})['jobs']:
stuck.extend(check_job(host, job['jobRef']))
if not stuck:
print 'No invocations are stuck'
return
print
print 'All stuck invocations: '
for job_ref, inv_id in stuck:
print '%s/%s %s' % (job_ref['project'], job_ref['job'], inv_id)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add adhoc script to detect jobs with stuck ActiveInvocations list.
|
[scheduler] Add adhoc script to detect jobs with stuck ActiveInvocations list.
R=a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org
BUG=852142
Change-Id: Idae7f05c5045a72ff85db8587f8bd74c0b80fb06
Reviewed-on: https://chromium-review.googlesource.com/1098463
Reviewed-by: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org>
Commit-Queue: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org>
|
Python
|
apache-2.0
|
luci/luci-go,luci/luci-go,luci/luci-go,luci/luci-go,luci/luci-go,luci/luci-go
|
[scheduler] Add adhoc script to detect jobs with stuck ActiveInvocations list.
R=a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org
BUG=852142
Change-Id: Idae7f05c5045a72ff85db8587f8bd74c0b80fb06
Reviewed-on: https://chromium-review.googlesource.com/1098463
Reviewed-by: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org>
Commit-Queue: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org>
|
#!/usr/bin/env python
# Copyright 2018 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds jobs with old entries (>1d) in ActiveInvocations list.
Usage:
prpc login
./detect_stuck_active_invs.py luci-scheduler-dev.appspot.com
Requires the caller to be in 'administrators' group.
"""
import json
import subprocess
import sys
import time
def prpc(host, method, body):
p = subprocess.Popen(
['prpc', 'call', host, method],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(json.dumps(body))
if p.returncode:
raise Exception('Call to %s failed' % method)
return json.loads(out)
def check_job(host, job_ref):
print 'Checking %s/%s' % (job_ref['project'], job_ref['job'])
state = prpc(host, 'internal.admin.Admin.GetDebugJobState', job_ref)
active_invs = state.get('activeInvocations', [])
if not active_invs:
print ' No active invocations'
return []
stuck = []
for inv_id in active_invs:
print ' ...checking %s' % inv_id
inv = prpc(host, 'scheduler.Scheduler.GetInvocation', {
'jobRef': job_ref,
'invocationId': inv_id,
})
started = time.time() - int(inv['startedTs']) / 1000000.0
if started > 24 * 3600:
print ' it is stuck!'
stuck.append((job_ref, inv_id))
return stuck
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage: %s <host>' % sys.argv[0]
return 1
host = sys.argv[1]
stuck = []
for job in prpc(host, 'scheduler.Scheduler.GetJobs', {})['jobs']:
stuck.extend(check_job(host, job['jobRef']))
if not stuck:
print 'No invocations are stuck'
return
print
print 'All stuck invocations: '
for job_ref, inv_id in stuck:
print '%s/%s %s' % (job_ref['project'], job_ref['job'], inv_id)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>[scheduler] Add adhoc script to detect jobs with stuck ActiveInvocations list.
R=a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org
BUG=852142
Change-Id: Idae7f05c5045a72ff85db8587f8bd74c0b80fb06
Reviewed-on: https://chromium-review.googlesource.com/1098463
Reviewed-by: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org>
Commit-Queue: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org><commit_after>
|
#!/usr/bin/env python
# Copyright 2018 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds jobs with old entries (>1d) in ActiveInvocations list.
Usage:
prpc login
./detect_stuck_active_invs.py luci-scheduler-dev.appspot.com
Requires the caller to be in 'administrators' group.
"""
import json
import subprocess
import sys
import time
def prpc(host, method, body):
p = subprocess.Popen(
['prpc', 'call', host, method],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(json.dumps(body))
if p.returncode:
raise Exception('Call to %s failed' % method)
return json.loads(out)
def check_job(host, job_ref):
print 'Checking %s/%s' % (job_ref['project'], job_ref['job'])
state = prpc(host, 'internal.admin.Admin.GetDebugJobState', job_ref)
active_invs = state.get('activeInvocations', [])
if not active_invs:
print ' No active invocations'
return []
stuck = []
for inv_id in active_invs:
print ' ...checking %s' % inv_id
inv = prpc(host, 'scheduler.Scheduler.GetInvocation', {
'jobRef': job_ref,
'invocationId': inv_id,
})
started = time.time() - int(inv['startedTs']) / 1000000.0
if started > 24 * 3600:
print ' it is stuck!'
stuck.append((job_ref, inv_id))
return stuck
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage: %s <host>' % sys.argv[0]
return 1
host = sys.argv[1]
stuck = []
for job in prpc(host, 'scheduler.Scheduler.GetJobs', {})['jobs']:
stuck.extend(check_job(host, job['jobRef']))
if not stuck:
print 'No invocations are stuck'
return
print
print 'All stuck invocations: '
for job_ref, inv_id in stuck:
print '%s/%s %s' % (job_ref['project'], job_ref['job'], inv_id)
return 0
if __name__ == '__main__':
sys.exit(main())
|
[scheduler] Add adhoc script to detect jobs with stuck ActiveInvocations list.
R=a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org
BUG=852142
Change-Id: Idae7f05c5045a72ff85db8587f8bd74c0b80fb06
Reviewed-on: https://chromium-review.googlesource.com/1098463
Reviewed-by: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org>
Commit-Queue: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org>#!/usr/bin/env python
# Copyright 2018 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds jobs with old entries (>1d) in ActiveInvocations list.
Usage:
prpc login
./detect_stuck_active_invs.py luci-scheduler-dev.appspot.com
Requires the caller to be in 'administrators' group.
"""
import json
import subprocess
import sys
import time
def prpc(host, method, body):
p = subprocess.Popen(
['prpc', 'call', host, method],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(json.dumps(body))
if p.returncode:
raise Exception('Call to %s failed' % method)
return json.loads(out)
def check_job(host, job_ref):
print 'Checking %s/%s' % (job_ref['project'], job_ref['job'])
state = prpc(host, 'internal.admin.Admin.GetDebugJobState', job_ref)
active_invs = state.get('activeInvocations', [])
if not active_invs:
print ' No active invocations'
return []
stuck = []
for inv_id in active_invs:
print ' ...checking %s' % inv_id
inv = prpc(host, 'scheduler.Scheduler.GetInvocation', {
'jobRef': job_ref,
'invocationId': inv_id,
})
started = time.time() - int(inv['startedTs']) / 1000000.0
if started > 24 * 3600:
print ' it is stuck!'
stuck.append((job_ref, inv_id))
return stuck
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage: %s <host>' % sys.argv[0]
return 1
host = sys.argv[1]
stuck = []
for job in prpc(host, 'scheduler.Scheduler.GetJobs', {})['jobs']:
stuck.extend(check_job(host, job['jobRef']))
if not stuck:
print 'No invocations are stuck'
return
print
print 'All stuck invocations: '
for job_ref, inv_id in stuck:
print '%s/%s %s' % (job_ref['project'], job_ref['job'], inv_id)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>[scheduler] Add adhoc script to detect jobs with stuck ActiveInvocations list.
R=a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org
BUG=852142
Change-Id: Idae7f05c5045a72ff85db8587f8bd74c0b80fb06
Reviewed-on: https://chromium-review.googlesource.com/1098463
Reviewed-by: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org>
Commit-Queue: Vadim Shtayura <9f116ddb1b24f6fc1916a676eb17161b6c07dfc1@chromium.org><commit_after>#!/usr/bin/env python
# Copyright 2018 The LUCI Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finds jobs with old entries (>1d) in ActiveInvocations list.
Usage:
prpc login
./detect_stuck_active_invs.py luci-scheduler-dev.appspot.com
Requires the caller to be in 'administrators' group.
"""
import json
import subprocess
import sys
import time
def prpc(host, method, body):
p = subprocess.Popen(
['prpc', 'call', host, method],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, _ = p.communicate(json.dumps(body))
if p.returncode:
raise Exception('Call to %s failed' % method)
return json.loads(out)
def check_job(host, job_ref):
print 'Checking %s/%s' % (job_ref['project'], job_ref['job'])
state = prpc(host, 'internal.admin.Admin.GetDebugJobState', job_ref)
active_invs = state.get('activeInvocations', [])
if not active_invs:
print ' No active invocations'
return []
stuck = []
for inv_id in active_invs:
print ' ...checking %s' % inv_id
inv = prpc(host, 'scheduler.Scheduler.GetInvocation', {
'jobRef': job_ref,
'invocationId': inv_id,
})
started = time.time() - int(inv['startedTs']) / 1000000.0
if started > 24 * 3600:
print ' it is stuck!'
stuck.append((job_ref, inv_id))
return stuck
def main():
if len(sys.argv) != 2:
print >> sys.stderr, 'Usage: %s <host>' % sys.argv[0]
return 1
host = sys.argv[1]
stuck = []
for job in prpc(host, 'scheduler.Scheduler.GetJobs', {})['jobs']:
stuck.extend(check_job(host, job['jobRef']))
if not stuck:
print 'No invocations are stuck'
return
print
print 'All stuck invocations: '
for job_ref, inv_id in stuck:
print '%s/%s %s' % (job_ref['project'], job_ref['job'], inv_id)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
cf78037980a9345c12b1e2562bc4eda63cea95b3
|
test/functionalities/backticks/TestBackticksWithoutATarget.py
|
test/functionalities/backticks/TestBackticksWithoutATarget.py
|
"""
Test that backticks without a target should work (not infinite looping).
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class BackticksWithNoTargetTestCase(TestBase):
mydir = "functionalities/backticks"
def test_backticks_no_target(self):
"""A simple test of backticks without a target."""
self.expect("print `1+2-3`",
substrs = [' = 0'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a simple regression test to go with r143260. CommandInterpreter::PreprocessCommand() should not infinite loop when a target has not been specified yet.
|
Add a simple regression test to go with r143260.
CommandInterpreter::PreprocessCommand() should not infinite loop
when a target has not been specified yet.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@143274 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
Add a simple regression test to go with r143260.
CommandInterpreter::PreprocessCommand() should not infinite loop
when a target has not been specified yet.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@143274 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""
Test that backticks without a target should work (not infinite looping).
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class BackticksWithNoTargetTestCase(TestBase):
mydir = "functionalities/backticks"
def test_backticks_no_target(self):
"""A simple test of backticks without a target."""
self.expect("print `1+2-3`",
substrs = [' = 0'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a simple regression test to go with r143260.
CommandInterpreter::PreprocessCommand() should not infinite loop
when a target has not been specified yet.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@143274 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""
Test that backticks without a target should work (not infinite looping).
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class BackticksWithNoTargetTestCase(TestBase):
mydir = "functionalities/backticks"
def test_backticks_no_target(self):
"""A simple test of backticks without a target."""
self.expect("print `1+2-3`",
substrs = [' = 0'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a simple regression test to go with r143260.
CommandInterpreter::PreprocessCommand() should not infinite loop
when a target has not been specified yet.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@143274 91177308-0d34-0410-b5e6-96231b3b80d8"""
Test that backticks without a target should work (not infinite looping).
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class BackticksWithNoTargetTestCase(TestBase):
mydir = "functionalities/backticks"
def test_backticks_no_target(self):
"""A simple test of backticks without a target."""
self.expect("print `1+2-3`",
substrs = [' = 0'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a simple regression test to go with r143260.
CommandInterpreter::PreprocessCommand() should not infinite loop
when a target has not been specified yet.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@143274 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""
Test that backticks without a target should work (not infinite looping).
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class BackticksWithNoTargetTestCase(TestBase):
mydir = "functionalities/backticks"
def test_backticks_no_target(self):
"""A simple test of backticks without a target."""
self.expect("print `1+2-3`",
substrs = [' = 0'])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
ff2fb40e961fe7b0c3f6dd6e91d8fb79a865a631
|
src/ggrc_basic_permissions/migrations/versions/20131108224846_37b63b122038_hide_the_auditorread.py
|
src/ggrc_basic_permissions/migrations/versions/20131108224846_37b63b122038_hide_the_auditorread.py
|
"""Hide the AuditorReader role by changing its scope.
Revision ID: 37b63b122038
Revises: 1ff082d26157
Create Date: 2013-11-08 22:48:46.956836
"""
# revision identifiers, used by Alembic.
revision = '37b63b122038'
down_revision = '1ff082d26157'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
column('scope', sa.String),
)
def upgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System Implied'})
)
def downgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System'})
)
|
Hide the AuditorReader role in the system role assignment modal by placing it in an "implied" scope.
|
Hide the AuditorReader role in the system role assignment modal by
placing it in an "implied" scope.
|
Python
|
apache-2.0
|
josthkko/ggrc-core,uskudnik/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,vladan-m/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,uskudnik/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,vladan-m/ggrc-core,vladan-m/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core
|
Hide the AuditorReader role in the system role assignment modal by
placing it in an "implied" scope.
|
"""Hide the AuditorReader role by changing its scope.
Revision ID: 37b63b122038
Revises: 1ff082d26157
Create Date: 2013-11-08 22:48:46.956836
"""
# revision identifiers, used by Alembic.
revision = '37b63b122038'
down_revision = '1ff082d26157'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
column('scope', sa.String),
)
def upgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System Implied'})
)
def downgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System'})
)
|
<commit_before><commit_msg>Hide the AuditorReader role in the system role assignment modal by
placing it in an "implied" scope.<commit_after>
|
"""Hide the AuditorReader role by changing its scope.
Revision ID: 37b63b122038
Revises: 1ff082d26157
Create Date: 2013-11-08 22:48:46.956836
"""
# revision identifiers, used by Alembic.
revision = '37b63b122038'
down_revision = '1ff082d26157'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
column('scope', sa.String),
)
def upgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System Implied'})
)
def downgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System'})
)
|
Hide the AuditorReader role in the system role assignment modal by
placing it in an "implied" scope.
"""Hide the AuditorReader role by changing its scope.
Revision ID: 37b63b122038
Revises: 1ff082d26157
Create Date: 2013-11-08 22:48:46.956836
"""
# revision identifiers, used by Alembic.
revision = '37b63b122038'
down_revision = '1ff082d26157'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
column('scope', sa.String),
)
def upgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System Implied'})
)
def downgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System'})
)
|
<commit_before><commit_msg>Hide the AuditorReader role in the system role assignment modal by
placing it in an "implied" scope.<commit_after>
"""Hide the AuditorReader role by changing its scope.
Revision ID: 37b63b122038
Revises: 1ff082d26157
Create Date: 2013-11-08 22:48:46.956836
"""
# revision identifiers, used by Alembic.
revision = '37b63b122038'
down_revision = '1ff082d26157'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
column('scope', sa.String),
)
def upgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System Implied'})
)
def downgrade():
op.execute(
roles_table.update()\
.where(roles_table.c.name == 'AuditorReader')\
.values({'scope': 'System'})
)
|
|
6e535a2d597f172d9342fb8a547335890c474b49
|
src/config-sample.py
|
src/config-sample.py
|
FLASK_SECRET_KEY = 'Enter a Flask Secret Key'
# OAuth Credentials. You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_CLIENT_ID = 'Enter Yelp Client ID'
YELP_CLIENT_SECRET = 'Enter Yelp Client Secret'
|
Add a sample config file
|
Add a sample config file
|
Python
|
mit
|
byanofsky/playa-vista-neighborhood,byanofsky/playa-vista-neighborhood,byanofsky/playa-vista-neighborhood
|
Add a sample config file
|
FLASK_SECRET_KEY = 'Enter a Flask Secret Key'
# OAuth Credentials. You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_CLIENT_ID = 'Enter Yelp Client ID'
YELP_CLIENT_SECRET = 'Enter Yelp Client Secret'
|
<commit_before><commit_msg>Add a sample config file<commit_after>
|
FLASK_SECRET_KEY = 'Enter a Flask Secret Key'
# OAuth Credentials. You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_CLIENT_ID = 'Enter Yelp Client ID'
YELP_CLIENT_SECRET = 'Enter Yelp Client Secret'
|
Add a sample config fileFLASK_SECRET_KEY = 'Enter a Flask Secret Key'
# OAuth Credentials. You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_CLIENT_ID = 'Enter Yelp Client ID'
YELP_CLIENT_SECRET = 'Enter Yelp Client Secret'
|
<commit_before><commit_msg>Add a sample config file<commit_after>FLASK_SECRET_KEY = 'Enter a Flask Secret Key'
# OAuth Credentials. You can find them on
# https://www.yelp.com/developers/v3/manage_app
YELP_CLIENT_ID = 'Enter Yelp Client ID'
YELP_CLIENT_SECRET = 'Enter Yelp Client Secret'
|
|
22298d91fff788c37395cdad9245b3e7ed20cfdf
|
python/opencv/opencv_2/images/display_image_with_matplotlib.py
|
python/opencv/opencv_2/images/display_image_with_matplotlib.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Display image: display an image given in arguments
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-an-image
"""
from __future__ import print_function
import cv2 as cv
import argparse
from matplotlib import pyplot as plt
def main():
    # Parse the program options (get the path of the image file to display)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The picture file to display", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
# imread_flags is a flag which specifies the way image should be read:
# - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
# - cv.IMREAD_GRAYSCALE loads image in grayscale mode
# - cv.IMREAD_UNCHANGED loads image as such including alpha channel
imread_flags = cv.IMREAD_GRAYSCALE
img_np = cv.imread(infile_str, imread_flags)
plt.imshow(img_np, cmap='gray', interpolation='none') # Display the image "img_np" with matplotlib
plt.xticks([]) # to hide tick values on X axis
plt.yticks([]) # to hide tick values on Y axis
plt.show()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).
|
Add a snippet (Python OpenCV).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (Python OpenCV).
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Display image: display an image given in arguments
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-an-image
"""
from __future__ import print_function
import cv2 as cv
import argparse
from matplotlib import pyplot as plt
def main():
    # Parse the program options (get the path of the image file to display)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The picture file to display", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
# imread_flags is a flag which specifies the way image should be read:
# - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
# - cv.IMREAD_GRAYSCALE loads image in grayscale mode
# - cv.IMREAD_UNCHANGED loads image as such including alpha channel
imread_flags = cv.IMREAD_GRAYSCALE
img_np = cv.imread(infile_str, imread_flags)
plt.imshow(img_np, cmap='gray', interpolation='none') # Display the image "img_np" with matplotlib
plt.xticks([]) # to hide tick values on X axis
plt.yticks([]) # to hide tick values on Y axis
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Display image: display an image given in arguments
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-an-image
"""
from __future__ import print_function
import cv2 as cv
import argparse
from matplotlib import pyplot as plt
def main():
    # Parse the program options (get the path of the image file to display)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The picture file to display", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
# imread_flags is a flag which specifies the way image should be read:
# - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
# - cv.IMREAD_GRAYSCALE loads image in grayscale mode
# - cv.IMREAD_UNCHANGED loads image as such including alpha channel
imread_flags = cv.IMREAD_GRAYSCALE
img_np = cv.imread(infile_str, imread_flags)
plt.imshow(img_np, cmap='gray', interpolation='none') # Display the image "img_np" with matplotlib
plt.xticks([]) # to hide tick values on X axis
plt.yticks([]) # to hide tick values on Y axis
plt.show()
if __name__ == '__main__':
main()
|
Add a snippet (Python OpenCV).#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Display image: display an image given in arguments
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-an-image
"""
from __future__ import print_function
import cv2 as cv
import argparse
from matplotlib import pyplot as plt
def main():
    # Parse the program options (get the path of the image file to display)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The picture file to display", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
# imread_flags is a flag which specifies the way image should be read:
# - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
# - cv.IMREAD_GRAYSCALE loads image in grayscale mode
# - cv.IMREAD_UNCHANGED loads image as such including alpha channel
imread_flags = cv.IMREAD_GRAYSCALE
img_np = cv.imread(infile_str, imread_flags)
plt.imshow(img_np, cmap='gray', interpolation='none') # Display the image "img_np" with matplotlib
plt.xticks([]) # to hide tick values on X axis
plt.yticks([]) # to hide tick values on Y axis
plt.show()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python OpenCV).<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Display image: display an image given in arguments
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_image_display/py_image_display.html#display-an-image
"""
from __future__ import print_function
import cv2 as cv
import argparse
from matplotlib import pyplot as plt
def main():
    # Parse the program options (get the path of the image file to display)
parser = argparse.ArgumentParser(description='An opencv snippet.')
parser.add_argument("--infile", "-i", help="The picture file to display", required=True, metavar="FILE")
args = parser.parse_args()
infile_str = args.infile
# OpenCV
# imread_flags is a flag which specifies the way image should be read:
# - cv.IMREAD_COLOR loads a color image. Any transparency of image will be neglected. It is the default flag.
# - cv.IMREAD_GRAYSCALE loads image in grayscale mode
# - cv.IMREAD_UNCHANGED loads image as such including alpha channel
imread_flags = cv.IMREAD_GRAYSCALE
img_np = cv.imread(infile_str, imread_flags)
plt.imshow(img_np, cmap='gray', interpolation='none') # Display the image "img_np" with matplotlib
plt.xticks([]) # to hide tick values on X axis
plt.yticks([]) # to hide tick values on Y axis
plt.show()
if __name__ == '__main__':
main()
|
|
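One detail the grayscale snippet above glosses over: cv.imread returns color images in BGR channel order, while matplotlib expects RGB, so a color variant needs an explicit conversion. The lines below are an illustrative sketch, not part of the commit; the file name is a placeholder.
import cv2 as cv
from matplotlib import pyplot as plt
img_bgr = cv.imread("example.png", cv.IMREAD_COLOR)  # placeholder path
img_rgb = cv.cvtColor(img_bgr, cv.COLOR_BGR2RGB)  # convert to the RGB order matplotlib expects
plt.imshow(img_rgb, interpolation='none')
plt.xticks([])
plt.yticks([])
plt.show()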
ba9e4c6b003cc002e5bc7216da960e47f9fe5424
|
copper_imidazole_csv_allnitrogen.py
|
copper_imidazole_csv_allnitrogen.py
|
#!/usr/bin/env python2
import orca_parser
from copper_imidazole_analysis import CopperImidazoleAnalysis
import argparse
import csv
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of all nitrogen parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="nitrogen.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for name in namelist:
csvwriter.writerow([name])
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper (MHz)",
"euler_copper (deg.)",
"NQCC_copper (MHz)",
"eta_copper"])
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_copper = cia.copper_id(orcafile)
atensor_copper = cia.hyperfine(orcafile, id_copper)
euler_copper = cia.euler(orcafile, id_copper)
nqi_copper, nqcc_copper, eta_copper = cia.nqi(orcafile, id_copper)
csvwriter.writerow([gtensor,
id_copper,
atensor_copper,
euler_copper,
nqcc_copper,
eta_copper])
csvwriter.writerow(["",
"id_nitrogen",
"A_nitrogen (MHz)",
"euler_nitrogen (deg.)",
"NQCC_nitrogen (MHz)",
"eta_nitrogen",
"Cu_N_distance (Angstroms)"])
nitrogen_list = orcafile.find_element("N")
for id_nitrogen in nitrogen_list:
atensor_nitrogen = cia.hyperfine(orcafile, id_nitrogen)
euler_nitrogen = cia.euler(orcafile, id_nitrogen)
nqi_nitrogen, nqcc_nitrogen, eta_nitrogen = cia.nqi(orcafile, id_nitrogen)
cu_n_dist = orcafile.pair_distance(id_copper, id_nitrogen)
csvwriter.writerow(["",
id_nitrogen,
atensor_nitrogen,
euler_nitrogen,
nqcc_nitrogen,
eta_nitrogen,
cu_n_dist])
|
Print information about all nitrogens.
|
Print information about all nitrogens.
|
Python
|
mpl-2.0
|
berquist/orcaparse
|
Print information about all nitrogens.
|
#!/usr/bin/env python2
import orca_parser
from copper_imidazole_analysis import CopperImidazoleAnalysis
import argparse
import csv
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of all nitrogen parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="nitrogen.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for name in namelist:
csvwriter.writerow([name])
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper (MHz)",
"euler_copper (deg.)",
"NQCC_copper (MHz)",
"eta_copper"])
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_copper = cia.copper_id(orcafile)
atensor_copper = cia.hyperfine(orcafile, id_copper)
euler_copper = cia.euler(orcafile, id_copper)
nqi_copper, nqcc_copper, eta_copper = cia.nqi(orcafile, id_copper)
csvwriter.writerow([gtensor,
id_copper,
atensor_copper,
euler_copper,
nqcc_copper,
eta_copper])
csvwriter.writerow(["",
"id_nitrogen",
"A_nitrogen (MHz)",
"euler_nitrogen (deg.)",
"NQCC_nitrogen (MHz)",
"eta_nitrogen",
"Cu_N_distance (Angstroms)"])
nitrogen_list = orcafile.find_element("N")
for id_nitrogen in nitrogen_list:
atensor_nitrogen = cia.hyperfine(orcafile, id_nitrogen)
euler_nitrogen = cia.euler(orcafile, id_nitrogen)
nqi_nitrogen, nqcc_nitrogen, eta_nitrogen = cia.nqi(orcafile, id_nitrogen)
cu_n_dist = orcafile.pair_distance(id_copper, id_nitrogen)
csvwriter.writerow(["",
id_nitrogen,
atensor_nitrogen,
euler_nitrogen,
nqcc_nitrogen,
eta_nitrogen,
cu_n_dist])
|
<commit_before><commit_msg>Print information about all nitrogens.<commit_after>
|
#!/usr/bin/env python2
import orca_parser
from copper_imidazole_analysis import CopperImidazoleAnalysis
import argparse
import csv
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of all nitrogen parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="nitrogen.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for name in namelist:
csvwriter.writerow([name])
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper (MHz)",
"euler_copper (deg.)",
"NQCC_copper (MHz)",
"eta_copper"])
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_copper = cia.copper_id(orcafile)
atensor_copper = cia.hyperfine(orcafile, id_copper)
euler_copper = cia.euler(orcafile, id_copper)
nqi_copper, nqcc_copper, eta_copper = cia.nqi(orcafile, id_copper)
csvwriter.writerow([gtensor,
id_copper,
atensor_copper,
euler_copper,
nqcc_copper,
eta_copper])
csvwriter.writerow(["",
"id_nitrogen",
"A_nitrogen (MHz)",
"euler_nitrogen (deg.)",
"NQCC_nitrogen (MHz)",
"eta_nitrogen",
"Cu_N_distance (Angstroms)"])
nitrogen_list = orcafile.find_element("N")
for id_nitrogen in nitrogen_list:
atensor_nitrogen = cia.hyperfine(orcafile, id_nitrogen)
euler_nitrogen = cia.euler(orcafile, id_nitrogen)
nqi_nitrogen, nqcc_nitrogen, eta_nitrogen = cia.nqi(orcafile, id_nitrogen)
cu_n_dist = orcafile.pair_distance(id_copper, id_nitrogen)
csvwriter.writerow(["",
id_nitrogen,
atensor_nitrogen,
euler_nitrogen,
nqcc_nitrogen,
eta_nitrogen,
cu_n_dist])
|
Print information about all nitrogens.#!/usr/bin/env python2
import orca_parser
from copper_imidazole_analysis import CopperImidazoleAnalysis
import argparse
import csv
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of all nitrogen parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="nitrogen.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for name in namelist:
csvwriter.writerow([name])
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper (MHz)",
"euler_copper (deg.)",
"NQCC_copper (MHz)",
"eta_copper"])
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_copper = cia.copper_id(orcafile)
atensor_copper = cia.hyperfine(orcafile, id_copper)
euler_copper = cia.euler(orcafile, id_copper)
nqi_copper, nqcc_copper, eta_copper = cia.nqi(orcafile, id_copper)
csvwriter.writerow([gtensor,
id_copper,
atensor_copper,
euler_copper,
nqcc_copper,
eta_copper])
csvwriter.writerow(["",
"id_nitrogen",
"A_nitrogen (MHz)",
"euler_nitrogen (deg.)",
"NQCC_nitrogen (MHz)",
"eta_nitrogen",
"Cu_N_distance (Angstroms)"])
nitrogen_list = orcafile.find_element("N")
for id_nitrogen in nitrogen_list:
atensor_nitrogen = cia.hyperfine(orcafile, id_nitrogen)
euler_nitrogen = cia.euler(orcafile, id_nitrogen)
nqi_nitrogen, nqcc_nitrogen, eta_nitrogen = cia.nqi(orcafile, id_nitrogen)
cu_n_dist = orcafile.pair_distance(id_copper, id_nitrogen)
csvwriter.writerow(["",
id_nitrogen,
atensor_nitrogen,
euler_nitrogen,
nqcc_nitrogen,
eta_nitrogen,
cu_n_dist])
|
<commit_before><commit_msg>Print information about all nitrogens.<commit_after>#!/usr/bin/env python2
import orca_parser
from copper_imidazole_analysis import CopperImidazoleAnalysis
import argparse
import csv
cia = CopperImidazoleAnalysis()
parser = argparse.ArgumentParser(description="Given pathnames of ORCA output files, make a dump of all nitrogen parameters to a CSV file.")
parser.add_argument("--csvname", dest="csvname", metavar="<CSV output root name>", type=str, default="nitrogen.csv", help="optional name for the CSV output file")
parser.add_argument(dest="namelist", metavar="<ORCA filename>", nargs="+", type=str, default=None, help="ORCA output files")
args = parser.parse_args()
namelist = args.namelist
with open(args.csvname, 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for name in namelist:
csvwriter.writerow([name])
csvwriter.writerow(["g-tensor",
"id_copper",
"A_copper (MHz)",
"euler_copper (deg.)",
"NQCC_copper (MHz)",
"eta_copper"])
orcafile = orca_parser.ORCAOutputParser(name)
gtensor, giso = orcafile.return_gtensor()
id_copper = cia.copper_id(orcafile)
atensor_copper = cia.hyperfine(orcafile, id_copper)
euler_copper = cia.euler(orcafile, id_copper)
nqi_copper, nqcc_copper, eta_copper = cia.nqi(orcafile, id_copper)
csvwriter.writerow([gtensor,
id_copper,
atensor_copper,
euler_copper,
nqcc_copper,
eta_copper])
csvwriter.writerow(["",
"id_nitrogen",
"A_nitrogen (MHz)",
"euler_nitrogen (deg.)",
"NQCC_nitrogen (MHz)",
"eta_nitrogen",
"Cu_N_distance (Angstroms)"])
nitrogen_list = orcafile.find_element("N")
for id_nitrogen in nitrogen_list:
atensor_nitrogen = cia.hyperfine(orcafile, id_nitrogen)
euler_nitrogen = cia.euler(orcafile, id_nitrogen)
nqi_nitrogen, nqcc_nitrogen, eta_nitrogen = cia.nqi(orcafile, id_nitrogen)
cu_n_dist = orcafile.pair_distance(id_copper, id_nitrogen)
csvwriter.writerow(["",
id_nitrogen,
atensor_nitrogen,
euler_nitrogen,
nqcc_nitrogen,
eta_nitrogen,
cu_n_dist])
|
|
eb0772fc6c30d98b83bf1c8e7d83af21066ae45b
|
data_structures/Stack/Python/Stack.py
|
data_structures/Stack/Python/Stack.py
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
# Return the last element of the stack array (without removing it).
def peek(self):
return self.stack[-1]
|
Add peek method and implementation
|
Add peek method and implementation
|
Python
|
cc0-1.0
|
Deepak345/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,manikTharaka/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,EUNIX-TRIX/al-go-rithms,EUNIX-TRIX/al-go-rithms,Cnidarias/al-go-rithms,EUNIX-TRIX/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)Add peek method and implementation
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
# Return the last element of the stack array (without removing it).
def peek(self):
return self.stack[-1]
|
<commit_before># Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)<commit_msg>Add peek method and implementation<commit_after>
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
# Return the last element of the stack array (without removing it).
def peek(self):
return self.stack[-1]
|
# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)Add peek method and implementation# Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
# Return the last element of the stack array (without removing it).
def peek(self):
return self.stack[-1]
|
<commit_before># Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)<commit_msg>Add peek method and implementation<commit_after># Author: AlexBanks97
# Purpose: LIFO Stack implementation using python array.
# Date: October 15th 2017
class Stack(object):
def __init__(self):
# Initialize stack as empty array
self.stack = []
# Return and remove the last element of the stack array.
def pop(self):
# If the stack is not empty, pop.
        if len(self.stack) > 0:
return self.stack.pop()
# Add an element to the end of the stack array.
def push(self, element):
self.stack.append(element)
# Return the last element of the stack array (without removing it).
def peek(self):
return self.stack[-1]
|
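A short usage sketch for the stack above (an editor's illustration, not part of the commit), exercising push, peek and pop:
s = Stack()
s.push(1)
s.push(2)
assert s.peek() == 2  # peek returns the top element without removing it
assert s.pop() == 2   # pop removes and returns the top element
assert s.peek() == 1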
825c4d613915d43aea2e6ee0bc5d5b49ed0a4500
|
emission/analysis/classification/segmentation/section_segmentation.py
|
emission/analysis/classification/segmentation/section_segmentation.py
|
# Standard imports
import attrdict as ad
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.classification.cleaning.location_smoothing as ls
import emission.analysis.point_features as pf
import emission.storage.decorations.location_queries as lq
def segment_into_sections(trip):
points_df = lq.get_activities_for_section(trip)
no_tilting_points_df = points_df[points_df.activity != lq.Activities.TILTING]
section_list = []
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": trip.start_ts, "start_time": trip.start_time,
"activity": no_tilting_points_df.iloc[0].activity})
for idx, row in enumerate(no_tilting_points_df.to_dict('records')):
if row["activity"] != curr_section.activity:
# Let's add a second check here for confidence and types of activities
if (row['agc'] > 60 and
row['activity'] != lq.Activities.UNKNOWN and
row['activity'] != lq.Activities.STILL):
# Because the first section is initialized with the first activity.
# So when idx == 0, the activities will be equal and this is
# guaranteed to not be invoked
assert(idx > 0)
prev_ts = no_tilting_points_df.iloc[idx-1]["write_ts"]
print("At %s, found new activity %s compared to current %s - creating new section with start_time %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)),
row["activity"], curr_section.activity,
str(pydt.datetime.fromtimestamp(prev_ts/1000))))
# complete this section
curr_section.end_ts = prev_ts
curr_section.end_time = str(pydt.datetime.fromtimestamp(curr_section.end_ts/1000))
section_list.append(curr_section)
# make a new section
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": prev_ts,
"start_time": pydt.datetime.fromtimestamp(prev_ts/1000),
"activity": row["activity"]})
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
print("Detected trip end! Ending section at %s" % trip.end_time)
# End the last section at the same time as the trip
curr_section.end_ts = trip.end_ts
curr_section.end_time = trip.end_time
section_list.append(curr_section)
return section_list
|
Create a simple method to segment a trip into sections
|
Create a simple method to segment a trip into sections
This is purely based on the activity detection by Android.
It assumes that we have all activity updates. It uses a fairly naive algorithm
with a threshold of 60% confidence, ignoring STILL, TILTING and UNKNOWN
movements, and segmenting every time the mode changes. It seems to work pretty
well for now, although I will tune it on an ongoing basis for the next few
days.
|
Python
|
bsd-3-clause
|
shankari/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,yw374cornell/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server,e-mission/e-mission-server
|
Create a simple method to segment a trip into sections
This is purely based on the activity detection by Android.
It assumes that we have all activity updates. It uses a fairly naive algorithm
with a threshold of 60% confidence, ignoring STILL, TILTING and UNKNOWN
movements, and segmenting every time the mode changes. It seems to work pretty
well for now, although I will tune it on an ongoing basis for the next few
days.
|
# Standard imports
import attrdict as ad
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.classification.cleaning.location_smoothing as ls
import emission.analysis.point_features as pf
import emission.storage.decorations.location_queries as lq
def segment_into_sections(trip):
points_df = lq.get_activities_for_section(trip)
no_tilting_points_df = points_df[points_df.activity != lq.Activities.TILTING]
section_list = []
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": trip.start_ts, "start_time": trip.start_time,
"activity": no_tilting_points_df.iloc[0].activity})
for idx, row in enumerate(no_tilting_points_df.to_dict('records')):
if row["activity"] != curr_section.activity:
# Let's add a second check here for confidence and types of activities
if (row['agc'] > 60 and
row['activity'] != lq.Activities.UNKNOWN and
row['activity'] != lq.Activities.STILL):
# Because the first section is initialized with the first activity.
# So when idx == 0, the activities will be equal and this is
# guaranteed to not be invoked
assert(idx > 0)
prev_ts = no_tilting_points_df.iloc[idx-1]["write_ts"]
print("At %s, found new activity %s compared to current %s - creating new section with start_time %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)),
row["activity"], curr_section.activity,
str(pydt.datetime.fromtimestamp(prev_ts/1000))))
# complete this section
curr_section.end_ts = prev_ts
curr_section.end_time = str(pydt.datetime.fromtimestamp(curr_section.end_ts/1000))
section_list.append(curr_section)
# make a new section
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": prev_ts,
"start_time": pydt.datetime.fromtimestamp(prev_ts/1000),
"activity": row["activity"]})
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
print("Detected trip end! Ending section at %s" % trip.end_time)
# End the last section at the same time as the trip
curr_section.end_ts = trip.end_ts
curr_section.end_time = trip.end_time
section_list.append(curr_section)
return section_list
|
<commit_before><commit_msg>Create a simple method to segment a trip into sections
This is purely based on the activity detection by Android.
It assumes that we have all activity updates. It uses a fairly naive algorithm
with a threshold of 60% confidence, ignoring STILL, TILTING and UNKNOWN
movements, and segmenting every time the mode changes. It seems to work pretty
well for now, although I will tune it on an ongoing basis for the next few
days.<commit_after>
|
# Standard imports
import attrdict as ad
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.classification.cleaning.location_smoothing as ls
import emission.analysis.point_features as pf
import emission.storage.decorations.location_queries as lq
def segment_into_sections(trip):
points_df = lq.get_activities_for_section(trip)
no_tilting_points_df = points_df[points_df.activity != lq.Activities.TILTING]
section_list = []
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": trip.start_ts, "start_time": trip.start_time,
"activity": no_tilting_points_df.iloc[0].activity})
for idx, row in enumerate(no_tilting_points_df.to_dict('records')):
if row["activity"] != curr_section.activity:
# Let's add a second check here for confidence and types of activities
if (row['agc'] > 60 and
row['activity'] != lq.Activities.UNKNOWN and
row['activity'] != lq.Activities.STILL):
# Because the first section is initialized with the first activity.
# So when idx == 0, the activities will be equal and this is
# guaranteed to not be invoked
assert(idx > 0)
prev_ts = no_tilting_points_df.iloc[idx-1]["write_ts"]
print("At %s, found new activity %s compared to current %s - creating new section with start_time %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)),
row["activity"], curr_section.activity,
str(pydt.datetime.fromtimestamp(prev_ts/1000))))
# complete this section
curr_section.end_ts = prev_ts
curr_section.end_time = str(pydt.datetime.fromtimestamp(curr_section.end_ts/1000))
section_list.append(curr_section)
# make a new section
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": prev_ts,
"start_time": pydt.datetime.fromtimestamp(prev_ts/1000),
"activity": row["activity"]})
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
print("Detected trip end! Ending section at %s" % trip.end_time)
# End the last section at the same time as the trip
curr_section.end_ts = trip.end_ts
curr_section.end_time = trip.end_time
section_list.append(curr_section)
return section_list
|
Create a simple method to segment a trip into sections
This is purely based on the activity detection by Android.
It assumes that we have all activity updates. It uses a fairly naive algorithm
with a threshold of 60% confidence, ignoring STILL, TILTING and UNKNOWN
movements, and segmenting every time the mode changes. It seems to work pretty
well for now, although I will tune it on an ongoing basis for the next few
days.# Standard imports
import attrdict as ad
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.classification.cleaning.location_smoothing as ls
import emission.analysis.point_features as pf
import emission.storage.decorations.location_queries as lq
def segment_into_sections(trip):
points_df = lq.get_activities_for_section(trip)
no_tilting_points_df = points_df[points_df.activity != lq.Activities.TILTING]
section_list = []
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": trip.start_ts, "start_time": trip.start_time,
"activity": no_tilting_points_df.iloc[0].activity})
for idx, row in enumerate(no_tilting_points_df.to_dict('records')):
if row["activity"] != curr_section.activity:
# Let's add a second check here for confidence and types of activities
if (row['agc'] > 60 and
row['activity'] != lq.Activities.UNKNOWN and
row['activity'] != lq.Activities.STILL):
# Because the first section is initialized with the first activity.
# So when idx == 0, the activities will be equal and this is
# guaranteed to not be invoked
assert(idx > 0)
prev_ts = no_tilting_points_df.iloc[idx-1]["write_ts"]
print("At %s, found new activity %s compared to current %s - creating new section with start_time %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)),
row["activity"], curr_section.activity,
str(pydt.datetime.fromtimestamp(prev_ts/1000))))
# complete this section
curr_section.end_ts = prev_ts
curr_section.end_time = str(pydt.datetime.fromtimestamp(curr_section.end_ts/1000))
section_list.append(curr_section)
# make a new section
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": prev_ts,
"start_time": pydt.datetime.fromtimestamp(prev_ts/1000),
"activity": row["activity"]})
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
print("Detected trip end! Ending section at %s" % trip.end_time)
# End the last section at the same time as the trip
curr_section.end_ts = trip.end_ts
curr_section.end_time = trip.end_time
section_list.append(curr_section)
return section_list
|
<commit_before><commit_msg>Create a simple method to segment a trip into sections
This is purely based on the activity detection by Android.
It assumes that we have all activity updates. It uses a fairly naive algorithm
with a threshold of 60% confidence, ignoring STILL, TILTING and UNKNOWN
movements, and segmenting every time the mode changes. It seems to work pretty
well for now, although I will tune it on an ongoing basis for the next few
days.<commit_after># Standard imports
import attrdict as ad
import numpy as np
import datetime as pydt
# Our imports
import emission.analysis.classification.cleaning.location_smoothing as ls
import emission.analysis.point_features as pf
import emission.storage.decorations.location_queries as lq
def segment_into_sections(trip):
points_df = lq.get_activities_for_section(trip)
no_tilting_points_df = points_df[points_df.activity != lq.Activities.TILTING]
section_list = []
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": trip.start_ts, "start_time": trip.start_time,
"activity": no_tilting_points_df.iloc[0].activity})
for idx, row in enumerate(no_tilting_points_df.to_dict('records')):
if row["activity"] != curr_section.activity:
# Let's add a second check here for confidence and types of activities
if (row['agc'] > 60 and
row['activity'] != lq.Activities.UNKNOWN and
row['activity'] != lq.Activities.STILL):
# Because the first section is initialized with the first activity.
# So when idx == 0, the activities will be equal and this is
# guaranteed to not be invoked
assert(idx > 0)
prev_ts = no_tilting_points_df.iloc[idx-1]["write_ts"]
print("At %s, found new activity %s compared to current %s - creating new section with start_time %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)),
row["activity"], curr_section.activity,
str(pydt.datetime.fromtimestamp(prev_ts/1000))))
# complete this section
curr_section.end_ts = prev_ts
curr_section.end_time = str(pydt.datetime.fromtimestamp(curr_section.end_ts/1000))
section_list.append(curr_section)
# make a new section
curr_section = ad.AttrDict({"user_id": trip.user_id, "loc_filter": trip.loc_filter,
"start_ts": prev_ts,
"start_time": pydt.datetime.fromtimestamp(prev_ts/1000),
"activity": row["activity"]})
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
else:
print("At %s, retained existing activity %s" %
(str(pydt.datetime.fromtimestamp(row["write_ts"]/1000)), curr_section.activity))
print("Detected trip end! Ending section at %s" % trip.end_time)
# End the last section at the same time as the trip
curr_section.end_ts = trip.end_ts
curr_section.end_time = trip.end_time
section_list.append(curr_section)
return section_list
|
|
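The commit message above states the segmentation rule in prose; distilled into a single predicate it reads as below. This is an illustrative sketch only: the agc and activity fields and the lq constants are taken from the code above, but the helper function itself is not part of the commit.
import emission.storage.decorations.location_queries as lq
def starts_new_section(curr_activity, row):
    # Cut a new section only on a confident (>60) detection of a real movement mode.
    return (row["activity"] != curr_activity
            and row["agc"] > 60
            and row["activity"] not in (lq.Activities.UNKNOWN, lq.Activities.STILL))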
5f2cd26054adff5a1fbf9ba5d56766b972f46670
|
leakcheck/thread-key-gen.py
|
leakcheck/thread-key-gen.py
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
|
Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading.
|
Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading.
|
Python
|
apache-2.0
|
mhils/pyopenssl,kediacorporation/pyopenssl,reaperhulk/pyopenssl,r0ro/pyopenssl,mhils/pyopenssl,daodaoliang/pyopenssl,elitest/pyopenssl,kjav/pyopenssl,kediacorporation/pyopenssl,alex/pyopenssl,lvh/pyopenssl,samv/pyopenssl,Lukasa/pyopenssl,mitghi/pyopenssl,msabramo/pyOpenSSL,reaperhulk/pyopenssl,hynek/pyopenssl,pyca/pyopenssl,rackerlabs/pyopenssl,rackerlabs/pyopenssl,EnerNOC/pyopenssl,lvh/pyopenssl,adamwolf/pyopenssl,Lukasa/pyopenssl,mitghi/pyopenssl,sorenh/pyopenssl,EnerNOC/pyopenssl,msabramo/pyOpenSSL,EnerNOC/pyopenssl,hynek/pyopenssl,msabramo/pyOpenSSL,aalba6675/pyopenssl,mschmo/pyopenssl,aalba6675/pyopenssl,JensTimmerman/pyopenssl,r0ro/pyopenssl,sholsapp/pyopenssl,alex/pyopenssl,JensTimmerman/pyopenssl
|
Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading.
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
|
<commit_before><commit_msg>Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading.<commit_after>
|
# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
|
Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading.# Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
|
<commit_before><commit_msg>Add a multithreaded stress tester for key generation. Hopefully provides additional confidence that that code is correct with respect to threading.<commit_after># Copyright (C) Jean-Paul Calderone
# See LICENSE for details.
#
# Stress tester for thread-related bugs in RSA and DSA key generation. 0.12 and
# older held the GIL during these operations. Subsequent versions release it
# during them.
from threading import Thread
from OpenSSL.crypto import TYPE_RSA, TYPE_DSA, PKey
def generate_rsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_RSA, 1024)
keys.append(key)
def generate_dsa():
keys = []
for i in range(100):
key = PKey()
key.generate_key(TYPE_DSA, 512)
keys.append(key)
def main():
threads = []
for i in range(3):
t = Thread(target=generate_rsa, args=())
threads.append(t)
t = Thread(target=generate_dsa, args=())
threads.append(t)
for t in threads:
t.start()
main()
|
|
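One caveat about the stress tester above: main() starts the worker threads but never waits for them, so the script's main thread finishes while keys are still being generated (the non-daemon threads keep the process alive, but any post-run reporting would race them). A hedged variant of the final loop, not part of the original commit, that joins the workers:
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # wait for every generator thread to finish before returning from main()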
4820013e207947fe7ff94777cd8dcf1ed474eab1
|
migrations/versions/fb6a6554b21_add_account_lockout_fields_to_user.py
|
migrations/versions/fb6a6554b21_add_account_lockout_fields_to_user.py
|
"""Add account lockout fields to User
Revision ID: fb6a6554b21
Revises: 1f9b411bf6df
Create Date: 2015-10-29 01:07:27.930095
"""
# revision identifiers, used by Alembic.
revision = 'fb6a6554b21'
down_revision = '1f9b411bf6df'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('failed_login_attempts', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('locked_out', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'locked_out')
op.drop_column('users', 'last_failed_login_attempt')
op.drop_column('users', 'failed_login_attempts')
### end Alembic commands ###
|
Add migration for account lockout fields on User
|
Add migration for account lockout fields on User
|
Python
|
mit
|
richgieg/flask-now,richgieg/flask-now
|
Add migration for account lockout fields on User
|
"""Add account lockout fields to User
Revision ID: fb6a6554b21
Revises: 1f9b411bf6df
Create Date: 2015-10-29 01:07:27.930095
"""
# revision identifiers, used by Alembic.
revision = 'fb6a6554b21'
down_revision = '1f9b411bf6df'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('failed_login_attempts', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('locked_out', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'locked_out')
op.drop_column('users', 'last_failed_login_attempt')
op.drop_column('users', 'failed_login_attempts')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for account lockout fields on User<commit_after>
|
"""Add account lockout fields to User
Revision ID: fb6a6554b21
Revises: 1f9b411bf6df
Create Date: 2015-10-29 01:07:27.930095
"""
# revision identifiers, used by Alembic.
revision = 'fb6a6554b21'
down_revision = '1f9b411bf6df'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('failed_login_attempts', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('locked_out', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'locked_out')
op.drop_column('users', 'last_failed_login_attempt')
op.drop_column('users', 'failed_login_attempts')
### end Alembic commands ###
|
Add migration for account lockout fields on User"""Add account lockout fields to User
Revision ID: fb6a6554b21
Revises: 1f9b411bf6df
Create Date: 2015-10-29 01:07:27.930095
"""
# revision identifiers, used by Alembic.
revision = 'fb6a6554b21'
down_revision = '1f9b411bf6df'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('failed_login_attempts', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('locked_out', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'locked_out')
op.drop_column('users', 'last_failed_login_attempt')
op.drop_column('users', 'failed_login_attempts')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for account lockout fields on User<commit_after>"""Add account lockout fields to User
Revision ID: fb6a6554b21
Revises: 1f9b411bf6df
Create Date: 2015-10-29 01:07:27.930095
"""
# revision identifiers, used by Alembic.
revision = 'fb6a6554b21'
down_revision = '1f9b411bf6df'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('failed_login_attempts', sa.Integer(), nullable=True))
op.add_column('users', sa.Column('last_failed_login_attempt', sa.DateTime(), nullable=True))
op.add_column('users', sa.Column('locked_out', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'locked_out')
op.drop_column('users', 'last_failed_login_attempt')
op.drop_column('users', 'failed_login_attempts')
### end Alembic commands ###
|
|
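For context, a migration like the one above normally pairs with matching columns on the application's User model. The sketch below is an assumption about what that model change looks like (the Flask-SQLAlchemy db object and the surrounding model are hypothetical; only the three column names mirror the migration):
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()  # assumed extension object; the real app configures this elsewhere
class User(db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    failed_login_attempts = db.Column(db.Integer, default=0)
    last_failed_login_attempt = db.Column(db.DateTime)
    locked_out = db.Column(db.Boolean, default=False)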
6fabbe85bb74788641897daf8b162eac3d47b0aa
|
data_crunching/indonesia_timeseries/download_indonesia_prices.py
|
data_crunching/indonesia_timeseries/download_indonesia_prices.py
|
#!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree
usage_str = """
This scripts downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""
def download_table(date):
"""Download price table for a given date"""
main_url = 'http://m.pip.kementan.org/index.php'
params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
req = urllib2.Request(main_url, params)
response = urllib2.urlopen(req)
html_code = response.read()
regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
match = regex.search(html_code)
if not match:
print "ERROR: table not detected"
sys.exit(1)
table_html = match.group(1)
# Remove commas
table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
table = etree.XML(table_html)
rows = iter(table)
actual_headers = [col.text for col in next(rows)]
# TODO: translate this bullshit ;)
headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
print "; ".join(headers), "\n"
# Print table
for row in rows:
if all(v.text is None for v in row):
continue
print ('''"%s"''') % row[0].text,
for col in row[1:]:
print col.text,
print
def parse_date(date_string):
"""Check date"""
match = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
if not match:
sys.exit("ERROR: invalid date")
day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3))
return datetime.date(year, month, day)
def usage():
print usage_str
if __name__ == "__main__":
if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
usage()
sys.exit(0)
date_string = sys.argv[1]
date = parse_date(date_string)
download_table(date)
|
Add script for downloading Indonesia price data
|
Add script for downloading Indonesia price data
(http://m.pip.kementan.org/index.php)
|
Python
|
bsd-3-clause
|
FAB4D/humanitas,FAB4D/humanitas,FAB4D/humanitas
|
Add script for downloading Indonesia price data
(http://m.pip.kementan.org/index.php)
|
#!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree
usage_str = """
This scripts downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""
def download_table(date):
"""Download price table for a given date"""
main_url = 'http://m.pip.kementan.org/index.php'
params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
req = urllib2.Request(main_url, params)
response = urllib2.urlopen(req)
html_code = response.read()
regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
match = regex.search(html_code)
if not match:
print "ERROR: table not detected"
sys.exit(1)
table_html = match.group(1)
# Remove commas
table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
table = etree.XML(table_html)
rows = iter(table)
actual_headers = [col.text for col in next(rows)]
# TODO: translate this bullshit ;)
headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
print "; ".join(headers), "\n"
# Print table
for row in rows:
if all(v.text is None for v in row):
continue
print ('''"%s"''') % row[0].text,
for col in row[1:]:
print col.text,
print
def parse_date(date_string):
"""Check date"""
match = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
if not match:
sys.exit("ERROR: invalid date")
day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3))
return datetime.date(year, month, day)
def usage():
print usage_str
if __name__ == "__main__":
if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
usage()
sys.exit(0)
date_string = sys.argv[1]
date = parse_date(date_string)
download_table(date)
|
<commit_before><commit_msg>Add script for downloading Indonesia price data
(http://m.pip.kementan.org/index.php)<commit_after>
|
#!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree
usage_str = """
This scripts downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""
def download_table(date):
"""Download price table for a given date"""
main_url = 'http://m.pip.kementan.org/index.php'
params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
req = urllib2.Request(main_url, params)
response = urllib2.urlopen(req)
html_code = response.read()
regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
match = regex.search(html_code)
if not match:
print "ERROR: table not detected"
sys.exit(1)
table_html = match.group(1)
# Remove commas
table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
table = etree.XML(table_html)
rows = iter(table)
actual_headers = [col.text for col in next(rows)]
# TODO: translate this bullshit ;)
headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
print "; ".join(headers), "\n"
# Print table
for row in rows:
if all(v.text is None for v in row):
continue
print ('''"%s"''') % row[0].text,
for col in row[1:]:
print col.text,
print
def parse_date(date_string):
"""Check date"""
match = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
if not match:
sys.exit("ERROR: invalid date")
day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3))
return datetime.date(year, month, day)
def usage():
print usage_str
if __name__ == "__main__":
if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
usage()
sys.exit(0)
date_string = sys.argv[1]
date = parse_date(date_string)
download_table(date)
|
Add script for downloading Indonesia price data
(http://m.pip.kementan.org/index.php)#!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree
usage_str = """
This scripts downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""
def download_table(date):
"""Download price table for a given date"""
main_url = 'http://m.pip.kementan.org/index.php'
params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
req = urllib2.Request(main_url, params)
response = urllib2.urlopen(req)
html_code = response.read()
regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
match = regex.search(html_code)
if not match:
print "ERROR: table not detected"
sys.exit(1)
table_html = match.group(1)
# Remove commas
table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
table = etree.XML(table_html)
rows = iter(table)
actual_headers = [col.text for col in next(rows)]
# TODO: translate this bullshit ;)
headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
print "; ".join(headers), "\n"
# Print table
for row in rows:
if all(v.text is None for v in row):
continue
print ('''"%s"''') % row[0].text,
for col in row[1:]:
print col.text,
print
def parse_date(date_string):
"""Check date"""
match = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
if not match:
sys.exit("ERROR: invalid date")
day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3))
return datetime.date(year, month, day)
def usage():
print usage_str
if __name__ == "__main__":
if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
usage()
sys.exit(0)
date_string = sys.argv[1]
date = parse_date(date_string)
download_table(date)
|
<commit_before><commit_msg>Add script for downloading Indonesia price data
(http://m.pip.kementan.org/index.php)<commit_after>#!/usr/bin/env python2
import urllib2
import shutil
import re
import sys
import datetime
from lxml import etree
usage_str = """
This script downloads daily food prices from http://m.pip.kementan.org/index.php (Indonesia).
Provide date in DD/MM/YYYY format.
Example:
./download_indonesia_prices.py 15/03/2013
"""
def download_table(date):
"""Download price table for a given date"""
main_url = 'http://m.pip.kementan.org/index.php'
params = 'laporan=LHK-01&tanggal=%s&bulan=%s&tahun=%s&pilihlaporan=View+Laporan' % (date.day, date.month, date.year)
req = urllib2.Request(main_url, params)
response = urllib2.urlopen(req)
html_code = response.read()
regex = re.compile(r'<div id="content" align="center">.*(<table.+</table>)', re.DOTALL)
match = regex.search(html_code)
if not match:
print "ERROR: table not detected"
sys.exit(1)
table_html = match.group(1)
# Remove commas
table_html = re.sub(r'(?<=\d),(?=\d)', '', table_html)
table = etree.XML(table_html)
rows = iter(table)
actual_headers = [col.text for col in next(rows)]
# TODO: translate this bullshit ;)
headers = ['Dried Grain Harvest', 'Dry unhusked', 'Rice Medium', 'Rice Premium', 'Corn', 'Local soybean', 'Local Peanuts', 'Green Beans', 'Cassava', 'Sweet potato', 'Cassava spindles']
print "; ".join(headers), "\n"
# Print table
for row in rows:
if all(v.text is None for v in row):
continue
print ('''"%s"''') % row[0].text,
for col in row[1:]:
print col.text,
print
def parse_date(date_string):
"""Check date"""
match = re.match(r'(\d{2})/(\d{2})/(\d{4})', date_string)
if not match:
sys.exit("ERROR: invalid date")
day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3))
return datetime.date(year, month, day)
def usage():
print usage_str
if __name__ == "__main__":
if len(sys.argv) == 1 or sys.argv[1] in ['-h', '--help']:
usage()
sys.exit(0)
date_string = sys.argv[1]
date = parse_date(date_string)
download_table(date)
|
|
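A quick illustration of the thousands-separator cleanup used in the record above: the lookbehind/lookahead pair only strips commas that sit between two digits, so list commas in the surrounding text survive. A minimal, self-contained sketch (the sample strings are made up):
import re

assert re.sub(r'(?<=\d),(?=\d)', '', '1,234,567') == '1234567'
assert re.sub(r'(?<=\d),(?=\d)', '', 'Beras, 8,500') == 'Beras, 8500'  # the list comma is kept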
a795d94a9c885b97ab5bffc313524ae46626d556
|
tools/analyze_code_size.py
|
tools/analyze_code_size.py
|
import os
import re
import sys
import optparse
MARKER_START_FUNCS = "// EMSCRIPTEN_START_FUNCS"
MARKER_END_FUNCS = "// EMSCRIPTEN_END_FUNCS"
FUNCTION_CODE_RE = re.compile(
r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)
def analyze_code_size(fileobj, opts):
funcs = {}
name_re = None
if opts.grep is not None:
name_re = re.compile(opts.grep, re.I)
# Split out and analyze the code for each individual function.
# XXX TODO: read incrementally to reduce memory usage.
data = fileobj.read()
pre_code, data = data.split(MARKER_START_FUNCS, 1)
data, post_code = data.split(MARKER_END_FUNCS, 1)
for match in FUNCTION_CODE_RE.finditer(data):
name = match.group("name")
defn = match.group("defn")
if name_re and not name_re.search(name):
continue
funcs[name] = FunctionMetrics(name, defn)
# Print summary metrics.
total = 0
funcs_by_size = ((f.size, f.name) for f in funcs.itervalues())
for (size, name) in sorted(funcs_by_size, reverse=True):
print size, name, human_readable(size)
total += size
print "Total size:", total, human_readable(total)
class FunctionMetrics(object):
def __init__(self, name, defn):
self.name = name
self.defn = defn
self.size = len(defn)
def human_readable(size):
units = ((1024*1024, "M"), (1024, "k"))
for (scale, unit) in units:
scale = float(scale)
if size / scale > 0.1:
return "(%.2f%s)" % (size / scale, unit)
return ""
def main(args=None):
usage = "usage: %prog [options] file"
descr = "Analyze code size and complexity for emscripten-compiled output"
parser = optparse.OptionParser(usage=usage, description=descr)
parser.add_option("-g", "--grep", metavar="REGEXP",
help="only analyze functions matching this regexp")
opts, args = parser.parse_args(args)
with open(args[0], "r") as infile:
analyze_code_size(infile, opts)
return 0
if __name__ == "__main__":
try:
exitcode = main()
except KeyboardInterrupt:
exitcode = 1
sys.exit(exitcode)
|
Add simple function-size analysis tool.
|
Add simple function-size analysis tool.
|
Python
|
mit
|
pypyjs/pypyjs,perkinslr/pypyjs,perkinslr/pypyjs,perkinslr/pypyjs,albertjan/pypyjs,pombredanne/pypyjs,albertjan/pypyjs,perkinslr/pypyjs,pombredanne/pypyjs,pypyjs/pypyjs,trinketapp/pypyjs,pypyjs/pypyjs,pypyjs/pypyjs,perkinslr/pypyjs,trinketapp/pypyjs,albertjan/pypyjs
|
Add simple function-size analysis tool.
|
import os
import re
import sys
import optparse
MARKER_START_FUNCS = "// EMSCRIPTEN_START_FUNCS"
MARKER_END_FUNCS = "// EMSCRIPTEN_END_FUNCS"
FUNCTION_CODE_RE = re.compile(
r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)
def analyze_code_size(fileobj, opts):
funcs = {}
name_re = None
if opts.grep is not None:
name_re = re.compile(opts.grep, re.I)
# Split out and analyze the code for each individual function.
# XXX TODO: read incrementally to reduce memory usage.
data = fileobj.read()
pre_code, data = data.split(MARKER_START_FUNCS, 1)
data, post_code = data.split(MARKER_END_FUNCS, 1)
for match in FUNCTION_CODE_RE.finditer(data):
name = match.group("name")
defn = match.group("defn")
if name_re and not name_re.search(name):
continue
funcs[name] = FunctionMetrics(name, defn)
# Print summary metrics.
total = 0
funcs_by_size = ((f.size, f.name) for f in funcs.itervalues())
for (size, name) in sorted(funcs_by_size, reverse=True):
print size, name, human_readable(size)
total += size
print "Total size:", total, human_readable(total)
class FunctionMetrics(object):
def __init__(self, name, defn):
self.name = name
self.defn = defn
self.size = len(defn)
def human_readable(size):
units = ((1024*1024, "M"), (1024, "k"))
for (scale, unit) in units:
scale = float(scale)
if size / scale > 0.1:
return "(%.2f%s)" % (size / scale, unit)
return ""
def main(args=None):
usage = "usage: %prog [options] file"
descr = "Analyze code size and complexity for emscripten-compiled output"
parser = optparse.OptionParser(usage=usage, description=descr)
parser.add_option("-g", "--grep", metavar="REGEXP",
help="only analyze functions matching this regexp")
opts, args = parser.parse_args(args)
with open(args[0], "r") as infile:
analyze_code_size(infile, opts)
return 0
if __name__ == "__main__":
try:
exitcode = main()
except KeyboardInterrupt:
exitcode = 1
sys.exit(exitcode)
|
<commit_before><commit_msg>Add simple function-size analysis tool.<commit_after>
|
import os
import re
import sys
import optparse
MARKER_START_FUNCS = "// EMSCRIPTEN_START_FUNCS"
MARKER_END_FUNCS = "// EMSCRIPTEN_END_FUNCS"
FUNCTION_CODE_RE = re.compile(
r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)
def analyze_code_size(fileobj, opts):
funcs = {}
name_re = None
if opts.grep is not None:
name_re = re.compile(opts.grep, re.I)
# Split out and analyze the code for each individual function.
# XXX TODO: read incrementally to reduce memory usage.
data = fileobj.read()
pre_code, data = data.split(MARKER_START_FUNCS, 1)
data, post_code = data.split(MARKER_END_FUNCS, 1)
for match in FUNCTION_CODE_RE.finditer(data):
name = match.group("name")
defn = match.group("defn")
if name_re and not name_re.search(name):
continue
funcs[name] = FunctionMetrics(name, defn)
# Print summary metrics.
total = 0
funcs_by_size = ((f.size, f.name) for f in funcs.itervalues())
for (size, name) in sorted(funcs_by_size, reverse=True):
print size, name, human_readable(size)
total += size
print "Total size:", total, human_readable(total)
class FunctionMetrics(object):
def __init__(self, name, defn):
self.name = name
self.defn = defn
self.size = len(defn)
def human_readable(size):
units = ((1024*1024, "M"), (1024, "k"))
for (scale, unit) in units:
scale = float(scale)
if size / scale > 0.1:
return "(%.2f%s)" % (size / scale, unit)
return ""
def main(args=None):
usage = "usage: %prog [options] file"
descr = "Analyze code size and complexity for emscripten-compiled output"
parser = optparse.OptionParser(usage=usage, description=descr)
parser.add_option("-g", "--grep", metavar="REGEXP",
help="only analyze functions matching this regexp")
opts, args = parser.parse_args(args)
with open(args[0], "r") as infile:
analyze_code_size(infile, opts)
return 0
if __name__ == "__main__":
try:
exitcode = main()
except KeyboardInterrupt:
exitcode = 1
sys.exit(exitcode)
|
Add simple function-size analysis tool.
import os
import re
import sys
import optparse
MARKER_START_FUNCS = "// EMSCRIPTEN_START_FUNCS"
MARKER_END_FUNCS = "// EMSCRIPTEN_END_FUNCS"
FUNCTION_CODE_RE = re.compile(
r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)
def analyze_code_size(fileobj, opts):
funcs = {}
name_re = None
if opts.grep is not None:
name_re = re.compile(opts.grep, re.I)
# Split out and analyze the code for each individual function.
# XXX TODO: read incrementally to reduce memory usage.
data = fileobj.read()
pre_code, data = data.split(MARKER_START_FUNCS, 1)
data, post_code = data.split(MARKER_END_FUNCS, 1)
for match in FUNCTION_CODE_RE.finditer(data):
name = match.group("name")
defn = match.group("defn")
if name_re and not name_re.search(name):
continue
funcs[name] = FunctionMetrics(name, defn)
# Print summary metrics.
total = 0
funcs_by_size = ((f.size, f.name) for f in funcs.itervalues())
for (size, name) in sorted(funcs_by_size, reverse=True):
print size, name, human_readable(size)
total += size
print "Total size:", total, human_readable(total)
class FunctionMetrics(object):
def __init__(self, name, defn):
self.name = name
self.defn = defn
self.size = len(defn)
def human_readable(size):
units = ((1024*1024, "M"), (1024, "k"))
for (scale, unit) in units:
scale = float(scale)
if size / scale > 0.1:
return "(%.2f%s)" % (size / scale, unit)
return ""
def main(args=None):
usage = "usage: %prog [options] file"
descr = "Analyze code size and complexity for emscripten-compiled output"
parser = optparse.OptionParser(usage=usage, description=descr)
parser.add_option("-g", "--grep", metavar="REGEXP",
help="only analyze functions matching this regexp")
opts, args = parser.parse_args(args)
with open(args[0], "r") as infile:
analyze_code_size(infile, opts)
return 0
if __name__ == "__main__":
try:
exitcode = main()
except KeyboardInterrupt:
exitcode = 1
sys.exit(exitcode)
|
<commit_before><commit_msg>Add simple function-size analysis tool.<commit_after>
import os
import re
import sys
import optparse
MARKER_START_FUNCS = "// EMSCRIPTEN_START_FUNCS"
MARKER_END_FUNCS = "// EMSCRIPTEN_END_FUNCS"
FUNCTION_CODE_RE = re.compile(
r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)
def analyze_code_size(fileobj, opts):
funcs = {}
name_re = None
if opts.grep is not None:
name_re = re.compile(opts.grep, re.I)
# Split out and analyze the code for each individual function.
# XXX TODO: read incrementally to reduce memory usage.
data = fileobj.read()
pre_code, data = data.split(MARKER_START_FUNCS, 1)
data, post_code = data.split(MARKER_END_FUNCS, 1)
for match in FUNCTION_CODE_RE.finditer(data):
name = match.group("name")
defn = match.group("defn")
if name_re and not name_re.search(name):
continue
funcs[name] = FunctionMetrics(name, defn)
# Print summary metrics.
total = 0
funcs_by_size = ((f.size, f.name) for f in funcs.itervalues())
for (size, name) in sorted(funcs_by_size, reverse=True):
print size, name, human_readable(size)
total += size
print "Total size:", total, human_readable(total)
class FunctionMetrics(object):
def __init__(self, name, defn):
self.name = name
self.defn = defn
self.size = len(defn)
def human_readable(size):
units = ((1024*1024, "M"), (1024, "k"))
for (scale, unit) in units:
scale = float(scale)
if size / scale > 0.1:
return "(%.2f%s)" % (size / scale, unit)
return ""
def main(args=None):
usage = "usage: %prog [options] file"
descr = "Analyze code size and complexity for emscripten-compiled output"
parser = optparse.OptionParser(usage=usage, description=descr)
parser.add_option("-g", "--grep", metavar="REGEXP",
help="only analyze functions matching this regexp")
opts, args = parser.parse_args(args)
with open(args[0], "r") as infile:
analyze_code_size(infile, opts)
return 0
if __name__ == "__main__":
try:
exitcode = main()
except KeyboardInterrupt:
exitcode = 1
sys.exit(exitcode)
|
|
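To make the splitting step in the record above concrete, here is a toy run of the same FUNCTION_CODE_RE against a made-up emscripten-style blob; in the real tool the input is the code between the EMSCRIPTEN_START_FUNCS and EMSCRIPTEN_END_FUNCS markers:
import re

FUNCTION_CODE_RE = re.compile(
    r"function (?P<name>[a-zA-Z0-9_]+)(?P<defn>.*?)((?=function)|(?=$))"
)
blob = "function a(){return 1}function bb(){return 22}"
for m in FUNCTION_CODE_RE.finditer(blob):
    print(m.group("name"), repr(m.group("defn")))
# a '(){return 1}'
# bb '(){return 22}'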
bfaeeec3f5f5582822e2918491090815a606ba44
|
test/test_api.py
|
test/test_api.py
|
# -*- coding: utf-8 -*-
import warthog.api
def test_public_exports():
exports = set([item for item in dir(warthog.api) if not item.startswith('_')])
declared = set(warthog.api.__all__)
assert exports == declared, 'Exports and __all__ members should match'
|
Add test to make sure imports and __all__ match
|
Add test to make sure imports and __all__ match
|
Python
|
mit
|
smarter-travel-media/warthog
|
Add test to make sure imports and __all__ match
|
# -*- coding: utf-8 -*-
import warthog.api
def test_public_exports():
exports = set([item for item in dir(warthog.api) if not item.startswith('_')])
declared = set(warthog.api.__all__)
assert exports == declared, 'Exports and __all__ members should match'
|
<commit_before><commit_msg>Add test to make sure imports and __all__ match<commit_after>
|
# -*- coding: utf-8 -*-
import warthog.api
def test_public_exports():
exports = set([item for item in dir(warthog.api) if not item.startswith('_')])
declared = set(warthog.api.__all__)
assert exports == declared, 'Exports and __all__ members should match'
|
Add test to make sure imports and __all__ match# -*- coding: utf-8 -*-
import warthog.api
def test_public_exports():
exports = set([item for item in dir(warthog.api) if not item.startswith('_')])
declared = set(warthog.api.__all__)
assert exports == declared, 'Exports and __all__ members should match'
|
<commit_before><commit_msg>Add test to make sure imports and __all__ match<commit_after># -*- coding: utf-8 -*-
import warthog.api
def test_public_exports():
exports = set([item for item in dir(warthog.api) if not item.startswith('_')])
declared = set(warthog.api.__all__)
assert exports == declared, 'Exports and __all__ members should match'
|
|
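The dir()-versus-__all__ comparison in the record above is easiest to see on a throwaway module; the sketch below builds one with types.ModuleType (the module and attribute names are invented) and shows the kind of drift the test is meant to catch:
import types

mod = types.ModuleType("fake_api")
mod.__all__ = ["connect"]
mod.connect = lambda: None
mod.retry = lambda: None   # public name that was never added to __all__

exports = set(item for item in dir(mod) if not item.startswith('_'))
assert exports != set(mod.__all__)   # this mismatch is what the test flags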
48857638694ceca08c64d7b9c6825e2178c53279
|
pylearn2/utils/doc.py
|
pylearn2/utils/doc.py
|
"""
Documentation-related helper classes/functions
"""
class soft_wraps:
"""
A Python decorator which concatenates two functions' docstrings: one
function is defined at initialization and the other one is defined when
soft_wraps is called.
    This helps reduce the amount of documentation to write: one can use
this decorator on child classes' functions when their implementation is
similar to the one of the parent class. Conversely, if a function defined
in a child class departs from its parent's implementation, one can simply
explain the differences in a 'Notes' section without re-writing the whole
docstring.
Examples
--------
>>> class Parent(object):
... def f(x):
... '''
... Adds 1 to x
...
... Parameters
... ----------
... x : int
... Variable to increment by 1
...
... Returns
... -------
... rval : int
... x incremented by 1
... '''
... rval = x + 1
... return rval
...
>>> class Child(Parent):
... @soft_wraps(Parent.f)
... def f(x):
... '''
... Notes
... -----
... Also prints the incremented value
... '''
... rval = x + 1
... print rval
... return rval
...
>>> c = Child()
>>> print c.f.__doc__
Adds 1 to x
Parameters
----------
x : int
Variable to increment by 1
Returns
-------
rval : int
x incremented by 1
Notes
-----
Also prints the incremented value
"""
def __init__(self, f, append=False):
"""
Parameters
----------
f : function
Function whose docstring will be concatenated with the decorated
function's docstring
        append : bool, optional
If True, appends f's docstring to the decorated function's
docstring instead of prepending it. Defaults to False.
"""
self.f = f
self.append = append
def __call__(self, f):
"""
Prepend self.f's docstring to f's docstring (or append it if
`self.append == True`).
Parameters
----------
f : function
Function to decorate
Returns
-------
f : function
Function f passed as argument with self.f's docstring
{pre,ap}pended to it
"""
if self.append:
            f.__doc__ += self.f.__doc__
else:
f.__doc__ = self.f.__doc__ + f.__doc__
return f
|
Add function decorator to improve functools.wraps
|
Add function decorator to improve functools.wraps
|
Python
|
bsd-3-clause
|
goodfeli/pylearn2,JesseLivezey/pylearn2,TNick/pylearn2,fulmicoton/pylearn2,pkainz/pylearn2,Refefer/pylearn2,woozzu/pylearn2,kastnerkyle/pylearn2,CIFASIS/pylearn2,mclaughlin6464/pylearn2,hyqneuron/pylearn2-maxsom,aalmah/pylearn2,bartvm/pylearn2,JesseLivezey/pylearn2,nouiz/pylearn2,lamblin/pylearn2,CIFASIS/pylearn2,junbochen/pylearn2,ddboline/pylearn2,junbochen/pylearn2,ddboline/pylearn2,alexjc/pylearn2,w1kke/pylearn2,abergeron/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,fyffyt/pylearn2,fishcorn/pylearn2,fyffyt/pylearn2,matrogers/pylearn2,matrogers/pylearn2,aalmah/pylearn2,lunyang/pylearn2,skearnes/pylearn2,mkraemer67/pylearn2,theoryno3/pylearn2,kose-y/pylearn2,se4u/pylearn2,aalmah/pylearn2,daemonmaker/pylearn2,jeremyfix/pylearn2,hyqneuron/pylearn2-maxsom,lancezlin/pylearn2,Refefer/pylearn2,ddboline/pylearn2,lancezlin/pylearn2,kose-y/pylearn2,JesseLivezey/plankton,hantek/pylearn2,goodfeli/pylearn2,woozzu/pylearn2,ashhher3/pylearn2,bartvm/pylearn2,shiquanwang/pylearn2,TNick/pylearn2,hantek/pylearn2,lancezlin/pylearn2,TNick/pylearn2,daemonmaker/pylearn2,pkainz/pylearn2,fyffyt/pylearn2,alexjc/pylearn2,lunyang/pylearn2,ddboline/pylearn2,hantek/pylearn2,woozzu/pylearn2,pombredanne/pylearn2,TNick/pylearn2,jeremyfix/pylearn2,hyqneuron/pylearn2-maxsom,fishcorn/pylearn2,lisa-lab/pylearn2,kastnerkyle/pylearn2,mkraemer67/pylearn2,abergeron/pylearn2,alexjc/pylearn2,pombredanne/pylearn2,hyqneuron/pylearn2-maxsom,daemonmaker/pylearn2,KennethPierce/pylearnk,cosmoharrigan/pylearn2,aalmah/pylearn2,shiquanwang/pylearn2,JesseLivezey/plankton,KennethPierce/pylearnk,jamessergeant/pylearn2,caidongyun/pylearn2,fishcorn/pylearn2,mkraemer67/pylearn2,jamessergeant/pylearn2,CIFASIS/pylearn2,chrish42/pylearn,lunyang/pylearn2,fulmicoton/pylearn2,bartvm/pylearn2,mclaughlin6464/pylearn2,lunyang/pylearn2,se4u/pylearn2,fyffyt/pylearn2,Refefer/pylearn2,matrogers/pylearn2,mclaughlin6464/pylearn2,se4u/pylearn2,nouiz/pylearn2,jamessergeant/pylearn2,JesseLivezey/pylearn2,chrish42/pylearn,KennethPierce/pylearnk,cosmoharrigan/pylearn2,sandeepkbhat/pylearn2,theoryno3/pylearn2,w1kke/pylearn2,goodfeli/pylearn2,JesseLivezey/pylearn2,Refefer/pylearn2,caidongyun/pylearn2,msingh172/pylearn2,kastnerkyle/pylearn2,skearnes/pylearn2,KennethPierce/pylearnk,abergeron/pylearn2,kastnerkyle/pylearn2,skearnes/pylearn2,jeremyfix/pylearn2,pombredanne/pylearn2,ashhher3/pylearn2,lisa-lab/pylearn2,lamblin/pylearn2,junbochen/pylearn2,ashhher3/pylearn2,lamblin/pylearn2,CIFASIS/pylearn2,lisa-lab/pylearn2,pkainz/pylearn2,pombredanne/pylearn2,fishcorn/pylearn2,se4u/pylearn2,pkainz/pylearn2,junbochen/pylearn2,cosmoharrigan/pylearn2,goodfeli/pylearn2,theoryno3/pylearn2,ashhher3/pylearn2,alexjc/pylearn2,jeremyfix/pylearn2,JesseLivezey/plankton,chrish42/pylearn,msingh172/pylearn2,mclaughlin6464/pylearn2,sandeepkbhat/pylearn2,shiquanwang/pylearn2,w1kke/pylearn2,caidongyun/pylearn2,skearnes/pylearn2,matrogers/pylearn2,fulmicoton/pylearn2,shiquanwang/pylearn2,chrish42/pylearn,lisa-lab/pylearn2,kose-y/pylearn2,hantek/pylearn2,kose-y/pylearn2,msingh172/pylearn2,sandeepkbhat/pylearn2,w1kke/pylearn2,msingh172/pylearn2,lancezlin/pylearn2,woozzu/pylearn2,daemonmaker/pylearn2,JesseLivezey/plankton,fulmicoton/pylearn2,caidongyun/pylearn2,bartvm/pylearn2,nouiz/pylearn2,cosmoharrigan/pylearn2,sandeepkbhat/pylearn2,nouiz/pylearn2,lamblin/pylearn2,theoryno3/pylearn2,abergeron/pylearn2
|
Add function decorator to improve functools.wraps
|
"""
Documentation-related helper classes/functions
"""
class soft_wraps:
"""
A Python decorator which concatenates two functions' docstrings: one
function is defined at initialization and the other one is defined when
soft_wraps is called.
    This helps reduce the amount of documentation to write: one can use
this decorator on child classes' functions when their implementation is
similar to the one of the parent class. Conversely, if a function defined
in a child class departs from its parent's implementation, one can simply
explain the differences in a 'Notes' section without re-writing the whole
docstring.
Examples
--------
>>> class Parent(object):
... def f(x):
... '''
... Adds 1 to x
...
... Parameters
... ----------
... x : int
... Variable to increment by 1
...
... Returns
... -------
... rval : int
... x incremented by 1
... '''
... rval = x + 1
... return rval
...
>>> class Child(Parent):
... @soft_wraps(Parent.f)
... def f(x):
... '''
... Notes
... -----
... Also prints the incremented value
... '''
... rval = x + 1
... print rval
... return rval
...
>>> c = Child()
>>> print c.f.__doc__
Adds 1 to x
Parameters
----------
x : int
Variable to increment by 1
Returns
-------
rval : int
x incremented by 1
Notes
-----
Also prints the incremented value
"""
def __init__(self, f, append=False):
"""
Parameters
----------
f : function
Function whose docstring will be concatenated with the decorated
function's docstring
        append : bool, optional
If True, appends f's docstring to the decorated function's
docstring instead of prepending it. Defaults to False.
"""
self.f = f
self.append = append
def __call__(self, f):
"""
Prepend self.f's docstring to f's docstring (or append it if
`self.append == True`).
Parameters
----------
f : function
Function to decorate
Returns
-------
f : function
Function f passed as argument with self.f's docstring
{pre,ap}pended to it
"""
if self.append:
            f.__doc__ += self.f.__doc__
else:
f.__doc__ = self.f.__doc__ + f.__doc__
return f
|
<commit_before><commit_msg>Add function decorator to improve functools.wraps<commit_after>
|
"""
Documentation-related helper classes/functions
"""
class soft_wraps:
"""
A Python decorator which concatenates two functions' docstrings: one
function is defined at initialization and the other one is defined when
soft_wraps is called.
    This helps reduce the amount of documentation to write: one can use
this decorator on child classes' functions when their implementation is
similar to the one of the parent class. Conversely, if a function defined
in a child class departs from its parent's implementation, one can simply
explain the differences in a 'Notes' section without re-writing the whole
docstring.
Examples
--------
>>> class Parent(object):
... def f(x):
... '''
... Adds 1 to x
...
... Parameters
... ----------
... x : int
... Variable to increment by 1
...
... Returns
... -------
... rval : int
... x incremented by 1
... '''
... rval = x + 1
... return rval
...
>>> class Child(Parent):
... @soft_wraps(Parent.f)
... def f(x):
... '''
... Notes
... -----
... Also prints the incremented value
... '''
... rval = x + 1
... print rval
... return rval
...
>>> c = Child()
>>> print c.f.__doc__
Adds 1 to x
Parameters
----------
x : int
Variable to increment by 1
Returns
-------
rval : int
x incremented by 1
Notes
-----
Also prints the incremented value
"""
def __init__(self, f, append=False):
"""
Parameters
----------
f : function
Function whose docstring will be concatenated with the decorated
function's docstring
        append : bool, optional
If True, appends f's docstring to the decorated function's
docstring instead of prepending it. Defaults to False.
"""
self.f = f
self.append = append
def __call__(self, f):
"""
Prepend self.f's docstring to f's docstring (or append it if
`self.append == True`).
Parameters
----------
f : function
Function to decorate
Returns
-------
f : function
Function f passed as argument with self.f's docstring
{pre,ap}pended to it
"""
if self.append:
            f.__doc__ += self.f.__doc__
else:
f.__doc__ = self.f.__doc__ + f.__doc__
return f
|
Add function decorator to improve functools.wraps"""
Documentation-related helper classes/functions
"""
class soft_wraps:
"""
A Python decorator which concatenates two functions' docstrings: one
function is defined at initialization and the other one is defined when
soft_wraps is called.
    This helps reduce the amount of documentation to write: one can use
this decorator on child classes' functions when their implementation is
similar to the one of the parent class. Conversely, if a function defined
in a child class departs from its parent's implementation, one can simply
explain the differences in a 'Notes' section without re-writing the whole
docstring.
Examples
--------
>>> class Parent(object):
... def f(x):
... '''
... Adds 1 to x
...
... Parameters
... ----------
... x : int
... Variable to increment by 1
...
... Returns
... -------
... rval : int
... x incremented by 1
... '''
... rval = x + 1
... return rval
...
>>> class Child(Parent):
... @soft_wraps(Parent.f)
... def f(x):
... '''
... Notes
... -----
... Also prints the incremented value
... '''
... rval = x + 1
... print rval
... return rval
...
>>> c = Child()
>>> print c.f.__doc__
Adds 1 to x
Parameters
----------
x : int
Variable to increment by 1
Returns
-------
rval : int
x incremented by 1
Notes
-----
Also prints the incremented value
"""
def __init__(self, f, append=False):
"""
Parameters
----------
f : function
Function whose docstring will be concatenated with the decorated
function's docstring
        append : bool, optional
If True, appends f's docstring to the decorated function's
docstring instead of prepending it. Defaults to False.
"""
self.f = f
self.append = append
def __call__(self, f):
"""
Prepend self.f's docstring to f's docstring (or append it if
`self.append == True`).
Parameters
----------
f : function
Function to decorate
Returns
-------
f : function
Function f passed as argument with self.f's docstring
{pre,ap}pended to it
"""
if self.append:
            f.__doc__ += self.f.__doc__
else:
f.__doc__ = self.f.__doc__ + f.__doc__
return f
|
<commit_before><commit_msg>Add function decorator to improve functools.wraps<commit_after>"""
Documentation-related helper classes/functions
"""
class soft_wraps:
"""
A Python decorator which concatenates two functions' docstrings: one
function is defined at initialization and the other one is defined when
soft_wraps is called.
    This helps reduce the amount of documentation to write: one can use
this decorator on child classes' functions when their implementation is
similar to the one of the parent class. Conversely, if a function defined
in a child class departs from its parent's implementation, one can simply
explain the differences in a 'Notes' section without re-writing the whole
docstring.
Examples
--------
>>> class Parent(object):
... def f(x):
... '''
... Adds 1 to x
...
... Parameters
... ----------
... x : int
... Variable to increment by 1
...
... Returns
... -------
... rval : int
... x incremented by 1
... '''
... rval = x + 1
... return rval
...
>>> class Child(Parent):
... @soft_wraps(Parent.f)
... def f(x):
... '''
... Notes
... -----
... Also prints the incremented value
... '''
... rval = x + 1
... print rval
... return rval
...
>>> c = Child()
>>> print c.f.__doc__
Adds 1 to x
Parameters
----------
x : int
Variable to increment by 1
Returns
-------
rval : int
x incremented by 1
Notes
-----
Also prints the incremented value
"""
def __init__(self, f, append=False):
"""
Parameters
----------
f : function
Function whose docstring will be concatenated with the decorated
function's docstring
        append : bool, optional
If True, appends f's docstring to the decorated function's
docstring instead of prepending it. Defaults to False.
"""
self.f = f
self.append = append
def __call__(self, f):
"""
Prepend self.f's docstring to f's docstring (or append it if
`self.append == True`).
Parameters
----------
f : function
Function to decorate
Returns
-------
f : function
Function f passed as argument with self.f's docstring
{pre,ap}pended to it
"""
if self.append:
            f.__doc__ += self.f.__doc__
else:
f.__doc__ = self.f.__doc__ + f.__doc__
return f
|
|
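For contrast with the soft_wraps behaviour documented in the record above: functools.wraps replaces the wrapper's docstring with the wrapped function's instead of concatenating the two, which is the limitation the commit works around. A small sketch (the function names are illustrative):
import functools

def parent(x):
    """Adds 1 to x."""
    return x + 1

@functools.wraps(parent)
def child(x):
    """Also prints the incremented value."""
    print(x + 1)
    return x + 1

print(child.__doc__)   # -> 'Adds 1 to x.' -- the child's own docstring is discarded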
7ff614950163b1fb6a8fe0fef5b8de9bfa3a9d85
|
transmutagen/tests/test_partialfrac.py
|
transmutagen/tests/test_partialfrac.py
|
from sympy import together, expand_complex, re, im, symbols
from ..partialfrac import t
def test_re_form():
theta, alpha = symbols('theta, alpha')
# Check that this doesn't change
re_form = together(expand_complex(re(alpha/(t - theta))))
assert re_form == (t*re(alpha) - re(alpha)*re(theta) -
im(alpha)*im(theta))/((t - re(theta))**2 + im(theta)**2)
|
Add a test for the hard-coded re() partial frac form
|
Add a test for the hard-coded re() partial frac form
|
Python
|
bsd-3-clause
|
ergs/transmutagen,ergs/transmutagen
|
Add a test for the hard-coded re() partial frac form
|
from sympy import together, expand_complex, re, im, symbols
from ..partialfrac import t
def test_re_form():
theta, alpha = symbols('theta, alpha')
# Check that this doesn't change
re_form = together(expand_complex(re(alpha/(t - theta))))
assert re_form == (t*re(alpha) - re(alpha)*re(theta) -
im(alpha)*im(theta))/((t - re(theta))**2 + im(theta)**2)
|
<commit_before><commit_msg>Add a test for the hard-coded re() partial frac form<commit_after>
|
from sympy import together, expand_complex, re, im, symbols
from ..partialfrac import t
def test_re_form():
theta, alpha = symbols('theta, alpha')
# Check that this doesn't change
re_form = together(expand_complex(re(alpha/(t - theta))))
assert re_form == (t*re(alpha) - re(alpha)*re(theta) -
im(alpha)*im(theta))/((t - re(theta))**2 + im(theta)**2)
|
Add a test for the hard-coded re() partial frac formfrom sympy import together, expand_complex, re, im, symbols
from ..partialfrac import t
def test_re_form():
theta, alpha = symbols('theta, alpha')
# Check that this doesn't change
re_form = together(expand_complex(re(alpha/(t - theta))))
assert re_form == (t*re(alpha) - re(alpha)*re(theta) -
im(alpha)*im(theta))/((t - re(theta))**2 + im(theta)**2)
|
<commit_before><commit_msg>Add a test for the hard-coded re() partial frac form<commit_after>from sympy import together, expand_complex, re, im, symbols
from ..partialfrac import t
def test_re_form():
theta, alpha = symbols('theta, alpha')
# Check that this doesn't change
re_form = together(expand_complex(re(alpha/(t - theta))))
assert re_form == (t*re(alpha) - re(alpha)*re(theta) -
im(alpha)*im(theta))/((t - re(theta))**2 + im(theta)**2)
|
|
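The hard-coded form asserted in the test above can be re-derived by hand: write alpha and theta in rectangular form and multiply numerator and denominator of alpha/(t - theta) by the conjugate of (t - theta). The sketch below (treating t as real, which the asserted form presupposes) checks both pieces with SymPy:
from sympy import I, expand, simplify, symbols

t, ar, ai, tr, ti = symbols('t alpha_r alpha_i theta_r theta_i', real=True)
alpha = ar + I*ai
theta = tr + I*ti
conj = (t - tr) + I*ti               # conjugate of (t - theta), written out by hand

numer_re = expand(alpha*conj).as_real_imag()[0]
denom = expand((t - theta)*conj)

assert simplify(numer_re - (t*ar - ar*tr - ai*ti)) == 0   # matches the asserted numerator
assert simplify(denom - ((t - tr)**2 + ti**2)) == 0       # matches the asserted denominator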
8dc7a1e239dc22dd4eb69cfe1754586e3a1690dc
|
tests/test_run_js.py
|
tests/test_run_js.py
|
import os
from py2js import JavaScript
def f(x):
return x
def test(func, run):
func_source = str(JavaScript(func))
run_file = "/tmp/run.js"
with open(run_file, "w") as f:
f.write(func_source)
f.write("\n")
f.write(run)
r = os.system('js -f defs.js -f %s' % run_file)
assert r == 0
test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
|
Test javascript using the "js"
|
Test javascript using the "js"
|
Python
|
mit
|
qsnake/py2js,mattpap/py2js,chrivers/pyjaco,buchuki/pyjaco,mattpap/py2js,chrivers/pyjaco,chrivers/pyjaco,buchuki/pyjaco,buchuki/pyjaco,qsnake/py2js
|
Test javascript using the "js"
|
import os
from py2js import JavaScript
def f(x):
return x
def test(func, run):
func_source = str(JavaScript(func))
run_file = "/tmp/run.js"
with open(run_file, "w") as f:
f.write(func_source)
f.write("\n")
f.write(run)
r = os.system('js -f defs.js -f %s' % run_file)
assert r == 0
test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
|
<commit_before><commit_msg>Test javascript using the "js"<commit_after>
|
import os
from py2js import JavaScript
def f(x):
return x
def test(func, run):
func_source = str(JavaScript(func))
run_file = "/tmp/run.js"
with open(run_file, "w") as f:
f.write(func_source)
f.write("\n")
f.write(run)
r = os.system('js -f defs.js -f %s' % run_file)
assert r == 0
test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
|
Test javascript using the "js"import os
from py2js import JavaScript
def f(x):
return x
def test(func, run):
func_source = str(JavaScript(func))
run_file = "/tmp/run.js"
with open(run_file, "w") as f:
f.write(func_source)
f.write("\n")
f.write(run)
r = os.system('js -f defs.js -f %s' % run_file)
assert r == 0
test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
|
<commit_before><commit_msg>Test javascript using the "js"<commit_after>import os
from py2js import JavaScript
def f(x):
return x
def test(func, run):
func_source = str(JavaScript(func))
run_file = "/tmp/run.js"
with open(run_file, "w") as f:
f.write(func_source)
f.write("\n")
f.write(run)
r = os.system('js -f defs.js -f %s' % run_file)
assert r == 0
test(f, "assert(f(3) == 3)")
test(f, "assert(f(3) != 4)")
|
|
3dd71c02ea1fa9e39054bd82bf9e8657ec77d6b9
|
tools/get_chat_id.py
|
tools/get_chat_id.py
|
#! /usr/bin/python3
# -*- coding:utf-8 -*-
# by antoine@2ohm.fr
import sys
import time
import telepot
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print("\tchat_id: {}".format(chat_id))
if content_type == 'text' and msg['text'] == '/start':
ans = """
Hello <b>{first_name}</b>, nice to meet you!\n
Your chat_id is <code>{chat_id}</code>.\n
You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
See you soon!
""".format(first_name = msg['from']['first_name'],
chat_id = chat_id)
bot.sendMessage(chat_id, ans, parse_mode = "HTML")
TOKEN = "PUT_YOUR_TOKKEN_HERE"
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)
print ('Listening ...')
# Keep the program running.
while 1:
try:
time.sleep(10)
except KeyboardInterrupt:
print()
sys.exit()
|
Add a script to recover the chat_id
|
Add a script to recover the chat_id
|
Python
|
apache-2.0
|
a2ohm/ProgressBot
|
Add a script to recover the chat_id
|
#! /usr/bin/python3
# -*- coding:utf-8 -*-
# by antoine@2ohm.fr
import sys
import time
import telepot
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print("\tchat_id: {}".format(chat_id))
if content_type == 'text' and msg['text'] == '/start':
ans = """
Hello <b>{first_name}</b>, nice to meet you!\n
Your chat_id is <code>{chat_id}</code>.\n
You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
See you soon!
""".format(first_name = msg['from']['first_name'],
chat_id = chat_id)
bot.sendMessage(chat_id, ans, parse_mode = "HTML")
TOKEN = "PUT_YOUR_TOKKEN_HERE"
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)
print ('Listening ...')
# Keep the program running.
while 1:
try:
time.sleep(10)
except KeyboardInterrupt:
print()
sys.exit()
|
<commit_before><commit_msg>Add a script to recover the chat_id<commit_after>
|
#! /usr/bin/python3
# -*- coding:utf-8 -*-
# by antoine@2ohm.fr
import sys
import time
import telepot
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print("\tchat_id: {}".format(chat_id))
if content_type == 'text' and msg['text'] == '/start':
ans = """
Hello <b>{first_name}</b>, nice to meet you!\n
Your chat_id is <code>{chat_id}</code>.\n
You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
See you soon!
""".format(first_name = msg['from']['first_name'],
chat_id = chat_id)
bot.sendMessage(chat_id, ans, parse_mode = "HTML")
TOKEN = "PUT_YOUR_TOKKEN_HERE"
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)
print ('Listening ...')
# Keep the program running.
while 1:
try:
time.sleep(10)
except KeyboardInterrupt:
print()
sys.exit()
|
Add a script to recover the chat_id#! /usr/bin/python3
# -*- coding:utf-8 -*-
# by antoine@2ohm.fr
import sys
import time
import telepot
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print("\tchat_id: {}".format(chat_id))
if content_type == 'text' and msg['text'] == '/start':
ans = """
Hello <b>{first_name}</b>, nice to meet you!\n
Your chat_id is <code>{chat_id}</code>.\n
You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
See you soon!
""".format(first_name = msg['from']['first_name'],
chat_id = chat_id)
bot.sendMessage(chat_id, ans, parse_mode = "HTML")
TOKEN = "PUT_YOUR_TOKKEN_HERE"
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)
print ('Listening ...')
# Keep the program running.
while 1:
try:
time.sleep(10)
except KeyboardInterrupt:
print()
sys.exit()
|
<commit_before><commit_msg>Add a script to recover the chat_id<commit_after>#! /usr/bin/python3
# -*- coding:utf-8 -*-
# by antoine@2ohm.fr
import sys
import time
import telepot
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print("\tchat_id: {}".format(chat_id))
if content_type == 'text' and msg['text'] == '/start':
ans = """
Hello <b>{first_name}</b>, nice to meet you!\n
Your chat_id is <code>{chat_id}</code>.\n
You can stop the <code>get_chat_id</code> script with <code>CTRL+C</code> and start using the ProgressBot right now.\n
See you soon!
""".format(first_name = msg['from']['first_name'],
chat_id = chat_id)
bot.sendMessage(chat_id, ans, parse_mode = "HTML")
TOKEN = "PUT_YOUR_TOKKEN_HERE"
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)
print ('Listening ...')
# Keep the program running.
while 1:
try:
time.sleep(10)
except KeyboardInterrupt:
print()
sys.exit()
|
|
fed2e3f9bdb3a00b077b5e7df1aed4d927b77b6c
|
tests/clifford_test.py
|
tests/clifford_test.py
|
"""Test for the Clifford algebra drudge."""
from drudge import CliffordDrudge, Vec, inner_by_delta
def test_clifford_drudge_by_quaternions(spark_ctx):
"""Test basic functionality of Clifford drudge by quaternions.
"""
dr = CliffordDrudge(
spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
)
e_ = Vec('e')
i_ = dr.sum(e_[2] * e_[3]).simplify()
j_ = dr.sum(e_[3] * e_[1]).simplify()
k_ = dr.sum(e_[1] * e_[2]).simplify()
for i in [i_, j_, k_]:
assert (i * i).simplify() == -1
assert (i_ * j_ * k_).simplify() == -1
assert (i_ * j_).simplify() == k_
assert (j_ * k_).simplify() == i_
assert (k_ * i_).simplify() == j_
|
Add test for Clifford drudge by quaternions
|
Add test for Clifford drudge by quaternions
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add test for Clifford drudge by quaternions
|
"""Test for the Clifford algebra drudge."""
from drudge import CliffordDrudge, Vec, inner_by_delta
def test_clifford_drudge_by_quaternions(spark_ctx):
"""Test basic functionality of Clifford drudge by quaternions.
"""
dr = CliffordDrudge(
spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
)
e_ = Vec('e')
i_ = dr.sum(e_[2] * e_[3]).simplify()
j_ = dr.sum(e_[3] * e_[1]).simplify()
k_ = dr.sum(e_[1] * e_[2]).simplify()
for i in [i_, j_, k_]:
assert (i * i).simplify() == -1
assert (i_ * j_ * k_).simplify() == -1
assert (i_ * j_).simplify() == k_
assert (j_ * k_).simplify() == i_
assert (k_ * i_).simplify() == j_
|
<commit_before><commit_msg>Add test for Clifford drudge by quaternions<commit_after>
|
"""Test for the Clifford algebra drudge."""
from drudge import CliffordDrudge, Vec, inner_by_delta
def test_clifford_drudge_by_quaternions(spark_ctx):
"""Test basic functionality of Clifford drudge by quaternions.
"""
dr = CliffordDrudge(
spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
)
e_ = Vec('e')
i_ = dr.sum(e_[2] * e_[3]).simplify()
j_ = dr.sum(e_[3] * e_[1]).simplify()
k_ = dr.sum(e_[1] * e_[2]).simplify()
for i in [i_, j_, k_]:
assert (i * i).simplify() == -1
assert (i_ * j_ * k_).simplify() == -1
assert (i_ * j_).simplify() == k_
assert (j_ * k_).simplify() == i_
assert (k_ * i_).simplify() == j_
|
Add test for Clifford drudge by quaternions"""Test for the Clifford algebra drudge."""
from drudge import CliffordDrudge, Vec, inner_by_delta
def test_clifford_drudge_by_quaternions(spark_ctx):
"""Test basic functionality of Clifford drudge by quaternions.
"""
dr = CliffordDrudge(
spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
)
e_ = Vec('e')
i_ = dr.sum(e_[2] * e_[3]).simplify()
j_ = dr.sum(e_[3] * e_[1]).simplify()
k_ = dr.sum(e_[1] * e_[2]).simplify()
for i in [i_, j_, k_]:
assert (i * i).simplify() == -1
assert (i_ * j_ * k_).simplify() == -1
assert (i_ * j_).simplify() == k_
assert (j_ * k_).simplify() == i_
assert (k_ * i_).simplify() == j_
|
<commit_before><commit_msg>Add test for Clifford drudge by quaternions<commit_after>"""Test for the Clifford algebra drudge."""
from drudge import CliffordDrudge, Vec, inner_by_delta
def test_clifford_drudge_by_quaternions(spark_ctx):
"""Test basic functionality of Clifford drudge by quaternions.
"""
dr = CliffordDrudge(
spark_ctx, inner=lambda v1, v2: -inner_by_delta(v1, v2)
)
e_ = Vec('e')
i_ = dr.sum(e_[2] * e_[3]).simplify()
j_ = dr.sum(e_[3] * e_[1]).simplify()
k_ = dr.sum(e_[1] * e_[2]).simplify()
for i in [i_, j_, k_]:
assert (i * i).simplify() == -1
assert (i_ * j_ * k_).simplify() == -1
assert (i_ * j_).simplify() == k_
assert (j_ * k_).simplify() == i_
assert (k_ * i_).simplify() == j_
|
|
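A hand check of why the assertions in the test above come out as they do, using the conventions the test sets up (the -delta inner product makes each generator square to -1, and distinct generators anticommute):
i^2   = (e2 e3)(e2 e3) = -(e2 e2)(e3 e3) = -(-1)(-1) = -1   (likewise for j and k)
i j   = (e2 e3)(e3 e1) = e2 (e3 e3) e1 = -(e2 e1) = e1 e2 = k
i j k = (e2 e3)(e3 e1)(e1 e2) = e2 (e3 e3)(e1 e1) e2 = e2 e2 = -1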
09a0689b8e521c1d5c0ea68ac448dc9ae7abcff5
|
fitsHeader.py
|
fitsHeader.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse
def _parser():
parser = argparse.ArgumentParser(description='View the header of a fits file')
parser.add_argument('input', help='File name of fits file')
parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
return parser.parse_args()
if __name__ == '__main__':
args = _parser()
h = fits.getheader(args.input)
h.keys = map(str.lower, h.keys())
if args.key:
args.key = args.key.lower()
try:
print h[args.key]
except KeyError:
raise KeyError('Key was not found')
else:
string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
pager(string)
|
Read the header of a fits file and/or look up a single key (case insensitive).
|
Read the header of a fits file and/or look up a
single key (case insensitive).
|
Python
|
mit
|
DanielAndreasen/astro_scripts
|
Read the header of a fits file and/or look up a
single key (case insensitive).
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse
def _parser():
parser = argparse.ArgumentParser(description='View the header of a fits file')
parser.add_argument('input', help='File name of fits file')
parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
return parser.parse_args()
if __name__ == '__main__':
args = _parser()
h = fits.getheader(args.input)
h.keys = map(str.lower, h.keys())
if args.key:
args.key = args.key.lower()
try:
print h[args.key]
except KeyError:
raise KeyError('Key was not found')
else:
string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
pager(string)
|
<commit_before><commit_msg>Read the header of a fits file and/or look up a
single key (case insensitive).<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse
def _parser():
parser = argparse.ArgumentParser(description='View the header of a fits file')
parser.add_argument('input', help='File name of fits file')
parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
return parser.parse_args()
if __name__ == '__main__':
args = _parser()
h = fits.getheader(args.input)
h.keys = map(str.lower, h.keys())
if args.key:
args.key = args.key.lower()
try:
print h[args.key]
except KeyError:
raise KeyError('Key was not found')
else:
string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
pager(string)
|
Read the header of a fits file and/or look up a
single key (case insensitive).#!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse
def _parser():
parser = argparse.ArgumentParser(description='View the header of a fits file')
parser.add_argument('input', help='File name of fits file')
parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
return parser.parse_args()
if __name__ == '__main__':
args = _parser()
h = fits.getheader(args.input)
h.keys = map(str.lower, h.keys())
if args.key:
args.key = args.key.lower()
try:
print h[args.key]
except KeyError:
raise KeyError('Key was not found')
else:
string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
pager(string)
|
<commit_before><commit_msg>Read the header of a fits file and/or look up a
single key (case insensitive).<commit_after>#!/usr/bin/env python
# -*- coding: utf8 -*-
# My imports
from __future__ import division
from astropy.io import fits
from pydoc import pager
import argparse
def _parser():
parser = argparse.ArgumentParser(description='View the header of a fits file')
parser.add_argument('input', help='File name of fits file')
parser.add_argument('-key', help='Look up a given key (case insensitive)', default=None)
return parser.parse_args()
if __name__ == '__main__':
args = _parser()
h = fits.getheader(args.input)
h.keys = map(str.lower, h.keys())
if args.key:
args.key = args.key.lower()
try:
print h[args.key]
except KeyError:
raise KeyError('Key was not found')
else:
string = '\n'.join("{!s} : {!r}".format(key, val) for (key, val) in h.items())
pager(string)
|
|
b674f921a8e5cffb2d3e320f564c61ca01455a9f
|
wafer/management/commands/wafer_talk_video_reviewers.py
|
wafer/management/commands/wafer_talk_video_reviewers.py
|
import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
|
Add command to generate a csv of talk titles and video reviewers
|
Add command to generate a csv of talk titles and video reviewers
|
Python
|
isc
|
CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer
|
Add command to generate a csv of talk titles and video reviewers
|
import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
|
<commit_before><commit_msg>Add command to generate a csv of talk titles and video reviewers<commit_after>
|
import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
|
Add command to generate a csv of talk titles and video reviewersimport sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
|
<commit_before><commit_msg>Add command to generate a csv of talk titles and video reviewers<commit_after>import sys
import csv
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model
from wafer.talks.models import Talk, ACCEPTED, PROVISIONAL
class Command(BaseCommand):
help = ("List talks and the associated video_reviewer emails."
" Only reviewers for accepted talks are listed")
def _video_reviewers(self, options):
talks = Talk.objects.filter(status=ACCEPTED)
csv_file = csv.writer(sys.stdout)
for talk in talks:
reviewer = talk.video_reviewer
if not reviewer:
reviewer = 'NO REVIEWER'
row = [x.encode("utf-8") for x in (
talk.title,
reviewer,
)]
csv_file.writerow(row)
def handle(self, *args, **options):
self._video_reviewers(options)
|
|
3db3c22d83071550d8bbd70062f957cf43c5e54a
|
cart/_compatibility.py
|
cart/_compatibility.py
|
import sys
is_py3 = sys.version_info[0] >= 3
def utf8(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
|
Add a compatibility module, because of Python 2/3 compatibility issues.
|
Add a compatibility module, because of Python 2/3 compatibility issues.
|
Python
|
mit
|
davidhalter-archive/shopping_cart_example
|
Add a compatibility module, because of Python 2/3 compatibility issues.
|
import sys
is_py3 = sys.version_info[0] >= 3
def utf8(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
|
<commit_before><commit_msg>Add a compatibility module, because of Python 2/3 compatibility issues.<commit_after>
|
import sys
is_py3 = sys.version_info[0] >= 3
def utf8(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
|
Add a compatibility module, because of Python 2/3 compatibility issues.import sys
is_py3 = sys.version_info[0] >= 3
def utf8(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
|
<commit_before><commit_msg>Add a compatibility module, because of Python 2/3 compatibility issues.<commit_after>import sys
is_py3 = sys.version_info[0] >= 3
def utf8(string):
"""Cast to unicode DAMMIT!
Written because Python2 repr always implicitly casts to a string, so we
    have to cast back to a unicode (and we know that we always deal with valid
unicode, because we check that in the beginning).
"""
if is_py3:
return str(string)
elif not isinstance(string, unicode):
return unicode(str(string), 'UTF-8')
return string
|
|
156b7dfc11f24a7d77d2280e8ddade3cb7a474b7
|
misc/list_all_es_indexes.py
|
misc/list_all_es_indexes.py
|
#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
import hcl
import requests
def get_terraform_vars():
s3_client = boto3.client("s3")
tfvars_body = s3_client.get_object(
Bucket="wellcomecollection-platform-infra",
Key="terraform.tfvars"
)["Body"]
return hcl.load(tfvars_body)
def build_url(es_credentials):
protocol = es_credentials["protocol"]
name = es_credentials["name"]
region = es_credentials["region"]
port = es_credentials["port"]
return f"{protocol}://{name}.{region}.aws.found.io:{port}"
def get_all_indexes(es_url, username, password):
resp = requests.get(
f"{es_url}/_cat/indices",
auth=(username, password),
params={"format": "json"}
)
resp.raise_for_status()
return resp.json()
if __name__ == "__main__":
terraform_vars = get_terraform_vars()
es_cluster_credentials = terraform_vars["es_cluster_credentials"]
es_url = build_url(es_cluster_credentials)
username = es_cluster_credentials["username"]
password = es_cluster_credentials["password"]
indexes = get_all_indexes(es_url, username=username, password=password)
print(
'\n'.join(sorted(
idx["index"]
for idx in indexes
if not idx["index"].startswith(".")
))
)
|
Add a script for listing all Elasticsearch indexes
|
Add a script for listing all Elasticsearch indexes
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add a script for listing all Elasticsearch indexes
|
#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
import hcl
import requests
def get_terraform_vars():
s3_client = boto3.client("s3")
tfvars_body = s3_client.get_object(
Bucket="wellcomecollection-platform-infra",
Key="terraform.tfvars"
)["Body"]
return hcl.load(tfvars_body)
def build_url(es_credentials):
protocol = es_credentials["protocol"]
name = es_credentials["name"]
region = es_credentials["region"]
port = es_credentials["port"]
return f"{protocol}://{name}.{region}.aws.found.io:{port}"
def get_all_indexes(es_url, username, password):
resp = requests.get(
f"{es_url}/_cat/indices",
auth=(username, password),
params={"format": "json"}
)
resp.raise_for_status()
return resp.json()
if __name__ == "__main__":
terraform_vars = get_terraform_vars()
es_cluster_credentials = terraform_vars["es_cluster_credentials"]
es_url = build_url(es_cluster_credentials)
username = es_cluster_credentials["username"]
password = es_cluster_credentials["password"]
indexes = get_all_indexes(es_url, username=username, password=password)
print(
'\n'.join(sorted(
idx["index"]
for idx in indexes
if not idx["index"].startswith(".")
))
)
|
<commit_before><commit_msg>Add a script for listing all Elasticsearch indexes<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
import hcl
import requests
def get_terraform_vars():
s3_client = boto3.client("s3")
tfvars_body = s3_client.get_object(
Bucket="wellcomecollection-platform-infra",
Key="terraform.tfvars"
)["Body"]
return hcl.load(tfvars_body)
def build_url(es_credentials):
protocol = es_credentials["protocol"]
name = es_credentials["name"]
region = es_credentials["region"]
port = es_credentials["port"]
return f"{protocol}://{name}.{region}.aws.found.io:{port}"
def get_all_indexes(es_url, username, password):
resp = requests.get(
f"{es_url}/_cat/indices",
auth=(username, password),
params={"format": "json"}
)
resp.raise_for_status()
return resp.json()
if __name__ == "__main__":
terraform_vars = get_terraform_vars()
es_cluster_credentials = terraform_vars["es_cluster_credentials"]
es_url = build_url(es_cluster_credentials)
username = es_cluster_credentials["username"]
password = es_cluster_credentials["password"]
indexes = get_all_indexes(es_url, username=username, password=password)
print(
'\n'.join(sorted(
idx["index"]
for idx in indexes
if not idx["index"].startswith(".")
))
)
|
Add a script for listing all Elasticsearch indexes#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
import hcl
import requests
def get_terraform_vars():
s3_client = boto3.client("s3")
tfvars_body = s3_client.get_object(
Bucket="wellcomecollection-platform-infra",
Key="terraform.tfvars"
)["Body"]
return hcl.load(tfvars_body)
def build_url(es_credentials):
protocol = es_credentials["protocol"]
name = es_credentials["name"]
region = es_credentials["region"]
port = es_credentials["port"]
return f"{protocol}://{name}.{region}.aws.found.io:{port}"
def get_all_indexes(es_url, username, password):
resp = requests.get(
f"{es_url}/_cat/indices",
auth=(username, password),
params={"format": "json"}
)
resp.raise_for_status()
return resp.json()
if __name__ == "__main__":
terraform_vars = get_terraform_vars()
es_cluster_credentials = terraform_vars["es_cluster_credentials"]
es_url = build_url(es_cluster_credentials)
username = es_cluster_credentials["username"]
password = es_cluster_credentials["password"]
indexes = get_all_indexes(es_url, username=username, password=password)
print(
'\n'.join(sorted(
idx["index"]
for idx in indexes
if not idx["index"].startswith(".")
))
)
|
<commit_before><commit_msg>Add a script for listing all Elasticsearch indexes<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8
import boto3
import hcl
import requests
def get_terraform_vars():
s3_client = boto3.client("s3")
tfvars_body = s3_client.get_object(
Bucket="wellcomecollection-platform-infra",
Key="terraform.tfvars"
)["Body"]
return hcl.load(tfvars_body)
def build_url(es_credentials):
protocol = es_credentials["protocol"]
name = es_credentials["name"]
region = es_credentials["region"]
port = es_credentials["port"]
return f"{protocol}://{name}.{region}.aws.found.io:{port}"
def get_all_indexes(es_url, username, password):
resp = requests.get(
f"{es_url}/_cat/indices",
auth=(username, password),
params={"format": "json"}
)
resp.raise_for_status()
return resp.json()
if __name__ == "__main__":
terraform_vars = get_terraform_vars()
es_cluster_credentials = terraform_vars["es_cluster_credentials"]
es_url = build_url(es_cluster_credentials)
username = es_cluster_credentials["username"]
password = es_cluster_credentials["password"]
indexes = get_all_indexes(es_url, username=username, password=password)
print(
'\n'.join(sorted(
idx["index"]
for idx in indexes
if not idx["index"].startswith(".")
))
)
|
|
006a921f19f6c4f64d694c86346ad85ada2c8bb8
|
tests/subclass_test.py
|
tests/subclass_test.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
try:
import unittest2 as unittest
except ImportError:
import unittest
import pycurl
CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)
class SubclassTest(unittest.TestCase):
def test_baseclass_init(self):
# base classes do not accept any arguments on initialization
for baseclass in CLASSES:
try:
baseclass(0)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid args')
try:
baseclass(a=1)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid kwargs')
def test_subclass_create(self):
for baseclass in CLASSES:
# test creation of a subclass
class MyCurlClass(baseclass):
pass
# test creation of its object
obj = MyCurlClass()
# must be of type subclass, but also an instance of base class
assert type(obj) == MyCurlClass
assert isinstance(obj, baseclass)
def test_subclass_init(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def __init__(self, x, y=4):
self.x = x
self.y = y
# subclass __init__ must be able to accept args and kwargs
obj = MyCurlClass(3)
assert obj.x == 3
assert obj.y == 4
obj = MyCurlClass(5, y=6)
assert obj.x == 5
assert obj.y == 6
# and it must throw TypeError if arguments don't match
try:
MyCurlClass(1, 2, 3, kwarg=4)
except TypeError:
pass
else:
raise AssertionError('Subclass accepted invalid arguments')
def test_subclass_method(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def my_method(self, x):
return x + 1
obj = MyCurlClass()
# methods must be able to accept arguments and return a value
assert obj.my_method(1) == 2
def test_subclass_method_override(self):
# setopt args for each base class
args = {
pycurl.Curl: (pycurl.VERBOSE, 1),
pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
}
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def setopt(self, option, value):
# base method must not be overwritten
assert super().setopt != self.setopt
# base method must be callable, setopt must return None
assert super().setopt(option, value) is None
# return something else
return 'my setopt'
obj = MyCurlClass()
assert obj.setopt(*args[baseclass]) == 'my setopt'
|
Add tests for subclass support
|
Add tests for subclass support
|
Python
|
lgpl-2.1
|
pycurl/pycurl,pycurl/pycurl,pycurl/pycurl
|
Add tests for subclass support
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
try:
import unittest2 as unittest
except ImportError:
import unittest
import pycurl
CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)
class SubclassTest(unittest.TestCase):
def test_baseclass_init(self):
# base classes do not accept any arguments on initialization
for baseclass in CLASSES:
try:
baseclass(0)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid args')
try:
baseclass(a=1)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid kwargs')
def test_subclass_create(self):
for baseclass in CLASSES:
# test creation of a subclass
class MyCurlClass(baseclass):
pass
# test creation of its object
obj = MyCurlClass()
# must be of type subclass, but also an instance of base class
assert type(obj) == MyCurlClass
assert isinstance(obj, baseclass)
def test_subclass_init(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def __init__(self, x, y=4):
self.x = x
self.y = y
# subclass __init__ must be able to accept args and kwargs
obj = MyCurlClass(3)
assert obj.x == 3
assert obj.y == 4
obj = MyCurlClass(5, y=6)
assert obj.x == 5
assert obj.y == 6
# and it must throw TypeError if arguments don't match
try:
MyCurlClass(1, 2, 3, kwarg=4)
except TypeError:
pass
else:
raise AssertionError('Subclass accepted invalid arguments')
def test_subclass_method(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def my_method(self, x):
return x + 1
obj = MyCurlClass()
# methods must be able to accept arguments and return a value
assert obj.my_method(1) == 2
def test_subclass_method_override(self):
# setopt args for each base class
args = {
pycurl.Curl: (pycurl.VERBOSE, 1),
pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
}
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def setopt(self, option, value):
# base method must not be overwritten
assert super().setopt != self.setopt
# base method must be callable, setopt must return None
assert super().setopt(option, value) is None
# return something else
return 'my setopt'
obj = MyCurlClass()
assert obj.setopt(*args[baseclass]) == 'my setopt'
|
<commit_before><commit_msg>Add tests for subclass support<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
try:
import unittest2 as unittest
except ImportError:
import unittest
import pycurl
CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)
class SubclassTest(unittest.TestCase):
def test_baseclass_init(self):
# base classes do not accept any arguments on initialization
for baseclass in CLASSES:
try:
baseclass(0)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid args')
try:
baseclass(a=1)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid kwargs')
def test_subclass_create(self):
for baseclass in CLASSES:
# test creation of a subclass
class MyCurlClass(baseclass):
pass
# test creation of its object
obj = MyCurlClass()
# must be of type subclass, but also an instance of base class
assert type(obj) == MyCurlClass
assert isinstance(obj, baseclass)
def test_subclass_init(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def __init__(self, x, y=4):
self.x = x
self.y = y
# subclass __init__ must be able to accept args and kwargs
obj = MyCurlClass(3)
assert obj.x == 3
assert obj.y == 4
obj = MyCurlClass(5, y=6)
assert obj.x == 5
assert obj.y == 6
# and it must throw TypeError if arguments don't match
try:
MyCurlClass(1, 2, 3, kwarg=4)
except TypeError:
pass
else:
raise AssertionError('Subclass accepted invalid arguments')
def test_subclass_method(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def my_method(self, x):
return x + 1
obj = MyCurlClass()
# methods must be able to accept arguments and return a value
assert obj.my_method(1) == 2
def test_subclass_method_override(self):
# setopt args for each base class
args = {
pycurl.Curl: (pycurl.VERBOSE, 1),
pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
}
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def setopt(self, option, value):
# base method must not be overwritten
assert super().setopt != self.setopt
# base method must be callable, setopt must return None
assert super().setopt(option, value) is None
# return something else
return 'my setopt'
obj = MyCurlClass()
assert obj.setopt(*args[baseclass]) == 'my setopt'
|
Add tests for subclass support#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
try:
import unittest2 as unittest
except ImportError:
import unittest
import pycurl
CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)
class SubclassTest(unittest.TestCase):
def test_baseclass_init(self):
# base classes do not accept any arguments on initialization
for baseclass in CLASSES:
try:
baseclass(0)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid args')
try:
baseclass(a=1)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid kwargs')
def test_subclass_create(self):
for baseclass in CLASSES:
# test creation of a subclass
class MyCurlClass(baseclass):
pass
# test creation of its object
obj = MyCurlClass()
# must be of type subclass, but also an instance of base class
assert type(obj) == MyCurlClass
assert isinstance(obj, baseclass)
def test_subclass_init(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def __init__(self, x, y=4):
self.x = x
self.y = y
# subclass __init__ must be able to accept args and kwargs
obj = MyCurlClass(3)
assert obj.x == 3
assert obj.y == 4
obj = MyCurlClass(5, y=6)
assert obj.x == 5
assert obj.y == 6
# and it must throw TypeError if arguments don't match
try:
MyCurlClass(1, 2, 3, kwarg=4)
except TypeError:
pass
else:
raise AssertionError('Subclass accepted invalid arguments')
def test_subclass_method(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def my_method(self, x):
return x + 1
obj = MyCurlClass()
# methods must be able to accept arguments and return a value
assert obj.my_method(1) == 2
def test_subclass_method_override(self):
# setopt args for each base class
args = {
pycurl.Curl: (pycurl.VERBOSE, 1),
pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
}
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def setopt(self, option, value):
# base method must not be overwritten
assert super().setopt != self.setopt
# base method must be callable, setopt must return None
assert super().setopt(option, value) is None
# return something else
return 'my setopt'
obj = MyCurlClass()
assert obj.setopt(*args[baseclass]) == 'my setopt'
|
<commit_before><commit_msg>Add tests for subclass support<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
try:
import unittest2 as unittest
except ImportError:
import unittest
import pycurl
CLASSES = (pycurl.Curl, pycurl.CurlMulti, pycurl.CurlShare)
class SubclassTest(unittest.TestCase):
def test_baseclass_init(self):
# base classes do not accept any arguments on initialization
for baseclass in CLASSES:
try:
baseclass(0)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid args')
try:
baseclass(a=1)
except TypeError:
pass
else:
raise AssertionError('Base class accepted invalid kwargs')
def test_subclass_create(self):
for baseclass in CLASSES:
# test creation of a subclass
class MyCurlClass(baseclass):
pass
# test creation of its object
obj = MyCurlClass()
# must be of type subclass, but also an instance of base class
assert type(obj) == MyCurlClass
assert isinstance(obj, baseclass)
def test_subclass_init(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def __init__(self, x, y=4):
self.x = x
self.y = y
# subclass __init__ must be able to accept args and kwargs
obj = MyCurlClass(3)
assert obj.x == 3
assert obj.y == 4
obj = MyCurlClass(5, y=6)
assert obj.x == 5
assert obj.y == 6
# and it must throw TypeError if arguments don't match
try:
MyCurlClass(1, 2, 3, kwarg=4)
except TypeError:
pass
else:
raise AssertionError('Subclass accepted invalid arguments')
def test_subclass_method(self):
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def my_method(self, x):
return x + 1
obj = MyCurlClass()
# methods must be able to accept arguments and return a value
assert obj.my_method(1) == 2
def test_subclass_method_override(self):
# setopt args for each base class
args = {
pycurl.Curl: (pycurl.VERBOSE, 1),
pycurl.CurlMulti: (pycurl.M_MAXCONNECTS, 3),
pycurl.CurlShare: (pycurl.SH_SHARE, pycurl.LOCK_DATA_COOKIE),
}
for baseclass in CLASSES:
class MyCurlClass(baseclass):
def setopt(self, option, value):
# base method must not be overwritten
assert super().setopt != self.setopt
# base method must be callable, setopt must return None
assert super().setopt(option, value) is None
# return something else
return 'my setopt'
obj = MyCurlClass()
assert obj.setopt(*args[baseclass]) == 'my setopt'
|
|
d764a483497afc5d029a82db14cc5cc88f45f4c0
|
nova/api/openstack/contrib/multinic.py
|
nova/api/openstack/contrib/multinic.py
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.multinic")
class Multinic(extensions.ExtensionDescriptor):
def __init__(self, *args, **kwargs):
super(Multinic, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def get_name(self):
return "Multinic"
def get_alias(self):
return "NMN"
def get_description(self):
return "Multiple network support"
def get_namespace(self):
return "http://docs.openstack.org/ext/multinic/api/v1.1"
def get_updated(self):
return "2011-06-09T00:00:00+00:00"
def get_actions(self):
actions = []
# Add the add_fixed_ip action
act = extensions.ActionExtension("servers", "addFixedIp",
self._add_fixed_ip)
actions.append(act)
# Add the remove_fixed_ip action
act = extensions.ActionExtension("servers", "removeFixedIp",
self._remove_fixed_ip)
actions.append(act)
return actions
def _add_fixed_ip(self, input_dict, req, id):
"""Adds an IP on a given network to an instance."""
try:
# Validate the input entity
if 'networkId' not in input_dict['addFixedIp']:
LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
return faults.Fault(exc.HTTPUnprocessableEntity())
# Add the fixed IP
network_id = input_dict['addFixedIp']['networkId']
self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
network_id)
except Exception, e:
LOG.exception(_("Error in addFixedIp %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
def _remove_fixed_ip(self, input_dict, req, id):
# Not yet implemented
raise faults.Fault(exc.HTTPNotImplemented())
|
Add an extension to allow for an addFixedIp action on instances
|
Add an extension to allow for an addFixedIp action on instances
|
Python
|
apache-2.0
|
saleemjaveds/https-github.com-openstack-nova,bigswitch/nova,virtualopensystems/nova,leilihh/nova,cyx1231st/nova,rajalokan/nova,whitepages/nova,eneabio/nova,devendermishrajio/nova,virtualopensystems/nova,luogangyi/bcec-nova,tanglei528/nova,cloudbau/nova,sileht/deb-openstack-nova,tianweizhang/nova,mandeepdhami/nova,salv-orlando/MyRepo,cloudbase/nova,devoid/nova,sridevikoushik31/openstack,JioCloud/nova_test_latest,savi-dev/nova,yosshy/nova,klmitch/nova,cernops/nova,dstroppa/openstack-smartos-nova-grizzly,psiwczak/openstack,BeyondTheClouds/nova,CiscoSystems/nova,kimjaejoong/nova,mikalstill/nova,SUSE-Cloud/nova,takeshineshiro/nova,hanlind/nova,redhat-openstack/nova,adelina-t/nova,viggates/nova,shahar-stratoscale/nova,gspilio/nova,maoy/zknova,maheshp/novatest,LoHChina/nova,DirectXMan12/nova-hacking,edulramirez/nova,rahulunair/nova,double12gzh/nova,MountainWei/nova,leilihh/nova,j-carpentier/nova,yrobla/nova,NewpTone/stacklab-nova,eneabio/nova,viggates/nova,imsplitbit/nova,zhimin711/nova,JianyuWang/nova,houshengbo/nova_vmware_compute_driver,vmturbo/nova,Triv90/Nova,gooddata/openstack-nova,JianyuWang/nova,double12gzh/nova,mmnelemane/nova,jianghuaw/nova,Francis-Liu/animated-broccoli,devendermishrajio/nova_test_latest,dawnpower/nova,usc-isi/extra-specs,JioCloud/nova_test_latest,savi-dev/nova,mikalstill/nova,rajalokan/nova,psiwczak/openstack,Yusuke1987/openstack_template,usc-isi/nova,rrader/nova-docker-plugin,citrix-openstack-build/nova,maoy/zknova,yrobla/nova,psiwczak/openstack,ntt-sic/nova,Yuriy-Leonov/nova,kimjaejoong/nova,rrader/nova-docker-plugin,cloudbase/nova,barnsnake351/nova,eayunstack/nova,tianweizhang/nova,orbitfp7/nova,phenoxim/nova,maelnor/nova,nikesh-mahalka/nova,orbitfp7/nova,maelnor/nova,tudorvio/nova,shahar-stratoscale/nova,cloudbau/nova,mahak/nova,gooddata/openstack-nova,joker946/nova,CCI-MOC/nova,mmnelemane/nova,zaina/nova,mgagne/nova,j-carpentier/nova,plumgrid/plumgrid-nova,gooddata/openstack-nova,openstack/nova,salv-orlando/MyRepo,adelina-t/nova,watonyweng/nova,berrange/nova,OpenAcademy-OpenStack/nova-scheduler,NewpTone/stacklab-nova,rajalokan/nova,belmiromoreira/nova,plumgrid/plumgrid-nova,LoHChina/nova,eonpatapon/nova,devoid/nova,usc-isi/nova,iuliat/nova,tanglei528/nova,leilihh/novaha,akash1808/nova_test_latest,barnsnake351/nova,JioCloud/nova,mikalstill/nova,fajoy/nova,edulramirez/nova,ewindisch/nova,russellb/nova,Yusuke1987/openstack_template,sacharya/nova,yatinkumbhare/openstack-nova,jeffrey4l/nova,ntt-sic/nova,isyippee/nova,sridevikoushik31/nova,NoBodyCam/TftpPxeBootBareMetal,ted-gould/nova,shootstar/novatest,bclau/nova,whitepages/nova,petrutlucian94/nova,Stavitsky/nova,josephsuh/extra-specs,fajoy/nova,sridevikoushik31/openstack,maheshp/novatest,jianghuaw/nova,belmiromoreira/nova,dims/nova,devendermishrajio/nova_test_latest,klmitch/nova,badock/nova,luogangyi/bcec-nova,josephsuh/extra-specs,openstack/nova,tangfeixiong/nova,mgagne/nova,mahak/nova,alvarolopez/nova,fajoy/nova,spring-week-topos/nova-week,hanlind/nova,saleemjaveds/https-github.com-openstack-nova,JioCloud/nova,cloudbase/nova-virtualbox,KarimAllah/nova,qwefi/nova,redhat-openstack/nova,mahak/nova,berrange/nova,alexandrucoman/vbox-nova-driver,thomasem/nova,maheshp/novatest,NeCTAR-RC/nova,scripnichenko/nova,vladikr/nova_drafts,yosshy/nova,angdraug/nova,Metaswitch/calico-nova,hanlind/nova,zzicewind/nova,vmturbo/nova,gooddata/openstack-nova,blueboxgroup/nova,rahulunair/nova,sridevikoushik31/openstack,CCI-MOC/nova,MountainWei/nova,alaski/nova,savi-dev/nova,NoBodyCam/TftpPxeBootBareMetal,leilihh/novaha,paulmathews/nova,Cisco
Systems/nova,BeyondTheClouds/nova,rickerc/nova_audit,raildo/nova,silenceli/nova,petrutlucian94/nova_dev,cyx1231st/nova,CloudServer/nova,russellb/nova,usc-isi/nova,salv-orlando/MyRepo,eonpatapon/nova,eharney/nova,apporc/nova,Triv90/Nova,iuliat/nova,cernops/nova,jianghuaw/nova,bclau/nova,eharney/nova,imsplitbit/nova,klmitch/nova,NoBodyCam/TftpPxeBootBareMetal,nikesh-mahalka/nova,petrutlucian94/nova,mandeepdhami/nova,felixma/nova,watonyweng/nova,DirectXMan12/nova-hacking,Triv90/Nova,eayunstack/nova,takeshineshiro/nova,felixma/nova,paulmathews/nova,jeffrey4l/nova,sebrandon1/nova,affo/nova,vmturbo/nova,usc-isi/extra-specs,qwefi/nova,alvarolopez/nova,apporc/nova,gspilio/nova,bgxavier/nova,zzicewind/nova,projectcalico/calico-nova,Metaswitch/calico-nova,alexandrucoman/vbox-nova-driver,blueboxgroup/nova,TieWei/nova,Tehsmash/nova,sileht/deb-openstack-nova,rajalokan/nova,affo/nova,Juniper/nova,bigswitch/nova,dstroppa/openstack-smartos-nova-grizzly,aristanetworks/arista-ovs-nova,aristanetworks/arista-ovs-nova,sebrandon1/nova,sridevikoushik31/nova,paulmathews/nova,eneabio/nova,TieWei/nova,Juniper/nova,yrobla/nova,NewpTone/stacklab-nova,sebrandon1/nova,CEG-FYP-OpenStack/scheduler,KarimAllah/nova,Tehsmash/nova,TwinkleChawla/nova,KarimAllah/nova,usc-isi/extra-specs,TwinkleChawla/nova,OpenAcademy-OpenStack/nova-scheduler,scripnichenko/nova,houshengbo/nova_vmware_compute_driver,maoy/zknova,ewindisch/nova,ruslanloman/nova,silenceli/nova,akash1808/nova,citrix-openstack-build/nova,sileht/deb-openstack-nova,sacharya/nova,houshengbo/nova_vmware_compute_driver,tudorvio/nova,gspilio/nova,projectcalico/calico-nova,zhimin711/nova,yatinkumbhare/openstack-nova,Brocade-OpenSource/OpenStack-DNRM-Nova,Yuriy-Leonov/nova,tangfeixiong/nova,CloudServer/nova,thomasem/nova,SUSE-Cloud/nova,rickerc/nova_audit,Juniper/nova,cloudbase/nova-virtualbox,badock/nova,aristanetworks/arista-ovs-nova,fnordahl/nova,phenoxim/nova,cernops/nova,spring-week-topos/nova-week,BeyondTheClouds/nova,shootstar/novatest,tealover/nova,alaski/nova,sridevikoushik31/nova,devendermishrajio/nova,russellb/nova,dstroppa/openstack-smartos-nova-grizzly,akash1808/nova,shail2810/nova,tealover/nova,raildo/nova,NeCTAR-RC/nova,fnordahl/nova,klmitch/nova,vladikr/nova_drafts,DirectXMan12/nova-hacking,cloudbase/nova,zaina/nova,rahulunair/nova,ted-gould/nova,jianghuaw/nova,josephsuh/extra-specs,sridevikoushik31/nova,petrutlucian94/nova_dev,isyippee/nova,noironetworks/nova,shail2810/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,Francis-Liu/animated-broccoli,ruslanloman/nova,dims/nova,bgxavier/nova,noironetworks/nova,varunarya10/nova_test_latest,dawnpower/nova,varunarya10/nova_test_latest,vmturbo/nova,Juniper/nova,Stavitsky/nova,angdraug/nova,openstack/nova,akash1808/nova_test_latest,CEG-FYP-OpenStack/scheduler,joker946/nova
|
Add an extension to allow for an addFixedIp action on instances
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.multinic")
class Multinic(extensions.ExtensionDescriptor):
def __init__(self, *args, **kwargs):
super(Multinic, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def get_name(self):
return "Multinic"
def get_alias(self):
return "NMN"
def get_description(self):
return "Multiple network support"
def get_namespace(self):
return "http://docs.openstack.org/ext/multinic/api/v1.1"
def get_updated(self):
return "2011-06-09T00:00:00+00:00"
def get_actions(self):
actions = []
# Add the add_fixed_ip action
act = extensions.ActionExtension("servers", "addFixedIp",
self._add_fixed_ip)
actions.append(act)
# Add the remove_fixed_ip action
act = extensions.ActionExtension("servers", "removeFixedIp",
self._remove_fixed_ip)
actions.append(act)
return actions
def _add_fixed_ip(self, input_dict, req, id):
"""Adds an IP on a given network to an instance."""
try:
# Validate the input entity
if 'networkId' not in input_dict['addFixedIp']:
LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
return faults.Fault(exc.HTTPUnprocessableEntity())
# Add the fixed IP
network_id = input_dict['addFixedIp']['networkId']
self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
network_id)
except Exception, e:
LOG.exception(_("Error in addFixedIp %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
def _remove_fixed_ip(self, input_dict, req, id):
# Not yet implemented
raise faults.Fault(exc.HTTPNotImplemented())
|
<commit_before><commit_msg>Add an extension to allow for an addFixedIp action on instances<commit_after>
|
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.multinic")
class Multinic(extensions.ExtensionDescriptor):
def __init__(self, *args, **kwargs):
super(Multinic, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def get_name(self):
return "Multinic"
def get_alias(self):
return "NMN"
def get_description(self):
return "Multiple network support"
def get_namespace(self):
return "http://docs.openstack.org/ext/multinic/api/v1.1"
def get_updated(self):
return "2011-06-09T00:00:00+00:00"
def get_actions(self):
actions = []
# Add the add_fixed_ip action
act = extensions.ActionExtension("servers", "addFixedIp",
self._add_fixed_ip)
actions.append(act)
# Add the remove_fixed_ip action
act = extensions.ActionExtension("servers", "removeFixedIp",
self._remove_fixed_ip)
actions.append(act)
return actions
def _add_fixed_ip(self, input_dict, req, id):
"""Adds an IP on a given network to an instance."""
try:
# Validate the input entity
if 'networkId' not in input_dict['addFixedIp']:
LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
return faults.Fault(exc.HTTPUnprocessableEntity())
# Add the fixed IP
network_id = input_dict['addFixedIp']['networkId']
self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
network_id)
except Exception, e:
LOG.exception(_("Error in addFixedIp %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
def _remove_fixed_ip(self, input_dict, req, id):
# Not yet implemented
raise faults.Fault(exc.HTTPNotImplemented())
|
Add an extension to allow for an addFixedIp action on instances# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.multinic")
class Multinic(extensions.ExtensionDescriptor):
def __init__(self, *args, **kwargs):
super(Multinic, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def get_name(self):
return "Multinic"
def get_alias(self):
return "NMN"
def get_description(self):
return "Multiple network support"
def get_namespace(self):
return "http://docs.openstack.org/ext/multinic/api/v1.1"
def get_updated(self):
return "2011-06-09T00:00:00+00:00"
def get_actions(self):
actions = []
# Add the add_fixed_ip action
act = extensions.ActionExtension("servers", "addFixedIp",
self._add_fixed_ip)
actions.append(act)
# Add the remove_fixed_ip action
act = extensions.ActionExtension("servers", "removeFixedIp",
self._remove_fixed_ip)
actions.append(act)
return actions
def _add_fixed_ip(self, input_dict, req, id):
"""Adds an IP on a given network to an instance."""
try:
# Validate the input entity
if 'networkId' not in input_dict['addFixedIp']:
LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
return faults.Fault(exc.HTTPUnprocessableEntity())
# Add the fixed IP
network_id = input_dict['addFixedIp']['networkId']
self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
network_id)
except Exception, e:
LOG.exception(_("Error in addFixedIp %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
def _remove_fixed_ip(self, input_dict, req, id):
# Not yet implemented
raise faults.Fault(exc.HTTPNotImplemented())
|
<commit_before><commit_msg>Add an extension to allow for an addFixedIp action on instances<commit_after># Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The multinic extension."""
from webob import exc
from nova import compute
from nova import log as logging
from nova.api.openstack import extensions
from nova.api.openstack import faults
LOG = logging.getLogger("nova.api.multinic")
class Multinic(extensions.ExtensionDescriptor):
def __init__(self, *args, **kwargs):
super(Multinic, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def get_name(self):
return "Multinic"
def get_alias(self):
return "NMN"
def get_description(self):
return "Multiple network support"
def get_namespace(self):
return "http://docs.openstack.org/ext/multinic/api/v1.1"
def get_updated(self):
return "2011-06-09T00:00:00+00:00"
def get_actions(self):
actions = []
# Add the add_fixed_ip action
act = extensions.ActionExtension("servers", "addFixedIp",
self._add_fixed_ip)
actions.append(act)
# Add the remove_fixed_ip action
act = extensions.ActionExtension("servers", "removeFixedIp",
self._remove_fixed_ip)
actions.append(act)
return actions
def _add_fixed_ip(self, input_dict, req, id):
"""Adds an IP on a given network to an instance."""
try:
# Validate the input entity
if 'networkId' not in input_dict['addFixedIp']:
LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
return faults.Fault(exc.HTTPUnprocessableEntity())
# Add the fixed IP
network_id = input_dict['addFixedIp']['networkId']
self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
network_id)
except Exception, e:
LOG.exception(_("Error in addFixedIp %s"), e)
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
def _remove_fixed_ip(self, input_dict, req, id):
# Not yet implemented
raise faults.Fault(exc.HTTPNotImplemented())
|
|
fe479bf2a8ec547922c6643bbdf0ba768eb79c9d
|
ludo/simulator.py
|
ludo/simulator.py
|
#!/usr/bin/env python3
from game import Game
print("Welcome to a game of ludo!")
average_throw_counter = 0
min_throws_per_game = 10000000
max_throws_per_game = 0
NUM_GAMES = 100
for i in range(0, NUM_GAMES):
game = Game()
throw_counter = 0
while game.next_move():
throw_counter += 1
average_throw_counter += throw_counter
if throw_counter < min_throws_per_game:
min_throws_per_game = throw_counter
if throw_counter > max_throws_per_game:
max_throws_per_game = throw_counter
print("Game:", i+1)
print("Average throws:", average_throw_counter/NUM_GAMES)
print("Min", min_throws_per_game)
print("Max", max_throws_per_game)
|
Add script to simulate multiple games
|
Add script to simulate multiple games
|
Python
|
mit
|
risteon/ludo_python
|
Add script to simulate multiple games
|
#!/usr/bin/env python3
from game import Game
print("Welcome to a game of ludo!")
average_throw_counter = 0
min_throws_per_game = 10000000
max_throws_per_game = 0
NUM_GAMES = 100
for i in range(0, NUM_GAMES):
game = Game()
throw_counter = 0
while game.next_move():
throw_counter += 1
average_throw_counter += throw_counter
if throw_counter < min_throws_per_game:
min_throws_per_game = throw_counter
if throw_counter > max_throws_per_game:
max_throws_per_game = throw_counter
print("Game:", i+1)
print("Average throws:", average_throw_counter/NUM_GAMES)
print("Min", min_throws_per_game)
print("Max", max_throws_per_game)
|
<commit_before><commit_msg>Add script to simulate multiple games<commit_after>
|
#!/usr/bin/env python3
from game import Game
print("Welcome to a game of ludo!")
average_throw_counter = 0
min_throws_per_game = 10000000
max_throws_per_game = 0
NUM_GAMES = 100
for i in range(0, NUM_GAMES):
game = Game()
throw_counter = 0
while game.next_move():
throw_counter += 1
average_throw_counter += throw_counter
if throw_counter < min_throws_per_game:
min_throws_per_game = throw_counter
if throw_counter > max_throws_per_game:
max_throws_per_game = throw_counter
print("Game:", i+1)
print("Average throws:", average_throw_counter/NUM_GAMES)
print("Min", min_throws_per_game)
print("Max", max_throws_per_game)
|
Add script to simulate multiple games#!/usr/bin/env python3
from game import Game
print("Welcome to a game of ludo!")
average_throw_counter = 0
min_throws_per_game = 10000000
max_throws_per_game = 0
NUM_GAMES = 100
for i in range(0, NUM_GAMES):
game = Game()
throw_counter = 0
while game.next_move():
throw_counter += 1
average_throw_counter += throw_counter
if throw_counter < min_throws_per_game:
min_throws_per_game = throw_counter
if throw_counter > max_throws_per_game:
max_throws_per_game = throw_counter
print("Game:", i+1)
print("Average throws:", average_throw_counter/NUM_GAMES)
print("Min", min_throws_per_game)
print("Max", max_throws_per_game)
|
<commit_before><commit_msg>Add script to simulate multiple games<commit_after>#!/usr/bin/env python3
from game import Game
print("Welcome to a game of ludo!")
average_throw_counter = 0
min_throws_per_game = 10000000
max_throws_per_game = 0
NUM_GAMES = 100
for i in range(0, NUM_GAMES):
game = Game()
throw_counter = 0
while game.next_move():
throw_counter += 1
average_throw_counter += throw_counter
if throw_counter < min_throws_per_game:
min_throws_per_game = throw_counter
if throw_counter > max_throws_per_game:
max_throws_per_game = throw_counter
print("Game:", i+1)
print("Average throws:", average_throw_counter/NUM_GAMES)
print("Min", min_throws_per_game)
print("Max", max_throws_per_game)
|
|
c89cce1a47c1e379958d7cced624ec0317cd3407
|
examples/demo3.py
|
examples/demo3.py
|
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import logging
import threading
import xmpp2
import time
import select
from xmpp2 import XML
# non-blocking, poll example.
USERNAME = 'yourusername'
PASSWORD = 'yourpassword'
SERVER = 'example.com'
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xmpp2.xml.handler').setLevel(logging.INFO)
c = xmpp2.Client(SERVER, stream_log_level=xmpp2.LOG_NONE)
c.connect()
c.auth(USERNAME, password=PASSWORD)
c.write(XML.presence.add(XML.priority.add(1)))
c.setblocking(False)
po = select.poll()
po.register(c, select.POLLIN)
while True:
for fd, event in po.poll():
msg = c.gen.next()
if msg:
sys.stdout.write(msg.pretty_print() + '\n')
|
Add demo for non-blocking with poll().
|
Add demo for non-blocking with poll().
|
Python
|
mit
|
easies/xmpp2
|
Add demo for non-blocking with poll().
|
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import logging
import threading
import xmpp2
import time
import select
from xmpp2 import XML
# non-blocking, poll example.
USERNAME = 'yourusername'
PASSWORD = 'yourpassword'
SERVER = 'example.com'
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xmpp2.xml.handler').setLevel(logging.INFO)
c = xmpp2.Client(SERVER, stream_log_level=xmpp2.LOG_NONE)
c.connect()
c.auth(USERNAME, password=PASSWORD)
c.write(XML.presence.add(XML.priority.add(1)))
c.setblocking(False)
po = select.poll()
po.register(c, select.POLLIN)
while True:
for fd, event in po.poll():
msg = c.gen.next()
if msg:
sys.stdout.write(msg.pretty_print() + '\n')
|
<commit_before><commit_msg>Add demo for non-blocking with poll().<commit_after>
|
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import logging
import threading
import xmpp2
import time
import select
from xmpp2 import XML
# non-blocking, poll example.
USERNAME = 'yourusername'
PASSWORD = 'yourpassword'
SERVER = 'example.com'
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xmpp2.xml.handler').setLevel(logging.INFO)
c = xmpp2.Client(SERVER, stream_log_level=xmpp2.LOG_NONE)
c.connect()
c.auth(USERNAME, password=PASSWORD)
c.write(XML.presence.add(XML.priority.add(1)))
c.setblocking(False)
po = select.poll()
po.register(c, select.POLLIN)
while True:
for fd, event in po.poll():
msg = c.gen.next()
if msg:
sys.stdout.write(msg.pretty_print() + '\n')
|
Add demo for non-blocking with poll().import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import logging
import threading
import xmpp2
import time
import select
from xmpp2 import XML
# non-blocking, poll example.
USERNAME = 'yourusername'
PASSWORD = 'yourpassword'
SERVER = 'example.com'
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xmpp2.xml.handler').setLevel(logging.INFO)
c = xmpp2.Client(SERVER, stream_log_level=xmpp2.LOG_NONE)
c.connect()
c.auth(USERNAME, password=PASSWORD)
c.write(XML.presence.add(XML.priority.add(1)))
c.setblocking(False)
po = select.poll()
po.register(c, select.POLLIN)
while True:
for fd, event in po.poll():
msg = c.gen.next()
if msg:
sys.stdout.write(msg.pretty_print() + '\n')
|
<commit_before><commit_msg>Add demo for non-blocking with poll().<commit_after>import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import logging
import threading
import xmpp2
import time
import select
from xmpp2 import XML
# non-blocking, poll example.
USERNAME = 'yourusername'
PASSWORD = 'yourpassword'
SERVER = 'example.com'
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('xmpp2.xml.handler').setLevel(logging.INFO)
c = xmpp2.Client(SERVER, stream_log_level=xmpp2.LOG_NONE)
c.connect()
c.auth(USERNAME, password=PASSWORD)
c.write(XML.presence.add(XML.priority.add(1)))
c.setblocking(False)
po = select.poll()
po.register(c, select.POLLIN)
while True:
for fd, event in po.poll():
msg = c.gen.next()
if msg:
sys.stdout.write(msg.pretty_print() + '\n')
|
|
1e65555a08ff3ee1a06e92d9dd054abf3cfaf711
|
media_tree/migrations/0003_alter_tree_fields.py
|
media_tree/migrations/0003_alter_tree_fields.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('media_tree', '0002_mptt_to_treebeard'),
]
operations = [
migrations.AlterField(
model_name='filenode',
name='depth',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='lft',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='rgt',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='tree_id',
field=models.PositiveIntegerField(db_index=True),
),
]
|
Add a migration to update to final tree fields
|
Add a migration to update to final tree fields
|
Python
|
bsd-3-clause
|
samluescher/django-media-tree,samluescher/django-media-tree,samluescher/django-media-tree
|
Add a migration to update to final tree fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('media_tree', '0002_mptt_to_treebeard'),
]
operations = [
migrations.AlterField(
model_name='filenode',
name='depth',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='lft',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='rgt',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='tree_id',
field=models.PositiveIntegerField(db_index=True),
),
]
|
<commit_before><commit_msg>Add a migration to update to final tree fields<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('media_tree', '0002_mptt_to_treebeard'),
]
operations = [
migrations.AlterField(
model_name='filenode',
name='depth',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='lft',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='rgt',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='tree_id',
field=models.PositiveIntegerField(db_index=True),
),
]
|
Add a migration to update to final tree fields# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('media_tree', '0002_mptt_to_treebeard'),
]
operations = [
migrations.AlterField(
model_name='filenode',
name='depth',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='lft',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='rgt',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='tree_id',
field=models.PositiveIntegerField(db_index=True),
),
]
|
<commit_before><commit_msg>Add a migration to update to final tree fields<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('media_tree', '0002_mptt_to_treebeard'),
]
operations = [
migrations.AlterField(
model_name='filenode',
name='depth',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='lft',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='rgt',
field=models.PositiveIntegerField(db_index=True),
),
migrations.AlterField(
model_name='filenode',
name='tree_id',
field=models.PositiveIntegerField(db_index=True),
),
]
|