| commit (stringlengths 40–40) | old_file (stringlengths 4–118) | new_file (stringlengths 4–118) | old_contents (stringlengths 0–2.94k) | new_contents (stringlengths 1–4.43k) | subject (stringlengths 15–444) | message (stringlengths 16–3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5–43.2k) | prompt (stringlengths 17–4.58k) | response (stringlengths 1–4.43k) | prompt_tagged (stringlengths 58–4.62k) | response_tagged (stringlengths 1–4.43k) | text (stringlengths 132–7.29k) | text_tagged (stringlengths 173–7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
971570b4288c9ac7131a1756e17574acbe6d1b9a
|
python/misc/solarized-dark-high-contrast.py
|
python/misc/solarized-dark-high-contrast.py
|
#!/usr/bin/env python

import sys

if sys.version_info < (3, 4):
    sys.exit('ERROR: Requires Python 3.4')

from enum import Enum


def main():
    Cases = Enum('Cases', 'lower upper')
    infile_case = None

    if len(sys.argv) < 2:
        sys.stderr.write('ERROR: Must provide a file to modify\n')
        sys.exit('Usage: {} FILE'.format(sys.argv[0]))

    # Keep these in lists instead of a dict to preserve ordering
    color_codes_dark = [
        'eee8d5',
        '93a1a1',
        '839496',
        '657b83',
        '586e75',
    ]
    color_codes_dark_high_contrast = [
        'fdf6e3',
        'eee8d5',
        '93a1a1',
        '839496',
        '657b83',
    ]

    with open(sys.argv[1], 'r') as infile:
        outfile_data = infile.read()

    # Figure out whether the input is using upper or lower case color codes
    for color_code in color_codes_dark:
        # Skip color codes that don't contain letters
        if color_code.lower() == color_code.upper():
            continue
        if outfile_data.find(color_code.lower()) != -1:
            infile_case = Cases.lower
            # Use the first one we find as the decisive case
            break
        elif outfile_data.find(color_code.upper()) != -1:
            infile_case = Cases.upper
            break

    for i in range(len(color_codes_dark)):
        if infile_case == Cases.lower:
            outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].lower())
            outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].lower())
        elif infile_case == Cases.upper:
            outfile_data = outfile_data.replace(color_codes_dark[i].lower(), color_codes_dark_high_contrast[i].upper())
            outfile_data = outfile_data.replace(color_codes_dark[i].upper(), color_codes_dark_high_contrast[i].upper())

    with open('{}-high-contrast.{}'.format(*sys.argv[1].rsplit('.', 1)), 'w') as outfile:
        outfile.write(outfile_data)


if __name__ == '__main__':
    main()
|
Add script for converting a solarized dark file to solarized dark high contrast
|
Add script for converting a solarized dark file to solarized dark high contrast
|
Python
|
mit
|
bmaupin/junkpile
|
Add script for converting a solarized dark file to solarized dark high contrast
|
<commit_before><commit_msg>Add script for converting a solarized dark file to solarized dark high contrast<commit_after>
|
|
b72c421696b5714d256b7ac461833bc692ca5354
|
robot/robot/src/autonomous/hot_aim_shoot.py
|
robot/robot/src/autonomous/hot_aim_shoot.py
|
try:
    import wpilib
except ImportError:
    from pyfrc import wpilib

import timed_shoot


class HotShootAutonomous(timed_shoot.TimedShootAutonomous):
    '''
    Based on the TimedShootAutonomous mode. Modified to allow
    shooting based on whether the hot goal is enabled or not.
    '''

    DEFAULT = False
    MODE_NAME = "Hot Aim shoot"

    def __init__(self, components):
        super().__init__(components)
        wpilib.SmartDashboard.PutNumber('DriveStrafeSpeed', 0.5)
        wpilib.SmartDashboard.PutBoolean('IsHotLeft', False)
        wpilib.SmartDashboard.PutBoolean('IsHotRight', False)

    def on_enable(self):
        '''These are called when autonomous starts'''
        super().on_enable()
        self.drive_strafe_speed = wpilib.SmartDashboard.GetNumber('DriveStrafeSpeed')
        print("-> Drive strafe:", self.drive_strafe_speed)
        self.decided = False
        self.start_time = None

    def on_disable(self):
        '''This function is called when autonomous mode is disabled'''
        pass

    def update(self, time_elapsed):
        '''The actual autonomous program'''

        # decide if it's hot or not
        if not self.decided:
            self.hotLeft = wpilib.SmartDashboard.GetBoolean("IsHotLeft")
            self.hotRight = wpilib.SmartDashboard.GetBoolean("IsHotRight")

            if (self.hotLeft or self.hotRight) and not (self.hotLeft and self.hotRight):
                self.decided = True
                if self.hotLeft:
                    self.drive_strafe_speed *= -1
            elif time_elapsed > 6:
                # at 6 seconds, give up and shoot anyway
                self.decided = True

        # always keep the arm down
        self.intake.armDown()

        # wait a split second for the arm to come down, then
        # keep bringing the catapult down so we're ready to go
        if time_elapsed > 0.3:
            self.catapult.pulldown()

        # wait some period before we start driving
        if time_elapsed < self.drive_wait:
            pass
        else:
            if self.decided:
                # only set this once, so we can calculate time from this
                # point on
                if self.start_time is None:
                    self.start_time = time_elapsed
                time_elapsed = time_elapsed - self.start_time

                if time_elapsed < self.drive_time:
                    # Drive slowly forward for N seconds
                    self.drive.move(self.drive_strafe_speed, self.drive_speed, 0)
                elif time_elapsed < self.drive_time + 1.0:
                    # Finally, fire and keep firing for 1 second
                    self.catapult.launchNoSensor()
|
Add an autonomous mode to strafe and shoot. Doesn't work
|
Add an autonomous mode to strafe and shoot. Doesn't work
|
Python
|
bsd-3-clause
|
frc1418/2014
|
Add an autonomous mode to strafe and shoot. Doesn't work
|
<commit_before><commit_msg>Add an autonomous mode to strafe and shoot. Doesn't work<commit_after>
|
|
17e2b9ecb67c8b1f3a6f71b752bc70b21584092e
|
tests/test_scriptserver.py
|
tests/test_scriptserver.py
|
import unittest

from mock import patch, Mock

import sys
sys.path.append(".")

from scriptserver import ZoneScriptRunner


class TestZoneScriptRunner(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.mongoengine_patch = patch('scriptserver.me')
        cls.mongoengine_patch.start()

    @classmethod
    def tearDownClass(cls):
        cls.mongoengine_patch.stop()

    def test___init__(self):
        zoneid = "zoneid"
        with patch('scriptserver.Object'):
            with patch.object(ZoneScriptRunner, 'load_scripts') as mock_load_scripts:
                zone_script_runner = ZoneScriptRunner(zoneid)
                self.assertTrue(zone_script_runner)
                self.assertEqual(1, mock_load_scripts.call_count)

    def test_load_scripts(self):
        expected = {}
        zoneid = "zoneid"
        # Construct the runner with load_scripts patched out, then call the
        # real load_scripts below once the patches have been exited.
        with patch.object(ZoneScriptRunner, 'load_scripts'):
            with patch('scriptserver.Object'):
                zone_script_runner = ZoneScriptRunner(zoneid)
        with patch('scriptserver.ScriptedObject') as ScriptedObject:
            MockThing = Mock()
            with patch.dict('sys.modules', {'thing': MockThing, 'thing.fake': MockThing.fake,
                                            'thing.fake.chicken': MockThing.fake.chicken}):
                MockThing.fake.chicken.Chicken.tick = Mock()
                MockScriptedObject = Mock()
                MockScriptedObject.scripts = ['thing.fake.chicken']
                ScriptedObject.objects.return_value = [MockScriptedObject]
                result = zone_script_runner.load_scripts()
                self.assertNotEqual(expected, result)
                self.assertIn('thing.fake.chicken', result)

    def test_start(self):
        # zone_script_runner = ZoneScriptRunner(zoneid)
        # self.assertEqual(expected, zone_script_runner.start())
        pass  # TODO: implement your test here

    def test_tick(self):
        # zone_script_runner = ZoneScriptRunner(zoneid)
        # self.assertEqual(expected, zone_script_runner.tick())
        pass  # TODO: implement your test here


if __name__ == '__main__':
    unittest.main()
|
Add initial tests for scriptserver.
|
Add initial tests for scriptserver.
Covers ZoneScriptRunner.__init__ and ZoneScriptRunner.load_scripts.
|
Python
|
agpl-3.0
|
cnelsonsic/SimpleMMO
|
Add initial tests for scriptserver.
Covers ZoneScriptRunner.__init__ and ZoneScriptRunner.load_scripts.
|
<commit_before><commit_msg>Add initial tests for scriptserver.
Covers ZoneScriptRunner.__init__ and ZoneScriptRunner.load_scripts.<commit_after>
|
|
b63e65b1a41f809caf1c2dcd689955df76add20f
|
test/test_delta.py
|
test/test_delta.py
|
import matplotlib.pyplot as plt
import numpy as np
import scattering
import scipy.constants as consts


def plot_csec(scatterer, d, var, name):
    plt.plot(d / consts.centi, var,
             label='%.1f cm' % (scatterer.wavelength / consts.centi))
    plt.xlabel('Diameter (cm)')
    plt.ylabel(name)


def plot_csecs(d, scatterers):
    for s in scatterers:
        plt.subplot(1, 1, 1)
        plot_csec(s, d, np.rad2deg(np.unwrap(-np.angle(-s.S_bkwd[0, 0].conj() *
                                                       s.S_bkwd[1, 1]).squeeze())), 'delta')
        plt.gca().set_ylim(-4, 20)


d = np.linspace(0.01, 0.7, 200).reshape(200, 1) * consts.centi

sband = 3e8 / 2.8e9
cband = 3e8 / 5.4e9
xband = 3e8 / 9.4e9

temp = 10.0

x_fixed = scattering.scatterer(xband, temp, 'water', diameters=d, shape='oblate')
x_fixed.set_scattering_model('tmatrix')
c_fixed = scattering.scatterer(cband, temp, 'water', diameters=d, shape='oblate')
c_fixed.set_scattering_model('tmatrix')
s_fixed = scattering.scatterer(sband, temp, 'water', diameters=d, shape='oblate')
s_fixed.set_scattering_model('tmatrix')

plot_csecs(d, [x_fixed, c_fixed, s_fixed])
plt.legend(loc='upper left')
plt.show()
|
Add a plot just of backscatter phase vs. diameter.
|
Add a plot just of backscatter phase vs. diameter.
This (mostly) reproduces a figure in Matrosov et al. 2002 in JAM.
|
Python
|
bsd-2-clause
|
dopplershift/Scattering
|
Add a plot just of backscatter phase vs. diameter.
This (mostly) reproduces a figure in Matrosov et al. 2002 in JAM.
|
<commit_before><commit_msg>Add a plot just of backscatter phase vs. diameter.
This (mostly) reproduces a figure in Matrosov et al. 2002 in JAM.<commit_after>
|
|
da2a4fa9e618b212ddbb2fcbc079fa37970ae596
|
tfd/loggingutil.py
|
tfd/loggingutil.py
|
'''
Utilities to assist with logging in python
'''

import logging
import os


class ConcurrentFileHandler(logging.Handler):
    """
    A handler class which writes logging records to a file. Every time it
    writes a record it opens the file, writes to it, flushes the buffer, and
    closes the file. Perhaps this could create problems in a very tight loop.

    This handler is an attempt to overcome concurrent write issues that
    the standard FileHandler has when multiple processes distributed across
    a cluster are all writing to the same log file. Specifically, the records
    can become interleaved/garbled with one another.
    """
    def __init__(self, filename, mode="a"):
        """
        Open the specified file and use it as the stream for logging.

        :param mode: defaults to 'a', append.
        """
        logging.Handler.__init__(self)
        # keep the absolute path, otherwise derived classes which use this
        # may come a cropper when the current directory changes
        self.filename = os.path.abspath(filename)
        self.mode = mode

    def _openWriteClose(self, msg):
        f = open(self.filename, self.mode)
        f.write(msg)
        f.flush()  # improves consistency of writes in a concurrent environment
        f.close()

    def emit(self, record):
        """
        Emit a record.

        If a formatter is specified, it is used to format the record.
        The record is then written to the stream with a trailing newline
        [N.B. this may be removed depending on feedback]. If exception
        information is present, it is formatted using
        traceback.print_exception and appended to the stream.
        """
        try:
            msg = self.format(record)
            fs = "%s\n"
            self._openWriteClose(fs % msg)
        except:
            self.handleError(record)
|
Add handler for concurrently logging to a file
|
Add handler for concurrently logging to a file
|
Python
|
mit
|
todddeluca/tfd
|
Add handler for concurrently logging to a file
|
<commit_before><commit_msg>Add handler for concurrently logging to a file<commit_after>
|
|
700db5c742be8a893b1c362ae0955a934b88c39b
|
test_journal.py
|
test_journal.py
|
# -*- coding: utf-8 -*-
from contextlib import closing

import pytest

from journal import app
from journal import connect_db
from journal import get_database_connection
from journal import init_db

TEST_DSN = 'dbname=test_learning_journal'


def clear_db():
    with closing(connect_db()) as db:
        db.cursor().execute("DROP TABLE entries")
        db.commit()


@pytest.fixture(scope='session')
def test_app():
    """configure our app for use in testing"""
    app.config['DATABASE'] = TEST_DSN
    app.config['TESTING'] = True
|
Add test_learning_journal.py with test_app() for configuring the app for testing
|
Add test_learning_journal.py with test_app() for configuring the app for testing
|
Python
|
mit
|
sazlin/learning_journal
|
Add test_learning_journal.py with test_app() for configuring the app for testing
|
<commit_before><commit_msg>Add test_learning_journal.py with test_app() for configuring the app for testing<commit_after>
|
|
eb1c7d1c2bfaa063c98612d64bbe35dedf217143
|
tests/test_alerter.py
|
tests/test_alerter.py
|
import unittest
import datetime

import Alerters.alerter


class TestAlerter(unittest.TestCase):
    def test_groups(self):
        config_options = {'groups': 'a,b,c'}
        a = Alerters.alerter.Alerter(config_options)
        self.assertEqual(['a', 'b', 'c'], a.groups)

    def test_times_always(self):
        config_options = {'times_type': 'always'}
        a = Alerters.alerter.Alerter(config_options)
        self.assertEqual(a.times_type, 'always')
        self.assertEqual(a.time_info, [None, None])

    def test_times_only(self):
        config_options = {
            'times_type': 'only',
            'time_lower': '10:00',
            'time_upper': '11:00'
        }
        a = Alerters.alerter.Alerter(config_options)
        self.assertEqual(a.times_type, 'only')
        self.assertEqual(a.time_info, [
            datetime.time(10, 00), datetime.time(11, 00)
        ])
|
Add initial tests for alerter class
|
Add initial tests for alerter class
|
Python
|
bsd-3-clause
|
jamesoff/simplemonitor
|
Add initial tests for alerter class
|
<commit_before><commit_msg>Add initial tests for alerter class<commit_after>
|
|
33658163b909073aae074b5b2cdae40a0e5c44e8
|
tests/test_asyncio.py
|
tests/test_asyncio.py
|
from trollius import test_utils
from trollius import From, Return
import trollius
import unittest

try:
    import asyncio
except ImportError:
    from trollius.test_utils import SkipTest
    raise SkipTest('need asyncio')

# "yield from" syntax cannot be used directly, because Python 2 should be able
# to execute this file (to raise SkipTest)
code = '''
@asyncio.coroutine
def asyncio_noop(value):
    yield from []
    return (value,)

@asyncio.coroutine
def asyncio_coroutine(coro, value):
    res = yield from coro
    return res + (value,)
'''
exec(code)


@trollius.coroutine
def trollius_noop(value):
    yield From(None)
    raise Return((value,))


@trollius.coroutine
def trollius_coroutine(coro, value):
    res = yield trollius.From(coro)
    raise trollius.Return(res + (value,))


class AsyncioTests(test_utils.TestCase):
    def setUp(self):
        policy = trollius.get_event_loop_policy()
        self.loop = policy.new_event_loop()
        self.set_event_loop(self.loop)

        asyncio_policy = asyncio.get_event_loop_policy()
        self.addCleanup(asyncio.set_event_loop_policy, asyncio_policy)
        asyncio.set_event_loop_policy(policy)

    def test_policy(self):
        trollius.set_event_loop(self.loop)
        self.assertIs(asyncio.get_event_loop(), self.loop)

    def test_asyncio(self):
        coro = asyncio_noop("asyncio")
        res = self.loop.run_until_complete(coro)
        self.assertEqual(res, ("asyncio",))

    def test_asyncio_in_trollius(self):
        coro1 = asyncio_noop(1)
        coro2 = asyncio_coroutine(coro1, 2)
        res = self.loop.run_until_complete(trollius_coroutine(coro2, 3))
        self.assertEqual(res, (1, 2, 3))

    def test_trollius_in_asyncio(self):
        coro1 = trollius_noop(4)
        coro2 = trollius_coroutine(coro1, 5)
        res = self.loop.run_until_complete(asyncio_coroutine(coro2, 6))
        self.assertEqual(res, (4, 5, 6))


if __name__ == '__main__':
    unittest.main()
|
Add unit tests for asyncio coroutines
|
Add unit tests for asyncio coroutines
|
Python
|
apache-2.0
|
overcastcloud/trollius
|
Add unit tests for asyncio coroutines
|
<commit_before><commit_msg>Add unit tests for asyncio coroutines<commit_after>
|
yield From(None)
raise Return((value,))
@trollius.coroutine
def trollius_coroutine(coro, value):
res = yield trollius.From(coro)
raise trollius.Return(res + (value,))
class AsyncioTests(test_utils.TestCase):
def setUp(self):
policy = trollius.get_event_loop_policy()
self.loop = policy.new_event_loop()
self.set_event_loop(self.loop)
asyncio_policy = asyncio.get_event_loop_policy()
self.addCleanup(asyncio.set_event_loop_policy, asyncio_policy)
asyncio.set_event_loop_policy(policy)
def test_policy(self):
trollius.set_event_loop(self.loop)
self.assertIs(asyncio.get_event_loop(), self.loop)
def test_asyncio(self):
coro = asyncio_noop("asyncio")
res = self.loop.run_until_complete(coro)
self.assertEqual(res, ("asyncio",))
def test_asyncio_in_trollius(self):
coro1 = asyncio_noop(1)
coro2 = asyncio_coroutine(coro1, 2)
res = self.loop.run_until_complete(trollius_coroutine(coro2, 3))
self.assertEqual(res, (1, 2, 3))
def test_trollius_in_asyncio(self):
coro1 = trollius_noop(4)
coro2 = trollius_coroutine(coro1, 5)
res = self.loop.run_until_complete(asyncio_coroutine(coro2, 6))
self.assertEqual(res, (4, 5, 6))
if __name__ == '__main__':
unittest.main()
|
|
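A minimal standalone driver for the trollius coroutine style exercised above; this is only a sketch, and it uses nothing beyond the APIs already shown in this record:
import trollius
from trollius import From, Return

@trollius.coroutine
def greet(name):
    yield From(None)  # cooperative no-op, mirroring trollius_noop above
    raise Return('hello ' + name)

loop = trollius.get_event_loop()
print(loop.run_until_complete(greet('world')))  # -> hello world
loop.close()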
1ffdfc3c7ae11c583b2ea4d45b50136996bcf3e3
|
tests/mocks.py
|
tests/mocks.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import socket
from threading import Thread
import requests
# https://realpython.com/blog/python/testing-third-party-apis-with-mock-servers/
class MockHTTPServerRequestHandler(BaseHTTPRequestHandler):
def do_OPTIONS(self):
# add response codes
self.send_response(requests.codes.okay)
# add response headers
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Allow-Headers', 'dataType, accept, authorization')
self.end_headers()
def do_GET(self):
# add response codes
self.send_response(requests.codes.ok)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# add response content
response_content = json.dumps({'Message': 'Success'})
self.wfile.write(response_content.encode('utf-8'))
return
def do_POST(self):
# add response codes
self.send_response(requests.codes.created)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, DELETE, PUT')
self.end_headers()
# add response content
response_content = json.dumps({'Message': {'task_ids': [1234]}})
self.wfile.write(response_content.encode('utf-8'))
return
def get_free_server_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_mock_server(port=8080):
mock_server = HTTPServer(('localhost', port), MockHTTPServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
|
Add mock HTTP server to respond to requests from web UI
|
Add mock HTTP server to respond to requests from web UI
|
Python
|
mpl-2.0
|
jmlong1027/multiscanner,jmlong1027/multiscanner,mitre/multiscanner,mitre/multiscanner,mitre/multiscanner,jmlong1027/multiscanner,MITRECND/multiscanner,MITRECND/multiscanner,jmlong1027/multiscanner
|
Add mock HTTP server to respond to requests from web UI
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import socket
from threading import Thread
import requests
# https://realpython.com/blog/python/testing-third-party-apis-with-mock-servers/
class MockHTTPServerRequestHandler(BaseHTTPRequestHandler):
def do_OPTIONS(self):
# add response codes
self.send_response(requests.codes.okay)
# add response headers
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Allow-Headers', 'dataType, accept, authorization')
self.end_headers()
def do_GET(self):
# add response codes
self.send_response(requests.codes.ok)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# add response content
response_content = json.dumps({'Message': 'Success'})
self.wfile.write(response_content.encode('utf-8'))
return
def do_POST(self):
# add response codes
self.send_response(requests.codes.created)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, DELETE, PUT')
self.end_headers()
# add response content
response_content = json.dumps({'Message': {'task_ids': [1234]}})
self.wfile.write(response_content.encode('utf-8'))
return
def get_free_server_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_mock_server(port=8080):
mock_server = HTTPServer(('localhost', port), MockHTTPServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
|
<commit_before><commit_msg>Add mock HTTP server to respond to requests from web UI<commit_after>
|
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import socket
from threading import Thread
import requests
# https://realpython.com/blog/python/testing-third-party-apis-with-mock-servers/
class MockHTTPServerRequestHandler(BaseHTTPRequestHandler):
def do_OPTIONS(self):
# add response codes
self.send_response(requests.codes.okay)
# add response headers
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Allow-Headers', 'dataType, accept, authorization')
self.end_headers()
def do_GET(self):
# add response codes
self.send_response(requests.codes.ok)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# add response content
response_content = json.dumps({'Message': 'Success'})
self.wfile.write(response_content.encode('utf-8'))
return
def do_POST(self):
# add response codes
self.send_response(requests.codes.created)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, DELETE, PUT')
self.end_headers()
# add response content
response_content = json.dumps({'Message': {'task_ids': [1234]}})
self.wfile.write(response_content.encode('utf-8'))
return
def get_free_server_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_mock_server(port=8080):
mock_server = HTTPServer(('localhost', port), MockHTTPServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
|
Add mock HTTP server to respond to requests from web UIfrom http.server import BaseHTTPRequestHandler, HTTPServer
import json
import socket
from threading import Thread
import requests
# https://realpython.com/blog/python/testing-third-party-apis-with-mock-servers/
class MockHTTPServerRequestHandler(BaseHTTPRequestHandler):
def do_OPTIONS(self):
# add response codes
self.send_response(requests.codes.okay)
# add response headers
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Allow-Headers', 'dataType, accept, authorization')
self.end_headers()
def do_GET(self):
# add response codes
self.send_response(requests.codes.ok)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# add response content
response_content = json.dumps({'Message': 'Success'})
self.wfile.write(response_content.encode('utf-8'))
return
def do_POST(self):
# add response codes
self.send_response(requests.codes.created)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, DELETE, PUT')
self.end_headers()
# add response content
response_content = json.dumps({'Message': {'task_ids': [1234]}})
self.wfile.write(response_content.encode('utf-8'))
return
def get_free_server_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_mock_server(port=8080):
mock_server = HTTPServer(('localhost', port), MockHTTPServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
|
<commit_before><commit_msg>Add mock HTTP server to respond to requests from web UI<commit_after>from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import socket
from threading import Thread
import requests
# https://realpython.com/blog/python/testing-third-party-apis-with-mock-servers/
class MockHTTPServerRequestHandler(BaseHTTPRequestHandler):
def do_OPTIONS(self):
# add response codes
self.send_response(requests.codes.okay)
# add response headers
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header('Access-Control-Allow-Headers', 'dataType, accept, authorization')
self.end_headers()
def do_GET(self):
# add response codes
self.send_response(requests.codes.ok)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.end_headers()
# add response content
response_content = json.dumps({'Message': 'Success'})
self.wfile.write(response_content.encode('utf-8'))
return
def do_POST(self):
# add response codes
self.send_response(requests.codes.created)
# add response headers
self.send_header('Content-Type', 'application/json; charset=utf-8')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'POST, GET, OPTIONS, DELETE, PUT')
self.end_headers()
# add response content
response_content = json.dumps({'Message': {'task_ids': [1234]}})
self.wfile.write(response_content.encode('utf-8'))
return
def get_free_server_port():
s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)
s.bind(('localhost', 0))
address, port = s.getsockname()
s.close()
return port
def start_mock_server(port=8080):
mock_server = HTTPServer(('localhost', port), MockHTTPServerRequestHandler)
mock_server_thread = Thread(target=mock_server.serve_forever)
mock_server_thread.setDaemon(True)
mock_server_thread.start()
|
|
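A sketch of how the helpers above might be combined in a test; pairing get_free_server_port with start_mock_server is an assumed usage, not something the record itself shows:
import requests
from mocks import get_free_server_port, start_mock_server

port = get_free_server_port()
start_mock_server(port=port)  # daemon thread, dies with the test process
resp = requests.get('http://localhost:{}'.format(port))
assert resp.status_code == requests.codes.ok
assert resp.json() == {'Message': 'Success'}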
744d7971926bf7672ce01388b8617be1ee35df0e
|
xunit-autolabeler-v2/ast_parser/core/test_data/parser/exclude_tags/exclude_tags_main.py
|
xunit-autolabeler-v2/ast_parser/core/test_data/parser/exclude_tags/exclude_tags_main.py
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START main_method]
def main():
return 'main method'
# [START_EXCLUDE]
# [END_EXCLUDE]
def not_main():
return 'not main'
# [END main_method]
|
Add missing test data folder
|
Add missing test data folder
|
Python
|
apache-2.0
|
GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground,GoogleCloudPlatform/repo-automation-playground
|
Add missing test data folder
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START main_method]
def main():
return 'main method'
# [START_EXCLUDE]
# [END_EXCLUDE]
def not_main():
return 'not main'
# [END main_method]
|
<commit_before><commit_msg>Add missing test data folder<commit_after>
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START main_method]
def main():
return 'main method'
# [START_EXCLUDE]
# [END_EXCLUDE]
def not_main():
return 'not main'
# [END main_method]
|
Add missing test data folder# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START main_method]
def main():
return 'main method'
# [START_EXCLUDE]
# [END_EXCLUDE]
def not_main():
return 'not main'
# [END main_method]
|
<commit_before><commit_msg>Add missing test data folder<commit_after># Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START main_method]
def main():
return 'main method'
# [START_EXCLUDE]
# [END_EXCLUDE]
def not_main():
return 'not main'
# [END main_method]
|
|
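The [START_EXCLUDE]/[END_EXCLUDE] markers in the test data above delimit a region the parser is expected to drop. A hedged illustration of that stripping follows; the actual ast_parser implementation is not part of this record:
import re

def strip_excluded(source):
    # Remove everything between the exclude markers, marker lines included.
    pattern = r'#\s*\[START_EXCLUDE\].*?#\s*\[END_EXCLUDE\]\n?'
    return re.sub(pattern, '', source, flags=re.DOTALL)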
24baf3e5e7a608d0b34d74be25f96f1b74b7622e
|
web/social/tasks.py
|
web/social/tasks.py
|
from celery.task import PeriodicTask
from datetime import timedelta
from social.utils import FeedUpdater, UpdateError
class UpdateFeedsTask(PeriodicTask):
run_every = timedelta(minutes=15)
def run(self, **kwargs):
logger = self.get_logger()
updater = FeedUpdater(logger)
print "Updating feeds"
updater.update_feeds()
print "Feed update done"
|
Add feed update task for social app
|
Add feed update task for social app
|
Python
|
agpl-3.0
|
kansanmuisti/datavaalit,kansanmuisti/datavaalit
|
Add feed update task for social app
|
from celery.task import PeriodicTask
from datetime import timedelta
from social.utils import FeedUpdater, UpdateError
class UpdateFeedsTask(PeriodicTask):
run_every = timedelta(minutes=15)
def run(self, **kwargs):
logger = self.get_logger()
updater = FeedUpdater(logger)
print "Updating feeds"
updater.update_feeds()
print "Feed update done"
|
<commit_before><commit_msg>Add feed update task for social app<commit_after>
|
from celery.task import PeriodicTask
from datetime import timedelta
from social.utils import FeedUpdater, UpdateError
class UpdateFeedsTask(PeriodicTask):
run_every = timedelta(minutes=15)
def run(self, **kwargs):
logger = self.get_logger()
updater = FeedUpdater(logger)
print "Updating feeds"
updater.update_feeds()
print "Feed update done"
|
Add feed update task for social appfrom celery.task import PeriodicTask
from datetime import timedelta
from social.utils import FeedUpdater, UpdateError
class UpdateFeedsTask(PeriodicTask):
run_every = timedelta(minutes=15)
def run(self, **kwargs):
logger = self.get_logger()
updater = FeedUpdater(logger)
print "Updating feeds"
updater.update_feeds()
print "Feed update done"
|
<commit_before><commit_msg>Add feed update task for social app<commit_after>from celery.task import PeriodicTask
from datetime import timedelta
from social.utils import FeedUpdater, UpdateError
class UpdateFeedsTask(PeriodicTask):
run_every = timedelta(minutes=15)
def run(self, **kwargs):
logger = self.get_logger()
updater = FeedUpdater(logger)
print "Updating feeds"
updater.update_feeds()
print "Feed update done"
|
|
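With the legacy Celery 3.x API used above, PeriodicTask subclasses register themselves on import and celery beat invokes them every run_every interval. For a one-off manual run, apply() executes the task synchronously in-process; this is a sketch that assumes a configured Celery app and Django settings:
from social.tasks import UpdateFeedsTask
UpdateFeedsTask().apply()  # runs update_feeds() immediately, no broker round-trip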
3fb4d7b630fb7a4b34dcc4e1b72947e61f73a80f
|
TestData/download_test_data.py
|
TestData/download_test_data.py
|
def set_test_db():
from sys import path
path.insert(0, "..")
from MyEdgarDb import get_list_sec_filings, get_cik_ticker_lookup_db, lookup_cik_ticker
get_list_sec_filings (7, 'test_idx.db')
get_cik_ticker_lookup_db ('test_idx.db')
def download_test_data():
import sqlite3
from datetime import datetime
import pandas as pd
testDir = "..\\TestData\\"
testTickers = {
"AAPL": [datetime(2014, 8, 1), datetime(2018, 8, 1)],
"ACLS": [datetime(2014, 8, 31), datetime(2018, 8, 31)],
"ADSK": [datetime(2014, 4, 15), datetime(2018, 4, 15)],
"ALEX": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"MMM": [datetime(2015, 7, 1), datetime(2019, 7, 1)],
"NRP": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"NVDA": [datetime(2015, 12, 31), datetime(2019, 12, 31)]
}
conn3 = sqlite3.connect('test_idx.db')
cursor = conn3.cursor()
for ticker in testTickers:
#cursor.execute('''SELECT * FROM idx WHERE Symbol=?;''', ("ABBV",))
cursor.execute('''SELECT * FROM cik_ticker_name WHERE ticker=?;''',(ticker,))
res = cursor.fetchall()
print(res)
cursor.execute('''SELECT * FROM idx WHERE cik=?;''', (res[0][0],))
recs = cursor.fetchall()
print(len(recs))
names = list(map(lambda x: x[0], cursor.description))
#print(names)
df = pd.DataFrame(data=recs, columns=names)
df['date'] = pd.to_datetime(df['date'])
beginDate = testTickers[ticker][0]
endDate = testTickers[ticker][1]
df1 = df[(df.date >= beginDate) & (df.date <= endDate)]
## Sort by date in descending order (most recent is first)
df1.sort_values(by=['date'], inplace=True, ascending=False)
df1[df1.type == "10-Q"].to_csv(testDir+ticker.lower()+"_all_10qs.csv", index=None)
df1[df1.type == "10-K"].to_csv(testDir+ticker.lower()+"_all_10ks.csv", index=None)
conn3.close()
if __name__ == "__main__":
#set_test_db()
download_test_data()
|
Create script to download requisite test urls.
|
Create script to download requisite test urls.
|
Python
|
agpl-3.0
|
cielling/jupyternbs
|
Create script to download requisite test urls.
|
def set_test_db():
from sys import path
path.insert(0, "..")
from MyEdgarDb import get_list_sec_filings, get_cik_ticker_lookup_db, lookup_cik_ticker
get_list_sec_filings (7, 'test_idx.db')
get_cik_ticker_lookup_db ('test_idx.db')
def download_test_data():
import sqlite3
from datetime import datetime
import pandas as pd
testDir = "..\\TestData\\"
testTickers = {
"AAPL": [datetime(2014, 8, 1), datetime(2018, 8, 1)],
"ACLS": [datetime(2014, 8, 31), datetime(2018, 8, 31)],
"ADSK": [datetime(2014, 4, 15), datetime(2018, 4, 15)],
"ALEX": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"MMM": [datetime(2015, 7, 1), datetime(2019, 7, 1)],
"NRP": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"NVDA": [datetime(2015, 12, 31), datetime(2019, 12, 31)]
}
conn3 = sqlite3.connect('test_idx.db')
cursor = conn3.cursor()
for ticker in testTickers:
#cursor.execute('''SELECT * FROM idx WHERE Symbol=?;''', ("ABBV",))
cursor.execute('''SELECT * FROM cik_ticker_name WHERE ticker=?;''',(ticker,))
res = cursor.fetchall()
print(res)
cursor.execute('''SELECT * FROM idx WHERE cik=?;''', (res[0][0],))
recs = cursor.fetchall()
print(len(recs))
names = list(map(lambda x: x[0], cursor.description))
#print(names)
df = pd.DataFrame(data=recs, columns=names)
df['date'] = pd.to_datetime(df['date'])
beginDate = testTickers[ticker][0]
endDate = testTickers[ticker][1]
df1 = df[(df.date >= beginDate) & (df.date <= endDate)]
## Sort by date in descending order (most recent is first)
df1.sort_values(by=['date'], inplace=True, ascending=False)
df1[df1.type == "10-Q"].to_csv(testDir+ticker.lower()+"_all_10qs.csv", index=None)
df1[df1.type == "10-K"].to_csv(testDir+ticker.lower()+"_all_10ks.csv", index=None)
conn3.close()
if __name__ == "__main__":
#set_test_db()
download_test_data()
|
<commit_before><commit_msg>Create script to download requisite test urls.<commit_after>
|
def set_test_db():
from sys import path
path.insert(0, "..")
from MyEdgarDb import get_list_sec_filings, get_cik_ticker_lookup_db, lookup_cik_ticker
get_list_sec_filings (7, 'test_idx.db')
get_cik_ticker_lookup_db ('test_idx.db')
def download_test_data():
import sqlite3
from datetime import datetime
import pandas as pd
testDir = "..\\TestData\\"
testTickers = {
"AAPL": [datetime(2014, 8, 1), datetime(2018, 8, 1)],
"ACLS": [datetime(2014, 8, 31), datetime(2018, 8, 31)],
"ADSK": [datetime(2014, 4, 15), datetime(2018, 4, 15)],
"ALEX": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"MMM": [datetime(2015, 7, 1), datetime(2019, 7, 1)],
"NRP": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"NVDA": [datetime(2015, 12, 31), datetime(2019, 12, 31)]
}
conn3 = sqlite3.connect('test_idx.db')
cursor = conn3.cursor()
for ticker in testTickers:
#cursor.execute('''SELECT * FROM idx WHERE Symbol=?;''', ("ABBV",))
cursor.execute('''SELECT * FROM cik_ticker_name WHERE ticker=?;''',(ticker,))
res = cursor.fetchall()
print(res)
cursor.execute('''SELECT * FROM idx WHERE cik=?;''', (res[0][0],))
recs = cursor.fetchall()
print(len(recs))
names = list(map(lambda x: x[0], cursor.description))
#print(names)
df = pd.DataFrame(data=recs, columns=names)
df['date'] = pd.to_datetime(df['date'])
beginDate = testTickers[ticker][0]
endDate = testTickers[ticker][1]
df1 = df[(df.date >= beginDate) & (df.date <= endDate)]
## Sort by date in descending order (most recent is first)
df1.sort_values(by=['date'], inplace=True, ascending=False)
df1[df1.type == "10-Q"].to_csv(testDir+ticker.lower()+"_all_10qs.csv", index=None)
df1[df1.type == "10-K"].to_csv(testDir+ticker.lower()+"_all_10ks.csv", index=None)
conn3.close()
if __name__ == "__main__":
#set_test_db()
download_test_data()
|
Create script to download requisite test urls.def set_test_db():
from sys import path
path.insert(0, "..")
from MyEdgarDb import get_list_sec_filings, get_cik_ticker_lookup_db, lookup_cik_ticker
get_list_sec_filings (7, 'test_idx.db')
get_cik_ticker_lookup_db ('test_idx.db')
def download_test_data():
import sqlite3
from datetime import datetime
import pandas as pd
testDir = "..\\TestData\\"
testTickers = {
"AAPL": [datetime(2014, 8, 1), datetime(2018, 8, 1)],
"ACLS": [datetime(2014, 8, 31), datetime(2018, 8, 31)],
"ADSK": [datetime(2014, 4, 15), datetime(2018, 4, 15)],
"ALEX": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"MMM": [datetime(2015, 7, 1), datetime(2019, 7, 1)],
"NRP": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"NVDA": [datetime(2015, 12, 31), datetime(2019, 12, 31)]
}
conn3 = sqlite3.connect('test_idx.db')
cursor = conn3.cursor()
for ticker in testTickers:
#cursor.execute('''SELECT * FROM idx WHERE Symbol=?;''', ("ABBV",))
cursor.execute('''SELECT * FROM cik_ticker_name WHERE ticker=?;''',(ticker,))
res = cursor.fetchall()
print(res)
cursor.execute('''SELECT * FROM idx WHERE cik=?;''', (res[0][0],))
recs = cursor.fetchall()
print(len(recs))
names = list(map(lambda x: x[0], cursor.description))
#print(names)
df = pd.DataFrame(data=recs, columns=names)
df['date'] = pd.to_datetime(df['date'])
beginDate = testTickers[ticker][0]
endDate = testTickers[ticker][1]
df1 = df[(df.date >= beginDate) & (df.date <= endDate)]
## Sort by date in descending order (most recent is first)
df1.sort_values(by=['date'], inplace=True, ascending=False)
df1[df1.type == "10-Q"].to_csv(testDir+ticker.lower()+"_all_10qs.csv", index=None)
df1[df1.type == "10-K"].to_csv(testDir+ticker.lower()+"_all_10ks.csv", index=None)
conn3.close()
if __name__ == "__main__":
#set_test_db()
download_test_data()
|
<commit_before><commit_msg>Create script to download requisite test urls.<commit_after>def set_test_db():
from sys import path
path.insert(0, "..")
from MyEdgarDb import get_list_sec_filings, get_cik_ticker_lookup_db, lookup_cik_ticker
get_list_sec_filings (7, 'test_idx.db')
get_cik_ticker_lookup_db ('test_idx.db')
def download_test_data():
import sqlite3
from datetime import datetime
import pandas as pd
testDir = "..\\TestData\\"
testTickers = {
"AAPL": [datetime(2014, 8, 1), datetime(2018, 8, 1)],
"ACLS": [datetime(2014, 8, 31), datetime(2018, 8, 31)],
"ADSK": [datetime(2014, 4, 15), datetime(2018, 4, 15)],
"ALEX": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"MMM": [datetime(2015, 7, 1), datetime(2019, 7, 1)],
"NRP": [datetime(2015, 12, 31), datetime(2019, 12, 31)],
"NVDA": [datetime(2015, 12, 31), datetime(2019, 12, 31)]
}
conn3 = sqlite3.connect('test_idx.db')
cursor = conn3.cursor()
for ticker in testTickers:
#cursor.execute('''SELECT * FROM idx WHERE Symbol=?;''', ("ABBV",))
cursor.execute('''SELECT * FROM cik_ticker_name WHERE ticker=?;''',(ticker,))
res = cursor.fetchall()
print(res)
cursor.execute('''SELECT * FROM idx WHERE cik=?;''', (res[0][0],))
recs = cursor.fetchall()
print(len(recs))
names = list(map(lambda x: x[0], cursor.description))
#print(names)
df = pd.DataFrame(data=recs, columns=names)
df['date'] = pd.to_datetime(df['date'])
beginDate = testTickers[ticker][0]
endDate = testTickers[ticker][1]
df1 = df[(df.date >= beginDate) & (df.date <= endDate)]
## Sort by date in descending order (most recent is first)
df1.sort_values(by=['date'], inplace=True, ascending=False)
df1[df1.type == "10-Q"].to_csv(testDir+ticker.lower()+"_all_10qs.csv", index=None)
df1[df1.type == "10-K"].to_csv(testDir+ticker.lower()+"_all_10ks.csv", index=None)
conn3.close()
if __name__ == "__main__":
#set_test_db()
download_test_data()
|
|
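A toy illustration of the inclusive date-window filter used in the script above, with made-up rows:
import pandas as pd
from datetime import datetime

df = pd.DataFrame({
    'date': pd.to_datetime(['2014-01-01', '2016-06-30', '2019-01-01']),
    'type': ['10-Q', '10-K', '10-Q'],
})
window = df[(df.date >= datetime(2015, 1, 1)) & (df.date <= datetime(2018, 1, 1))]
print(window)  # keeps only the 2016-06-30 row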
3ebb2731d6389170e0bef0dab66dc7c4ab41152e
|
thread_pool_test.py
|
thread_pool_test.py
|
import thread_pool
import unittest
from six.moves import queue
class TestThreadPool(unittest.TestCase):
def _producer_thread(self, results):
for i in range(10):
results.put(i)
def _consumer_thread(self, results):
for i in range(10):
self.assertEqual(results.get(), i)
def testContextManager(self):
results = queue.Queue(maxsize=1)
with thread_pool.ThreadPool(2) as pool:
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
def testJoin(self):
results = queue.Queue(maxsize=1)
pool = thread_pool.ThreadPool(2)
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
pool.join()
|
Add a unit-test for thread_pool.py.
|
Add a unit-test for thread_pool.py.
|
Python
|
mit
|
graveljp/smugcli
|
Add a unit-test for thread_pool.py.
|
import thread_pool
import unittest
from six.moves import queue
class TestThreadPool(unittest.TestCase):
def _producer_thread(self, results):
for i in range(10):
results.put(i)
def _consumer_thread(self, results):
for i in range(10):
self.assertEqual(results.get(), i)
def testContextManager(self):
results = queue.Queue(maxsize=1)
with thread_pool.ThreadPool(2) as pool:
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
def testJoin(self):
results = queue.Queue(maxsize=1)
pool = thread_pool.ThreadPool(2)
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
pool.join()
|
<commit_before><commit_msg>Add a unit-test for thread_pool.py.<commit_after>
|
import thread_pool
import unittest
from six.moves import queue
class TestThreadPool(unittest.TestCase):
def _producer_thread(self, results):
for i in range(10):
results.put(i)
def _consumer_thread(self, results):
for i in range(10):
self.assertEqual(results.get(), i)
def testContextManager(self):
results = queue.Queue(maxsize=1)
with thread_pool.ThreadPool(2) as pool:
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
def testJoin(self):
results = queue.Queue(maxsize=1)
pool = thread_pool.ThreadPool(2)
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
pool.join()
|
Add a unit-test for thread_pool.py.import thread_pool
import unittest
from six.moves import queue
class TestThreadPool(unittest.TestCase):
def _producer_thread(self, results):
for i in range(10):
results.put(i)
def _consumer_thread(self, results):
for i in range(10):
self.assertEqual(results.get(), i)
def testContextManager(self):
results = queue.Queue(maxsize=1)
with thread_pool.ThreadPool(2) as pool:
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
def testJoin(self):
results = queue.Queue(maxsize=1)
pool = thread_pool.ThreadPool(2)
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
pool.join()
|
<commit_before><commit_msg>Add a unit-test for thread_pool.py.<commit_after>import thread_pool
import unittest
from six.moves import queue
class TestThreadPool(unittest.TestCase):
def _producer_thread(self, results):
for i in range(10):
results.put(i)
def _consumer_thread(self, results):
for i in range(10):
self.assertEqual(results.get(), i)
def testContextManager(self):
results = queue.Queue(maxsize=1)
with thread_pool.ThreadPool(2) as pool:
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
def testJoin(self):
results = queue.Queue(maxsize=1)
pool = thread_pool.ThreadPool(2)
pool.add(self._producer_thread, results)
pool.add(self._consumer_thread, results)
pool.join()
|
|
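The tests above pin down a small contract for thread_pool.ThreadPool: add(fn, *args), join(), and context-manager support. The real thread_pool.py is not included in this record, so the following is only an illustrative sketch that would satisfy those tests:
import threading
from six.moves import queue

class ThreadPool(object):
    def __init__(self, num_workers):
        self._tasks = queue.Queue()
        self._workers = []
        for _ in range(num_workers):
            t = threading.Thread(target=self._worker)
            t.daemon = True
            t.start()
            self._workers.append(t)

    def _worker(self):
        while True:
            fn, args = self._tasks.get()
            if fn is None:  # shutdown sentinel queued by join()
                return
            fn(*args)

    def add(self, fn, *args):
        self._tasks.put((fn, args))

    def join(self):
        for _ in self._workers:
            self._tasks.put((None, ()))  # one sentinel per worker
        for t in self._workers:
            t.join()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.join()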
19712e8e7b9423d4cb4bb22c37c7d8d2ea0559c5
|
examples/list-usb.py
|
examples/list-usb.py
|
#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./list-usb.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '0.1')
from gi.repository import Ldm, GObject
class PretendyPlugin(Ldm.Plugin):
# Not really needed but good practice
__gtype_name__ = "PretendyPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
""" Demonstrate basic matching with custom plugins """
if not device.has_type(Ldm.DeviceType.AUDIO):
return None
return Ldm.Provider.new(self, device, "pretendy-package")
def main():
manager = Ldm.Manager()
manager.add_plugin(PretendyPlugin())
for device in manager.get_devices(Ldm.DeviceType.USB):
# Use gobject properties or methods
print("USB Device: {} {}".format(
device.props.vendor,
device.get_name()))
if device.has_type(Ldm.DeviceType.HID):
print("\tHID Device!")
for provider in manager.get_providers(device):
plugin = provider.get_plugin()
print("\tSuggested package: {}".format(provider.get_package()))
if __name__ == "__main__":
main()
|
Add example to show listing of USB devices
|
examples: Add example to show listing of USB devices
Also we add a quick example of how to implement a custom plugin to provide
runtime plugin support for LdmManager.
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com>
|
Python
|
lgpl-2.1
|
solus-project/linux-driver-management,solus-project/linux-driver-management
|
examples: Add example to show listing of USB devices
Also we add a quick example of how to implement a custom plugin to provide
runtime plugin support for LdmManager.
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com>
|
#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./list-usb.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '0.1')
from gi.repository import Ldm, GObject
class PretendyPlugin(Ldm.Plugin):
# Not really needed but good practice
__gtype_name__ = "PretendyPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
""" Demonstrate basic matching with custom plugins """
if not device.has_type(Ldm.DeviceType.AUDIO):
return None
return Ldm.Provider.new(self, device, "pretendy-package")
def main():
manager = Ldm.Manager()
manager.add_plugin(PretendyPlugin())
for device in manager.get_devices(Ldm.DeviceType.USB):
# Use gobject properties or methods
print("USB Device: {} {}".format(
device.props.vendor,
device.get_name()))
if device.has_type(Ldm.DeviceType.HID):
print("\tHID Device!")
for provider in manager.get_providers(device):
plugin = provider.get_plugin()
print("\tSuggested package: {}".format(provider.get_package()))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>examples: Add example to show listing of USB devices
Also we add a quick example of how to implement a custom plugin to provide
runtime plugin support for LdmManager.
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com><commit_after>
|
#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./list-usb.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '0.1')
from gi.repository import Ldm, GObject
class PretendyPlugin(Ldm.Plugin):
# Not really needed but good practice
__gtype_name__ = "PretendyPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
""" Demonstrate basic matching with custom plugins """
if not device.has_type(Ldm.DeviceType.AUDIO):
return None
return Ldm.Provider.new(self, device, "pretendy-package")
def main():
manager = Ldm.Manager()
manager.add_plugin(PretendyPlugin())
for device in manager.get_devices(Ldm.DeviceType.USB):
# Use gobject properties or methods
print("USB Device: {} {}".format(
device.props.vendor,
device.get_name()))
if device.has_type(Ldm.DeviceType.HID):
print("\tHID Device!")
for provider in manager.get_providers(device):
plugin = provider.get_plugin()
print("\tSuggested package: {}".format(provider.get_package()))
if __name__ == "__main__":
main()
|
examples: Add example to show listing of USB devices
Also we add a quick example of how to implement a custom plugin to provide
runtime plugin support for LdmManager.
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com>#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./list-usb.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '0.1')
from gi.repository import Ldm, GObject
class PretendyPlugin(Ldm.Plugin):
# Not really needed but good practice
__gtype_name__ = "PretendyPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
""" Demonstrate basic matching with custom plugins """
if not device.has_type(Ldm.DeviceType.AUDIO):
return None
return Ldm.Provider.new(self, device, "pretendy-package")
def main():
manager = Ldm.Manager()
manager.add_plugin(PretendyPlugin())
for device in manager.get_devices(Ldm.DeviceType.USB):
# Use gobject properties or methods
print("USB Device: {} {}".format(
device.props.vendor,
device.get_name()))
if device.has_type(Ldm.DeviceType.HID):
print("\tHID Device!")
for provider in manager.get_providers(device):
plugin = provider.get_plugin()
print("\tSuggested package: {}".format(provider.get_package()))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>examples: Add example to show listing of USB devices
Also we add a quick example of how to implement a custom plugin to provide
runtime plugin support for LdmManager.
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com><commit_after>#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./list-usb.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '0.1')
from gi.repository import Ldm, GObject
class PretendyPlugin(Ldm.Plugin):
# Not really needed but good practice
__gtype_name__ = "PretendyPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
""" Demonstrate basic matching with custom plugins """
if not device.has_type(Ldm.DeviceType.AUDIO):
return None
return Ldm.Provider.new(self, device, "pretendy-package")
def main():
manager = Ldm.Manager()
manager.add_plugin(PretendyPlugin())
for device in manager.get_devices(Ldm.DeviceType.USB):
# Use gobject properties or methods
print("USB Device: {} {}".format(
device.props.vendor,
device.get_name()))
if device.has_type(Ldm.DeviceType.HID):
print("\tHID Device!")
for provider in manager.get_providers(device):
plugin = provider.get_plugin()
print("\tSuggested package: {}".format(provider.get_package()))
if __name__ == "__main__":
main()
|
|
b56690d046021e036b5b15c484d86c92f3519600
|
scikits/learn/common/myfunctools.py
|
scikits/learn/common/myfunctools.py
|
# Last Change: Mon Aug 20 01:00 PM 2007 J
# Implement partial application (should only be used if functools is not
# available (eg python < 2.5)
class partial:
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw = self.kwargs.copy()
kw.update(kwargs)
else:
kw = kwargs or self.kwargs
return self.fun(*(self.pending + args), **kw)
|
Add partial evaluation tool to replace functools module for python < 2.5
|
Add partial evaluation tool to replace functools module for python < 2.5
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@236 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
Python
|
bsd-3-clause
|
shikhardb/scikit-learn,ClimbsRocks/scikit-learn,bthirion/scikit-learn,samuel1208/scikit-learn,sinhrks/scikit-learn,OshynSong/scikit-learn,ky822/scikit-learn,treycausey/scikit-learn,TomDLT/scikit-learn,RomainBrault/scikit-learn,zuku1985/scikit-learn,icdishb/scikit-learn,arabenjamin/scikit-learn,sarahgrogan/scikit-learn,davidgbe/scikit-learn,mugizico/scikit-learn,MartinDelzant/scikit-learn,pythonvietnam/scikit-learn,q1ang/scikit-learn,djgagne/scikit-learn,xyguo/scikit-learn,kylerbrown/scikit-learn,arjoly/scikit-learn,khkaminska/scikit-learn,bhargav/scikit-learn,xiaoxiamii/scikit-learn,pianomania/scikit-learn,larsmans/scikit-learn,larsmans/scikit-learn,BiaDarkia/scikit-learn,mattilyra/scikit-learn,UNR-AERIAL/scikit-learn,DonBeo/scikit-learn,MohammedWasim/scikit-learn,fabioticconi/scikit-learn,shusenl/scikit-learn,ZENGXH/scikit-learn,ky822/scikit-learn,shangwuhencc/scikit-learn,Adai0808/scikit-learn,depet/scikit-learn,terkkila/scikit-learn,wzbozon/scikit-learn,cainiaocome/scikit-learn,pnedunuri/scikit-learn,Sentient07/scikit-learn,alvarofierroclavero/scikit-learn,ycaihua/scikit-learn,LiaoPan/scikit-learn,shahankhatch/scikit-learn,vermouthmjl/scikit-learn,simon-pepin/scikit-learn,Srisai85/scikit-learn,toastedcornflakes/scikit-learn,ilo10/scikit-learn,lin-credible/scikit-learn,IshankGulati/scikit-learn,loli/sklearn-ensembletrees,terkkila/scikit-learn,dhruv13J/scikit-learn,marcocaccin/scikit-learn,fredhusser/scikit-learn,maheshakya/scikit-learn,henrykironde/scikit-learn,florian-f/sklearn,Aasmi/scikit-learn,samzhang111/scikit-learn,herilalaina/scikit-learn,Achuth17/scikit-learn,YinongLong/scikit-learn,vybstat/scikit-learn,hitszxp/scikit-learn,manashmndl/scikit-learn,zhenv5/scikit-learn,B3AU/waveTree,billy-inn/scikit-learn,kjung/scikit-learn,pompiduskus/scikit-learn,mjudsp/Tsallis,PatrickOReilly/scikit-learn,chrsrds/scikit-learn,OshynSong/scikit-learn,roxyboy/scikit-learn,liyu1990/sklearn,wanggang3333/scikit-learn,anntzer/scikit-learn,zihua/scikit-learn,joernhees/scikit-learn,sonnyhu/scikit-learn,MartinDelzant/scikit-learn,vigilv/scikit-learn,russel1237/scikit-learn,bigdataelephants/scikit-learn,plissonf/scikit-learn,madjelan/scikit-learn,thilbern/scikit-learn,shikhardb/scikit-learn,yask123/scikit-learn,rrohan/scikit-learn,jzt5132/scikit-learn,jseabold/scikit-learn,ashhher3/scikit-learn,fyffyt/scikit-learn,jpautom/scikit-learn,manashmndl/scikit-learn,appapantula/scikit-learn,anurag313/scikit-learn,CforED/Machine-Learning,abhishekgahlot/scikit-learn,Windy-Ground/scikit-learn,hitszxp/scikit-learn,kashif/scikit-learn,Fireblend/scikit-learn,ky822/scikit-learn,theoryno3/scikit-learn,ivannz/scikit-learn,rsivapr/scikit-learn,MohammedWasim/scikit-learn,zorroblue/scikit-learn,xwolf12/scikit-learn,mattilyra/scikit-learn,q1ang/scikit-learn,kjung/scikit-learn,zorroblue/scikit-learn,evgchz/scikit-learn,mblondel/scikit-learn,waterponey/scikit-learn,IssamLaradji/scikit-learn,gotomypc/scikit-learn,adamgreenhall/scikit-learn,hrjn/scikit-learn,rvraghav93/scikit-learn,zorroblue/scikit-learn,mayblue9/scikit-learn,hlin117/scikit-learn,AlexRobson/scikit-learn,JeanKossaifi/scikit-learn,mugizico/scikit-learn,frank-tancf/scikit-learn,rsivapr/scikit-learn,rahuldhote/scikit-learn,mugizico/scikit-learn,russel1237/scikit-learn,treycausey/scikit-learn,RayMick/scikit-learn,ZENGXH/scikit-learn,ndingwall/scikit-learn,bnaul/scikit-learn,meduz/scikit-learn,siutanwong/scikit-learn,olologin/scikit-learn,pv/scikit-learn,smartscheduling/scikit-learn-categorical-tree,scikit-learn/scikit-learn,MartinDelzant/scikit-learn,mjgrav2001/scikit-l
earn,belltailjp/scikit-learn,sgenoud/scikit-learn,jblackburne/scikit-learn,manhhomienbienthuy/scikit-learn,aminert/scikit-learn,yonglehou/scikit-learn,yask123/scikit-learn,ldirer/scikit-learn,jaidevd/scikit-learn,trankmichael/scikit-learn,harshaneelhg/scikit-learn,jaidevd/scikit-learn,Titan-C/scikit-learn,waterponey/scikit-learn,arabenjamin/scikit-learn,ahoyosid/scikit-learn,walterreade/scikit-learn,Nyker510/scikit-learn,PatrickOReilly/scikit-learn,mehdidc/scikit-learn,anirudhjayaraman/scikit-learn,joshloyal/scikit-learn,IndraVikas/scikit-learn,huobaowangxi/scikit-learn,tdhopper/scikit-learn,JPFrancoia/scikit-learn,adamgreenhall/scikit-learn,ephes/scikit-learn,YinongLong/scikit-learn,Jimmy-Morzaria/scikit-learn,schets/scikit-learn,Titan-C/scikit-learn,joernhees/scikit-learn,maheshakya/scikit-learn,procoder317/scikit-learn,tawsifkhan/scikit-learn,lenovor/scikit-learn,hainm/scikit-learn,bikong2/scikit-learn,nomadcube/scikit-learn,nikitasingh981/scikit-learn,arabenjamin/scikit-learn,walterreade/scikit-learn,untom/scikit-learn,adamgreenhall/scikit-learn,loli/semisupervisedforests,mikebenfield/scikit-learn,h2educ/scikit-learn,vivekmishra1991/scikit-learn,ngoix/OCRF,robbymeals/scikit-learn,cdegroc/scikit-learn,ngoix/OCRF,shikhardb/scikit-learn,pypot/scikit-learn,JosmanPS/scikit-learn,xwolf12/scikit-learn,JosmanPS/scikit-learn,larsmans/scikit-learn,Fireblend/scikit-learn,ahoyosid/scikit-learn,JosmanPS/scikit-learn,tmhm/scikit-learn,yanlend/scikit-learn,anirudhjayaraman/scikit-learn,sanketloke/scikit-learn,Srisai85/scikit-learn,B3AU/waveTree,tawsifkhan/scikit-learn,alexeyum/scikit-learn,vigilv/scikit-learn,murali-munna/scikit-learn,heli522/scikit-learn,themrmax/scikit-learn,dingocuster/scikit-learn,YinongLong/scikit-learn,madjelan/scikit-learn,jayflo/scikit-learn,nesterione/scikit-learn,evgchz/scikit-learn,ndingwall/scikit-learn,JeanKossaifi/scikit-learn,pratapvardhan/scikit-learn,rohanp/scikit-learn,pnedunuri/scikit-learn,nmayorov/scikit-learn,depet/scikit-learn,3manuek/scikit-learn,poryfly/scikit-learn,dsullivan7/scikit-learn,jorge2703/scikit-learn,aminert/scikit-learn,rahuldhote/scikit-learn,shahankhatch/scikit-learn,vivekmishra1991/scikit-learn,zorojean/scikit-learn,jseabold/scikit-learn,tmhm/scikit-learn,wlamond/scikit-learn,yanlend/scikit-learn,saiwing-yeung/scikit-learn,anurag313/scikit-learn,PrashntS/scikit-learn,anirudhjayaraman/scikit-learn,billy-inn/scikit-learn,zorojean/scikit-learn,mattgiguere/scikit-learn,Lawrence-Liu/scikit-learn,shusenl/scikit-learn,Achuth17/scikit-learn,ishanic/scikit-learn,altairpearl/scikit-learn,dingocuster/scikit-learn,robin-lai/scikit-learn,meduz/scikit-learn,ZenDevelopmentSystems/scikit-learn,gotomypc/scikit-learn,Barmaley-exe/scikit-learn,zaxtax/scikit-learn,devanshdalal/scikit-learn,Nyker510/scikit-learn,Garrett-R/scikit-learn,dsquareindia/scikit-learn,elkingtonmcb/scikit-learn,vybstat/scikit-learn,mayblue9/scikit-learn,justincassidy/scikit-learn,equialgo/scikit-learn,elkingtonmcb/scikit-learn,eickenberg/scikit-learn,jereze/scikit-learn,xzh86/scikit-learn,toastedcornflakes/scikit-learn,mxjl620/scikit-learn,IssamLaradji/scikit-learn,mlyundin/scikit-learn,costypetrisor/scikit-learn,ChanChiChoi/scikit-learn,trungnt13/scikit-learn,0asa/scikit-learn,olologin/scikit-learn,shusenl/scikit-learn,f3r/scikit-learn,kashif/scikit-learn,cainiaocome/scikit-learn,xyguo/scikit-learn,huzq/scikit-learn,carrillo/scikit-learn,massmutual/scikit-learn,liberatorqjw/scikit-learn,fabioticconi/scikit-learn,Barmaley-exe/scikit-learn,sanketloke/scikit-learn,mrshu/scikit-learn,0asa/scik
it-learn,DSLituiev/scikit-learn,3manuek/scikit-learn,justincassidy/scikit-learn,xavierwu/scikit-learn,ndingwall/scikit-learn,Djabbz/scikit-learn,kmike/scikit-learn,simon-pepin/scikit-learn,0asa/scikit-learn,raghavrv/scikit-learn,YinongLong/scikit-learn,sgenoud/scikit-learn,gclenaghan/scikit-learn,jayflo/scikit-learn,rsivapr/scikit-learn,xzh86/scikit-learn,AnasGhrab/scikit-learn,ilo10/scikit-learn,iismd17/scikit-learn,depet/scikit-learn,Nyker510/scikit-learn,ZenDevelopmentSystems/scikit-learn,lin-credible/scikit-learn,mehdidc/scikit-learn,dhruv13J/scikit-learn,clemkoa/scikit-learn,gclenaghan/scikit-learn,thientu/scikit-learn,belltailjp/scikit-learn,terkkila/scikit-learn,quheng/scikit-learn,yanlend/scikit-learn,ephes/scikit-learn,victorbergelin/scikit-learn,nvoron23/scikit-learn,mjudsp/Tsallis,mlyundin/scikit-learn,dhruv13J/scikit-learn,DSLituiev/scikit-learn,mattgiguere/scikit-learn,ivannz/scikit-learn,mayblue9/scikit-learn,beepee14/scikit-learn,hsiaoyi0504/scikit-learn,gclenaghan/scikit-learn,murali-munna/scikit-learn,mattgiguere/scikit-learn,rahuldhote/scikit-learn,sumspr/scikit-learn,fredhusser/scikit-learn,sonnyhu/scikit-learn,mikebenfield/scikit-learn,ZenDevelopmentSystems/scikit-learn,Myasuka/scikit-learn,RayMick/scikit-learn,NelisVerhoef/scikit-learn,HolgerPeters/scikit-learn,anntzer/scikit-learn,bhargav/scikit-learn,spallavolu/scikit-learn,xzh86/scikit-learn,costypetrisor/scikit-learn,rohanp/scikit-learn,mfjb/scikit-learn,zaxtax/scikit-learn,ningchi/scikit-learn,nrhine1/scikit-learn,jakobworldpeace/scikit-learn,jzt5132/scikit-learn,marcocaccin/scikit-learn,ltiao/scikit-learn,theoryno3/scikit-learn,cauchycui/scikit-learn,alvarofierroclavero/scikit-learn,heli522/scikit-learn,tosolveit/scikit-learn,JeanKossaifi/scikit-learn,olologin/scikit-learn,harshaneelhg/scikit-learn,JPFrancoia/scikit-learn,IssamLaradji/scikit-learn,zhenv5/scikit-learn,moutai/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jm-begon/scikit-learn,0x0all/scikit-learn,murali-munna/scikit-learn,plissonf/scikit-learn,kevin-intel/scikit-learn,heli522/scikit-learn,hlin117/scikit-learn,pompiduskus/scikit-learn,smartscheduling/scikit-learn-categorical-tree,ashhher3/scikit-learn,ashhher3/scikit-learn,PrashntS/scikit-learn,mlyundin/scikit-learn,jlegendary/scikit-learn,xubenben/scikit-learn,sonnyhu/scikit-learn,ankurankan/scikit-learn,trungnt13/scikit-learn,florian-f/sklearn,harshaneelhg/scikit-learn,jkarnows/scikit-learn,michigraber/scikit-learn,vinayak-mehta/scikit-learn,zhenv5/scikit-learn,mblondel/scikit-learn,hrjn/scikit-learn,ankurankan/scikit-learn,michigraber/scikit-learn,hsiaoyi0504/scikit-learn,loli/semisupervisedforests,mxjl620/scikit-learn,UNR-AERIAL/scikit-learn,pypot/scikit-learn,xuewei4d/scikit-learn,ltiao/scikit-learn,PrashntS/scikit-learn,kmike/scikit-learn,mojoboss/scikit-learn,jjx02230808/project0223,evgchz/scikit-learn,rajat1994/scikit-learn,aabadie/scikit-learn,MatthieuBizien/scikit-learn,yunfeilu/scikit-learn,joernhees/scikit-learn,jayflo/scikit-learn,cl4rke/scikit-learn,bikong2/scikit-learn,vermouthmjl/scikit-learn,Vimos/scikit-learn,mwv/scikit-learn,Windy-Ground/scikit-learn,ningchi/scikit-learn,vermouthmjl/scikit-learn,fabianp/scikit-learn,mxjl620/scikit-learn,djgagne/scikit-learn,manhhomienbienthuy/scikit-learn,zihua/scikit-learn,lbishal/scikit-learn,glemaitre/scikit-learn,treycausey/scikit-learn,CVML/scikit-learn,btabibian/scikit-learn,huzq/scikit-learn,wlamond/scikit-learn,mjgrav2001/scikit-learn,espg/scikit-learn,bikong2/scikit-learn,manashmndl/scikit-learn,TomDLT/scikit-learn,macks22/scik
it-learn,meduz/scikit-learn,rahul-c1/scikit-learn,Srisai85/scikit-learn,BiaDarkia/scikit-learn,zaxtax/scikit-learn,MartinSavc/scikit-learn,ogrisel/scikit-learn,liyu1990/sklearn,jm-begon/scikit-learn,jzt5132/scikit-learn,MechCoder/scikit-learn,jpautom/scikit-learn,liberatorqjw/scikit-learn,ilyes14/scikit-learn,ycaihua/scikit-learn,lucidfrontier45/scikit-learn,dsquareindia/scikit-learn,tosolveit/scikit-learn,loli/semisupervisedforests,ominux/scikit-learn,TomDLT/scikit-learn,jkarnows/scikit-learn,nvoron23/scikit-learn,xuewei4d/scikit-learn,NelisVerhoef/scikit-learn,zihua/scikit-learn,deepesch/scikit-learn,equialgo/scikit-learn,rvraghav93/scikit-learn,IshankGulati/scikit-learn,RayMick/scikit-learn,Obus/scikit-learn,Sentient07/scikit-learn,nrhine1/scikit-learn,jlegendary/scikit-learn,anntzer/scikit-learn,Clyde-fare/scikit-learn,ltiao/scikit-learn,Achuth17/scikit-learn,pompiduskus/scikit-learn,IndraVikas/scikit-learn,jseabold/scikit-learn,vigilv/scikit-learn,meduz/scikit-learn,tosolveit/scikit-learn,liangz0707/scikit-learn,trungnt13/scikit-learn,HolgerPeters/scikit-learn,Clyde-fare/scikit-learn,samzhang111/scikit-learn,rajat1994/scikit-learn,dingocuster/scikit-learn,arjoly/scikit-learn,xyguo/scikit-learn,vibhorag/scikit-learn,anntzer/scikit-learn,jakirkham/scikit-learn,f3r/scikit-learn,carrillo/scikit-learn,AnasGhrab/scikit-learn,nvoron23/scikit-learn,nikitasingh981/scikit-learn,rahul-c1/scikit-learn,potash/scikit-learn,rsivapr/scikit-learn,frank-tancf/scikit-learn,amueller/scikit-learn,0x0all/scikit-learn,cl4rke/scikit-learn,yyjiang/scikit-learn,liangz0707/scikit-learn,wlamond/scikit-learn,kagayakidan/scikit-learn,pianomania/scikit-learn,loli/sklearn-ensembletrees,RomainBrault/scikit-learn,shangwuhencc/scikit-learn,alexeyum/scikit-learn,ilyes14/scikit-learn,alexsavio/scikit-learn,schets/scikit-learn,AIML/scikit-learn,mhue/scikit-learn,cauchycui/scikit-learn,nomadcube/scikit-learn,Aasmi/scikit-learn,jakobworldpeace/scikit-learn,q1ang/scikit-learn,eg-zhang/scikit-learn,ElDeveloper/scikit-learn,djgagne/scikit-learn,MartinDelzant/scikit-learn,vshtanko/scikit-learn,rvraghav93/scikit-learn,rexshihaoren/scikit-learn,henrykironde/scikit-learn,massmutual/scikit-learn,aabadie/scikit-learn,tdhopper/scikit-learn,3manuek/scikit-learn,glemaitre/scikit-learn,murali-munna/scikit-learn,luo66/scikit-learn,abimannans/scikit-learn,glouppe/scikit-learn,florian-f/sklearn,simon-pepin/scikit-learn,h2educ/scikit-learn,vibhorag/scikit-learn,jpautom/scikit-learn,mhue/scikit-learn,ogrisel/scikit-learn,h2educ/scikit-learn,icdishb/scikit-learn,moutai/scikit-learn,aflaxman/scikit-learn,pythonvietnam/scikit-learn,glennq/scikit-learn,loli/sklearn-ensembletrees,pianomania/scikit-learn,iismd17/scikit-learn,BiaDarkia/scikit-learn,tmhm/scikit-learn,shenzebang/scikit-learn,TomDLT/scikit-learn,victorbergelin/scikit-learn,vybstat/scikit-learn,wazeerzulfikar/scikit-learn,glemaitre/scikit-learn,andrewnc/scikit-learn,AIML/scikit-learn,toastedcornflakes/scikit-learn,q1ang/scikit-learn,ycaihua/scikit-learn,CforED/Machine-Learning,sgenoud/scikit-learn,davidgbe/scikit-learn,NunoEdgarGub1/scikit-learn,nmayorov/scikit-learn,tdhopper/scikit-learn,bthirion/scikit-learn,thientu/scikit-learn,loli/sklearn-ensembletrees,stylianos-kampakis/scikit-learn,lin-credible/scikit-learn,mrshu/scikit-learn,MechCoder/scikit-learn,sergeyf/scikit-learn,ashhher3/scikit-learn,voxlol/scikit-learn,chrisburr/scikit-learn,NunoEdgarGub1/scikit-learn,jorik041/scikit-learn,RomainBrault/scikit-learn,jereze/scikit-learn,jakirkham/scikit-learn,jaidevd/scikit-learn,vortex-ape
/scikit-learn,hainm/scikit-learn,pianomania/scikit-learn,pnedunuri/scikit-learn,rohanp/scikit-learn,ssaeger/scikit-learn,Sentient07/scikit-learn,Aasmi/scikit-learn,raghavrv/scikit-learn,qifeigit/scikit-learn,schets/scikit-learn,DSLituiev/scikit-learn,wazeerzulfikar/scikit-learn,Achuth17/scikit-learn,ZenDevelopmentSystems/scikit-learn,kylerbrown/scikit-learn,elkingtonmcb/scikit-learn,hugobowne/scikit-learn,costypetrisor/scikit-learn,etkirsch/scikit-learn,rrohan/scikit-learn,kmike/scikit-learn,glouppe/scikit-learn,alexsavio/scikit-learn,ChanderG/scikit-learn,macks22/scikit-learn,appapantula/scikit-learn,shikhardb/scikit-learn,fengzhyuan/scikit-learn,lenovor/scikit-learn,thientu/scikit-learn,eg-zhang/scikit-learn,alexeyum/scikit-learn,phdowling/scikit-learn,MechCoder/scikit-learn,krez13/scikit-learn,Lawrence-Liu/scikit-learn,thilbern/scikit-learn,r-mart/scikit-learn,maheshakya/scikit-learn,zorojean/scikit-learn,hdmetor/scikit-learn,sinhrks/scikit-learn,betatim/scikit-learn,anirudhjayaraman/scikit-learn,hugobowne/scikit-learn,zihua/scikit-learn,russel1237/scikit-learn,tosolveit/scikit-learn,fbagirov/scikit-learn,lesteve/scikit-learn,larsmans/scikit-learn,nesterione/scikit-learn,hsiaoyi0504/scikit-learn,ogrisel/scikit-learn,cybernet14/scikit-learn,lbishal/scikit-learn,abhishekkrthakur/scikit-learn,DonBeo/scikit-learn,akionakamura/scikit-learn,hainm/scikit-learn,sergeyf/scikit-learn,AlexRobson/scikit-learn,xiaoxiamii/scikit-learn,xuewei4d/scikit-learn,jmetzen/scikit-learn,ivannz/scikit-learn,imaculate/scikit-learn,herilalaina/scikit-learn,pkruskal/scikit-learn,wzbozon/scikit-learn,kaichogami/scikit-learn,hsuantien/scikit-learn,hsuantien/scikit-learn,vinayak-mehta/scikit-learn,0asa/scikit-learn,hrjn/scikit-learn,xwolf12/scikit-learn,nhejazi/scikit-learn,phdowling/scikit-learn,hitszxp/scikit-learn,PatrickOReilly/scikit-learn,justincassidy/scikit-learn,plissonf/scikit-learn,hdmetor/scikit-learn,nhejazi/scikit-learn,lenovor/scikit-learn,siutanwong/scikit-learn,joshloyal/scikit-learn,nvoron23/scikit-learn,cl4rke/scikit-learn,adamgreenhall/scikit-learn,giorgiop/scikit-learn,vermouthmjl/scikit-learn,jmschrei/scikit-learn,0x0all/scikit-learn,huobaowangxi/scikit-learn,ClimbsRocks/scikit-learn,rishikksh20/scikit-learn,xavierwu/scikit-learn,ilo10/scikit-learn,etkirsch/scikit-learn,OshynSong/scikit-learn,NunoEdgarGub1/scikit-learn,nelson-liu/scikit-learn,jzt5132/scikit-learn,joshloyal/scikit-learn,B3AU/waveTree,pypot/scikit-learn,evgchz/scikit-learn,mwv/scikit-learn,spallavolu/scikit-learn,RachitKansal/scikit-learn,arabenjamin/scikit-learn,cdegroc/scikit-learn,JosmanPS/scikit-learn,cainiaocome/scikit-learn,pompiduskus/scikit-learn,ishanic/scikit-learn,abimannans/scikit-learn,walterreade/scikit-learn,cwu2011/scikit-learn,trungnt13/scikit-learn,ssaeger/scikit-learn,altairpearl/scikit-learn,sgenoud/scikit-learn,xyguo/scikit-learn,ndingwall/scikit-learn,sergeyf/scikit-learn,ChanChiChoi/scikit-learn,theoryno3/scikit-learn,fyffyt/scikit-learn,JPFrancoia/scikit-learn,clemkoa/scikit-learn,ChanderG/scikit-learn,hlin117/scikit-learn,jaidevd/scikit-learn,JsNoNo/scikit-learn,shyamalschandra/scikit-learn,rrohan/scikit-learn,vybstat/scikit-learn,zaxtax/scikit-learn,abhishekgahlot/scikit-learn,cdegroc/scikit-learn,MohammedWasim/scikit-learn,cauchycui/scikit-learn,thilbern/scikit-learn,lucidfrontier45/scikit-learn,mugizico/scikit-learn,zuku1985/scikit-learn,Djabbz/scikit-learn,yunfeilu/scikit-learn,abhishekkrthakur/scikit-learn,devanshdalal/scikit-learn,466152112/scikit-learn,joshloyal/scikit-learn,Myasuka/scikit-learn,shya
malschandra/scikit-learn,btabibian/scikit-learn,ChanderG/scikit-learn,jm-begon/scikit-learn,B3AU/waveTree,MartinSavc/scikit-learn,HolgerPeters/scikit-learn,LohithBlaze/scikit-learn,nikitasingh981/scikit-learn,ephes/scikit-learn,dsullivan7/scikit-learn,MartinSavc/scikit-learn,LohithBlaze/scikit-learn,luo66/scikit-learn,sergeyf/scikit-learn,arahuja/scikit-learn,kmike/scikit-learn,h2educ/scikit-learn,tomlof/scikit-learn,eickenberg/scikit-learn,ChanChiChoi/scikit-learn,ankurankan/scikit-learn,btabibian/scikit-learn,scikit-learn/scikit-learn,tomlof/scikit-learn,harshaneelhg/scikit-learn,ivannz/scikit-learn,zuku1985/scikit-learn,petosegan/scikit-learn,466152112/scikit-learn,jmetzen/scikit-learn,icdishb/scikit-learn,yunfeilu/scikit-learn,huobaowangxi/scikit-learn,fabioticconi/scikit-learn,yonglehou/scikit-learn,jjx02230808/project0223,Akshay0724/scikit-learn,idlead/scikit-learn,kjung/scikit-learn,samuel1208/scikit-learn,jorik041/scikit-learn,akionakamura/scikit-learn,simon-pepin/scikit-learn,JeanKossaifi/scikit-learn,abhishekgahlot/scikit-learn,wanggang3333/scikit-learn,procoder317/scikit-learn,kevin-intel/scikit-learn,petosegan/scikit-learn,elkingtonmcb/scikit-learn,shenzebang/scikit-learn,cdegroc/scikit-learn,JsNoNo/scikit-learn,ClimbsRocks/scikit-learn,kylerbrown/scikit-learn,andaag/scikit-learn,xzh86/scikit-learn,ishanic/scikit-learn,sarahgrogan/scikit-learn,hugobowne/scikit-learn,akionakamura/scikit-learn,andaag/scikit-learn,bnaul/scikit-learn,krez13/scikit-learn,jereze/scikit-learn,terkkila/scikit-learn,giorgiop/scikit-learn,yyjiang/scikit-learn,smartscheduling/scikit-learn-categorical-tree,marcocaccin/scikit-learn,hitszxp/scikit-learn,IndraVikas/scikit-learn,Vimos/scikit-learn,aetilley/scikit-learn,jorge2703/scikit-learn,BiaDarkia/scikit-learn,vortex-ape/scikit-learn,poryfly/scikit-learn,Titan-C/scikit-learn,hlin117/scikit-learn,ldirer/scikit-learn,liyu1990/sklearn,huzq/scikit-learn,jorge2703/scikit-learn,Garrett-R/scikit-learn,dsquareindia/scikit-learn,lin-credible/scikit-learn,jlegendary/scikit-learn,stylianos-kampakis/scikit-learn,jorge2703/scikit-learn,sumspr/scikit-learn,justincassidy/scikit-learn,hsuantien/scikit-learn,andrewnc/scikit-learn,untom/scikit-learn,phdowling/scikit-learn,ilyes14/scikit-learn,dsquareindia/scikit-learn,ssaeger/scikit-learn,xiaoxiamii/scikit-learn,jmschrei/scikit-learn,Obus/scikit-learn,ankurankan/scikit-learn,rohanp/scikit-learn,kmike/scikit-learn,macks22/scikit-learn,zorroblue/scikit-learn,MatthieuBizien/scikit-learn,xuewei4d/scikit-learn,Adai0808/scikit-learn,UNR-AERIAL/scikit-learn,fbagirov/scikit-learn,nrhine1/scikit-learn,nelson-liu/scikit-learn,glouppe/scikit-learn,f3r/scikit-learn,khkaminska/scikit-learn,roxyboy/scikit-learn,hdmetor/scikit-learn,Srisai85/scikit-learn,MartinSavc/scikit-learn,ltiao/scikit-learn,mattilyra/scikit-learn,trankmichael/scikit-learn,akionakamura/scikit-learn,aflaxman/scikit-learn,gotomypc/scikit-learn,RPGOne/scikit-learn,mfjb/scikit-learn,mblondel/scikit-learn,scikit-learn/scikit-learn,robbymeals/scikit-learn,imaculate/scikit-learn,quheng/scikit-learn,vibhorag/scikit-learn,xavierwu/scikit-learn,RPGOne/scikit-learn,bthirion/scikit-learn,IshankGulati/scikit-learn,tawsifkhan/scikit-learn,mhdella/scikit-learn,fengzhyuan/scikit-learn,nhejazi/scikit-learn,Jimmy-Morzaria/scikit-learn,mfjb/scikit-learn,untom/scikit-learn,qifeigit/scikit-learn,billy-inn/scikit-learn,glennq/scikit-learn,arahuja/scikit-learn,abhishekgahlot/scikit-learn,luo66/scikit-learn,mayblue9/scikit-learn,ldirer/scikit-learn,jm-begon/scikit-learn,JsNoNo/scikit-learn,v
oxlol/scikit-learn,glennq/scikit-learn,aetilley/scikit-learn,arahuja/scikit-learn,jayflo/scikit-learn,stylianos-kampakis/scikit-learn,ilo10/scikit-learn,clemkoa/scikit-learn,AlexandreAbraham/scikit-learn,arahuja/scikit-learn,fyffyt/scikit-learn,AlexanderFabisch/scikit-learn,pv/scikit-learn,betatim/scikit-learn,Garrett-R/scikit-learn,jmschrei/scikit-learn,ZENGXH/scikit-learn,rrohan/scikit-learn,Adai0808/scikit-learn,ngoix/OCRF,chrisburr/scikit-learn,saiwing-yeung/scikit-learn,lenovor/scikit-learn,AlexandreAbraham/scikit-learn,mehdidc/scikit-learn,lucidfrontier45/scikit-learn,yask123/scikit-learn,rsivapr/scikit-learn,nmayorov/scikit-learn,deepesch/scikit-learn,abhishekkrthakur/scikit-learn,yyjiang/scikit-learn,wanggang3333/scikit-learn,CforED/Machine-Learning,arjoly/scikit-learn,samzhang111/scikit-learn,victorbergelin/scikit-learn,3manuek/scikit-learn,ClimbsRocks/scikit-learn,mojoboss/scikit-learn,jkarnows/scikit-learn,voxlol/scikit-learn,kevin-intel/scikit-learn,fzalkow/scikit-learn,vshtanko/scikit-learn,pkruskal/scikit-learn,ky822/scikit-learn,spallavolu/scikit-learn,Titan-C/scikit-learn,loli/sklearn-ensembletrees,kashif/scikit-learn,zuku1985/scikit-learn,ominux/scikit-learn,iismd17/scikit-learn,ilyes14/scikit-learn,ningchi/scikit-learn,ngoix/OCRF,xavierwu/scikit-learn,jpautom/scikit-learn,saiwing-yeung/scikit-learn,shenzebang/scikit-learn,Windy-Ground/scikit-learn,amueller/scikit-learn,cybernet14/scikit-learn,dsullivan7/scikit-learn,sanketloke/scikit-learn,rajat1994/scikit-learn,sonnyhu/scikit-learn,khkaminska/scikit-learn,moutai/scikit-learn,AlexRobson/scikit-learn,beepee14/scikit-learn,NunoEdgarGub1/scikit-learn,mojoboss/scikit-learn,aflaxman/scikit-learn,vivekmishra1991/scikit-learn,henrykironde/scikit-learn,btabibian/scikit-learn,sanketloke/scikit-learn,mhdella/scikit-learn,altairpearl/scikit-learn,schets/scikit-learn,vigilv/scikit-learn,Lawrence-Liu/scikit-learn,andrewnc/scikit-learn,mikebenfield/scikit-learn,fengzhyuan/scikit-learn,giorgiop/scikit-learn,giorgiop/scikit-learn,deepesch/scikit-learn,CforED/Machine-Learning,sgenoud/scikit-learn,beepee14/scikit-learn,tomlof/scikit-learn,rishikksh20/scikit-learn,henridwyer/scikit-learn,theoryno3/scikit-learn,glouppe/scikit-learn,shenzebang/scikit-learn,lucidfrontier45/scikit-learn,herilalaina/scikit-learn,icdishb/scikit-learn,fredhusser/scikit-learn,r-mart/scikit-learn,toastedcornflakes/scikit-learn,mehdidc/scikit-learn,jkarnows/scikit-learn,jakobworldpeace/scikit-learn,andrewnc/scikit-learn,lazywei/scikit-learn,madjelan/scikit-learn,jblackburne/scikit-learn,jakirkham/scikit-learn,lucidfrontier45/scikit-learn,manashmndl/scikit-learn,chrisburr/scikit-learn,sumspr/scikit-learn,hainm/scikit-learn,fzalkow/scikit-learn,imaculate/scikit-learn,etkirsch/scikit-learn,bhargav/scikit-learn,PatrickChrist/scikit-learn,espg/scikit-learn,fbagirov/scikit-learn,mxjl620/scikit-learn,0x0all/scikit-learn,jmetzen/scikit-learn,ahoyosid/scikit-learn,aewhatley/scikit-learn,huzq/scikit-learn,nelson-liu/scikit-learn,olologin/scikit-learn,thientu/scikit-learn,samuel1208/scikit-learn,nomadcube/scikit-learn,pkruskal/scikit-learn,AIML/scikit-learn,plissonf/scikit-learn,aewhatley/scikit-learn,pypot/scikit-learn,LohithBlaze/scikit-learn,HolgerPeters/scikit-learn,rishikksh20/scikit-learn,Djabbz/scikit-learn,rishikksh20/scikit-learn,nesterione/scikit-learn,ssaeger/scikit-learn,mojoboss/scikit-learn,lbishal/scikit-learn,appapantula/scikit-learn,anurag313/scikit-learn,bigdataelephants/scikit-learn,robin-lai/scikit-learn,rexshihaoren/scikit-learn,Akshay0724/scikit-learn,prata
pvardhan/scikit-learn,MatthieuBizien/scikit-learn,shusenl/scikit-learn,bikong2/scikit-learn,appapantula/scikit-learn,ephes/scikit-learn,Sentient07/scikit-learn,kjung/scikit-learn,idlead/scikit-learn,hrjn/scikit-learn,jjx02230808/project0223,mwv/scikit-learn,kashif/scikit-learn,krez13/scikit-learn,RomainBrault/scikit-learn,quheng/scikit-learn,AnasGhrab/scikit-learn,henrykironde/scikit-learn,wzbozon/scikit-learn,aewhatley/scikit-learn,vibhorag/scikit-learn,themrmax/scikit-learn,maheshakya/scikit-learn,nrhine1/scikit-learn,beepee14/scikit-learn,aminert/scikit-learn,potash/scikit-learn,mwv/scikit-learn,cwu2011/scikit-learn,florian-f/sklearn,Adai0808/scikit-learn,larsmans/scikit-learn,krez13/scikit-learn,RachitKansal/scikit-learn,wanggang3333/scikit-learn,JPFrancoia/scikit-learn,wlamond/scikit-learn,devanshdalal/scikit-learn,anurag313/scikit-learn,massmutual/scikit-learn,ycaihua/scikit-learn,themrmax/scikit-learn,AlexandreAbraham/scikit-learn,jblackburne/scikit-learn,Myasuka/scikit-learn,moutai/scikit-learn,tawsifkhan/scikit-learn,mblondel/scikit-learn,yanlend/scikit-learn,AnasGhrab/scikit-learn,madjelan/scikit-learn,shahankhatch/scikit-learn,IshankGulati/scikit-learn,saiwing-yeung/scikit-learn,xubenben/scikit-learn,pkruskal/scikit-learn,henridwyer/scikit-learn,rahul-c1/scikit-learn,pv/scikit-learn,lazywei/scikit-learn,aflaxman/scikit-learn,NelisVerhoef/scikit-learn,liberatorqjw/scikit-learn,untom/scikit-learn,Obus/scikit-learn,tmhm/scikit-learn,yonglehou/scikit-learn,gotomypc/scikit-learn,jmetzen/scikit-learn,mhdella/scikit-learn,petosegan/scikit-learn,NelisVerhoef/scikit-learn,mhdella/scikit-learn,pratapvardhan/scikit-learn,ogrisel/scikit-learn,liangz0707/scikit-learn,xiaoxiamii/scikit-learn,rahul-c1/scikit-learn,robin-lai/scikit-learn,bnaul/scikit-learn,ChanderG/scikit-learn,Nyker510/scikit-learn,fabianp/scikit-learn,mrshu/scikit-learn,espg/scikit-learn,poryfly/scikit-learn,bigdataelephants/scikit-learn,eg-zhang/scikit-learn,vinayak-mehta/scikit-learn,mjudsp/Tsallis,AlexanderFabisch/scikit-learn,PatrickOReilly/scikit-learn,hugobowne/scikit-learn,UNR-AERIAL/scikit-learn,idlead/scikit-learn,jseabold/scikit-learn,belltailjp/scikit-learn,mrshu/scikit-learn,Obus/scikit-learn,quheng/scikit-learn,wzbozon/scikit-learn,Clyde-fare/scikit-learn,pnedunuri/scikit-learn,mjgrav2001/scikit-learn,billy-inn/scikit-learn,yunfeilu/scikit-learn,LohithBlaze/scikit-learn,fbagirov/scikit-learn,stylianos-kampakis/scikit-learn,Barmaley-exe/scikit-learn,RayMick/scikit-learn,waterponey/scikit-learn,marcocaccin/scikit-learn,themrmax/scikit-learn,andaag/scikit-learn,hsiaoyi0504/scikit-learn,f3r/scikit-learn,lazywei/scikit-learn,Akshay0724/scikit-learn,vortex-ape/scikit-learn,macks22/scikit-learn,abhishekgahlot/scikit-learn,AlexanderFabisch/scikit-learn,roxyboy/scikit-learn,hsuantien/scikit-learn,chrisburr/scikit-learn,liangz0707/scikit-learn,r-mart/scikit-learn,roxyboy/scikit-learn,jorik041/scikit-learn,nomadcube/scikit-learn,fzalkow/scikit-learn,mhue/scikit-learn,vshtanko/scikit-learn,chrsrds/scikit-learn,treycausey/scikit-learn,Aasmi/scikit-learn,raghavrv/scikit-learn,walterreade/scikit-learn,B3AU/waveTree,shahankhatch/scikit-learn,liberatorqjw/scikit-learn,heli522/scikit-learn,nhejazi/scikit-learn,huobaowangxi/scikit-learn,ngoix/OCRF,JsNoNo/scikit-learn,DonBeo/scikit-learn,RPGOne/scikit-learn,treycausey/scikit-learn,michigraber/scikit-learn,mikebenfield/scikit-learn,0asa/scikit-learn,florian-f/sklearn,procoder317/scikit-learn,jakirkham/scikit-learn,equialgo/scikit-learn,PatrickChrist/scikit-learn,Garrett-R/scikit-learn
,vinayak-mehta/scikit-learn,Garrett-R/scikit-learn,RachitKansal/scikit-learn,pythonvietnam/scikit-learn,mlyundin/scikit-learn,jmschrei/scikit-learn,pratapvardhan/scikit-learn,xubenben/scikit-learn,aabadie/scikit-learn,idlead/scikit-learn,liyu1990/sklearn,fredhusser/scikit-learn,shyamalschandra/scikit-learn,ominux/scikit-learn,henridwyer/scikit-learn,scikit-learn/scikit-learn,wazeerzulfikar/scikit-learn,DonBeo/scikit-learn,Fireblend/scikit-learn,fabioticconi/scikit-learn,ankurankan/scikit-learn,sumspr/scikit-learn,massmutual/scikit-learn,dsullivan7/scikit-learn,yask123/scikit-learn,shyamalschandra/scikit-learn,kaichogami/scikit-learn,siutanwong/scikit-learn,betatim/scikit-learn,samzhang111/scikit-learn,PatrickChrist/scikit-learn,chrsrds/scikit-learn,altairpearl/scikit-learn,kagayakidan/scikit-learn,iismd17/scikit-learn,abimannans/scikit-learn,AlexandreAbraham/scikit-learn,bigdataelephants/scikit-learn,raghavrv/scikit-learn,Fireblend/scikit-learn,equialgo/scikit-learn,mattgiguere/scikit-learn,loli/semisupervisedforests,kylerbrown/scikit-learn,aabadie/scikit-learn,djgagne/scikit-learn,rvraghav93/scikit-learn,carrillo/scikit-learn,aetilley/scikit-learn,cl4rke/scikit-learn,spallavolu/scikit-learn,andaag/scikit-learn,jblackburne/scikit-learn,yonglehou/scikit-learn,vshtanko/scikit-learn,ldirer/scikit-learn,belltailjp/scikit-learn,aetilley/scikit-learn,vivekmishra1991/scikit-learn,jereze/scikit-learn,kaichogami/scikit-learn,MohammedWasim/scikit-learn,qifeigit/scikit-learn,alexsavio/scikit-learn,MatthieuBizien/scikit-learn,clemkoa/scikit-learn,alexeyum/scikit-learn,nmayorov/scikit-learn,glennq/scikit-learn,depet/scikit-learn,etkirsch/scikit-learn,ngoix/OCRF,bnaul/scikit-learn,manhhomienbienthuy/scikit-learn,RachitKansal/scikit-learn,PrashntS/scikit-learn,kevin-intel/scikit-learn,frank-tancf/scikit-learn,maheshakya/scikit-learn,cwu2011/scikit-learn,rahuldhote/scikit-learn,eickenberg/scikit-learn,jorik041/scikit-learn,eickenberg/scikit-learn,mattilyra/scikit-learn,imaculate/scikit-learn,aewhatley/scikit-learn,mjudsp/Tsallis,kaichogami/scikit-learn,amueller/scikit-learn,lesteve/scikit-learn,zorojean/scikit-learn,nesterione/scikit-learn,vortex-ape/scikit-learn,AlexRobson/scikit-learn,alexsavio/scikit-learn,davidgbe/scikit-learn,rexshihaoren/scikit-learn,LiaoPan/scikit-learn,amueller/scikit-learn,jlegendary/scikit-learn,betatim/scikit-learn,mrshu/scikit-learn,ahoyosid/scikit-learn,ElDeveloper/scikit-learn,lesteve/scikit-learn,zhenv5/scikit-learn,Barmaley-exe/scikit-learn,thilbern/scikit-learn,bthirion/scikit-learn,ChanChiChoi/scikit-learn,michigraber/scikit-learn,Akshay0724/scikit-learn,Windy-Ground/scikit-learn,phdowling/scikit-learn,LiaoPan/scikit-learn,xwolf12/scikit-learn,eg-zhang/scikit-learn,espg/scikit-learn,abhishekkrthakur/scikit-learn,ycaihua/scikit-learn,abimannans/scikit-learn,cybernet14/scikit-learn,fyffyt/scikit-learn,OshynSong/scikit-learn,devanshdalal/scikit-learn,robbymeals/scikit-learn,Vimos/scikit-learn,Lawrence-Liu/scikit-learn,Myasuka/scikit-learn,mjudsp/Tsallis,khkaminska/scikit-learn,r-mart/scikit-learn,CVML/scikit-learn,frank-tancf/scikit-learn,luo66/scikit-learn,petosegan/scikit-learn,IssamLaradji/scikit-learn,ElDeveloper/scikit-learn,aminert/scikit-learn,jjx02230808/project0223,cauchycui/scikit-learn,qifeigit/scikit-learn,LiaoPan/scikit-learn,joernhees/scikit-learn,mhue/scikit-learn,mattilyra/scikit-learn,DSLituiev/scikit-learn,bhargav/scikit-learn,hdmetor/scikit-learn,evgchz/scikit-learn,466152112/scikit-learn,pythonvietnam/scikit-learn,ominux/scikit-learn,sarahgrogan/scikit-le
arn,rajat1994/scikit-learn,0x0all/scikit-learn,Jimmy-Morzaria/scikit-learn,siutanwong/scikit-learn,ningchi/scikit-learn,robbymeals/scikit-learn,herilalaina/scikit-learn,arjoly/scikit-learn,AIML/scikit-learn,IndraVikas/scikit-learn,lazywei/scikit-learn,Vimos/scikit-learn,CVML/scikit-learn,shangwuhencc/scikit-learn,nikitasingh981/scikit-learn,trankmichael/scikit-learn,dhruv13J/scikit-learn,dingocuster/scikit-learn,depet/scikit-learn,deepesch/scikit-learn,RPGOne/scikit-learn,Jimmy-Morzaria/scikit-learn,tomlof/scikit-learn,manhhomienbienthuy/scikit-learn,shangwuhencc/scikit-learn,lbishal/scikit-learn,jakobworldpeace/scikit-learn,MechCoder/scikit-learn,Clyde-fare/scikit-learn,glemaitre/scikit-learn,pv/scikit-learn,cainiaocome/scikit-learn,mfjb/scikit-learn,fabianp/scikit-learn,gclenaghan/scikit-learn,tdhopper/scikit-learn,fengzhyuan/scikit-learn,alvarofierroclavero/scikit-learn,potash/scikit-learn,trankmichael/scikit-learn,eickenberg/scikit-learn,Djabbz/scikit-learn,ishanic/scikit-learn,fzalkow/scikit-learn,cybernet14/scikit-learn,AlexanderFabisch/scikit-learn,samuel1208/scikit-learn,CVML/scikit-learn,sinhrks/scikit-learn,sarahgrogan/scikit-learn,alvarofierroclavero/scikit-learn,costypetrisor/scikit-learn,PatrickChrist/scikit-learn,nelson-liu/scikit-learn,russel1237/scikit-learn,mjgrav2001/scikit-learn,sinhrks/scikit-learn,waterponey/scikit-learn,wazeerzulfikar/scikit-learn,potash/scikit-learn,ElDeveloper/scikit-learn,poryfly/scikit-learn,466152112/scikit-learn,robin-lai/scikit-learn,kagayakidan/scikit-learn,cwu2011/scikit-learn,davidgbe/scikit-learn,kagayakidan/scikit-learn,carrillo/scikit-learn,lesteve/scikit-learn,victorbergelin/scikit-learn,chrsrds/scikit-learn,fabianp/scikit-learn,hitszxp/scikit-learn,procoder317/scikit-learn,xubenben/scikit-learn,henridwyer/scikit-learn,yyjiang/scikit-learn,ZENGXH/scikit-learn,rexshihaoren/scikit-learn,voxlol/scikit-learn
|
Add partial evaluation tool to replace functools module for python < 2.5
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@236 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
# Last Change: Mon Aug 20 01:00 PM 2007 J
# Implement partial application (should only be used if functools is not
# available (e.g. Python < 2.5)
class partial:
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw = self.kwargs.copy()
kw.update(kwargs)
else:
kw = kwargs or self.kwargs
return self.fun(*(self.pending + args), **kw)
|
<commit_before><commit_msg>Add partial evaluation tool to replace functools module for python < 2.5
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@236 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>
|
# Last Change: Mon Aug 20 01:00 PM 2007 J
# Implement partial application (should only be used if functools is not
# available (e.g. Python < 2.5)
class partial:
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw = self.kwargs.copy()
kw.update(kwargs)
else:
kw = kwargs or self.kwargs
return self.fun(*(self.pending + args), **kw)
|
Add partial evaluation tool to replace functools module for python < 2.5
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@236 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8# Last Change: Mon Aug 20 01:00 PM 2007 J
# Implement partial application (should only be used if functools is not
# available (e.g. Python < 2.5)
class partial:
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw = self.kwargs.copy()
kw.update(kwargs)
else:
kw = kwargs or self.kwargs
return self.fun(*(self.pending + args), **kw)
|
<commit_before><commit_msg>Add partial evaluation tool to replace functools module for python < 2.5
From: cdavid <cdavid@cb17146a-f446-4be1-a4f7-bd7c5bb65646>
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@236 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after># Last Change: Mon Aug 20 01:00 PM 2007 J
# Implement partial application (should only be used if functools is not
# available (e.g. Python < 2.5)
class partial:
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
kw = self.kwargs.copy()
kw.update(kwargs)
else:
kw = kwargs or self.kwargs
return self.fun(*(self.pending + args), **kw)
|
|
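A minimal usage sketch for the `partial` backport above. The `power` function and the pre-bound callables are illustrative assumptions, not part of the original commit; the class mirrors `functools.partial` semantics for the common positional and keyword cases.

def power(base, exponent):
    return base ** exponent

square = partial(power, exponent=2)  # pre-bind a keyword argument
cube = partial(power, 2)             # pre-bind a positional argument

assert square(4) == 16   # calls power(4, exponent=2)
assert cube(10) == 1024  # calls power(2, 10)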
51e04ff17bccb4b71b8d5db4057a782fd2f8520c
|
tools/touch_all_files.py
|
tools/touch_all_files.py
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
Add script to synthesize all uploaded files. Patch by Dan Callahan.
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.
|
Python
|
bsd-3-clause
|
techtonik/pydotorg.pypi,techtonik/pydotorg.pypi
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
<commit_before><commit_msg>Add script to synthesize all uploaded files.
Patch by Dan Callahan.<commit_after>
|
#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
Add script to synthesize all uploaded files.
Patch by Dan Callahan.#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
<commit_before><commit_msg>Add script to synthesize all uploaded files.
Patch by Dan Callahan.<commit_after>#!/usr/bin/python
"""
This script touches all files known to the database, creating a skeletal
mirror for local development.
"""
import sys, os
import store
def get_paths(cursor, prefix=None):
store.safe_execute(cursor, "SELECT python_version, name, filename FROM release_files")
for type, name, filename in cursor.fetchall():
yield os.path.join(prefix, type, name[0], name, filename)
if __name__ == '__main__':
import config
try:
config = config.Config(sys.argv[1])
except IndexError:
print "Usage: touch_all_files.py config.ini"
raise SystemExit
datastore = store.Store(config)
datastore.open()
cursor = datastore.get_cursor()
prefix = config.database_files_dir
for path in get_paths(cursor, prefix):
dir = os.path.dirname(path)
if not os.path.exists(dir):
print "Creating directory %s" % dir
os.makedirs(dir)
if not os.path.exists(path):
print "Creating file %s" % path
open(path, "a")
|
|
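The script above reduces to a create-if-missing ("touch") idiom. A self-contained sketch of that core, with hypothetical paths standing in for the database-derived ones:

import os

def touch(path):
    """Create an empty file, plus any missing parent directories."""
    parent = os.path.dirname(path)
    if parent and not os.path.isdir(parent):
        os.makedirs(parent)
    if not os.path.exists(path):
        open(path, "a").close()

# Hypothetical release-file paths; the real script derives them from the DB.
for p in ("source/f/foo/foo-1.0.tar.gz", "2.7/f/foo/foo-1.0-py2.7.egg"):
    touch(p)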
533559e20e377ce042591709e53d7dc7031d6205
|
tests/test_directives.py
|
tests/test_directives.py
|
"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
|
"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
@hug.get()
def timer_tester(timer):
return timer.taken()
assert isinstance(hug.test.get(api, 'timer_tester').data, float)
|
Add test for timer automatically inserted due to directive
|
Add test for timer automatically inserted due to directive
|
Python
|
mit
|
giserh/hug,STANAPO/hug,MuhammadAlkarouri/hug,janusnic/hug,MuhammadAlkarouri/hug,janusnic/hug,gbn972/hug,MuhammadAlkarouri/hug,STANAPO/hug,shaunstanislaus/hug,yasoob/hug,jean/hug,philiptzou/hug,philiptzou/hug,jean/hug,shaunstanislaus/hug,origingod/hug,giserh/hug,yasoob/hug,timothycrosley/hug,alisaifee/hug,timothycrosley/hug,alisaifee/hug,timothycrosley/hug,gbn972/hug,origingod/hug
|
"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
Add test for timer automatically inserted due to directive
|
"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
@hug.get()
def timer_tester(timer):
return timer.taken()
assert isinstance(hug.test.get(api, 'timer_tester').data, float)
|
<commit_before>"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
<commit_msg>Add test for timer automatically inserted due to directive<commit_after>
|
"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
@hug.get()
def timer_tester(timer):
return timer.taken()
assert isinstance(hug.test.get(api, 'timer_tester').data, float)
|
"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
Add test for timer automatically inserted due to directive"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
@hug.get()
def timer_tester(timer):
return timer.taken()
assert isinstance(hug.test.get(api, 'timer_tester').data, float)
|
<commit_before>"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import hug
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
<commit_msg>Add test for timer automatically inserted due to directive<commit_after>"""tests/test_directives.py.
Tests to ensure that directives interact in the expected manner
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_timer():
timer = hug.directives.timer()
assert isinstance(timer.taken(), float)
assert isinstance(timer.start, float)
timer = hug.directives.timer('Time: {0}')
assert isinstance(timer.taken(), str)
assert isinstance(timer.start, float)
@hug.get()
def timer_tester(timer):
return timer.taken()
assert isinstance(hug.test.get(api, 'timer_tester').data, float)
|
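A hedged sketch reusing the pattern tested above: the `timer` directive injected into an ordinary endpoint, exercised through `hug.test` exactly as the test does. The endpoint name and payload are illustrative assumptions.

import sys
import hug

api = sys.modules[__name__]

@hug.get()
def greeting(timer):
    # `timer` is supplied by hug's directive machinery, not by the caller
    return {'message': 'hello', 'seconds': timer.taken()}

response = hug.test.get(api, 'greeting')  # no running server needed
assert isinstance(response.data['seconds'], float)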
a3939b572c51b7a721b758cb5b93364e4b156c13
|
dev_tools/syspath.py
|
dev_tools/syspath.py
|
#!/usr/bin/env python
import sys
# path[0] is the directory containing the script that was used to invoke the Python interpreter
for s in sorted(sys.path[1:]):
print s
|
Add script that dumps the python path
|
Add script that dumps the python path
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add script that dumps the python path
|
#!/usr/bin/env python
import sys
# path[0] is the directory containing the script that was used to invoke the Python interpreter
for s in sorted(sys.path[1:]):
print s
|
<commit_before><commit_msg>Add script that dumps the python path<commit_after>
|
#!/usr/bin/env python
import sys
# path[0] is the directory containing the script that was used to invoke the Python interpreter
for s in sorted(sys.path[1:]):
print s
|
Add script that dumps the python path#!/usr/bin/env python
import sys
# path[0] is the directory containing the script that was used to invoke the Python interpreter
for s in sorted(sys.path[1:]):
print s
|
<commit_before><commit_msg>Add script that dumps the python path<commit_after>#!/usr/bin/env python
import sys
# path[0] is the directory containing the script that was used to invoke the Python interpreter
for s in sorted(sys.path[1:]):
print s
|
|
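The same dump expressed as a reusable function, in Python 3 syntax (the original targets Python 2, hence its bare `print` statement); the function name is an assumption.

import sys

def python_path(skip_script_dir=True):
    # sys.path[0] is the directory of the invoking script, so it is
    # skipped by default, matching the original tool
    entries = sys.path[1:] if skip_script_dir else list(sys.path)
    return sorted(entries)

if __name__ == '__main__':
    print('\n'.join(python_path()))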
f06a71a87daaaf0bc4b1f5701ce4c59805b70f6b
|
usr/bin/json_readable.py
|
usr/bin/json_readable.py
|
#!/usr/bin/env python
import json, os
for filename in os.listdir('.'):
if os.path.isfile(filename) and os.path.splitext(filename)[1].lower() == '.json':
with open(filename) as in_file:
data = json.load(in_file)
with open(filename, 'w') as out_file:
json.dump(data, out_file, indent=4) # indent=4 makes the files human readable
|
Format all local .json files for human readability
|
Format all local .json files for human readability
|
Python
|
apache-2.0
|
cclauss/Ten-lines-or-less
|
Format all local .json files for human readability
|
#!/usr/bin/env python
import json, os
for filename in os.listdir('.'):
if os.path.isfile(filename) and os.path.splitext(filename)[1].lower() == '.json':
with open(filename) as in_file:
data = json.load(in_file)
with open(filename, 'w') as out_file:
json.dump(data, out_file, indent=4) # indent=4 makes the files human readable
|
<commit_before><commit_msg>Format all local .json files for human readability<commit_after>
|
#!/usr/bin/env python
import json, os
for filename in os.listdir('.'):
if os.path.isfile(filename) and os.path.splitext(filename)[1].lower() == '.json':
with open(filename) as in_file:
data = json.load(in_file)
with open(filename, 'w') as out_file:
json.dump(data, out_file, indent=4) # indent=4 makes the files human readable
|
Format all local .json files for human readability#!/usr/bin/env python
import json, os
for filename in os.listdir('.'):
if os.path.isfile(filename) and os.path.splitext(filename)[1].lower() == '.json':
with open(filename) as in_file:
data = json.load(in_file)
with open(filename, 'w') as out_file:
json.dump(data, out_file, indent=4) # indent=4 makes the files human readable
|
<commit_before><commit_msg>Format all local .json files for human readability<commit_after>#!/usr/bin/env python
import json, os
for filename in os.listdir('.'):
if os.path.isfile(filename) and os.path.splitext(filename)[1].lower() == '.json':
with open(filename) as in_file:
data = json.load(in_file)
with open(filename, 'w') as out_file:
json.dump(data, out_file, indent=4) # indent=4 makes the files human readable
|
|
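A hedged single-file variant of the reformatter above; `sort_keys=True` is an addition for deterministic output, not part of the original commit.

import json

def prettify(path):
    with open(path) as in_file:
        data = json.load(in_file)
    with open(path, 'w') as out_file:
        # indent for readability; sort_keys added for stable diffs
        json.dump(data, out_file, indent=4, sort_keys=True)

# prettify('example.json')  # hypothetical file name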
ed4d07fb2a7fa8f1dd30a2b7982940a5fe78275b
|
dakota_run_driver.py
|
dakota_run_driver.py
|
#! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
# $1 is 'params.in' from Dakota
# $2 is 'results.out' returned to Dakota
import sys
import os
import shutil
from subprocess import call
def driver():
"""Broker communication between Dakota and SWASH through files."""
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_file = 'INPUT'
input_template = input_file + '.template'
output_file = 'bot07.mat'
output_file_var = 'Botlev'
data_file = 'sand.bot'
run_script = 'run_swash.sh'
# Use `dprepro` (from $DAKOTA_DIR/bin) to substitute parameter
# values from Dakota into the SWASH input template, creating a new
# SWASH input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
# Copy the data file into the active directory.
shutil.copy(os.path.join(start_dir, data_file), os.curdir)
# Call SWASH through a PBS submission script. Note that `qsub`
# returns immediately, so jobs do not block.
job_name = 'SWASH-Dakota' + os.path.splitext(os.getcwd())[-1]
call(['qsub', '-N', job_name, os.path.join(start_dir, run_script)])
# Provide a dummy results file to advance Dakota.
with open(sys.argv[2], 'w') as fp:
fp.write('0.0\n1.0\n')
if __name__ == '__main__':
driver()
|
Add the analysis driver for the run step of the study
|
Add the analysis driver for the run step of the study
|
Python
|
mit
|
mdpiper/dakota-swash-parameter-study,mdpiper/dakota-swash-parameter-study
|
Add the analysis driver for the run step of the study
|
#! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
# $1 is 'params.in' from Dakota
# $2 is 'results.out' returned to Dakota
import sys
import os
import shutil
from subprocess import call
def driver():
"""Broker communication between Dakota and SWASH through files."""
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_file = 'INPUT'
input_template = input_file + '.template'
output_file = 'bot07.mat'
output_file_var = 'Botlev'
data_file = 'sand.bot'
run_script = 'run_swash.sh'
# Use `dprepro` (from $DAKOTA_DIR/bin) to substitute parameter
# values from Dakota into the SWASH input template, creating a new
# SWASH input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
# Copy the data file into the active directory.
shutil.copy(os.path.join(start_dir, data_file), os.curdir)
# Call SWASH through a PBS submission script. Note that `qsub`
# returns immediately, so jobs do not block.
job_name = 'SWASH-Dakota' + os.path.splitext(os.getcwd())[-1]
call(['qsub', '-N', job_name, os.path.join(start_dir, run_script)])
# Provide a dummy results file to advance Dakota.
with open(sys.argv[2], 'w') as fp:
fp.write('0.0\n1.0\n')
if __name__ == '__main__':
driver()
|
<commit_before><commit_msg>Add the analysis driver for the run step of the study<commit_after>
|
#! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
# $1 is 'params.in' from Dakota
# $2 is 'results.out' returned to Dakota
import sys
import os
import shutil
from subprocess import call
def driver():
"""Broker communication between Dakota and SWASH through files."""
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_file = 'INPUT'
input_template = input_file + '.template'
output_file = 'bot07.mat'
output_file_var = 'Botlev'
data_file = 'sand.bot'
run_script = 'run_swash.sh'
# Use `dprepro` (from $DAKOTA_DIR/bin) to substitute parameter
# values from Dakota into the SWASH input template, creating a new
# SWASH input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
# Copy the data file into the active directory.
shutil.copy(os.path.join(start_dir, data_file), os.curdir)
# Call SWASH through a PBS submission script. Note that `qsub`
# returns immediately, so jobs do not block.
job_name = 'SWASH-Dakota' + os.path.splitext(os.getcwd())[-1]
call(['qsub', '-N', job_name, os.path.join(start_dir, run_script)])
# Provide a dummy results file to advance Dakota.
with open(sys.argv[2], 'w') as fp:
fp.write('0.0\n1.0\n')
if __name__ == '__main__':
driver()
|
Add the analysis driver for the run step of the study#! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
# $1 is 'params.in' from Dakota
# $2 is 'results.out' returned to Dakota
import sys
import os
import shutil
from subprocess import call
def driver():
"""Broker communication between Dakota and SWASH through files."""
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_file = 'INPUT'
input_template = input_file + '.template'
output_file = 'bot07.mat'
output_file_var = 'Botlev'
data_file = 'sand.bot'
run_script = 'run_swash.sh'
# Use `dprepro` (from $DAKOTA_DIR/bin) to substitute parameter
# values from Dakota into the SWASH input template, creating a new
# SWASH input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
# Copy the data file into the active directory.
shutil.copy(os.path.join(start_dir, data_file), os.curdir)
# Call SWASH through a PBS submission script. Note that `qsub`
# returns immediately, so jobs do not block.
job_name = 'SWASH-Dakota' + os.path.splitext(os.getcwd())[-1]
call(['qsub', '-N', job_name, os.path.join(start_dir, run_script)])
# Provide a dummy results file to advance Dakota.
with open(sys.argv[2], 'w') as fp:
fp.write('0.0\n1.0\n')
if __name__ == '__main__':
driver()
|
<commit_before><commit_msg>Add the analysis driver for the run step of the study<commit_after>#! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
# $1 is 'params.in' from Dakota
# $2 is 'results.out' returned to Dakota
import sys
import os
import shutil
from subprocess import call
def driver():
"""Broker communication between Dakota and SWASH through files."""
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_file = 'INPUT'
input_template = input_file + '.template'
output_file = 'bot07.mat'
output_file_var = 'Botlev'
data_file = 'sand.bot'
run_script = 'run_swash.sh'
# Use `dprepro` (from $DAKOTA_DIR/bin) to substitute parameter
# values from Dakota into the SWASH input template, creating a new
# SWASH input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
# Copy the data file into the active directory.
shutil.copy(os.path.join(start_dir, data_file), os.curdir)
# Call SWASH through a PBS submission script. Note that `qsub`
# returns immediately, so jobs do not block.
job_name = 'SWASH-Dakota' + os.path.splitext(os.getcwd())[-1]
call(['qsub', '-N', job_name, os.path.join(start_dir, run_script)])
# Provide a dummy results file to advance Dakota.
with open(sys.argv[2], 'w') as fp:
fp.write('0.0\n1.0\n')
if __name__ == '__main__':
driver()
|
|
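Stripped of the SWASH and PBS specifics, the file contract above is small: Dakota invokes the driver with a parameters file and reads one response value per line from the results file. A minimal skeleton under that assumption, with the model run elided:

import sys

def driver(params_path, results_path):
    # 1. read parameter values from params_path (written by Dakota)
    # 2. run the model with those values (elided in this sketch)
    # 3. write one response value per line for Dakota to read back
    with open(results_path, 'w') as fp:
        fp.write('0.0\n1.0\n')

if __name__ == '__main__':
    driver(sys.argv[1], sys.argv[2])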
dab192db863fdd694bb0adbce10fa2dd6c05353b
|
jrnl/__init__.py
|
jrnl/__init__.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
from .jrnl import cli
|
Make the cli work again.
|
Make the cli work again.
|
Python
|
mit
|
rzyns/jrnl,philipsd6/jrnl,notbalanced/jrnl,flight16/jrnl,maebert/jrnl,MSylvia/jrnl,dzeban/jrnl,nikvdp/jrnl,cloudrave/jrnl-todos,MinchinWeb/jrnl,beni55/jrnl,zdravi/jrnl,Shir0kamii/jrnl
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
Make the cli work again.
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
from .jrnl import cli
|
<commit_before>#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
<commit_msg>Make the cli work again.<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
from .jrnl import cli
|
#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
Make the cli work again.#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
from .jrnl import cli
|
<commit_before>#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
<commit_msg>Make the cli work again.<commit_after>#!/usr/bin/env python
# encoding: utf-8
"""
jrnl is a simple journal application for your command line.
"""
__title__ = 'jrnl'
__version__ = '1.0.3'
__author__ = 'Manuel Ebert'
__license__ = 'MIT License'
__copyright__ = 'Copyright 2013 Manuel Ebert'
from . import Journal
from . import jrnl
from .jrnl import cli
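# Why the re-export matters: with `cli` exposed at package level, a
# console-script entry point can target the package directly. A
# hypothetical setup.py fragment (not from this repo) that relies on it:
#     entry_points={'console_scripts': ['jrnl = jrnl:cli']}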
|
8a668efbc266802a4f4e23c936d3589b230d9528
|
nanpy/examples/blink_2boards.py
|
nanpy/examples/blink_2boards.py
|
#!/usr/bin/env python
# Author: Andrea Stagi <stagi.andrea@gmail.com>
# Description: keeps your led blinking on 2 boards
# Dependencies: None
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
device_1 = '/dev/tty.usbmodem1411'
device_2 = '/dev/tty.usbmodem1431'
connection_1 = SerialManager(device=device_1)
connection_2 = SerialManager(device=device_2)
a1 = ArduinoApi(connection=connection_1)
a1.pinMode(13, a1.OUTPUT)
a2 = ArduinoApi(connection=connection_2)
a2.pinMode(13, a2.OUTPUT)
for i in range(10000):
a1.digitalWrite(13, (i + 1) % 2)
sleep(1)
a2.digitalWrite(13, (i + 1) % 2)
sleep(1)
|
Add blink example on two different boards
|
Add blink example on two different boards
|
Python
|
mit
|
joppi/nanpy,nanpy/nanpy
|
Add blink example on two different boards
|
#!/usr/bin/env python
# Author: Andrea Stagi <stagi.andrea@gmail.com>
# Description: keeps your led blinking on 2 boards
# Dependencies: None
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
device_1 = '/dev/tty.usbmodem1411'
device_2 = '/dev/tty.usbmodem1431'
connection_1 = SerialManager(device=device_1)
connection_2 = SerialManager(device=device_2)
a1 = ArduinoApi(connection=connection_1)
a1.pinMode(13, a1.OUTPUT)
a2 = ArduinoApi(connection=connection_2)
a2.pinMode(13, a2.OUTPUT)
for i in range(10000):
a1.digitalWrite(13, (i + 1) % 2)
sleep(1)
a2.digitalWrite(13, (i + 1) % 2)
sleep(1)
|
<commit_before><commit_msg>Add blink example on two different boards<commit_after>
|
#!/usr/bin/env python
# Author: Andrea Stagi <stagi.andrea@gmail.com>
# Description: keeps your led blinking on 2 boards
# Dependencies: None
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
device_1 = '/dev/tty.usbmodem1411'
device_2 = '/dev/tty.usbmodem1431'
connection_1 = SerialManager(device=device_1)
connection_2 = SerialManager(device=device_2)
a1 = ArduinoApi(connection=connection_1)
a1.pinMode(13, a1.OUTPUT)
a2 = ArduinoApi(connection=connection_2)
a2.pinMode(13, a2.OUTPUT)
for i in range(10000):
a1.digitalWrite(13, (i + 1) % 2)
sleep(1)
a2.digitalWrite(13, (i + 1) % 2)
sleep(1)
|
Add blink example on two different boards#!/usr/bin/env python
# Author: Andrea Stagi <stagi.andrea@gmail.com>
# Description: keeps your led blinking on 2 boards
# Dependencies: None
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
device_1 = '/dev/tty.usbmodem1411'
device_2 = '/dev/tty.usbmodem1431'
connection_1 = SerialManager(device=device_1)
connection_2 = SerialManager(device=device_2)
a1 = ArduinoApi(connection=connection_1)
a1.pinMode(13, a1.OUTPUT)
a2 = ArduinoApi(connection=connection_2)
a2.pinMode(13, a2.OUTPUT)
for i in range(10000):
a1.digitalWrite(13, (i + 1) % 2)
sleep(1)
a2.digitalWrite(13, (i + 1) % 2)
sleep(1)
|
<commit_before><commit_msg>Add blink example on two different boards<commit_after>#!/usr/bin/env python
# Author: Andrea Stagi <stagi.andrea@gmail.com>
# Description: keeps your led blinking on 2 boards
# Dependencies: None
from nanpy import (ArduinoApi, SerialManager)
from time import sleep
device_1 = '/dev/tty.usbmodem1411'
device_2 = '/dev/tty.usbmodem1431'
connection_1 = SerialManager(device=device_1)
connection_2 = SerialManager(device=device_2)
a1 = ArduinoApi(connection=connection_1)
a1.pinMode(13, a1.OUTPUT)
a2 = ArduinoApi(connection=connection_2)
a2.pinMode(13, a2.OUTPUT)
for i in range(10000):
a1.digitalWrite(13, (i + 1) % 2)
sleep(1)
a2.digitalWrite(13, (i + 1) % 2)
sleep(1)
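# The (i + 1) % 2 expression simply toggles the pin between 1 and 0. An
# illustrative generalization to any number of boards, using only the
# nanpy calls shown above (not invoked here):
def blink_many(devices, pin=13, cycles=10000):
    boards = [ArduinoApi(connection=SerialManager(device=d)) for d in devices]
    for board in boards:
        board.pinMode(pin, board.OUTPUT)
    for i in range(cycles):
        state = (i + 1) % 2  # alternates 1, 0, 1, 0, ...
        for board in boards:
            board.digitalWrite(pin, state)
            sleep(1)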
|
|
61a005ffbc988b6a20441841112890bb397f8ca3
|
2016_player_picks.py
|
2016_player_picks.py
|
stone = {"firstname": "chris", "lastname": "stone", "timestamp": "9/6/2016", "email": "stone@usisales.com",
"afc_east_1": "Patriots", "afc_east_2": "Jets", "afc_east_last": "Bills", "afc_north_1": "Steelers",
"afc_north_2": "Bengals", "afc_north_last": "Browns", "afc_south_1": "Colts", "afc_south_2": "Colts",
"afc_south_last": "Titans"}
thaden = []
garber = []
fronczak = []
thomas = []
cutler = []
norred = []
oakland = []
|
Create stub for 2016 NFLPool player picks
|
Create stub for 2016 NFLPool player picks
|
Python
|
mit
|
prcutler/nflpool,prcutler/nflpool
|
Create stub for 2016 NFLPool player picks
|
stone = {"firstname": "chris", "lastname": "stone", "timestamp": "9/6/2016", "email": "stone@usisales.com",
"afc_east_1": "Patriots", "afc_east_2": "Jets", "afc_east_last": "Bills", "afc_north_1": "Steelers",
"afc_north_2": "Bengals", "afc_north_last": "Browns", "afc_south_1": "Colts", "afc_south_2": "Colts",
"afc_south_last": "Titans"}
thaden = []
garber = []
fronczak = []
thomas = []
cutler = []
norred = []
oakland = []
|
<commit_before><commit_msg>Create stub for 2016 NFLPool player picks<commit_after>
|
stone = {"firstname": "chris", "lastname": "stone", "timestamp": "9/6/2016", "email": "stone@usisales.com",
"afc_east_1": "Patriots", "afc_east_2": "Jets", "afc_east_last": "Bills", "afc_north_1": "Steelers",
"afc_north_2": "Bengals", "afc_north_last": "Browns", "afc_south_1": "Colts", "afc_south_2": "Colts",
"afc_south_last": "Titans"}
thaden = []
garber = []
fronczak = []
thomas = []
cutler = []
norred = []
oakland = []
|
Create stub for 2016 NFLPool player picks
stone = {"firstname": "chris", "lastname": "stone", "timestamp": "9/6/2016", "email": "stone@usisales.com",
"afc_east_1": "Patriots", "afc_east_2": "Jets", "afc_east_last": "Bills", "afc_north_1": "Steelers",
"afc_north_2": "Bengals", "afc_north_last": "Browns", "afc_south_1": "Colts", "afc_south_2": "Colts",
"afc_south_last": "Titans"}
thaden = []
garber = []
fronczak = []
thomas = []
cutler = []
norred = []
oakland = []
|
<commit_before><commit_msg>Create stub for 2016 NFLPool player picks<commit_after>
stone = {"firstname": "chris", "lastname": "stone", "timestamp": "9/6/2016", "email": "stone@usisales.com",
"afc_east_1": "Patriots", "afc_east_2": "Jets", "afc_east_last": "Bills", "afc_north_1": "Steelers",
"afc_north_2": "Bengals", "afc_north_last": "Browns", "afc_south_1": "Colts", "afc_south_2": "Colts",
"afc_south_last": "Titans"}
thaden = []
garber = []
fronczak = []
thomas = []
cutler = []
norred = []
oakland = []
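# Illustrative sketch of how the stubs are meant to be consumed once each
# empty list is replaced by a dict shaped like `stone` (the helper below
# is hypothetical, not part of the original stub):
def afc_east_winners(players):
    return {p['lastname']: p['afc_east_1'] for p in players}
# e.g. afc_east_winners([stone]) -> {'stone': 'Patriots'}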
|
|
893e4292f6b1799bf5f1888fcbad41ec8b5a5951
|
examples/tic_ql_tabular_selfplay_all.py
|
examples/tic_ql_tabular_selfplay_all.py
|
'''
In this example we use Q-learning via self-play to learn
the value function of all Tic-Tac-Toe positions.
'''
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.rl import QLearningSelfPlay
from capstone.rl.tabularf import TabularF
from capstone.util import tic2pdf
game = TicTacToe()
env = Environment(GameMDP(game))
qlearning = QLearningSelfPlay(env, n_episodes=1000)
qlearning.learn()
for move in game.legal_moves():
print('-' * 80)
value = qlearning.qf[(game, move)]
new_game = game.copy().make_move(move)
print(value)
print(new_game)
|
Use Q-learning to learn all state-action values via self-play
|
Use Q-learning to learn all state-action values via self-play
|
Python
|
mit
|
davidrobles/mlnd-capstone-code
|
Use Q-learning to learn all state-action values via self-play
|
'''
In this example we use Q-learning via self-play to learn
the value function of all Tic-Tac-Toe positions.
'''
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.rl import QLearningSelfPlay
from capstone.rl.tabularf import TabularF
from capstone.util import tic2pdf
game = TicTacToe()
env = Environment(GameMDP(game))
qlearning = QLearningSelfPlay(env, n_episodes=1000)
qlearning.learn()
for move in game.legal_moves():
print('-' * 80)
value = qlearning.qf[(game, move)]
new_game = game.copy().make_move(move)
print(value)
print(new_game)
|
<commit_before><commit_msg>Use Q-learning to learn all state-action values via self-play<commit_after>
|
'''
In this example we use Q-learning via self-play to learn
the value function of all Tic-Tac-Toe positions.
'''
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.rl import QLearningSelfPlay
from capstone.rl.tabularf import TabularF
from capstone.util import tic2pdf
game = TicTacToe()
env = Environment(GameMDP(game))
qlearning = QLearningSelfPlay(env, n_episodes=1000)
qlearning.learn()
for move in game.legal_moves():
print('-' * 80)
value = qlearning.qf[(game, move)]
new_game = game.copy().make_move(move)
print(value)
print(new_game)
|
Use Q-learning to learn all state-action values via self-play'''
In this example we use Q-learning via self-play to learn
the value function of all Tic-Tac-Toe positions.
'''
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.rl import QLearningSelfPlay
from capstone.rl.tabularf import TabularF
from capstone.util import tic2pdf
game = TicTacToe()
env = Environment(GameMDP(game))
qlearning = QLearningSelfPlay(env, n_episodes=1000)
qlearning.learn()
for move in game.legal_moves():
print('-' * 80)
value = qlearning.qf[(game, move)]
new_game = game.copy().make_move(move)
print(value)
print(new_game)
|
<commit_before><commit_msg>Use Q-learning to learn all state-action values via self-play<commit_after>'''
In this example we use Q-learning via self-play to learn
the value function of all Tic-Tac-Toe positions.
'''
from capstone.environment import Environment
from capstone.game import TicTacToe
from capstone.mdp import GameMDP
from capstone.rl import QLearningSelfPlay
from capstone.rl.tabularf import TabularF
from capstone.util import tic2pdf
game = TicTacToe()
env = Environment(GameMDP(game))
qlearning = QLearningSelfPlay(env, n_episodes=1000)
qlearning.learn()
for move in game.legal_moves():
print('-' * 80)
value = qlearning.qf[(game, move)]
new_game = game.copy().make_move(move)
print(value)
print(new_game)
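# For readers unfamiliar with the qf[(state, action)] lookup above: a
# tabular Q-function is essentially a dict keyed on (state, action) pairs
# with a default value. A minimal stand-in illustrating the idea (not
# capstone's actual TabularF):
from collections import defaultdict
class TabularQ(object):
    def __init__(self, default=0.0):
        self._table = defaultdict(lambda: default)
    def __getitem__(self, state_action):
        return self._table[state_action]  # states must be hashable
    def __setitem__(self, state_action, value):
        self._table[state_action] = value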
|
|
a8419c46ceed655a276dad00a24e21f300fda543
|
py/find-bottom-left-tree-value.py
|
py/find-bottom-left-tree-value.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
q = [root]
for v in q:
if v.right:
q.append(v.right)
if v.left:
q.append(v.left)
return v.val
|
Add py solution for 513. Find Bottom Left Tree Value
|
Add py solution for 513. Find Bottom Left Tree Value
513. Find Bottom Left Tree Value: https://leetcode.com/problems/find-bottom-left-tree-value/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 513. Find Bottom Left Tree Value
513. Find Bottom Left Tree Value: https://leetcode.com/problems/find-bottom-left-tree-value/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
q = [root]
for v in q:
if v.right:
q.append(v.right)
if v.left:
q.append(v.left)
return v.val
|
<commit_before><commit_msg>Add py solution for 513. Find Bottom Left Tree Value
513. Find Bottom Left Tree Value: https://leetcode.com/problems/find-bottom-left-tree-value/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
q = [root]
for v in q:
if v.right:
q.append(v.right)
if v.left:
q.append(v.left)
return v.val
|
Add py solution for 513. Find Bottom Left Tree Value
513. Find Bottom Left Tree Value: https://leetcode.com/problems/find-bottom-left-tree-value/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
q = [root]
for v in q:
if v.right:
q.append(v.right)
if v.left:
q.append(v.left)
return v.val
|
<commit_before><commit_msg>Add py solution for 513. Find Bottom Left Tree Value
513. Find Bottom Left Tree Value: https://leetcode.com/problems/find-bottom-left-tree-value/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findBottomLeftValue(self, root):
"""
:type root: TreeNode
:rtype: int
"""
q = [root]
for v in q:
if v.right:
q.append(v.right)
if v.left:
q.append(v.left)
return v.val
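# Why this works: each level is scanned right-to-left, so the last node
# dequeued is the leftmost node of the deepest level. Optional self-check,
# added for illustration (rebuilds the TreeNode from the commented header):
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(2)
    root.left, root.right = TreeNode(1), TreeNode(3)
    assert Solution().findBottomLeftValue(root) == 1  # bottom-left value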
|
|
3dcf737fa6a6467e1c96d31325e26ecf20c50320
|
test/test_logger.py
|
test/test_logger.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from sqliteschema import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger
|
Add test cases for the logger
|
Python
|
mit
|
thombashi/sqliteschema
|
Add test cases for the logger
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from sqliteschema import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after>
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from sqliteschema import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
Add test cases for the logger# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from sqliteschema import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
|
<commit_before><commit_msg>Add test cases for the logger<commit_after># encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import print_function
from __future__ import unicode_literals
import logbook
import pytest
from sqliteschema import (
set_logger,
set_log_level,
)
class Test_set_logger(object):
@pytest.mark.parametrize(["value"], [
[True],
[False],
])
def test_smoke(self, value):
set_logger(value)
class Test_set_log_level(object):
@pytest.mark.parametrize(["value"], [
[logbook.CRITICAL],
[logbook.ERROR],
[logbook.WARNING],
[logbook.NOTICE],
[logbook.INFO],
[logbook.DEBUG],
[logbook.TRACE],
[logbook.NOTSET],
])
def test_smoke(self, value):
set_log_level(value)
@pytest.mark.parametrize(["value", "expected"], [
[None, LookupError],
["unexpected", LookupError],
])
def test_exception(self, value, expected):
with pytest.raises(expected):
set_log_level(value)
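# Background on the LookupError expectation: logbook resolves level names
# through a lookup table, so unknown names raise LookupError rather than
# ValueError. Illustration (assumes logbook's public lookup_level helper,
# which set_log_level presumably relies on; not executed by the tests):
def _demo_level_lookup():
    logbook.lookup_level('DEBUG')       # known name resolves fine
    logbook.lookup_level('unexpected')  # raises LookupError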
|
|
8b0b7c19d2e2c015fd8ba7d5408b23334ee8874f
|
test/Configure/VariantDir2.py
|
test/Configure/VariantDir2.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
Add test case for configure failure.
|
Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.
|
Python
|
mit
|
Distrotech/scons,Distrotech/scons,Distrotech/scons,Distrotech/scons,Distrotech/scons
|
Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
<commit_before><commit_msg>Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.<commit_after>
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
<commit_before><commit_msg>Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.<commit_after>#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
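# A stricter variant could also assert where the configure artifacts land;
# must_exist comes from TestSCons's TestCommon base, and the path below is
# only an assumption about how conf_dir resolves under the variant dir:
#     test.must_exist(test.workpath('build', 'config.log'))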
|
|
1fd09e7328b1ebf41bc0790f2a96c18207b10077
|
tests/test-sweep.py
|
tests/test-sweep.py
|
#!/usr/bin/env python
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
class Makefile(object):
def __init__(self):
self._fp = StringIO()
self._all = set()
self._targets = set()
def add_default(self, x):
self._all.add(x)
def build(self, target, deps, *cmds):
if target in self._targets:
return
self._targets.add(target)
fp = self._fp
fp.write(target + ':')
for dep in deps:
fp.write(' ' + dep)
fp.write('\n')
for cmd in cmds:
fp.write('\t' + cmd + '\n')
    def write(self, *lines):
        for line in lines:
self._fp.write(line + '\n')
def save(self):
f = open('Makefile', 'w')
f.write('all:')
for t in sorted(self._all):
f.write(' ' + t)
f.write('\n')
f.write(self._fp.getvalue())
make = Makefile()
make.write(
'FR := ../build/product/fresample',
'SOX := sox')
def test_sweep(depth, rate1, rate2):
inpath = 'in_%dk%d.wav' % (rate1 // 1000, depth)
make.build(
inpath, ['Makefile'],
'$(SOX) -b %d -r %d -n $@ synth 8 sine 0+%d vol 0.999' %
(depth, rate1, rate1//2))
for q in range(4):
outpath = 'out_%dk%d_%dk%dq' % \
(rate1 // 1000, depth, rate2/1000, q)
make.build(
outpath + '.wav', [inpath, '$(FR)', 'Makefile'],
'$(FR) -q %d -r %d $< $@' % (q, rate2))
make.build(
outpath + '.png', [outpath + '.wav', 'Makefile'],
'sox $< -n spectrogram -w kaiser -o $@')
make.add_default(outpath + '.png')
test_sweep(16, 96000, 44100)
test_sweep(16, 96000, 48000)
test_sweep(16, 48000, 44100)
make.write(
'clean:',
'\trm -f *.wav *.png')
make.save()
|
Add sine wave sweep test
|
Add sine wave sweep test
This test produces spectrograms like those available on the SRC
Comparisons web site.
http://src.infinitewave.ca/
|
Python
|
bsd-2-clause
|
depp/libfresample,depp/libfresample,depp/libfresample,h6ah4i/libfresample,h6ah4i/libfresample
|
Add sine wave sweep test
This test produces spectrograms like those available on the SRC
Comparisons web site.
http://src.infinitewave.ca/
|
#!/usr/bin/env python
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
class Makefile(object):
def __init__(self):
self._fp = StringIO()
self._all = set()
self._targets = set()
def add_default(self, x):
self._all.add(x)
def build(self, target, deps, *cmds):
if target in self._targets:
return
self._targets.add(target)
fp = self._fp
fp.write(target + ':')
for dep in deps:
fp.write(' ' + dep)
fp.write('\n')
for cmd in cmds:
fp.write('\t' + cmd + '\n')
    def write(self, *lines):
        for line in lines:
self._fp.write(line + '\n')
def save(self):
f = open('Makefile', 'w')
f.write('all:')
for t in sorted(self._all):
f.write(' ' + t)
f.write('\n')
f.write(self._fp.getvalue())
make = Makefile()
make.write(
'FR := ../build/product/fresample',
'SOX := sox')
def test_sweep(depth, rate1, rate2):
inpath = 'in_%dk%d.wav' % (rate1 // 1000, depth)
make.build(
inpath, ['Makefile'],
'$(SOX) -b %d -r %d -n $@ synth 8 sine 0+%d vol 0.999' %
(depth, rate1, rate1//2))
for q in range(4):
outpath = 'out_%dk%d_%dk%dq' % \
(rate1 // 1000, depth, rate2/1000, q)
make.build(
outpath + '.wav', [inpath, '$(FR)', 'Makefile'],
'$(FR) -q %d -r %d $< $@' % (q, rate2))
make.build(
outpath + '.png', [outpath + '.wav', 'Makefile'],
'sox $< -n spectrogram -w kaiser -o $@')
make.add_default(outpath + '.png')
test_sweep(16, 96000, 44100)
test_sweep(16, 96000, 48000)
test_sweep(16, 48000, 44100)
make.write(
'clean:',
'\trm -f *.wav *.png')
make.save()
|
<commit_before><commit_msg>Add sine wave sweep test
This test produces spectrograms like those available on the SRC
Comparisons web site.
http://src.infinitewave.ca/<commit_after>
|
#!/usr/bin/env python
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
class Makefile(object):
def __init__(self):
self._fp = StringIO()
self._all = set()
self._targets = set()
def add_default(self, x):
self._all.add(x)
def build(self, target, deps, *cmds):
if target in self._targets:
return
self._targets.add(target)
fp = self._fp
fp.write(target + ':')
for dep in deps:
fp.write(' ' + dep)
fp.write('\n')
for cmd in cmds:
fp.write('\t' + cmd + '\n')
    def write(self, *lines):
        for line in lines:
self._fp.write(line + '\n')
def save(self):
f = open('Makefile', 'w')
f.write('all:')
for t in sorted(self._all):
f.write(' ' + t)
f.write('\n')
f.write(self._fp.getvalue())
make = Makefile()
make.write(
'FR := ../build/product/fresample',
'SOX := sox')
def test_sweep(depth, rate1, rate2):
inpath = 'in_%dk%d.wav' % (rate1 // 1000, depth)
make.build(
inpath, ['Makefile'],
'$(SOX) -b %d -r %d -n $@ synth 8 sine 0+%d vol 0.999' %
(depth, rate1, rate1//2))
for q in range(4):
outpath = 'out_%dk%d_%dk%dq' % \
(rate1 // 1000, depth, rate2/1000, q)
make.build(
outpath + '.wav', [inpath, '$(FR)', 'Makefile'],
'$(FR) -q %d -r %d $< $@' % (q, rate2))
make.build(
outpath + '.png', [outpath + '.wav', 'Makefile'],
'sox $< -n spectrogram -w kaiser -o $@')
make.add_default(outpath + '.png')
test_sweep(16, 96000, 44100)
test_sweep(16, 96000, 48000)
test_sweep(16, 48000, 44100)
make.write(
'clean:',
'\trm -f *.wav *.png')
make.save()
|
Add sine wave sweep test
This test produces spectrograms like those available on the SRC
Comparisons web site.
http://src.infinitewave.ca/#!/usr/bin/env python
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
class Makefile(object):
def __init__(self):
self._fp = StringIO()
self._all = set()
self._targets = set()
def add_default(self, x):
self._all.add(x)
def build(self, target, deps, *cmds):
if target in self._targets:
return
self._targets.add(target)
fp = self._fp
fp.write(target + ':')
for dep in deps:
fp.write(' ' + dep)
fp.write('\n')
for cmd in cmds:
fp.write('\t' + cmd + '\n')
    def write(self, *lines):
        for line in lines:
self._fp.write(line + '\n')
def save(self):
f = open('Makefile', 'w')
f.write('all:')
for t in sorted(self._all):
f.write(' ' + t)
f.write('\n')
f.write(self._fp.getvalue())
make = Makefile()
make.write(
'FR := ../build/product/fresample',
'SOX := sox')
def test_sweep(depth, rate1, rate2):
inpath = 'in_%dk%d.wav' % (rate1 // 1000, depth)
make.build(
inpath, ['Makefile'],
'$(SOX) -b %d -r %d -n $@ synth 8 sine 0+%d vol 0.999' %
(depth, rate1, rate1//2))
for q in range(4):
outpath = 'out_%dk%d_%dk%dq' % \
(rate1 // 1000, depth, rate2/1000, q)
make.build(
outpath + '.wav', [inpath, '$(FR)', 'Makefile'],
'$(FR) -q %d -r %d $< $@' % (q, rate2))
make.build(
outpath + '.png', [outpath + '.wav', 'Makefile'],
'sox $< -n spectrogram -w kaiser -o $@')
make.add_default(outpath + '.png')
test_sweep(16, 96000, 44100)
test_sweep(16, 96000, 48000)
test_sweep(16, 48000, 44100)
make.write(
'clean:',
'\trm -f *.wav *.png')
make.save()
|
<commit_before><commit_msg>Add sine wave sweep test
This test produces spectrograms like those available on the SRC
Comparisons web site.
http://src.infinitewave.ca/<commit_after>#!/usr/bin/env python
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
class Makefile(object):
def __init__(self):
self._fp = StringIO()
self._all = set()
self._targets = set()
def add_default(self, x):
self._all.add(x)
def build(self, target, deps, *cmds):
if target in self._targets:
return
self._targets.add(target)
fp = self._fp
fp.write(target + ':')
for dep in deps:
fp.write(' ' + dep)
fp.write('\n')
for cmd in cmds:
fp.write('\t' + cmd + '\n')
    def write(self, *lines):
        for line in lines:
self._fp.write(line + '\n')
def save(self):
f = open('Makefile', 'w')
f.write('all:')
for t in sorted(self._all):
f.write(' ' + t)
f.write('\n')
f.write(self._fp.getvalue())
make = Makefile()
make.write(
'FR := ../build/product/fresample',
'SOX := sox')
def test_sweep(depth, rate1, rate2):
inpath = 'in_%dk%d.wav' % (rate1 // 1000, depth)
make.build(
inpath, ['Makefile'],
'$(SOX) -b %d -r %d -n $@ synth 8 sine 0+%d vol 0.999' %
(depth, rate1, rate1//2))
for q in range(4):
outpath = 'out_%dk%d_%dk%dq' % \
(rate1 // 1000, depth, rate2/1000, q)
make.build(
outpath + '.wav', [inpath, '$(FR)', 'Makefile'],
'$(FR) -q %d -r %d $< $@' % (q, rate2))
make.build(
outpath + '.png', [outpath + '.wav', 'Makefile'],
'sox $< -n spectrogram -w kaiser -o $@')
make.add_default(outpath + '.png')
test_sweep(16, 96000, 44100)
test_sweep(16, 96000, 48000)
test_sweep(16, 48000, 44100)
make.write(
'clean:',
'\trm -f *.wav *.png')
make.save()
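# Quick illustration of the Makefile helper with hypothetical targets
# (kept as a comment because running it would overwrite the Makefile that
# save() just wrote):
#     m = Makefile()
#     m.build('out.txt', ['in.txt', 'Makefile'], 'cp $< $@')
#     m.add_default('out.txt')
#     m.save()  # emits "all: out.txt" followed by the out.txt rule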
|
|
1165673d784eab36edcdc4ed4caf22dbd222874a
|
whois-scraper.py
|
whois-scraper.py
|
from lxml import html
from PIL import Image
import requests
def enlarge_image(image_file):
image = Image.open(image_file)
    enlarged_size = tuple(2 * x for x in image.size)  # map() is lazy on Python 3; resize() needs a tuple
    enlarged_image = image.resize(enlarged_size)
    return enlarged_image
def extract_text(image_file):
    image = enlarge_image(image_file)
    # Use Tesseract to extract text from the enlarged image, then return it.
domain = 'speedtest.net'
page = requests.get('http://www.whois.com/whois/{}'.format(domain))
tree = html.fromstring(page.content)
|
Add some preliminary code and function to enlarge image
|
Add some preliminary code and function to enlarge image
|
Python
|
mit
|
SkullTech/whois-scraper
|
Add some preliminary code and function to enlarge image
|
from lxml import html
from PIL import Image
import requests
def enlarge_image(image_file):
image = Image.open(image_file)
    enlarged_size = tuple(2 * x for x in image.size)  # map() is lazy on Python 3; resize() needs a tuple
    enlarged_image = image.resize(enlarged_size)
    return enlarged_image
def extract_text(image_file):
    image = enlarge_image(image_file)
    # Use Tesseract to extract text from the enlarged image, then return it.
domain = 'speedtest.net'
page = requests.get('http://www.whois.com/whois/{}'.format(domain))
tree = html.fromstring(page.content)
|
<commit_before><commit_msg>Add some preliminary code and function to enlarge image<commit_after>
|
from lxml import html
from PIL import Image
import requests
def enlarge_image(image_file):
image = Image.open(image_file)
    enlarged_size = tuple(2 * x for x in image.size)  # map() is lazy on Python 3; resize() needs a tuple
    enlarged_image = image.resize(enlarged_size)
    return enlarged_image
def extract_text(image_file):
    image = enlarge_image(image_file)
    # Use Tesseract to extract text from the enlarged image, then return it.
domain = 'speedtest.net'
page = requests.get('http://www.whois.com/whois/{}'.format(domain))
tree = html.fromstring(page.content)
|
Add some preliminary code and function to enlarge imagefrom lxml import html
from PIL import Image
import requests
def enlarge_image(image_file):
image = Image.open(image_file)
    enlarged_size = tuple(2 * x for x in image.size)  # map() is lazy on Python 3; resize() needs a tuple
    enlarged_image = image.resize(enlarged_size)
    return enlarged_image
def extract_text(image_file):
    image = enlarge_image(image_file)
    # Use Tesseract to extract text from the enlarged image, then return it.
domain = 'speedtest.net'
page = requests.get('http://www.whois.com/whois/{}'.format(domain))
tree = html.fromstring(page.content)
|
<commit_before><commit_msg>Add some preliminary code and function to enlarge image<commit_after>from lxml import html
from PIL import Image
import requests
def enlarge_image(image_file):
image = Image.open(image_file)
    enlarged_size = tuple(2 * x for x in image.size)  # map() is lazy on Python 3; resize() needs a tuple
    enlarged_image = image.resize(enlarged_size)
    return enlarged_image
def extract_text(image_file):
    image = enlarge_image(image_file)
    # Use Tesseract to extract text from the enlarged image, then return it.
domain = 'speedtest.net'
page = requests.get('http://www.whois.com/whois/{}'.format(domain))
tree = html.fromstring(page.content)
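# extract_text above stops short of the OCR step. One common way to finish
# it is pytesseract (an assumption -- the project may choose a different
# binding; requires `pip install pytesseract` plus a tesseract binary):
def extract_text_pytesseract(image_file):
    import pytesseract
    return pytesseract.image_to_string(enlarge_image(image_file))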
|
|
1d1f9d5d8f4873d6a23c430a5629eaeddfd50d2a
|
subiquity/ui/views/network_default_route.py
|
subiquity/ui/views/network_default_route.py
|
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urwid import Text, Pile, ListBox
from subiquity.view import ViewPolicy
from subiquity.ui.buttons import cancel_btn, done_btn
from subiquity.ui.utils import Color, Padding
import logging
log = logging.getLogger('subiquity.network.set_default_route')
class NetworkSetDefaultRouteView(ViewPolicy):
def __init__(self, model, signal):
self.model = model
self.signal = signal
self.is_manual = False
body = [
            Padding.center_50(self._build_default_routes()),
            Padding.line_break(""),
Padding.center_20(self._build_buttons())
]
super().__init__(ListBox(body))
def _build_default_routes(self):
items = [
Text("Please set the default gateway:"),
Color.menu_button(done_btn(label="192.168.9.1 (em1, em2)",
on_press=self.done),
focus_map="menu_button focus"),
Color.menu_button(
done_btn(label="Specify the default route manually",
on_press=self.set_manually),
focus_map="menu_button focus")
]
return Pile(items)
def _build_buttons(self):
cancel = cancel_btn(on_press=self.cancel)
done = done_btn(on_press=self.done)
buttons = [
Color.button(done, focus_map='button focus'),
Color.button(cancel, focus_map='button focus')
]
return Pile(buttons)
def set_manually(self, result):
self.is_manual = True
self.signal.emit_signal('refresh')
def done(self, result):
self.signal.emit_signal('network:show')
def cancel(self, button):
self.signal.emit_signal(self.model.get_previous_signal)
|
Add network set default route view
|
Add network set default route view
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com>
|
Python
|
agpl-3.0
|
CanonicalLtd/subiquity,CanonicalLtd/subiquity
|
Add network set default route view
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com>
|
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urwid import Text, Pile, ListBox
from subiquity.view import ViewPolicy
from subiquity.ui.buttons import cancel_btn, done_btn
from subiquity.ui.utils import Color, Padding
import logging
log = logging.getLogger('subiquity.network.set_default_route')
class NetworkSetDefaultRouteView(ViewPolicy):
def __init__(self, model, signal):
self.model = model
self.signal = signal
self.is_manual = False
body = [
            Padding.center_50(self._build_default_routes()),
            Padding.line_break(""),
Padding.center_20(self._build_buttons())
]
super().__init__(ListBox(body))
def _build_default_routes(self):
items = [
Text("Please set the default gateway:"),
Color.menu_button(done_btn(label="192.168.9.1 (em1, em2)",
on_press=self.done),
focus_map="menu_button focus"),
Color.menu_button(
done_btn(label="Specify the default route manually",
on_press=self.set_manually),
focus_map="menu_button focus")
]
return Pile(items)
def _build_buttons(self):
cancel = cancel_btn(on_press=self.cancel)
done = done_btn(on_press=self.done)
buttons = [
Color.button(done, focus_map='button focus'),
Color.button(cancel, focus_map='button focus')
]
return Pile(buttons)
def set_manually(self, result):
self.is_manual = True
self.signal.emit_signal('refresh')
def done(self, result):
self.signal.emit_signal('network:show')
def cancel(self, button):
self.signal.emit_signal(self.model.get_previous_signal)
|
<commit_before><commit_msg>Add network set default route view
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com><commit_after>
|
# Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urwid import Text, Pile, ListBox
from subiquity.view import ViewPolicy
from subiquity.ui.buttons import cancel_btn, done_btn
from subiquity.ui.utils import Color, Padding
import logging
log = logging.getLogger('subiquity.network.set_default_route')
class NetworkSetDefaultRouteView(ViewPolicy):
def __init__(self, model, signal):
self.model = model
self.signal = signal
self.is_manual = False
body = [
            Padding.center_50(self._build_default_routes()),
            Padding.line_break(""),
Padding.center_20(self._build_buttons())
]
super().__init__(ListBox(body))
def _build_default_routes(self):
items = [
Text("Please set the default gateway:"),
Color.menu_button(done_btn(label="192.168.9.1 (em1, em2)",
on_press=self.done),
focus_map="menu_button focus"),
Color.menu_button(
done_btn(label="Specify the default route manually",
on_press=self.set_manually),
focus_map="menu_button focus")
]
return Pile(items)
def _build_buttons(self):
cancel = cancel_btn(on_press=self.cancel)
done = done_btn(on_press=self.done)
buttons = [
Color.button(done, focus_map='button focus'),
Color.button(cancel, focus_map='button focus')
]
return Pile(buttons)
def set_manually(self, result):
self.is_manual = True
self.signal.emit_signal('refresh')
def done(self, result):
self.signal.emit_signal('network:show')
def cancel(self, button):
self.signal.emit_signal(self.model.get_previous_signal)
|
Add network set default route view
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com># Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urwid import Text, Pile, ListBox
from subiquity.view import ViewPolicy
from subiquity.ui.buttons import cancel_btn, done_btn
from subiquity.ui.utils import Color, Padding
import logging
log = logging.getLogger('subiquity.network.set_default_route')
class NetworkSetDefaultRouteView(ViewPolicy):
def __init__(self, model, signal):
self.model = model
self.signal = signal
self.is_manual = False
body = [
            Padding.center_50(self._build_default_routes()),
            Padding.line_break(""),
Padding.center_20(self._build_buttons())
]
super().__init__(ListBox(body))
def _build_default_routes(self):
items = [
Text("Please set the default gateway:"),
Color.menu_button(done_btn(label="192.168.9.1 (em1, em2)",
on_press=self.done),
focus_map="menu_button focus"),
Color.menu_button(
done_btn(label="Specify the default route manually",
on_press=self.set_manually),
focus_map="menu_button focus")
]
return Pile(items)
def _build_buttons(self):
cancel = cancel_btn(on_press=self.cancel)
done = done_btn(on_press=self.done)
buttons = [
Color.button(done, focus_map='button focus'),
Color.button(cancel, focus_map='button focus')
]
return Pile(buttons)
def set_manually(self, result):
self.is_manual = True
self.signal.emit_signal('refresh')
def done(self, result):
self.signal.emit_signal('network:show')
def cancel(self, button):
self.signal.emit_signal(self.model.get_previous_signal)
|
<commit_before><commit_msg>Add network set default route view
Signed-off-by: Adam Stokes <0a364f4bf549cc82d725fa7fd7ed34404be64079@ubuntu.com><commit_after># Copyright 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urwid import Text, Pile, ListBox
from subiquity.view import ViewPolicy
from subiquity.ui.buttons import cancel_btn, done_btn
from subiquity.ui.utils import Color, Padding
import logging
log = logging.getLogger('subiquity.network.set_default_route')
class NetworkSetDefaultRouteView(ViewPolicy):
def __init__(self, model, signal):
self.model = model
self.signal = signal
self.is_manual = False
body = [
            Padding.center_50(self._build_default_routes()),
            Padding.line_break(""),
Padding.center_20(self._build_buttons())
]
super().__init__(ListBox(body))
def _build_default_routes(self):
items = [
Text("Please set the default gateway:"),
Color.menu_button(done_btn(label="192.168.9.1 (em1, em2)",
on_press=self.done),
focus_map="menu_button focus"),
Color.menu_button(
done_btn(label="Specify the default route manually",
on_press=self.set_manually),
focus_map="menu_button focus")
]
return Pile(items)
def _build_buttons(self):
cancel = cancel_btn(on_press=self.cancel)
done = done_btn(on_press=self.done)
buttons = [
Color.button(done, focus_map='button focus'),
Color.button(cancel, focus_map='button focus')
]
return Pile(buttons)
def set_manually(self, result):
self.is_manual = True
self.signal.emit_signal('refresh')
def done(self, result):
self.signal.emit_signal('network:show')
def cancel(self, button):
self.signal.emit_signal(self.model.get_previous_signal)
|
|
2c57f2143e21fa3d006d4e4e2737429fb60b4797
|
tornado/setup_pg.py
|
tornado/setup_pg.py
|
from os.path import expanduser
from os import kill
import subprocess
import sys
import time
python = expanduser('~/FrameworkBenchmarks/installs/py2/bin/python')
cwd = expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.Popen(
python + " server.py --port=8080 --postgres=%s --logging=error" % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(["ps", "aux"]).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None,2)[1])
kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
|
import os
import subprocess
import sys
import time
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin')
python = os.path.join(bin_dir, 'python')  # bin_dir is already expanded above
pip = os.path.join(bin_dir, 'pip')
cwd = os.path.expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.call(pip + ' install -r requirements.txt', cwd=cwd, shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen(
python + ' server.py --port=8080 --postgres=%s --logging=error' % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(['ps', 'aux']).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
|
Call pip install before running server.
|
Call pip install before running server.
|
Python
|
bsd-3-clause
|
knewmanTE/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,denkab/FrameworkBenchmarks,testn/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,methane/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,joshk/FrameworkBenchmarks,khellang/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zloster/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zapov/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,grob/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,methane/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,grob/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,herloct/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,Verber/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,torhve/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,herloct/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,sgml/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,testn/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,doom369/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,zloster/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,s-ludwig/FrameworkBenchmar
ks,MTDdk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,jamming/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,sgml/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,khellang/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,testn/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,herloct/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,testn/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,torhve/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,grob/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,valyala/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,sgml/FrameworkBenchmarks,zloster/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jamming/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,doom369/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,sxend/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,actframework/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,methane/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,doom369/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,herloct/FrameworkBenchmarks,jamming/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,
F3Community/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,joshk/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,testn/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,zapov/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Verber/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Verber/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,herloct/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,torhve/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,torhve/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,testn/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,denkab/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,methane/FrameworkBenchmarks,actframework/FrameworkBenchmarks,torhve/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,methane/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,methane/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,herloct/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,testn/FrameworkBenchmarks,khellang/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,khellang/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,joshk/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,zapov/FrameworkBenchmarks,sxend/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,denkab/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,zapov/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zapov/FrameworkBenchmarks,torhve/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,sxend/FrameworkBenchmarks,testn/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,valyala/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,cir
clespainter/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,methane/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,methane/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,valyala/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,zapov/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,sxend/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,doom369/FrameworkBenchmarks,torhve/FrameworkBenchmarks,actframework/FrameworkBenchmarks,zloster/FrameworkBenchmarks,sxend/FrameworkBenchmarks,zapov/FrameworkBenchmarks,doom369/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,grob/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,zloster/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sxend/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,jamming/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Verber/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jamming/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zapov/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,actframework/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,doom369/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,sxend/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sgml/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,testn/FrameworkBen
chmarks,doom369/FrameworkBenchmarks,sxend/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,actframework/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,doom369/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,zloster/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,denkab/FrameworkBenchmarks,herloct/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,actframework/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,grob/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,methane/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,sgml/FrameworkBenchmarks,herloct/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,khellang/FrameworkBenchmarks,torhve/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,zloster/FrameworkBenchmarks,sgml/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,sgml/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,zapov/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zloster/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,denkab/FrameworkBenchmarks,doom369/FrameworkBenchmarks,actframework/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,valyala/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,sgml/FrameworkBenchmarks,actframework/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,denkab/FrameworkBenchmarks,khellang/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,jamming/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,methane/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,denkab/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,grob/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,hamiltont/FrameworkB
enchmarks,actframework/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sxend/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,denkab/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,grob/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,khellang/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,torhve/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,jamming/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,zapov/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,doom369/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,doom369/FrameworkBenchmarks,testn/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,methane/FrameworkBenchmarks,khellang/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,testn/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,denkab/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Verber/FrameworkBenchmarks,torhve/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,valyala/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,zapov/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,methane/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,khellang/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,herloct/FrameworkBenchmarks,xitrum-framework/
FrameworkBenchmarks,psfblair/FrameworkBenchmarks,sxend/FrameworkBenchmarks,valyala/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,zloster/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,khellang/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,joshk/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Verber/FrameworkBenchmarks,testn/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,grob/FrameworkBenchmarks,sxend/FrameworkBenchmarks,methane/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,grob/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,sgml/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,sxend/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,actframework/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,valyala/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zapov/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,sgml/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,herloct/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,actframework/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,sgml/FrameworkBenchmarks,victorbriz/Fr
ameworkBenchmarks,sxend/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,sgml/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,torhve/FrameworkBenchmarks,joshk/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,actframework/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,zloster/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,doom369/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,sxend/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,joshk/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,actframework/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,zloster/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,joshk/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,jamming/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,grob/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,denkab/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sgml/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,torhve/FrameworkBenchmarks,herloct/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,joshk/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,Verber/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,khellang/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,joshk/FrameworkBenchmarks,sgml/FrameworkBenchmarks,valyala/FrameworkBenchmarks,khellang/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,doom369/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,denkab/FrameworkBenchmarks,actframework/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,zapov/FrameworkBenchmarks,grob/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,joshk/FrameworkBenchmarks,denkab/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,sxend/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,greenlaw110/Framewor
kBenchmarks,Dith3r/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,doom369/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,Verber/FrameworkBenchmarks,grob/FrameworkBenchmarks,herloct/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,herloct/FrameworkBenchmarks,denkab/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zloster/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,Verber/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,joshk/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,testn/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,valyala/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,grob/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,grob/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,actframework/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,denkab/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,zapov/FrameworkBenchmarks,jamming/FrameworkBenchmarks,testn/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,khellang/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,khellang/FrameworkB
enchmarks,psfblair/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Verber/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,valyala/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,methane/FrameworkBenchmarks,joshk/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,nathana1/FrameworkBenchmarks
|
from os.path import expanduser
from os import kill
import subprocess
import sys
import time
python = expanduser('~/FrameworkBenchmarks/installs/py2/bin/python')
cwd = expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.Popen(
python + " server.py --port=8080 --postgres=%s --logging=error" % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(["ps", "aux"]).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None,2)[1])
kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
Call pip install before running server.
|
import os
import subprocess
import sys
import time
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin')
python = os.path.expanduser(os.path.join(bin_dir, 'python'))
pip = os.path.expanduser(os.path.join(bin_dir, 'pip'))
cwd = os.path.expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.call(pip + ' install -r requirements.txt', cwd=cwd, shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen(
python + ' server.py --port=8080 --postgres=%s --logging=error' % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(['ps', 'aux']).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
|
<commit_before>from os.path import expanduser
from os import kill
import subprocess
import sys
import time
python = expanduser('~/FrameworkBenchmarks/installs/py2/bin/python')
cwd = expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.Popen(
python + " server.py --port=8080 --postgres=%s --logging=error" % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(["ps", "aux"]).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None,2)[1])
kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
<commit_msg>Call pip install before running server.<commit_after>
|
import os
import subprocess
import sys
import time
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin')
python = os.path.expanduser(os.path.join(bin_dir, 'python'))
pip = os.path.expanduser(os.path.join(bin_dir, 'pip'))
cwd = os.path.expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.call(pip + ' install -r requirements.txt', cwd=cwd, shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen(
python + ' server.py --port=8080 --postgres=%s --logging=error' % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(['ps', 'aux']).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
|
from os.path import expanduser
from os import kill
import subprocess
import sys
import time
python = expanduser('~/FrameworkBenchmarks/installs/py2/bin/python')
cwd = expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.Popen(
python + " server.py --port=8080 --postgres=%s --logging=error" % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(["ps", "aux"]).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None,2)[1])
kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
Call pip install before running server.import os
import subprocess
import sys
import time
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin')
python = os.path.expanduser(os.path.join(bin_dir, 'python'))
pip = os.path.expanduser(os.path.join(bin_dir, 'pip'))
cwd = os.path.expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.call(pip + ' install -r requirements.txt', cwd=cwd, shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen(
python + ' server.py --port=8080 --postgres=%s --logging=error' % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(['ps', 'aux']).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
|
<commit_before>from os.path import expanduser
from os import kill
import subprocess
import sys
import time
python = expanduser('~/FrameworkBenchmarks/installs/py2/bin/python')
cwd = expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.Popen(
python + " server.py --port=8080 --postgres=%s --logging=error" % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(["ps", "aux"]).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None,2)[1])
kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
<commit_msg>Call pip install before running server.<commit_after>import os
import subprocess
import sys
import time
bin_dir = os.path.expanduser('~/FrameworkBenchmarks/installs/py2/bin')
python = os.path.expanduser(os.path.join(bin_dir, 'python'))
pip = os.path.expanduser(os.path.join(bin_dir, 'pip'))
cwd = os.path.expanduser('~/FrameworkBenchmarks/tornado')
def start(args, logfile, errfile):
subprocess.call(pip + ' install -r requirements.txt', cwd=cwd, shell=True, stderr=errfile, stdout=logfile)
subprocess.Popen(
python + ' server.py --port=8080 --postgres=%s --logging=error' % (args.database_host,),
shell=True, cwd=cwd, stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
for line in subprocess.check_output(['ps', 'aux']).splitlines():
if 'server.py --port=8080' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
return 0
if __name__ == '__main__':
class DummyArg:
database_host = 'localhost'
start(DummyArg(), sys.stderr, sys.stderr)
time.sleep(1)
stop(sys.stderr, sys.stderr)
|
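The record above wires a dependency install in front of the server launch. Restated as a sketch below, with one defensive tweak: the install's exit code is checked before the server is started. The virtualenv path and the requirements file name are illustrative assumptions, not taken from the record.

import os
import subprocess

BIN_DIR = os.path.expanduser('~/venv/bin')  # hypothetical virtualenv location

def start_server(cwd, logfile, errfile):
    # Install pinned dependencies first; a failed install aborts the launch
    # instead of starting the server against a stale environment.
    rc = subprocess.call(
        [os.path.join(BIN_DIR, 'pip'), 'install', '-r', 'requirements.txt'],
        cwd=cwd, stdout=logfile, stderr=errfile)
    if rc != 0:
        return rc
    # Launch the server in the background, mirroring the Popen call above but
    # with an argument list rather than a shell=True command string.
    subprocess.Popen(
        [os.path.join(BIN_DIR, 'python'), 'server.py', '--port=8080'],
        cwd=cwd, stdout=logfile, stderr=errfile)
    return 0

Argument lists also avoid the quoting pitfalls that string interpolation into shell=True commands can hit.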
9ec957af0c3d57dff4c05c1b7ed3e66e1c033f6b
|
nagios/check_idot_snowplows.py
|
nagios/check_idot_snowplows.py
|
"""
Nagios check to see how much snowplow data we are currently ingesting
"""
import sys
import os
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
pcursor = POSTGIS.cursor()
pcursor.execute("""
select count(*) from idot_snowplow_current WHERE
valid > now() - '30 minutes'::interval
""")
row = pcursor.fetchone()
count = row[0]
if count > 2:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(0)
elif count > 1:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(2)
|
Add nagios check for idot snowplow ingest
|
Add nagios check for idot snowplow ingest
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add nagios check for idot snowplow ingest
|
"""
Nagios check to see how much snowplow data we are currently ingesting
"""
import sys
import os
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
pcursor = POSTGIS.cursor()
pcursor.execute("""
select count(*) from idot_snowplow_current WHERE
valid > now() - '30 minutes'::interval
""")
row = pcursor.fetchone()
count = row[0]
if count > 2:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(0)
elif count > 1:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(2)
|
<commit_before><commit_msg>Add nagios check for idot snowplow ingest<commit_after>
|
"""
Nagios check to see how much snowplow data we are currently ingesting
"""
import sys
import os
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
pcursor = POSTGIS.cursor()
pcursor.execute("""
select count(*) from idot_snowplow_current WHERE
valid > now() - '30 minutes'::interval
""")
row = pcursor.fetchone()
count = row[0]
if count > 2:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(0)
elif count > 1:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(2)
|
Add nagios check for idot snowplow ingest"""
Nagios check to see how much snowplow data we are currently ingesting
"""
import sys
import os
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
pcursor = POSTGIS.cursor()
pcursor.execute("""
select count(*) from idot_snowplow_current WHERE
valid > now() - '30 minutes'::interval
""")
row = pcursor.fetchone()
count = row[0]
if count > 2:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(0)
elif count > 1:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(2)
|
<commit_before><commit_msg>Add nagios check for idot snowplow ingest<commit_after>"""
Nagios check to see how much snowplow data we are currently ingesting
"""
import sys
import os
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
pcursor = POSTGIS.cursor()
pcursor.execute("""
select count(*) from idot_snowplow_current WHERE
valid > now() - '30 minutes'::interval
""")
row = pcursor.fetchone()
count = row[0]
if count > 2:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(0)
elif count > 1:
print 'OK - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(1)
else:
print 'CRITICAL - snowplows %s |count=%s;2;1;0' % (count, count)
sys.exit(2)
|
|
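One detail worth noting in the check above: the count > 1 branch prints an OK label but exits with status 1, which Nagios interprets as WARNING. The helper below is a hypothetical refactoring, not part of the record, that derives the label and the exit code from the same comparison so the two cannot disagree.

import sys

def nagios_exit(service, count, warn, crit):
    # Lower counts are worse for this check: above the warn threshold is OK,
    # above the crit threshold is WARNING, anything else is CRITICAL.
    if count > warn:
        status, code = 'OK', 0
    elif count > crit:
        status, code = 'WARNING', 1
    else:
        status, code = 'CRITICAL', 2
    # Perfdata after the pipe keeps the metric graphable: value;warn;crit;min.
    print('%s - %s %s |count=%s;%s;%s;0' % (status, service, count, count, warn, crit))
    sys.exit(code)

With the record's thresholds this would be called as nagios_exit('snowplows', count, warn=2, crit=1).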
717b20e298547685ed0685bd09a4fac541034910
|
example/map_flows.py
|
example/map_flows.py
|
from taskin import task
def get_servers(data):
return [
'foo.example.com',
'bar.example.com',
]
def create_something(data):
servers, name = data
for server in servers:
print('Creating: https://%s/%s' % (server, name))
def main():
flow = [
get_servers,
task.MapTask(create_something, args=xrange(10))
]
task.do_flow(flow)
if __name__ == '__main__':
main()
|
Add an example map flow
|
Add an example map flow
|
Python
|
bsd-3-clause
|
ionrock/taskin
|
Add an example map flow
|
from taskin import task
def get_servers(data):
return [
'foo.example.com',
'bar.example.com',
]
def create_something(data):
servers, name = data
for server in servers:
print('Creating: https://%s/%s' % (server, name))
def main():
flow = [
get_servers,
task.MapTask(create_something, args=xrange(10))
]
task.do_flow(flow)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add an example map flow<commit_after>
|
from taskin import task
def get_servers(data):
return [
'foo.example.com',
'bar.example.com',
]
def create_something(data):
servers, name = data
for server in servers:
print('Creating: https://%s/%s' % (server, name))
def main():
flow = [
get_servers,
task.MapTask(create_something, args=xrange(10))
]
task.do_flow(flow)
if __name__ == '__main__':
main()
|
Add an example map flowfrom taskin import task
def get_servers(data):
return [
'foo.example.com',
'bar.example.com',
]
def create_something(data):
servers, name = data
for server in servers:
print('Creating: https://%s/%s' % (server, name))
def main():
flow = [
get_servers,
task.MapTask(create_something, args=xrange(10))
]
task.do_flow(flow)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add an example map flow<commit_after>from taskin import task
def get_servers(data):
return [
'foo.example.com',
'bar.example.com',
]
def create_something(data):
servers, name = data
for server in servers:
print('Creating: https://%s/%s' % (server, name))
def main():
flow = [
get_servers,
task.MapTask(create_something, args=xrange(10))
]
task.do_flow(flow)
if __name__ == '__main__':
main()
|
|
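For readers unfamiliar with taskin, the stand-in below shows one way a flow runner like the one used above could thread results between steps. It is an assumption for illustration only, not taskin's actual implementation; see ionrock/taskin for the real API.

class MapTask(object):
    # Toy stand-in: pair the previous step's result with each mapped argument.
    def __init__(self, func, args):
        self.func = func
        self.args = args

    def __call__(self, data):
        return [self.func((data, arg)) for arg in self.args]

def do_flow(flow, data=None):
    # Run the steps in order, feeding each one the previous step's result.
    for step in flow:
        data = step(data)
    return data

Under this reading, get_servers returns the server list and MapTask then calls create_something((servers, name)) once per name, which matches the servers, name = data unpacking in the example. Note that xrange is Python 2 only; range works in both.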
61139332ce1bcfd145f16b8f3c411e178db4054c
|
numpy/core/tests/test_dtype.py
|
numpy/core/tests/test_dtype.py
|
import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
Add some unit tests for the hashing protocol of dtype (fail currently).
|
Add some unit tests for the hashing protocol of dtype (fail currently).
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@6666 94b884b6-d6fd-0310-90d3-974f1d3f35e1
|
Python
|
bsd-3-clause
|
teoliphant/numpy-refactor,efiring/numpy-work,efiring/numpy-work,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,illume/numpy3k,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro,efiring/numpy-work,illume/numpy3k,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,teoliphant/numpy-refactor,illume/numpy3k,jasonmccampbell/numpy-refactor-sprint,efiring/numpy-work,chadnetzer/numpy-gaurdro,illume/numpy3k,chadnetzer/numpy-gaurdro,Ademan/NumPy-GSoC
|
Add some unit tests for the hashing protocol of dtype (fail currently).
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@6666 94b884b6-d6fd-0310-90d3-974f1d3f35e1
|
import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add some unit tests for the hashing protocol of dtype (fail currently).
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@6666 94b884b6-d6fd-0310-90d3-974f1d3f35e1<commit_after>
|
import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
Add some unit tests for the hashing protocol of dtype (fail currently).
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@6666 94b884b6-d6fd-0310-90d3-974f1d3f35e1import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add some unit tests for the hashing protocol of dtype (fail currently).
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@6666 94b884b6-d6fd-0310-90d3-974f1d3f35e1<commit_after>import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
|
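The tests above pin down the standard Python hashing contract (self.failUnless is the old unittest name for assertTrue): objects that compare equal must hash equal, and distinct objects should normally hash differently, though collisions are technically legal. A minimal self-contained illustration, using a toy type rather than numpy's real dtype:

class ToyDtype(object):
    # Toy stand-in for illustration only; numpy's dtype hashing lives in C.
    def __init__(self, kind, shape=()):
        self.kind = kind
        self.shape = tuple(shape)

    def __eq__(self, other):
        return (self.kind, self.shape) == (other.kind, other.shape)

    def __hash__(self):
        # Hash the same tuple that drives equality, so a == b implies
        # hash(a) == hash(b).
        return hash((self.kind, self.shape))

assert ToyDtype('i4', (2, 3)) == ToyDtype('i4', (2, 3))
assert hash(ToyDtype('i4', (2, 3))) == hash(ToyDtype('i4', (2, 3)))
# Expected but not guaranteed -- hash collisions are allowed, just rare.
assert hash(ToyDtype('i4', (2, 3))) != hash(ToyDtype('i4', (3, 2)))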
c3e7b563c3eeb24aa269f23672b8f469470908b7
|
onetime/views.py
|
onetime/views.py
|
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key):
data = get_object_or_404(Key, key=key)
if data.usage_left is not None and data.usage_left == 0:
return HttpResponseGone()
if data.expires is not None and data.expires < datetime.now():
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
|
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key, redirect_expired_to=None):
data = get_object_or_404(Key, key=key)
expired = False
if data.usage_left is not None and data.usage_left == 0:
expired = True
if data.expires is not None and data.expires < datetime.now():
expired = True
if expired:
if redirect_expired_to is not None:
return HttpResponseRedirect(redirect_expired_to)
else:
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
|
Add an option to redirect user to a page if the key is already expired.
|
Add an option to redirect user to a page if the key is already expired.
|
Python
|
agpl-3.0
|
ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,uploadcare/django-loginurl,ISIFoundation/influenzanet-website,vanschelven/cmsplugin-journal,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,fajran/django-loginurl,ISIFoundation/influenzanet-website
|
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key):
data = get_object_or_404(Key, key=key)
if data.usage_left is not None and data.usage_left == 0:
return HttpResponseGone()
if data.expires is not None and data.expires < datetime.now():
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
Add an option to redirect user to a page if the key is already expired.
|
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key, redirect_expired_to=None):
data = get_object_or_404(Key, key=key)
expired = False
if data.usage_left is not None and data.usage_left == 0:
expired = True
if data.expires is not None and data.expires < datetime.now():
expired = True
if expired:
if redirect_expired_to is not None:
return HttpResponseRedirect(redirect_expired_to)
else:
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
|
<commit_before>from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key):
data = get_object_or_404(Key, key=key)
if data.usage_left is not None and data.usage_left == 0:
return HttpResponseGone()
if data.expires is not None and data.expires < datetime.now():
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
<commit_msg>Add an option to redirect user to a page if the key is already expired.<commit_after>
|
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key, redirect_expired_to=None):
data = get_object_or_404(Key, key=key)
expired = False
if data.usage_left is not None and data.usage_left == 0:
expired = True
if data.expires is not None and data.expires < datetime.now():
expired = True
if expired:
if redirect_expired_to is not None:
return HttpResponseRedirect(redirect_expired_to)
else:
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
|
from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key):
data = get_object_or_404(Key, key=key)
if data.usage_left is not None and data.usage_left == 0:
return HttpResponseGone()
if data.expires is not None and data.expires < datetime.now():
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
Add an option to redirect the user to a page if the key has already expired.from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login as auth_login  # aliased so the view below does not shadow it
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key, redirect_expired_to=None):
data = get_object_or_404(Key, key=key)
expired = False
if data.usage_left is not None and data.usage_left == 0:
expired = True
if data.expires is not None and data.expires < datetime.now():
expired = True
if expired:
if redirect_expired_to is not None:
return HttpResponseRedirect(redirect_expired_to)
else:
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
auth_login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
|
<commit_before>from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key):
data = get_object_or_404(Key, key=key)
if data.usage_left is not None and data.usage_left == 0:
return HttpResponseGone()
if data.expires is not None and data.expires < datetime.now():
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
<commit_msg>Add an option to redirect the user to a page if the key has already expired.<commit_after>from datetime import datetime
from django.http import HttpResponseRedirect, HttpResponseGone
from django.shortcuts import get_object_or_404
from django.contrib.auth import login as auth_login  # aliased so the view below does not shadow it
from django.conf import settings
from onetime import utils
from onetime.models import Key
def cleanup(request):
utils.cleanup()
def login(request, key, redirect_expired_to=None):
data = get_object_or_404(Key, key=key)
expired = False
if data.usage_left is not None and data.usage_left == 0:
expired = True
if data.expires is not None and data.expires < datetime.now():
expired = True
if expired:
if redirect_expired_to is not None:
return HttpResponseRedirect(redirect_expired_to)
else:
return HttpResponseGone()
if data.usage_left is not None:
data.usage_left -= 1
data.save()
auth_login(request, data.user)
next = request.GET.get('next', None)
if data.next is not None:
next = data.next
if next is None:
next = settings.LOGIN_REDIRECT_URL
return HttpResponseRedirect(next)
|
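
A usage note on the onetime record above: redirect_expired_to is meant to be supplied from the URLconf. A minimal sketch in the old-style Django routing of that era; the /key-expired/ page and the pattern itself are illustrative assumptions, not part of the commit:

from django.conf.urls.defaults import patterns, url

urlpatterns = patterns('',
    # Hypothetical route: pass the expired-key landing page as an extra kwarg.
    url(r'^login/(?P<key>\w+)/$', 'onetime.views.login',
        {'redirect_expired_to': '/key-expired/'}),
)
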
a136eeefdd6cf276a0d4815fa39453737ed04727
|
py/next-greater-element-iii.py
|
py/next-greater-element-iii.py
|
class Solution(object):
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
|
Add py solution for 556. Next Greater Element III
|
Add py solution for 556. Next Greater Element III
556. Next Greater Element III: https://leetcode.com/problems/next-greater-element-iii/
Approach:
Scan the digits from the right for the first digit that is smaller than the
digit to its right, swap it with the smallest larger digit in the suffix,
then sort the suffix ascending (the next permutation); return -1 if no such
digit exists or the result exceeds the 32-bit signed range.
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 556. Next Greater Element III
556. Next Greater Element III: https://leetcode.com/problems/next-greater-element-iii/
Approach:
Scan the digits from the right for the first digit that is smaller than the
digit to its right, swap it with the smallest larger digit in the suffix,
then sort the suffix ascending (the next permutation); return -1 if no such
digit exists or the result exceeds the 32-bit signed range.
|
class Solution(object):
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
|
<commit_before><commit_msg>Add py solution for 556. Next Greater Element III
556. Next Greater Element III: https://leetcode.com/problems/next-greater-element-iii/
Approach:
Scan the digits from the right for the first digit that is smaller than the
digit to its right, swap it with the smallest larger digit in the suffix,
then sort the suffix ascending (the next permutation); return -1 if no such
digit exists or the result exceeds the 32-bit signed range.<commit_after>
|
class Solution(object):
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
|
Add py solution for 556. Next Greater Element III
556. Next Greater Element III: https://leetcode.com/problems/next-greater-element-iii/
Approach:
Scan the digits from the right for the first digit that is smaller than the
digit to its right, swap it with the smallest larger digit in the suffix,
then sort the suffix ascending (the next permutation); return -1 if no such
digit exists or the result exceeds the 32-bit signed range.class Solution(object):
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
|
<commit_before><commit_msg>Add py solution for 556. Next Greater Element III
556. Next Greater Element III: https://leetcode.com/problems/next-greater-element-iii/
Approach:
Scan the digits from the right for the first digit that is smaller than the
digit to its right, swap it with the smallest larger digit in the suffix,
then sort the suffix ascending (the next permutation); return -1 if no such
digit exists or the result exceeds the 32-bit signed range.<commit_after>class Solution(object):
def nextGreaterElement(self, n):
"""
:type n: int
:rtype: int
"""
s = str(n)
for i, n in enumerate(reversed(s[:-1]), 1):
if n < s[-i]:
x, j = min((x, k) for k, x in enumerate(s[-i:]) if x > n)
ans = s[:-i - 1]
ans += x
l = list(s[-i:])
l[j] = n
ans += ''.join(sorted(l))
ans = int(ans)
if ans >= 1 << 31:
return -1
return ans
else:
return -1
|
|
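
A quick worked check of the 556 solution above, assuming the solution's else pairs with the for loop (for/else) as in the source file; expected values verified by hand:

s = Solution()
print s.nextGreaterElement(12)         # 21: swap the last two digits
print s.nextGreaterElement(21)         # -1: already the largest permutation
print s.nextGreaterElement(12443322)   # 13222344: swap the 2 for a 3, sort the tail
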
32d46fe3e080b13ab9ae9dc3d868e9a724cccda9
|
tools/telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py
|
tools/telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
Add unit test for IosBrowserFinder.
|
Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk,jaruba/chromium.src,ltilve/chromium,Fireblend/chromium-crosswalk,dednal/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,M4sse/chromium.src,M4sse/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,littlstar/chromium.src,dushu1203/chromium.src,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,markYoungH/chromium.src,jaruba/chromium.src,M4sse/chromium.src,Just-D/chromium-1,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,Jonekee/chromium.src,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,fujunwei/chromium-crosswalk,bright-sparks/chromium-spacewalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,krieger-od/nwjs_chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,Chilledheart/chromium,ondra-novak/chromium.src,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,littlstar/chromium.src,jaruba/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,ondra-novak/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,ondra-novak/chromium.src,Just-D/chromium-1,Chilledheart/chromium,M4sse/chromium.src,Fireblend/chromium-crosswalk,krieger-od/nwjs_chromium.src,Chilledheart/chromium,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Jonekee/chromium.src,chuan9/chromium-crosswalk,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,hgl888/chromium
-crosswalk,Jonekee/chromium.src,Jonekee/chromium.src,ltilve/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,ltilve/chromium,dushu1203/chromium.src,dushu1203/chromium.src,fujunwei/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,krieger-od/nwjs_chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,ltilve/chromium,Jonekee/chromium.src,jaruba/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,dushu1203/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,ondra-novak/chromium.src,TheTypoMaster/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,ltilve/chromium,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Pluto-tv/chromium-crosswalk,jaruba/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Chilledheart/chromium,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,M4sse/chromium.src,jaruba/chromium.src,littlstar/chromium.src,dednal/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,ltilve/chromium,dushu1203/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src
|
Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
|
bc35e89d04e541f75fc12788893b21a3b876aaf9
|
tail/tests/test_tail.py
|
tail/tests/test_tail.py
|
"""
Tests for the tail implementation
"""
from tail import FileTail
def test_tail_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('tail.open', mocked_open, create=True) as mocked_file_open:
res = FileTail('Test_filename.txt').tail(3)
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["D", "E", "F"]
|
Create test case for tail from file
|
Create test case for tail from file
|
Python
|
mit
|
shuttle1987/tail,shuttle1987/tail
|
Create test case for tail from file
|
"""
Tests for the tail implementation
"""
from tail import FileTail
def test_tail_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('tail.open', mocked_open, create=True) as mocked_file_open:
res = FileTail('Test_filename.txt').tail(3)
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["D", "E", "F"]
|
<commit_before><commit_msg>Create test case for tail from file<commit_after>
|
"""
Tests for the tail implementation
"""
from tail import FileTail
def test_tail_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('tail.open', mocked_open, create=True) as mocked_file_open:
res = FileTail('Test_filename.txt').tail(3)
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["D", "E", "F"]
|
Create test case for tail from file"""
Tests for the tail implementation
"""
from tail import FileTail
def test_tail_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('tail.open', mocked_open, create=True) as mocked_file_open:
res = FileTail('Test_filename.txt').tail(3)
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["D", "E", "F"]
|
<commit_before><commit_msg>Create test case for tail from file<commit_after>"""
Tests for the tail implementation
"""
from tail import FileTail
def test_tail_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('tail.open', mocked_open, create=True) as mocked_file_open:
res = FileTail('Test_filename.txt').tail(3)
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["D", "E", "F"]
|
|
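
The __iter__ workaround in the tail test above is needed because the handle returned by mock_open supports read() but not line iteration out of the box; a standalone demonstration, independent of the tail package:

from unittest.mock import mock_open, patch

m = mock_open(read_data='A\nB\n')
# Make the mocked handle iterable by lines, as the test does.
m.return_value.__iter__.return_value = 'A\nB\n'.splitlines()
with patch('builtins.open', m):
    with open('whatever.txt') as f:
        print(list(f))  # ['A', 'B']
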
d21743f2543f8d953a837d75bff0fcdb0105f4db
|
feincms/module/page/extensions/changedate.py
|
feincms/module/page/extensions/changedate.py
|
"""
Track the modification date for pages.
"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def register(cls, admin_cls):
cls.add_to_class('creation_date', models.DateTimeField(_('creation date'), editable=False))
cls.add_to_class('modification_date', models.DateTimeField(_('modification date'), editable=False))
orig_save = cls.save
def save(page):
now = datetime.now()
if page.id is None:
page.creation_date = now
page.modification_date = now
orig_save(page)
cls.save = save
|
Add page extension for tracking page creation and modification dates.
|
Add page extension for tracking page creation and modification dates.
This can be used in conjunction with a response processor to set the "last-modified" or "etag" response headers.
|
Python
|
bsd-3-clause
|
feincms/feincms,hgrimelid/feincms,pjdelport/feincms,joshuajonah/feincms,nickburlett/feincms,matthiask/django-content-editor,joshuajonah/feincms,matthiask/django-content-editor,matthiask/django-content-editor,nickburlett/feincms,michaelkuty/feincms,joshuajonah/feincms,mjl/feincms,michaelkuty/feincms,hgrimelid/feincms,joshuajonah/feincms,mjl/feincms,mjl/feincms,feincms/feincms,matthiask/django-content-editor,nickburlett/feincms,pjdelport/feincms,nickburlett/feincms,matthiask/feincms2-content,hgrimelid/feincms,matthiask/feincms2-content,feincms/feincms,pjdelport/feincms,michaelkuty/feincms,michaelkuty/feincms,matthiask/feincms2-content
|
Add page extension for tracking page creation and modification dates.
This can be used in conjunction with a response processor to set the "last-modified" or "etag" response headers.
|
"""
Track the modification date for pages.
"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def register(cls, admin_cls):
cls.add_to_class('creation_date', models.DateTimeField(_('creation date'), editable=False))
cls.add_to_class('modification_date', models.DateTimeField(_('modification date'), editable=False))
orig_save = cls.save
def save(page):
now = datetime.now()
if page.id is None:
page.creation_date = now
page.modification_date = now
orig_save(page)
cls.save = save
|
<commit_before><commit_msg>Add page extension for tracking page creation and modification dates.
This can be used in conjunction with a response processor to set the "last-modified" or "etag" response headers.<commit_after>
|
"""
Track the modification date for pages.
"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def register(cls, admin_cls):
cls.add_to_class('creation_date', models.DateTimeField(_('creation date'), editable=False))
cls.add_to_class('modification_date', models.DateTimeField(_('modification date'), editable=False))
orig_save = cls.save
def save(page):
now = datetime.now()
if page.id is None:
page.creation_date = now
page.modification_date = now
orig_save(page)
cls.save = save
|
Add page extension for tracking page creation and modification dates.
This can be used in conjunction with a response processor to set the "last-modified" or "etag" response headers."""
Track the modification date for pages.
"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def register(cls, admin_cls):
cls.add_to_class('creation_date', models.DateTimeField(_('creation date'), editable=False))
cls.add_to_class('modification_date', models.DateTimeField(_('modification date'), editable=False))
orig_save = cls.save
def save(page):
now = datetime.now()
if page.id is None:
page.creation_date = now
page.modification_date = now
orig_save(page)
cls.save = save
|
<commit_before><commit_msg>Add page extension for tracking page creation and modification dates.
This can be used in conjunction with a response processor to set the "last-modified" or "etag" response headers.<commit_after>"""
Track the modification date for pages.
"""
from datetime import datetime
from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
def register(cls, admin_cls):
cls.add_to_class('creation_date', models.DateTimeField(_('creation date'), editable=False))
cls.add_to_class('modification_date', models.DateTimeField(_('modification date'), editable=False))
orig_save = cls.save
def save(page):
now = datetime.now()
if page.id is None:
page.creation_date = now
page.modification_date = now
orig_save(page)
cls.save = save
|
|
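
As the changedate commit message suggests, the new fields pair naturally with a response processor that emits caching headers. A hedged sketch; the (page, request, response) signature is an assumption for illustration, not FeinCMS's documented contract:

import time
from django.utils.http import http_date

def set_last_modified(page, request, response):
    # Stamp the header from the extension's modification_date field.
    response['Last-Modified'] = http_date(
        time.mktime(page.modification_date.timetuple()))
    return response
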
06e4fd4b7d4cc4c984a05887fce00f7c8bbdc174
|
tests/notifiers/test_messaging.py
|
tests/notifiers/test_messaging.py
|
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler._notifiers import base
from tests import test
class MessagingTestCase(test.TestCase):
def test_init_and_notify(self):
messaging = mock.MagicMock()
context = "context"
transport = "transport"
project = "project"
service = "service"
host = "host"
notify_func = base.Notifier.factory("Messaging", messaging, context,
transport, project, service, host)
messaging.Notifier.assert_called_once_with(
transport, publisher_id=host, driver="messaging", topic="profiler")
info = {
"a": 10
}
notify_func(info)
expected_data = {"project": project, "service": service}
expected_data.update(info)
messaging.Notifier().info.assert_called_once_with(
context, "profiler.%s" % service, expected_data)
messaging.reset_mock()
notify_func(info, context="my_context")
messaging.Notifier().info.assert_called_once_with(
"my_context", "profiler.%s" % service, expected_data)
|
Add missing tests for messaging notifier plugin
|
Add missing tests for messaging notifier plugin
Change-Id: I1a206fdbbb89c03b04eafe0ad850441094e363e5
|
Python
|
apache-2.0
|
stackforge/osprofiler,openstack/osprofiler,stackforge/osprofiler,openstack/osprofiler,stackforge/osprofiler,openstack/osprofiler
|
Add missing tests for messaging notifier plugin
Change-Id: I1a206fdbbb89c03b04eafe0ad850441094e363e5
|
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler._notifiers import base
from tests import test
class MessagingTestCase(test.TestCase):
def test_init_and_notify(self):
messaging = mock.MagicMock()
context = "context"
transport = "transport"
project = "project"
service = "service"
host = "host"
notify_func = base.Notifier.factory("Messaging", messaging, context,
transport, project, service, host)
messaging.Notifier.assert_called_once_with(
transport, publisher_id=host, driver="messaging", topic="profiler")
info = {
"a": 10
}
notify_func(info)
expected_data = {"project": project, "service": service}
expected_data.update(info)
messaging.Notifier().info.assert_called_once_with(
context, "profiler.%s" % service, expected_data)
messaging.reset_mock()
notify_func(info, context="my_context")
messaging.Notifier().info.assert_called_once_with(
"my_context", "profiler.%s" % service, expected_data)
|
<commit_before><commit_msg>Add missing tests for messaging notifier plugin
Change-Id: I1a206fdbbb89c03b04eafe0ad850441094e363e5<commit_after>
|
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler._notifiers import base
from tests import test
class MessagingTestCase(test.TestCase):
def test_init_and_notify(self):
messaging = mock.MagicMock()
context = "context"
transport = "transport"
project = "project"
service = "service"
host = "host"
notify_func = base.Notifier.factory("Messaging", messaging, context,
transport, project, service, host)
messaging.Notifier.assert_called_once_with(
transport, publisher_id=host, driver="messaging", topic="profiler")
info = {
"a": 10
}
notify_func(info)
expected_data = {"project": project, "service": service}
expected_data.update(info)
messaging.Notifier().info.assert_called_once_with(
context, "profiler.%s" % service, expected_data)
messaging.reset_mock()
notify_func(info, context="my_context")
messaging.Notifier().info.assert_called_once_with(
"my_context", "profiler.%s" % service, expected_data)
|
Add missing tests for messaging notifier plugin
Change-Id: I1a206fdbbb89c03b04eafe0ad850441094e363e5# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler._notifiers import base
from tests import test
class MessagingTestCase(test.TestCase):
def test_init_and_notify(self):
messaging = mock.MagicMock()
context = "context"
transport = "transport"
project = "project"
service = "service"
host = "host"
notify_func = base.Notifier.factory("Messaging", messaging, context,
transport, project, service, host)
messaging.Notifier.assert_called_once_with(
transport, publisher_id=host, driver="messaging", topic="profiler")
info = {
"a": 10
}
notify_func(info)
expected_data = {"project": project, "service": service}
expected_data.update(info)
messaging.Notifier().info.assert_called_once_with(
context, "profiler.%s" % service, expected_data)
messaging.reset_mock()
notify_func(info, context="my_context")
messaging.Notifier().info.assert_called_once_with(
"my_context", "profiler.%s" % service, expected_data)
|
<commit_before><commit_msg>Add missing tests for messaging notifier plugin
Change-Id: I1a206fdbbb89c03b04eafe0ad850441094e363e5<commit_after># Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from osprofiler._notifiers import base
from tests import test
class MessagingTestCase(test.TestCase):
def test_init_and_notify(self):
messaging = mock.MagicMock()
context = "context"
transport = "transport"
project = "project"
service = "service"
host = "host"
notify_func = base.Notifier.factory("Messaging", messaging, context,
transport, project, service, host)
messaging.Notifier.assert_called_once_with(
transport, publisher_id=host, driver="messaging", topic="profiler")
info = {
"a": 10
}
notify_func(info)
expected_data = {"project": project, "service": service}
expected_data.update(info)
messaging.Notifier().info.assert_called_once_with(
context, "profiler.%s" % service, expected_data)
messaging.reset_mock()
notify_func(info, context="my_context")
messaging.Notifier().info.assert_called_once_with(
"my_context", "profiler.%s" % service, expected_data)
|
|
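
One detail worth noting in the messaging test above: assert_called_once_with counts every recorded call, so the reset_mock() between the two notify_func invocations is what lets the second assertion pass. A standalone illustration using only the mock library:

import mock

m = mock.MagicMock()
m.info('first')
m.info.assert_called_once_with('first')
m.reset_mock()                             # wipe the recorded call history
m.info('second')
m.info.assert_called_once_with('second')   # passes only because of the reset
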
387b5732c0b2231580ae04bf5088ef7ce59b0d84
|
normalize_dataset.py
|
normalize_dataset.py
|
"""Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by
_)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing text files that should be normalized.')
parser.add_argument('output_dir', help='the directory where the '
'normalized data files should be saved.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
# load hist2modern dictionary
with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
for text_file in text_files:
print text_file
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
with codecs.open(in_file, 'rb', 'utf-8') as f:
lines = f.readlines()
with codecs.open(out_file, 'wb', 'utf-8') as f:
for line in lines:
parts = line.split('\t')
words = parts[1].split(' ')
new_words = []
for w in words:
wo = w.lower()
if wo in hist2modern:
new_words.append(hist2modern[wo])
else:
new_words.append(w)
f.write(u'{}\t{}\t{}'.format(parts[0],
' '.join(new_words),
parts[2]))
|
Add script to normalize the spelling in a dataset
|
Add script to normalize the spelling in a dataset
Added a script that normalizes the spelling in a dataset consisting of
files containing <sentence_id> \t <sentence> \t <label(s)>
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to normalize the spelling in a dataset
Added a script that normalizes the spelling in a dataset consisting of
files containing <sentence_id> \t <sentence> \t <label(s)>
|
"""Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by
_)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing text files that should be normalized.')
parser.add_argument('output_dir', help='the directory where the '
'normalized data files should be saved.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
# load hist2modern dictionary
with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
for text_file in text_files:
print text_file
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
with codecs.open(in_file, 'rb', 'utf-8') as f:
lines = f.readlines()
with codecs.open(out_file, 'wb', 'utf-8') as f:
for line in lines:
parts = line.split('\t')
words = parts[1].split(' ')
new_words = []
for w in words:
wo = w.lower()
if wo in hist2modern:
new_words.append(hist2modern[wo])
else:
new_words.append(w)
f.write(u'{}\t{}\t{}'.format(parts[0],
' '.join(new_words),
parts[2]))
|
<commit_before><commit_msg>Add script to normalize the spelling in a dataset
Added a script that normalizes the spelling in a dataset consisting of
files containing <sentence_id> \t <sentence> \t <label(s)><commit_after>
|
"""Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by
_)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing text files that should be normalized.')
parser.add_argument('output_dir', help='the directory where the '
'normalized data files should be saved.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
# load hist2modern dictionary
with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
for text_file in text_files:
print text_file
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
with codecs.open(in_file, 'rb', 'utf-8') as f:
lines = f.readlines()
with codecs.open(out_file, 'wb', 'utf-8') as f:
for line in lines:
parts = line.split('\t')
words = parts[1].split(' ')
new_words = []
for w in words:
wo = w.lower()
if wo in hist2modern:
new_words.append(hist2modern[wo])
else:
new_words.append(w)
f.write(u'{}\t{}\t{}'.format(parts[0],
' '.join(new_words),
parts[2]))
|
Add script to normalize the spelling in a dataset
Added a script that normalizes the spelling in a dataset consisting of
files containing <sentence_id> \t <sentence> \t <label(s)>"""Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by
_)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing text files that should be normalized.')
parser.add_argument('output_dir', help='the directory where the '
'normalized data files should be saved.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
# load hist2modern dictionary
with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
for text_file in text_files:
print text_file
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
with codecs.open(in_file, 'rb', 'utf-8') as f:
lines = f.readlines()
with codecs.open(out_file, 'wb', 'utf-8') as f:
for line in lines:
parts = line.split('\t')
words = parts[1].split(' ')
new_words = []
for w in words:
wo = w.lower()
if wo in hist2modern:
new_words.append(hist2modern[wo])
else:
new_words.append(w)
f.write(u'{}\t{}\t{}'.format(parts[0],
' '.join(new_words),
parts[2]))
|
<commit_before><commit_msg>Add script to normalize the spelling in a dataset
Added a script that normalizes the spelling in a dataset consisting of
files containing <sentence_id> \t <sentence> \t <label(s)><commit_after>"""Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by
_)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing text files that should be normalized.')
parser.add_argument('output_dir', help='the directory where the '
'normalized data files should be saved.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
# load hist2modern dictionary
with codecs.open('hist2modern_bwnt.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
for text_file in text_files:
print text_file
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
with codecs.open(in_file, 'rb', 'utf-8') as f:
lines = f.readlines()
with codecs.open(out_file, 'wb', 'utf-8') as f:
for line in lines:
parts = line.split('\t')
words = parts[1].split(' ')
new_words = []
for w in words:
wo = w.lower()
if wo in hist2modern:
new_words.append(hist2modern[wo])
else:
new_words.append(w)
f.write(u'{}\t{}\t{}'.format(parts[0],
' '.join(new_words),
parts[2]))
|
|
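
The Counter(...).most_common()[0][0] idiom in the normalization script picks the modern spelling that occurs most often among a word's listed alternatives; a tiny standalone check in the script's Python 2 dialect (the words are made up, not taken from hist2modern_bwnt.json):

from collections import Counter

alternatives = ['mens', 'mens', 'mensch']
print Counter(alternatives).most_common()[0][0]  # prints 'mens'
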
65e689dd66124fcaa0ce8ab9f5029b727fba18e2
|
src/compare_version_numbers.py
|
src/compare_version_numbers.py
|
"""
Source : https://oj.leetcode.com/problems/compare-version-numbers/
Author : Changxi Wu
Date : 2015-01-23
Compare two version numbers version1 and version2.
if version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three"; it is the fifth second-level revision of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(version1, version2):
list1 = map(int, version1.split('.'))
list2 = map(int, version2.split('.'))
max_length = len(list1) if len(list1) > len(list2) else len(list2)
for i in range(max_length):
value1 = value2 = 0
if i < len(list1):
value1 = list1[i]
if i < len(list2):
value2 = list2[i]
if value1 > value2:
return 1
elif value1 < value2:
return -1
return 0
if __name__ == '__main__':
version1_list = ['0.1','1.1','1.2','13.37','1','1.0']
version2_list = ['1.1','1.2','13.37','1','13.37','1.0']
result_list = [-1, -1, -1, 1, -1, 0]
max_length = len(version1_list)
success = True
for i in range(max_length):
result = compareVersion(version1_list[i], version2_list[i])
if result != result_list[i]:
success = False
print 'Input:', version1_list[i], version2_list[i]
print 'Output:', result
print 'Expected:', result_list[i]
if success:
print 'All tests are passed'
|
Add solution for compare version numbers
|
Add solution for compare version numbers
|
Python
|
mit
|
chancyWu/leetcode
|
Add solution for compare version numbers
|
"""
Source : https://oj.leetcode.com/problems/compare-version-numbers/
Author : Changxi Wu
Date : 2015-01-23
Compare two version numbers version1 and version2.
if version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three"; it is the fifth second-level revision of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(version1, version2):
list1 = map(int, version1.split('.'))
list2 = map(int, version2.split('.'))
max_length = len(list1) if len(list1) > len(list2) else len(list2)
for i in range(max_length):
value1 = value2 = 0
if i < len(list1):
value1 = list1[i]
if i < len(list2):
value2 = list2[i]
if value1 > value2:
return 1
elif value1 < value2:
return -1
return 0
if __name__ == '__main__':
version1_list = ['0.1','1.1','1.2','13.37','1','1.0']
version2_list = ['1.1','1.2','13.37','1','13.37','1.0']
result_list = [-1, -1, -1, 1, -1, 0]
max_length = len(version1_list)
success = True
for i in range(max_length):
result = compareVersion(version1_list[i], version2_list[i])
if result != result_list[i]:
success = False
print 'Input:', version1_list[i], version2_list[i]
print 'Output:', result
print 'Expected:', result_list[i]
if success:
print 'All tests are passed'
|
<commit_before><commit_msg>Add solution for compare version numbers<commit_after>
|
"""
Source : https://oj.leetcode.com/problems/compare-version-numbers/
Author : Changxi Wu
Date : 2015-01-23
Compare two version numbers version1 and version2.
if version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three"; it is the fifth second-level revision of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(version1, version2):
list1 = map(int, version1.split('.'))
list2 = map(int, version2.split('.'))
max_length = len(list1) if len(list1) > len(list2) else len(list2)
for i in range(max_length):
value1 = value2 = 0
if i < len(list1):
value1 = list1[i]
if i < len(list2):
value2 = list2[i]
if value1 > value2:
return 1
elif value1 < value2:
return -1
return 0
if __name__ == '__main__':
version1_list = ['0.1','1.1','1.2','13.37','1','1.0']
version2_list = ['1.1','1.2','13.37','1','13.37','1.0']
result_list = [-1, -1, -1, 1, -1, 0]
max_length = len(version1_list)
success = True
for i in range(max_length):
result = compareVersion(version1_list[i], version2_list[i])
if result != result_list[i]:
success = False
print 'Input:', version1_list[i], version2_list[i]
print 'Output:', result
print 'Expected:', result_list[i]
if success:
print 'All tests are passed'
|
Add solution for compare version numbers"""
Source : https://oj.leetcode.com/problems/compare-version-numbers/
Author : Changxi Wu
Date : 2015-01-23
Compare two version numbers version1 and version2.
if version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three"; it is the fifth second-level revision of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(version1, version2):
list1 = map(int, version1.split('.'))
list2 = map(int, version2.split('.'))
max_length = len(list1) if len(list1) > len(list2) else len(list2)
for i in range(max_length):
value1 = value2 = 0
if i < len(list1):
value1 = list1[i]
if i < len(list2):
value2 = list2[i]
if value1 > value2:
return 1
elif value1 < value2:
return -1
return 0
if __name__ == '__main__':
version1_list = ['0.1','1.1','1.2','13.37','1','1.0']
version2_list = ['1.1','1.2','13.37','1','13.37','1.0']
result_list = [-1, -1, -1, 1, -1, 0]
max_length = len(version1_list)
success = True
for i in range(max_length):
result = compareVersion(version1_list[i], version2_list[i])
if result != result_list[i]:
success = False
print 'Input:', version1_list[i], version2_list[i]
print 'Output:', result
print 'Expected:', result_list[i]
if success:
print 'All tests are passed'
|
<commit_before><commit_msg>Add solution for compare version numbers<commit_after>"""
Source : https://oj.leetcode.com/problems/compare-version-numbers/
Author : Changxi Wu
Date : 2015-01-23
Compare two version numbers version1 and version2.
if version1 > version2 return 1, if version1 < version2 return -1, otherwise return 0.
You may assume that the version strings are non-empty and contain only digits and the . character.
The . character does not represent a decimal point and is used to separate number sequences.
For instance, 2.5 is not "two and a half" or "half way to version three"; it is the fifth second-level revision of the second first-level revision.
Here is an example of version numbers ordering:
0.1 < 1.1 < 1.2 < 13.37
"""
# @param version1, a string
# @param version2, a string
# @return an integer
def compareVersion(version1, version2):
list1 = map(int, version1.split('.'))
list2 = map(int, version2.split('.'))
max_length = len(list1) if len(list1) > len(list2) else len(list2)
for i in range(max_length):
value1 = value2 = 0
if i < len(list1):
value1 = list1[i]
if i < len(list2):
value2 = list2[i]
if value1 > value2:
return 1
elif value1 < value2:
return -1
return 0
if __name__ == '__main__':
version1_list = ['0.1','1.1','1.2','13.37','1','1.0']
version2_list = ['1.1','1.2','13.37','1','13.37','1.0']
result_list = [-1, -1, -1, 1, -1, 0]
max_length = len(version1_list)
success = True
for i in range(max_length):
result = compareVersion(version1_list[i], version2_list[i])
if result != result_list[i]:
success = False
print 'Input:', version1_list[i], version2_list[i]
print 'Output:', result
print 'Expected:', result_list[i]
if success:
print 'All tests are passed'
|
|
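
Zero-padding the shorter version (the value1 = value2 = 0 defaults above) is what makes '1' compare equal to '1.0'. The same idea reads more compactly with izip_longest; an alternative sketch in the file's Python 2 dialect, not a replacement for the tested code:

from itertools import izip_longest

def compare_version_padded(v1, v2):
    # Pad the shorter version with '0' components before comparing pairwise.
    for a, b in izip_longest(v1.split('.'), v2.split('.'), fillvalue='0'):
        a, b = int(a), int(b)
        if a != b:
            return 1 if a > b else -1
    return 0
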
2dd5afae12dc7d58c3349f2df2694eeb77ca0298
|
examples/test_spinn_tracks4.py
|
examples/test_spinn_tracks4.py
|
import nengo
import nengo_pushbot
import numpy as np
model = nengo.Network()
with model:
input = nengo.Node(lambda t: [0.5*np.sin(t), 0.5*np.cos(t)])
a = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#c = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#d = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
#nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))
tracks = nengo_pushbot.Tracks(bot)
#def printout(t, x):
# print t, x
# return []
#tracks2 = nengo.Node(printout, size_in=2)
nengo.Connection(input, a, filter=0.01)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
nengo.Connection(a, tracks, filter=0.01)
#nengo.Connection(b, tracks2, filter=0.01)
#sim_normal = nengo.Simulator(model)
#sim_normal.run(5)
import nengo_spinnaker
sim = nengo_spinnaker.Simulator(model, use_serial=True)
sim.run(1000)
|
Test driving robot via serial input
|
Test driving robot via serial input
|
Python
|
mit
|
ctn-waterloo/nengo_pushbot,ctn-waterloo/nengo_pushbot
|
Test driving robot via serial input
|
import nengo
import nengo_pushbot
import numpy as np
model = nengo.Network()
with model:
input = nengo.Node(lambda t: [0.5*np.sin(t), 0.5*np.cos(t)])
a = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#c = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#d = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
#nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))
tracks = nengo_pushbot.Tracks(bot)
#def printout(t, x):
# print t, x
# return []
#tracks2 = nengo.Node(printout, size_in=2)
nengo.Connection(input, a, filter=0.01)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
nengo.Connection(a, tracks, filter=0.01)
#nengo.Connection(b, tracks2, filter=0.01)
#sim_normal = nengo.Simulator(model)
#sim_normal.run(5)
import nengo_spinnaker
sim = nengo_spinnaker.Simulator(model, use_serial=True)
sim.run(1000)
|
<commit_before><commit_msg>Test driving robot via serial input<commit_after>
|
import nengo
import nengo_pushbot
import numpy as np
model = nengo.Network()
with model:
input = nengo.Node(lambda t: [0.5*np.sin(t), 0.5*np.cos(t)])
a = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#c = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#d = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
#nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))
tracks = nengo_pushbot.Tracks(bot)
#def printout(t, x):
# print t, x
# return []
#tracks2 = nengo.Node(printout, size_in=2)
nengo.Connection(input, a, filter=0.01)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
nengo.Connection(a, tracks, filter=0.01)
#nengo.Connection(b, tracks2, filter=0.01)
#sim_normal = nengo.Simulator(model)
#sim_normal.run(5)
import nengo_spinnaker
sim = nengo_spinnaker.Simulator(model, use_serial=True)
sim.run(1000)
|
Test driving robot via serial inputimport nengo
import nengo_pushbot
import numpy as np
model = nengo.Network()
with model:
input = nengo.Node(lambda t: [0.5*np.sin(t), 0.5*np.cos(t)])
a = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#c = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#d = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
#nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))
tracks = nengo_pushbot.Tracks(bot)
#def printout(t, x):
# print t, x
# return []
#tracks2 = nengo.Node(printout, size_in=2)
nengo.Connection(input, a, filter=0.01)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
nengo.Connection(a, tracks, filter=0.01)
#nengo.Connection(b, tracks2, filter=0.01)
#sim_normal = nengo.Simulator(model)
#sim_normal.run(5)
import nengo_spinnaker
sim = nengo_spinnaker.Simulator(model, use_serial=True)
sim.run(1000)
|
<commit_before><commit_msg>Test driving robot via serial input<commit_after>import nengo
import nengo_pushbot
import numpy as np
model = nengo.Network()
with model:
input = nengo.Node(lambda t: [0.5*np.sin(t), 0.5*np.cos(t)])
a = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#c = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#d = nengo.Ensemble(nengo.LIF(100), dimensions=2)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
#nengo.Connection(a, a, transform=[[1.1, 0], [0, 1.1]], filter=0.1)
#b = nengo.Ensemble(nengo.LIF(100), dimensions=2)
bot = nengo_pushbot.PushBot(address=(0xFE, 0xFF, 1, 0, 0))
tracks = nengo_pushbot.Tracks(bot)
#def printout(t, x):
# print t, x
# return []
#tracks2 = nengo.Node(printout, size_in=2)
nengo.Connection(input, a, filter=0.01)
#nengo.Connection(a, b, filter=0.01)
#nengo.Connection(b, c, filter=0.01)
#nengo.Connection(c, d, filter=0.01)
nengo.Connection(a, tracks, filter=0.01)
#nengo.Connection(b, tracks2, filter=0.01)
#sim_normal = nengo.Simulator(model)
#sim_normal.run(5)
import nengo_spinnaker
sim = nengo_spinnaker.Simulator(model, use_serial=True)
sim.run(1000)
|
|
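The script above uses an older nengo API (ensembles built from nengo.LIF objects, filter= on connections) plus PushBot/SpiNNaker hardware. A hardware-free sketch of the same network shape on a recent nengo release, where synapse= replaced filter= and a passthrough Node stands in for the robot tracks; purely illustrative:

import numpy as np
import nengo

model = nengo.Network()
with model:
    stim = nengo.Node(lambda t: [0.5 * np.sin(t), 0.5 * np.cos(t)])
    a = nengo.Ensemble(100, dimensions=2)
    tracks = nengo.Node(size_in=2)  # passthrough stand-in for the motor tracks
    nengo.Connection(stim, a, synapse=0.01)
    nengo.Connection(a, tracks, synapse=0.01)

with nengo.Simulator(model) as sim:
    sim.run(1.0)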
d777a19bb804ae1a4268702da00d3138b028b386
|
contrib/dump_docs.py
|
contrib/dump_docs.py
|
#!/usr/bin/env python
"""
Start the process and dump the documentation to the doc dir
"""
import socket, subprocess, time, os
env = os.environ
env['L1FWD_BTS_HOST'] = '127.0.0.1'
bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote",
"-c", "./doc/examples/osmo-bts.cfg"], env = env,
stdin=None, stdout=None)
time.sleep(1)
try:
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.setblocking(1)
sck.connect(("localhost", 4241))
sck.recv(4096)
# Now send the command
sck.send("show online-help\r")
xml = ""
while True:
data = sck.recv(4096)
xml = "%s%s" % (xml, data)
if data.endswith('\r\nOsmoBTS> '):
break
# Now write everything until the end to the file
out = open('doc/vty_reference.xml', 'w')
out.write(xml[18:-11])
out.close()
finally:
# Clean-up
bts_proc.kill()
bts_proc.wait()
|
Add a python script to start sysmobts-remote and dump docs
|
contrib: Add a python script to start sysmobts-remote and dump docs
This starts sysmobts-remote and dumps the documentation about the
VTY to the doc/ directory.
$ ./contrib/dump_docs.py
this writes doc/vty_reference.xml
|
Python
|
agpl-3.0
|
osmocom/osmo-bts,telenoobie/osmo-bts,shimaore/osmo-bts,geosphere/osmo-bts,telenoobie/osmo-bts,shimaore/osmo-bts,osmocom/osmo-bts,shimaore/osmo-bts,geosphere/osmo-bts,geosphere/osmo-bts,telenoobie/osmo-bts,shimaore/osmo-bts,geosphere/osmo-bts,telenoobie/osmo-bts,osmocom/osmo-bts
|
contrib: Add a python script to start sysmobts-remote and dump docs
This starts sysmobts-remote and dumps the documentation about the
VTY to the doc/ directory.
$ ./contrib/dump_docs.py
this writes doc/vty_reference.xml
|
#!/usr/bin/env python
"""
Start the process and dump the documentation to the doc dir
"""
import socket, subprocess, time, os
env = os.environ
env['L1FWD_BTS_HOST'] = '127.0.0.1'
bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote",
"-c", "./doc/examples/osmo-bts.cfg"], env = env,
stdin=None, stdout=None)
time.sleep(1)
try:
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.setblocking(1)
sck.connect(("localhost", 4241))
sck.recv(4096)
# Now send the command
sck.send("show online-help\r")
xml = ""
while True:
data = sck.recv(4096)
xml = "%s%s" % (xml, data)
if data.endswith('\r\nOsmoBTS> '):
break
# Now write everything until the end to the file
out = open('doc/vty_reference.xml', 'w')
out.write(xml[18:-11])
out.close()
finally:
# Clean-up
bts_proc.kill()
bts_proc.wait()
|
<commit_before><commit_msg>contrib: Add a python script to start sysmobts-remote and dump docs
This starts sysmobts-remote and dumps the documentation about the
VTY to the doc/ directory.
$ ./contrib/dump_docs.py
this writes doc/vty_reference.xml<commit_after>
|
#!/usr/bin/env python
"""
Start the process and dump the documentation to the doc dir
"""
import socket, subprocess, time, os
env = os.environ
env['L1FWD_BTS_HOST'] = '127.0.0.1'
bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote",
"-c", "./doc/examples/osmo-bts.cfg"], env = env,
stdin=None, stdout=None)
time.sleep(1)
try:
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.setblocking(1)
sck.connect(("localhost", 4241))
sck.recv(4096)
# Now send the command
sck.send("show online-help\r")
xml = ""
while True:
data = sck.recv(4096)
xml = "%s%s" % (xml, data)
if data.endswith('\r\nOsmoBTS> '):
break
# Now write everything until the end to the file
out = open('doc/vty_reference.xml', 'w')
out.write(xml[18:-11])
out.close()
finally:
# Clean-up
bts_proc.kill()
bts_proc.wait()
|
contrib: Add a python script to start sysmobts-remote and dump docs
This starts sysmobts-remote and dumps the documentation about the
VTY to the doc/ directory.
$ ./contrib/dump_docs.py
this writes doc/vty_reference.xml#!/usr/bin/env python
"""
Start the process and dump the documentation to the doc dir
"""
import socket, subprocess, time, os
env = os.environ
env['L1FWD_BTS_HOST'] = '127.0.0.1'
bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote",
"-c", "./doc/examples/osmo-bts.cfg"], env = env,
stdin=None, stdout=None)
time.sleep(1)
try:
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.setblocking(1)
sck.connect(("localhost", 4241))
sck.recv(4096)
# Now send the command
sck.send("show online-help\r")
xml = ""
while True:
data = sck.recv(4096)
xml = "%s%s" % (xml, data)
if data.endswith('\r\nOsmoBTS> '):
break
# Now write everything until the end to the file
out = open('doc/vty_reference.xml', 'w')
out.write(xml[18:-11])
out.close()
finally:
# Clean-up
bts_proc.kill()
bts_proc.wait()
|
<commit_before><commit_msg>contrib: Add a python script to start sysmobts-remote and dump docs
This starts sysmobts-remote and dumps the documentation about the
VTY to the doc/ directory.
$ ./contrib/dump_docs.py
this writes doc/vty_reference.xml<commit_after>#!/usr/bin/env python
"""
Start the process and dump the documentation to the doc dir
"""
import socket, subprocess, time, os
env = os.environ
env['L1FWD_BTS_HOST'] = '127.0.0.1'
bts_proc = subprocess.Popen(["./src/osmo-bts-sysmo/sysmobts-remote",
"-c", "./doc/examples/osmo-bts.cfg"], env = env,
stdin=None, stdout=None)
time.sleep(1)
try:
sck = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sck.setblocking(1)
sck.connect(("localhost", 4241))
sck.recv(4096)
# Now send the command
sck.send("show online-help\r")
xml = ""
while True:
data = sck.recv(4096)
xml = "%s%s" % (xml, data)
if data.endswith('\r\nOsmoBTS> '):
break
# Now write everything until the end to the file
out = open('doc/vty_reference.xml', 'w')
out.write(xml[18:-11])
out.close()
finally:
# Clean-up
bts_proc.kill()
bts_proc.wait()
|
|
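In the script above, the slice xml[18:-11] silently strips the echoed command and the trailing prompt, and the recv loop assumes the peer never closes early. A small Python 3 helper that makes the read loop explicit — illustrative only, not part of the commit:

import socket

def read_until(sock, marker, bufsize=4096):
    # Collect bytes until the stream ends with marker (e.g. the VTY prompt).
    data = b''
    while not data.endswith(marker):
        chunk = sock.recv(bufsize)
        if not chunk:
            break  # peer closed the connection before the marker appeared
        data += chunk
    return data

# e.g. xml = read_until(sck, b'\r\nOsmoBTS> ')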
89c17110f9d17e99ea7686e884cfba91b4762d57
|
pybaseball/lahman.py
|
pybaseball/lahman.py
|
################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s = requests.get(url, stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
|
Add starter code for Lahman db
|
Add starter code for Lahman db
|
Python
|
mit
|
jldbc/pybaseball
|
Add starter code for Lahman db
|
################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s = requests.get(url, stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
|
<commit_before><commit_msg>Add starter code for Lahman db<commit_after>
|
################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s = requests.get(url, stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
|
Add starter code for Lahman db################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s = requests.get(url, stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
|
<commit_before><commit_msg>Add starter code for Lahman db<commit_after>################################################
# WORK IN PROGRESS: ADD LAHMAN DB TO PYBASEBALL
# TODO: Make a callable function that retrieves the Lahman db
# Considerations: users should have a way to pull just the parts they want
# within their code without having to write / save permanently. They should
# also have the option to write and save permanently if desired.
################################################
import requests
import zipfile
from io import BytesIO
from bs4 import BeautifulSoup
# Download zip file and extract all files into working directory
url = "http://seanlahman.com/files/database/baseballdatabank-2017.1.zip"
s = requests.get(url, stream=True)
z = zipfile.ZipFile(BytesIO(s.content))
z.extractall()
|
|
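One possible shape for the TODO above — a callable that fetches and unpacks the archive on demand; the function name and dest parameter are illustrative, not part of the commit:

import zipfile
from io import BytesIO

import requests

def download_lahman(url, dest='.'):
    # Fetch the archive into memory and unpack it under dest.
    response = requests.get(url)
    response.raise_for_status()
    with zipfile.ZipFile(BytesIO(response.content)) as archive:
        archive.extractall(dest)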
0e9e63a48c5f3e02fb49d0068363ac5442b39e37
|
discussion/models.py
|
discussion/models.py
|
from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
|
from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
body = models.TextField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
|
Add a body to posts
|
Add a body to posts
|
Python
|
bsd-2-clause
|
incuna/django-discussion,lehins/lehins-discussion,lehins/lehins-discussion,incuna/django-discussion,lehins/lehins-discussion
|
from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
Add a body to posts
|
from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
body = models.TextField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
|
<commit_before>from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
<commit_msg>Add a body to posts<commit_after>
|
from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
body = models.TextField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
|
from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
Add a body to postsfrom django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
body = models.TextField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
|
<commit_before>from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
<commit_msg>Add a body to posts<commit_after>from django.contrib.auth.models import User
from django.db import models
class Discussion(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
def __unicode__(self):
return self.name
class Post(models.Model):
discussion = models.ForeignKey(Discussion)
user = models.ForeignKey(User)
name = models.CharField(max_length=255)
slug = models.SlugField()
body = models.TextField()
posts_file = models.FileField(upload_to='uploads/posts',
blank=True, null=True)
def __unicode__(self):
return self.name
class Comment(models.Model):
post = models.ForeignKey(Post)
user = models.ForeignKey(User)
body = models.TextField()
comment_file = models.FileField(upload_to='uploads/comments',
blank=True, null=True)
def __unicode__(self):
return 'Comment on %s by %s' % (self.post.name, self.user)
|
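Adding the non-null body TextField to an existing Post table needs a default (or a data migration) on databases that already hold rows. A sketch in modern Django migration syntax — the app label and dependency name are assumptions, and the commit itself predates built-in migrations:

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [('discussion', '0001_initial')]
    operations = [
        migrations.AddField(
            model_name='post',
            name='body',
            field=models.TextField(default=''),
            preserve_default=False,
        ),
    ]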
20d77f66e0287b3aab08b4cf14f23e7e5672aefd
|
db_setup/nflpool_picks.py
|
db_setup/nflpool_picks.py
|
import sqlite3
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Player;
CREATE TABLE Picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL
key
afc_east_first TEXT NOT NULL
afc_east_second TEXT NOT NULL
afc_east_last TEXT NOT NULL
afc_north_first TEXT NOT NULL
afc_north_second TEXT NOT NULL
afc_north_last TEXT NOT NULL
afc_south_first TEXT NOT NULL
afc_south_second TEXT NOT NULL
afc_south_last TEXT NOT NULL
afc_west_first TEXT NOT NULL
afc_west_second TEXT NOT NULL
afc_west_last TEXT NOT NULL
nfc_east_first TEXT NOT NULL
nfc_east_second TEXT NOT NULL
nfc_east_last TEXT NOT NULL
nfc_north_first TEXT NOT NULL
nfc_north_second TEXT NOT NULL
nfc_north_last TEXT NOT NULL
nfc_south_first TEXT NOT NULL
nfc_south_second TEXT NOT NULL
nfc_south_last TEXT NOT NULL
nfc_west_first TEXT NOT NULL
nfc_west_second TEXT NOT NULL
nfc_west_last TEXT NOT NULL
afc_wildcard1 TEXT NOT NULL
afc_wildcard2 TEXT NOT NULL
nfc_wildcard1 TEXT NOT NULL
nfc_wildcard2 TEXT NOT NULL
afc_rushing_first TEXT NOT NULL
afc_rushing_second TEXT NOT NULL
afc_rushing_third TEXT NOT NULL
afc_passing_first TEXT NOT NULL
afc_passing_second TEXT NOT NULL
afc_passing_third TEXT NOT NULL
afc_receiving_first TEXT NOT NULL
afc_receiving_second TEXT NOT NULL
afc_receiving_third TEXT NOT NULL
afc_sacks_first TEXT NOT NULL
afc_sacks_second TEXT NOT NULL
afc_sacks_third TEXT NOT NULL
afc_int_first TEXT NOT NULL
afc_int_second TEXT NOT NULL
afc_int_third TEXT NOT NULL
nfc_rushing_first TEXT NOT NULL
nfc_rushing_second TEXT NOT NULL
nfc_rushing_third TEXT NOT NULL
nfc_passing_first TEXT NOT NULL
nfc_passing_second TEXT NOT NULL
nfc_passing_third TEXT NOT NULL
nfc_receiving_first TEXT NOT NULL
nfc_receiving_second TEXT NOT NULL
nfc_receiving_third TEXT NOT NULL
nfc_sacks_first TEXT NOT NULL
nfc_sacks_second TEXT NOT NULL
nfc_sacks_third TEXT NOT NULL
nfc_int_first TEXT NOT NULL
nfc_int_second TEXT NOT NULL
nfc_int_third TEXT NOT NULL
afc_pf TEXT NOT NULL
nfc_pf TEXT NOT NULL
specialteams_td TEXT NOT NULL
)
''')
conn.commit()
conn.close()
|
Create database import script for the Picks table (each NFLPool Player's picks for a given season)
|
Create database import script for the Picks table (each NFLPool Player's picks for a given season)
|
Python
|
mit
|
prcutler/nflpool,prcutler/nflpool
|
Create database import script for the Picks table (each NFLPool Player's picks for a given season)
|
import sqlite3
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Player;
CREATE TABLE Picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL
key
afc_east_first TEXT NOT NULL
afc_east_second TEXT NOT NULL
afc_east_last TEXT NOT NULL
afc_north_first TEXT NOT NULL
afc_north_second TEXT NOT NULL
afc_north_last TEXT NOT NULL
afc_south_first TEXT NOT NULL
afc_south_second TEXT NOT NULL
afc_south_last TEXT NOT NULL
afc_west_first TEXT NOT NULL
afc_west_second TEXT NOT NULL
afc_west_last TEXT NOT NULL
nfc_east_first TEXT NOT NULL
nfc_east_second TEXT NOT NULL
nfc_east_last TEXT NOT NULL
nfc_north_first TEXT NOT NULL
nfc_north_second TEXT NOT NULL
nfc_north_last TEXT NOT NULL
nfc_south_first TEXT NOT NULL
nfc_south_second TEXT NOT NULL
nfc_south_last TEXT NOT NULL
nfc_west_first TEXT NOT NULL
nfc_west_second TEXT NOT NULL
nfc_west_last TEXT NOT NULL
afc_wildcard1 TEXT NOT NULL
afc_wildcard2 TEXT NOT NULL
nfc_wildcard1 TEXT NOT NULL
nfc_wildcard2 TEXT NOT NULL
afc_rushing_first TEXT NOT NULL
afc_rushing_second TEXT NOT NULL
afc_rushing_third TEXT NOT NULL
afc_passing_first TEXT NOT NULL
afc_passing_second TEXT NOT NULL
afc_passing_third TEXT NOT NULL
afc_receiving_first TEXT NOT NULL
afc_receiving_second TEXT NOT NULL
afc_receiving_third TEXT NOT NULL
afc_sacks_first TEXT NOT NULL
afc_sacks_second TEXT NOT NULL
afc_sacks_third TEXT NOT NULL
afc_int_first TEXT NOT NULL
afc_int_second TEXT NOT NULL
afc_int_third TEXT NOT NULL
nfc_rushing_first TEXT NOT NULL
nfc_rushing_second TEXT NOT NULL
nfc_rushing_third TEXT NOT NULL
nfc_passing_first TEXT NOT NULL
nfc_passing_second TEXT NOT NULL
nfc_passing_third TEXT NOT NULL
nfc_receiving_first TEXT NOT NULL
nfc_receiving_second TEXT NOT NULL
nfc_receiving_third TEXT NOT NULL
nfc_sacks_first TEXT NOT NULL
nfc_sacks_second TEXT NOT NULL
nfc_sacks_third TEXT NOT NULL
nfc_int_first TEXT NOT NULL
nfc_int_second TEXT NOT NULL
nfc_int_third TEXT NOT NULL
afc_pf TEXT NOT NULL
nfc_pf TEXT NOT NULL
specialteams_td TEXT NOT NULL
)
''')
conn.commit()
conn.close()
|
<commit_before><commit_msg>Create database import script for the Picks table (each NFLPool Player's picks for a given season)<commit_after>
|
import sqlite3
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Player;
CREATE TABLE Picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL
key
afc_east_first TEXT NOT NULL
afc_east_second TEXT NOT NULL
afc_east_last TEXT NOT NULL
afc_north_first TEXT NOT NULL
afc_north_second TEXT NOT NULL
afc_north_last TEXT NOT NULL
afc_south_first TEXT NOT NULL
afc_south_second TEXT NOT NULL
afc_south_last TEXT NOT NULL
afc_west_first TEXT NOT NULL
afc_west_second TEXT NOT NULL
afc_west_last TEXT NOT NULL
nfc_east_first TEXT NOT NULL
nfc_east_second TEXT NOT NULL
nfc_east_last TEXT NOT NULL
nfc_north_first TEXT NOT NULL
nfc_north_second TEXT NOT NULL
nfc_north_last TEXT NOT NULL
nfc_south_first TEXT NOT NULL
nfc_south_second TEXT NOT NULL
nfc_south_last TEXT NOT NULL
nfc_west_first TEXT NOT NULL
nfc_west_second TEXT NOT NULL
nfc_west_last TEXT NOT NULL
afc_wildcard1 TEXT NOT NULL
afc_wildcard2 TEXT NOT NULL
nfc_wildcard1 TEXT NOT NULL
nfc_wildcard2 TEXT NOT NULL
afc_rushing_first TEXT NOT NULL
afc_rushing_second TEXT NOT NULL
afc_rushing_third TEXT NOT NULL
afc_passing_first TEXT NOT NULL
afc_passing_second TEXT NOT NULL
afc_passing_third TEXT NOT NULL
afc_receiving_first TEXT NOT NULL
afc_receiving_second TEXT NOT NULL
afc_receiving_third TEXT NOT NULL
afc_sacks_first TEXT NOT NULL
afc_sacks_second TEXT NOT NULL
afc_sacks_third TEXT NOT NULL
afc_int_first TEXT NOT NULL
afc_int_second TEXT NOT NULL
afc_int_third TEXT NOT NULL
nfc_rushing_first TEXT NOT NULL
nfc_rushing_second TEXT NOT NULL
nfc_rushing_third TEXT NOT NULL
nfc_passing_first TEXT NOT NULL
nfc_passing_second TEXT NOT NULL
nfc_passing_third TEXT NOT NULL
nfc_receiving_first TEXT NOT NULL
nfc_receiving_second TEXT NOT NULL
nfc_receiving_third TEXT NOT NULL
nfc_sacks_first TEXT NOT NULL
nfc_sacks_second TEXT NOT NULL
nfc_sacks_third TEXT NOT NULL
nfc_int_first TEXT NOT NULL
nfc_int_second TEXT NOT NULL
nfc_int_third TEXT NOT NULL
afc_pf TEXT NOT NULL
nfc_pf TEXT NOT NULL
specialteams_td TEXT NOT NULL
)
''')
conn.commit()
conn.close()
|
Create database import script for the Picks table (each NFLPool Player's picks for a given season)import sqlite3
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Player;
CREATE TABLE Picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL
key
afc_east_first TEXT NOT NULL
afc_east_second TEXT NOT NULL
afc_east_last TEXT NOT NULL
afc_north_first TEXT NOT NULL
afc_north_second TEXT NOT NULL
afc_north_last TEXT NOT NULL
afc_south_first TEXT NOT NULL
afc_south_second TEXT NOT NULL
afc_south_last TEXT NOT NULL
afc_west_first TEXT NOT NULL
afc_west_second TEXT NOT NULL
afc_west_last TEXT NOT NULL
nfc_east_first TEXT NOT NULL
nfc_east_second TEXT NOT NULL
nfc_east_last TEXT NOT NULL
nfc_north_first TEXT NOT NULL
nfc_north_second TEXT NOT NULL
nfc_north_last TEXT NOT NULL
nfc_south_first TEXT NOT NULL
nfc_south_second TEXT NOT NULL
nfc_south_last TEXT NOT NULL
nfc_west_first TEXT NOT NULL
nfc_west_second TEXT NOT NULL
nfc_west_last TEXT NOT NULL
afc_wildcard1 TEXT NOT NULL
afc_wildcard2 TEXT NOT NULL
nfc_wildcard1 TEXT NOT NULL
nfc_wildcard2 TEXT NOT NULL
afc_rushing_first TEXT NOT NULL
afc_rushing_second TEXT NOT NULL
afc_rushing_third TEXT NOT NULL
afc_passing_first TEXT NOT NULL
afc_passing_second TEXT NOT NULL
afc_passing_third TEXT NOT NULL
afc_receiving_first TEXT NOT NULL
afc_receiving_second TEXT NOT NULL
afc_receiving_third TEXT NOT NULL
afc_sacks_first TEXT NOT NULL
afc_sacks_second TEXT NOT NULL
afc_sacks_third TEXT NOT NULL
afc_int_first TEXT NOT NULL
afc_int_second TEXT NOT NULL
afc_int_third TEXT NOT NULL
nfc_rushing_first TEXT NOT NULL
nfc_rushing_second TEXT NOT NULL
nfc_rushing_third TEXT NOT NULL
nfc_passing_first TEXT NOT NULL
nfc_passing_second TEXT NOT NULL
nfc_passing_third TEXT NOT NULL
nfc_receiving_first TEXT NOT NULL
nfc_receiving_second TEXT NOT NULL
nfc_receiving_third TEXT NOT NULL
nfc_sacks_first TEXT NOT NULL
nfc_sacks_second TEXT NOT NULL
nfc_sacks_third TEXT NOT NULL
nfc_int_first TEXT NOT NULL
nfc_int_second TEXT NOT NULL
nfc_int_third TEXT NOT NULL
afc_pf TEXT NOT NULL
nfc_pf TEXT NOT NULL
specialteams_td TEXT NOT NULL
)
''')
conn.commit()
conn.close()
|
<commit_before><commit_msg>Create database import script for the Picks table (each NFLPool Player's picks for a given season)<commit_after>import sqlite3
conn = sqlite3.connect('nflpool.sqlite')
cur = conn.cursor()
# Do some setup
cur.executescript('''
DROP TABLE IF EXISTS Player;
CREATE TABLE Picks (
firstname TEXT NOT NULL,
lastname TEXT NOT NULL,
id INTEGER NOT NULL PRIMARY KEY UNIQUE,
season TEXT NOT NULL UNIQUE,
email TEXT NOT NULL UNIQUE,
timestamp TEXT NOT NULL
key
afc_east_first TEXT NOT NULL
afc_east_second TEXT NOT NULL
afc_east_last TEXT NOT NULL
afc_north_first TEXT NOT NULL
afc_north_second TEXT NOT NULL
afc_north_last TEXT NOT NULL
afc_south_first TEXT NOT NULL
afc_south_second TEXT NOT NULL
afc_south_last TEXT NOT NULL
afc_west_first TEXT NOT NULL
afc_west_second TEXT NOT NULL
afc_west_last TEXT NOT NULL
nfc_east_first TEXT NOT NULL
nfc_east_second TEXT NOT NULL
nfc_east_last TEXT NOT NULL
nfc_north_first TEXT NOT NULL
nfc_north_second TEXT NOT NULL
nfc_north_last TEXT NOT NULL
nfc_south_first TEXT NOT NULL
nfc_south_second TEXT NOT NULL
nfc_south_last TEXT NOT NULL
nfc_west_first TEXT NOT NULL
nfc_west_second TEXT NOT NULL
nfc_west_last TEXT NOT NULL
afc_wildcard1 TEXT NOT NULL
afc_wildcard2 TEXT NOT NULL
nfc_wildcard1 TEXT NOT NULL
nfc_wildcard2 TEXT NOT NULL
afc_rushing_first TEXT NOT NULL
afc_rushing_second TEXT NOT NULL
afc_rushing_third TEXT NOT NULL
afc_passing_first TEXT NOT NULL
afc_passing_second TEXT NOT NULL
afc_passing_third TEXT NOT NULL
afc_receiving_first TEXT NOT NULL
afc_receiving_second TEXT NOT NULL
afc_receiving_third TEXT NOT NULL
afc_sacks_first TEXT NOT NULL
afc_sacks_second TEXT NOT NULL
afc_sacks_third TEXT NOT NULL
afc_int_first TEXT NOT NULL
afc_int_second TEXT NOT NULL
afc_int_third TEXT NOT NULL
nfc_rushing_first TEXT NOT NULL
nfc_rushing_second TEXT NOT NULL
nfc_rushing_third TEXT NOT NULL
nfc_passing_first TEXT NOT NULL
nfc_passing_second TEXT NOT NULL
nfc_passing_third TEXT NOT NULL
nfc_receiving_first TEXT NOT NULL
nfc_receiving_second TEXT NOT NULL
nfc_receiving_third TEXT NOT NULL
nfc_sacks_first TEXT NOT NULL
nfc_sacks_second TEXT NOT NULL
nfc_sacks_third TEXT NOT NULL
nfc_int_first TEXT NOT NULL
nfc_int_second TEXT NOT NULL
nfc_int_third TEXT NOT NULL
afc_pf TEXT NOT NULL
nfc_pf TEXT NOT NULL
specialteams_td TEXT NOT NULL
)
''')
conn.commit()
conn.close()
|
|
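As committed, the executescript block will not parse: the column definitions after timestamp are missing their separating commas, a stray key line sits in the middle, and the script drops a Player table while creating Picks. UNIQUE on season and email would also limit the whole table to one row per season and one per address. A corrected sketch of the opening of the statement, with the remaining pick columns elided:

cur.executescript('''
DROP TABLE IF EXISTS Picks;
CREATE TABLE Picks (
    id INTEGER NOT NULL PRIMARY KEY,
    firstname TEXT NOT NULL,
    lastname TEXT NOT NULL,
    season TEXT NOT NULL,
    email TEXT NOT NULL,
    timestamp TEXT NOT NULL,
    afc_east_first TEXT NOT NULL,
    afc_east_second TEXT NOT NULL,
    afc_east_last TEXT NOT NULL
    -- ...remaining pick columns, each comma-separated...
)
''')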
8ad4627973db344e228a9170aef030ab58efdeb9
|
src/ggrc/converters/__init__.py
|
src/ggrc/converters/__init__.py
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
COLUMN_ORDER = (
"slug",
"title",
"description",
"notes",
"owners",
)
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
|
Add column order and importable objects lists
|
Add column order and importable objects lists
|
Python
|
apache-2.0
|
edofic/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,uskudnik/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,uskudnik/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
Add column order and importable objects lists
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
COLUMN_ORDER = (
"slug",
"title",
"description",
"notes",
"owners",
)
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
|
<commit_before># Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
<commit_msg>Add column order and importable objects lists<commit_after>
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
COLUMN_ORDER = (
"slug",
"title",
"description",
"notes",
"owners",
)
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
|
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
Add column order and importable objects lists# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
COLUMN_ORDER = (
"slug",
"title",
"description",
"notes",
"owners",
)
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
|
<commit_before># Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
<commit_msg>Add column order and importable objects lists<commit_after># Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: dan@reciprocitylabs.com
from ggrc.converters.sections import SectionsConverter
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
all_converters = [('sections', SectionsConverter)]
HANDLERS = {}
def get_converter(name):
return all_converters(name)
COLUMN_ORDER = (
"slug",
"title",
"description",
"notes",
"owners",
)
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
|
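Note that get_converter above calls the list all_converters as if it were a function, which raises TypeError. A dict lookup is presumably what was intended — a sketch:

all_converters = dict([('sections', SectionsConverter)])

def get_converter(name):
    # Look the converter up by name instead of calling the list.
    return all_converters[name]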
8141d6cafb4a1c8986ec7065f27d536d98cc9916
|
Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py
|
Modules/Biophotonics/python/iMC/script_plot_one_spectrum.py
|
'''
Created on Oct 12, 2015
@author: wirkert
'''
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import luigi
import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp
sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"
# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05, # bvf
0.0, # SaO2
0.0, # bilirubin
500., # a_mie
0.0, # a_ray
1.091, # b (for scattering)
500. * 10 ** -6]) # d_muc
class PlotOneSpectrum(luigi.Task):
batch_prefix = luigi.Parameter()
def requires(self):
return rt.TrainForestForwardModel(self.batch_prefix)
def run(self):
f = file(self.input().path, "r")
rf = pickle.load(f)
f.close()
refl = rf.predict(PARAMS)
msi = Msi(refl)
msi.set_wavelengths(RECORDED_WAVELENGTHS)
norm.standard_normalizer.normalize(msi)
plot(msi)
plt.gca().set_xlabel("wavelength")
plt.gca().set_ylabel("normalized reflectance")
plt.grid()
plt.ylim([0.0, 0.4])
plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
"; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
"; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
luigi.interface.setup_interface_logging()
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
main_task = PlotOneSpectrum(batch_prefix=
"jacques_no_billi_generic_scattering_")
w.add(main_task)
w.run()
|
Add little script to calculate sample spectra.
|
Add little script to calculate sample spectra.
|
Python
|
bsd-3-clause
|
RabadanLab/MITKats,MITK/MITK,MITK/MITK,RabadanLab/MITKats,MITK/MITK,MITK/MITK,fmilano/mitk,fmilano/mitk,fmilano/mitk,iwegner/MITK,RabadanLab/MITKats,iwegner/MITK,RabadanLab/MITKats,iwegner/MITK,RabadanLab/MITKats,MITK/MITK,fmilano/mitk,iwegner/MITK,fmilano/mitk,iwegner/MITK,fmilano/mitk,fmilano/mitk,RabadanLab/MITKats,MITK/MITK,iwegner/MITK
|
Add little script to calculate sample spectra.
|
'''
Created on Oct 12, 2015
@author: wirkert
'''
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import luigi
import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp
sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"
# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05, # bvf
0.0, # SaO2
0.0, # bilirubin
500., # a_mie
0.0, # a_ray
1.091, # b (for scattering)
500. * 10 ** -6]) # d_muc
class PlotOneSpectrum(luigi.Task):
batch_prefix = luigi.Parameter()
def requires(self):
return rt.TrainForestForwardModel(self.batch_prefix)
def run(self):
f = file(self.input().path, "r")
rf = pickle.load(f)
f.close()
refl = rf.predict(PARAMS)
msi = Msi(refl)
msi.set_wavelengths(RECORDED_WAVELENGTHS)
norm.standard_normalizer.normalize(msi)
plot(msi)
plt.gca().set_xlabel("wavelength")
plt.gca().set_ylabel("normalized reflectance")
plt.grid()
plt.ylim([0.0, 0.4])
plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
"; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
"; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
luigi.interface.setup_interface_logging()
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
main_task = PlotOneSpectrum(batch_prefix=
"jacques_no_billi_generic_scattering_")
w.add(main_task)
w.run()
|
<commit_before><commit_msg>Add little script to calculate sample spectra.<commit_after>
|
'''
Created on Oct 12, 2015
@author: wirkert
'''
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import luigi
import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp
sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"
# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05, # bvf
0.0, # SaO2
0.0, # bilirubin
500., # a_mie
0.0, # a_ray
1.091, # b (for scattering)
500. * 10 ** -6]) # d_muc
class PlotOneSpectrum(luigi.Task):
batch_prefix = luigi.Parameter()
def requires(self):
return rt.TrainForestForwardModel(self.batch_prefix)
def run(self):
f = file(self.input().path, "r")
rf = pickle.load(f)
f.close()
refl = rf.predict(PARAMS)
msi = Msi(refl)
msi.set_wavelengths(RECORDED_WAVELENGTHS)
norm.standard_normalizer.normalize(msi)
plot(msi)
plt.gca().set_xlabel("wavelength")
plt.gca().set_ylabel("normalized reflectance")
plt.grid()
plt.ylim([0.0, 0.4])
plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
"; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
"; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
luigi.interface.setup_interface_logging()
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
main_task = PlotOneSpectrum(batch_prefix=
"jacques_no_billi_generic_scattering_")
w.add(main_task)
w.run()
|
Add little script to calculate sample spectra.'''
Created on Oct 12, 2015
@author: wirkert
'''
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import luigi
import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp
sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"
# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05, # bvf
0.0, # SaO2
0.0, # bilirubin
500., # a_mie
0.0, # a_ray
1.091, # b (for scattering)
500. * 10 ** -6]) # d_muc
class PlotOneSpectrum(luigi.Task):
batch_prefix = luigi.Parameter()
def requires(self):
return rt.TrainForestForwardModel(self.batch_prefix)
def run(self):
f = file(self.input().path, "r")
rf = pickle.load(f)
f.close()
refl = rf.predict(PARAMS)
msi = Msi(refl)
msi.set_wavelengths(RECORDED_WAVELENGTHS)
norm.standard_normalizer.normalize(msi)
plot(msi)
plt.gca().set_xlabel("wavelength")
plt.gca().set_ylabel("normalized reflectance")
plt.grid()
plt.ylim([0.0, 0.4])
plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
"; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
"; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
luigi.interface.setup_interface_logging()
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
main_task = PlotOneSpectrum(batch_prefix=
"jacques_no_billi_generic_scattering_")
w.add(main_task)
w.run()
|
<commit_before><commit_msg>Add little script to calculate sample spectra.<commit_after>'''
Created on Oct 12, 2015
@author: wirkert
'''
import pickle
import logging
import numpy as np
import matplotlib.pyplot as plt
import luigi
import tasks_regression as rt
from msi.plot import plot
from msi.msi import Msi
import msi.normalize as norm
import scriptpaths as sp
sp.ROOT_FOLDER = "/media/wirkert/data/Data/2015_xxxx_plot_one_spectrum"
# the wavelengths recorded by our camera
RECORDED_WAVELENGTHS = \
np.array([580, 470, 660, 560, 480, 511, 600, 700]) * 10 ** -9
PARAMS = np.array([0.05, # bvf
0.0, # SaO2
0.0, # bilirubin
500., # a_mie
0.0, # a_ray
1.091, # b (for scattering)
500. * 10 ** -6]) # d_muc
class PlotOneSpectrum(luigi.Task):
batch_prefix = luigi.Parameter()
def requires(self):
return rt.TrainForestForwardModel(self.batch_prefix)
def run(self):
f = file(self.input().path, "r")
rf = pickle.load(f)
f.close()
refl = rf.predict(PARAMS)
msi = Msi(refl)
msi.set_wavelengths(RECORDED_WAVELENGTHS)
norm.standard_normalizer.normalize(msi)
plot(msi)
plt.gca().set_xlabel("wavelength")
plt.gca().set_ylabel("normalized reflectance")
plt.grid()
plt.ylim([0.0, 0.4])
plt.title("bvf: " + str(PARAMS[0]) + "; saO2: " + str(PARAMS[1]) +
"; bili: " + str(PARAMS[2]) + "; a_mie: " + str(PARAMS[3]) +
"; a_ray: " + str(PARAMS[4]) + "; d_muc: " + str(PARAMS[6]))
plt.show()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
luigi.interface.setup_interface_logging()
sch = luigi.scheduler.CentralPlannerScheduler()
w = luigi.worker.Worker(scheduler=sch)
main_task = PlotOneSpectrum(batch_prefix=
"jacques_no_billi_generic_scattering_")
w.add(main_task)
w.run()
|
|
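Two portability notes on the script above: file() is a Python 2 builtin, and recent scikit-learn releases reject 1-D arrays in predict. If the pickled forward model is a scikit-learn regressor, the load-and-predict step would look roughly like this under Python 3 — illustrative, not part of the commit (model_path is a placeholder):

import pickle

with open(model_path, 'rb') as f:  # model_path is a placeholder
    rf = pickle.load(f)
refl = rf.predict(PARAMS.reshape(1, -1))[0]  # predict expects a 2-D sample array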
6be3e0c5264ca2750a77ac1dbd4175502e51fd3c
|
ceph_deploy/tests/parser/test_admin.py
|
ceph_deploy/tests/parser/test_admin.py
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserAdmin(object):
def setup(self):
self.parser = get_parser()
def test_admin_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy admin' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_admin_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_admin_one_host(self):
args = self.parser.parse_args('admin host1'.split())
assert args.client == ['host1']
def test_admin_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['admin'] + hostnames)
assert args.client == hostnames
|
Add argparse tests for ceph-deploy admin
|
[RM-11742] Add argparse tests for ceph-deploy admin
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
Python
|
mit
|
Vicente-Cheng/ceph-deploy,ceph/ceph-deploy,codenrhoden/ceph-deploy,shenhequnying/ceph-deploy,branto1/ceph-deploy,ghxandsky/ceph-deploy,imzhulei/ceph-deploy,isyippee/ceph-deploy,trhoden/ceph-deploy,zhouyuan/ceph-deploy,Vicente-Cheng/ceph-deploy,zhouyuan/ceph-deploy,ceph/ceph-deploy,osynge/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,isyippee/ceph-deploy,branto1/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,osynge/ceph-deploy,imzhulei/ceph-deploy,SUSE/ceph-deploy,shenhequnying/ceph-deploy,trhoden/ceph-deploy,ghxandsky/ceph-deploy,codenrhoden/ceph-deploy,SUSE/ceph-deploy
|
[RM-11742] Add argparse tests for ceph-deploy admin
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserAdmin(object):
def setup(self):
self.parser = get_parser()
def test_admin_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy admin' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_admin_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_admin_one_host(self):
args = self.parser.parse_args('admin host1'.split())
assert args.client == ['host1']
def test_admin_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['admin'] + hostnames)
assert args.client == hostnames
|
<commit_before><commit_msg>[RM-11742] Add argparse tests for ceph-deploy admin
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>
|
import pytest
from ceph_deploy.cli import get_parser
class TestParserAdmin(object):
def setup(self):
self.parser = get_parser()
def test_admin_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy admin' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_admin_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_admin_one_host(self):
args = self.parser.parse_args('admin host1'.split())
assert args.client == ['host1']
def test_admin_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['admin'] + hostnames)
assert args.client == hostnames
|
[RM-11742] Add argparse tests for ceph-deploy admin
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>import pytest
from ceph_deploy.cli import get_parser
class TestParserAdmin(object):
def setup(self):
self.parser = get_parser()
def test_admin_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy admin' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_admin_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_admin_one_host(self):
args = self.parser.parse_args('admin host1'.split())
assert args.client == ['host1']
def test_admin_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['admin'] + hostnames)
assert args.client == hostnames
|
<commit_before><commit_msg>[RM-11742] Add argparse tests for ceph-deploy admin
Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com><commit_after>import pytest
from ceph_deploy.cli import get_parser
class TestParserAdmin(object):
def setup(self):
self.parser = get_parser()
def test_admin_help(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin --help'.split())
out, err = capsys.readouterr()
assert 'usage: ceph-deploy admin' in out
assert 'positional arguments:' in out
assert 'optional arguments:' in out
def test_admin_host_required(self, capsys):
with pytest.raises(SystemExit):
self.parser.parse_args('admin'.split())
out, err = capsys.readouterr()
assert "error: too few arguments" in err
def test_admin_one_host(self):
args = self.parser.parse_args('admin host1'.split())
assert args.client == ['host1']
def test_admin_multiple_hosts(self):
hostnames = ['host1', 'host2', 'host3']
args = self.parser.parse_args(['admin'] + hostnames)
assert args.client == hostnames
|
|
2523d34d4f3e26a408c7ec0e43708efea77f03a9
|
workflow/cndic_naver_search.py
|
workflow/cndic_naver_search.py
|
# Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'http://ac.cndic.naver.com/ac2'
params = dict(q=word,
_callback='',
q_enc='utf-8',
st=11,
r_lt='00',
t_koreng=1,
r_format='json',
r_enc='utf-8',
r_unicode=0,
r_escape=1)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
import cgi;
args = wf.args[0]
wf.add_item(title = 'Search Naver Cndic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("cn_%s" % args, wrapper, max_age=600)
for item in res_json['items']:
for ltxt in item:
if len(ltxt) > 0:
txt = ltxt[0][0];
rtxt = cgi.escape(ltxt[1][0]);
wf.add_item(title = u"%s %s" % (txt, rtxt) ,
subtitle = 'Search Naver Cndic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
Add support for the Chinese library
|
Add support for the Chinese library
|
Python
|
mit
|
Kuniz/alfnaversearch,Kuniz/alfnaversearch
|
Add support for the Chinese library
|
# Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'http://ac.cndic.naver.com/ac2'
params = dict(q=word,
_callback='',
q_enc='utf-8',
st=11,
r_lt='00',
t_koreng=1,
r_format='json',
r_enc='utf-8',
r_unicode=0,
r_escape=1)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
import cgi;
args = wf.args[0]
wf.add_item(title = 'Search Naver Cndic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("cn_%s" % args, wrapper, max_age=600)
for item in res_json['items']:
for ltxt in item:
if len(ltxt) > 0:
txt = ltxt[0][0];
rtxt = cgi.escape(ltxt[1][0]);
wf.add_item(title = u"%s %s" % (txt, rtxt) ,
subtitle = 'Search Naver Cndic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
<commit_before><commit_msg>Add support for the Chinese library<commit_after>
|
# Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'http://ac.cndic.naver.com/ac2'
params = dict(q=word,
_callback='',
q_enc='utf-8',
st=11,
r_lt='00',
t_koreng=1,
r_format='json',
r_enc='utf-8',
r_unicode=0,
r_escape=1)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
import cgi;
args = wf.args[0]
wf.add_item(title = 'Search Naver Cndic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("cn_%s" % args, wrapper, max_age=600)
for item in res_json['items']:
for ltxt in item:
if len(ltxt) > 0:
txt = ltxt[0][0];
rtxt = cgi.escape(ltxt[1][0]);
wf.add_item(title = u"%s %s" % (txt, rtxt) ,
subtitle = 'Search Naver Cndic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
Add support for the Chinese library# Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'http://ac.cndic.naver.com/ac2'
params = dict(q=word,
_callback='',
q_enc='utf-8',
st=11,
r_lt='00',
t_koreng=1,
r_format='json',
r_enc='utf-8',
r_unicode=0,
r_escape=1)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
import cgi;
args = wf.args[0]
wf.add_item(title = 'Search Naver Cndic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("cn_%s" % args, wrapper, max_age=600)
for item in res_json['items']:
for ltxt in item:
if len(ltxt) > 0:
txt = ltxt[0][0];
rtxt = cgi.escape(ltxt[1][0]);
wf.add_item(title = u"%s %s" % (txt, rtxt) ,
subtitle = 'Search Naver Cndic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
<commit_before><commit_msg>Add support for the Chinese library<commit_after># Naver Search Workflow for Alfred 2
# Copyright (C) 2013 Jinuk Baek
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from workflow import web, Workflow
def get_dictionary_data(word):
url = 'http://ac.cndic.naver.com/ac2'
params = dict(q=word,
_callback='',
q_enc='utf-8',
st=11,
r_lt='00',
t_koreng=1,
r_format='json',
r_enc='utf-8',
r_unicode=0,
r_escape=1)
r = web.get(url, params)
r.raise_for_status()
return r.json()
def main(wf):
import cgi;
args = wf.args[0]
wf.add_item(title = 'Search Naver Cndic for \'%s\'' % args,
autocomplete=args,
arg=args,
valid=True)
def wrapper():
return get_dictionary_data(args)
res_json = wf.cached_data("cn_%s" % args, wrapper, max_age=600)
for item in res_json['items']:
for ltxt in item:
if len(ltxt) > 0:
txt = ltxt[0][0];
rtxt = cgi.escape(ltxt[1][0]);
wf.add_item(title = u"%s %s" % (txt, rtxt) ,
subtitle = 'Search Naver Cndic for \'%s\'' % txt,
autocomplete=txt,
arg=txt,
valid=True);
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
|
|
1e7b84155623691fb9fc1cec4efa6386938f3e72
|
core/migrations/0055_update_username_validators.py
|
core/migrations/0055_update_username_validators.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-22 22:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_add_provider__cloud_config_and_timezone'),
]
operations = [
migrations.AlterField(
model_name='atmosphereuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
|
Add missing migration (updating validators=)
|
Add missing migration (updating validators=)
|
Python
|
apache-2.0
|
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
|
Add missing migration (updating validators=)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-22 22:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_add_provider__cloud_config_and_timezone'),
]
operations = [
migrations.AlterField(
model_name='atmosphereuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add missing migration (updating validators=)<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-22 22:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_add_provider__cloud_config_and_timezone'),
]
operations = [
migrations.AlterField(
model_name='atmosphereuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
|
Add missing migration (updating validators=)# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-22 22:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_add_provider__cloud_config_and_timezone'),
]
operations = [
migrations.AlterField(
model_name='atmosphereuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
|
<commit_before><commit_msg>Add missing migration (updating validators=)<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-22 22:03
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0054_add_provider__cloud_config_and_timezone'),
]
operations = [
migrations.AlterField(
model_name='atmosphereuser',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, unique=True, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.')], verbose_name='username'),
),
]
|
|
48217e5317412a9b5fb8181b6915963783efeaf2
|
tests/test_historical_klines.py
|
tests/test_historical_klines.py
|
#!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client('api_key', 'api_secret')
def test_exact_amount():
"""Test Exact amount returned"""
first_res = []
row = [1519892340000,"0.00099400","0.00099810","0.00099400","0.00099810","4806.04000000",1519892399999,"4.78553253",154,"1785.14000000","1.77837524","0"]
for i in range(0, 500):
first_res.append(row)
second_res = []
with requests_mock.mock() as m:
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)
client.get_historical_klines(
symbol="BNBBTC",
interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1st March 2018"
)
|
Add test for kline result of exact amount
|
Add test for kline result of exact amount
|
Python
|
mit
|
sammchardy/python-binance
|
Add test for kline result of exact amount
|
#!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client('api_key', 'api_secret')
def test_exact_amount():
"""Test Exact amount returned"""
first_res = []
row = [1519892340000,"0.00099400","0.00099810","0.00099400","0.00099810","4806.04000000",1519892399999,"4.78553253",154,"1785.14000000","1.77837524","0"]
for i in range(0, 500):
first_res.append(row)
second_res = []
with requests_mock.mock() as m:
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)
client.get_historical_klines(
symbol="BNBBTC",
interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1st March 2018"
)
|
<commit_before><commit_msg>Add test for kline result of exact amount<commit_after>
|
#!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client('api_key', 'api_secret')
def test_exact_amount():
"""Test Exact amount returned"""
first_res = []
row = [1519892340000,"0.00099400","0.00099810","0.00099400","0.00099810","4806.04000000",1519892399999,"4.78553253",154,"1785.14000000","1.77837524","0"]
for i in range(0, 500):
first_res.append(row)
second_res = []
with requests_mock.mock() as m:
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)
client.get_historical_klines(
symbol="BNBBTC",
interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1st March 2018"
)
|
Add test for kline result of exact amount#!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client('api_key', 'api_secret')
def test_exact_amount():
"""Test Exact amount returned"""
first_res = []
row = [1519892340000,"0.00099400","0.00099810","0.00099400","0.00099810","4806.04000000",1519892399999,"4.78553253",154,"1785.14000000","1.77837524","0"]
for i in range(0, 500):
first_res.append(row)
second_res = []
with requests_mock.mock() as m:
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)
client.get_historical_klines(
symbol="BNBBTC",
interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1st March 2018"
)
|
<commit_before><commit_msg>Add test for kline result of exact amount<commit_after>#!/usr/bin/env python
# coding=utf-8
from binance.client import Client
import pytest
import requests_mock
client = Client('api_key', 'api_secret')
def test_exact_amount():
"""Test Exact amount returned"""
first_res = []
row = [1519892340000,"0.00099400","0.00099810","0.00099400","0.00099810","4806.04000000",1519892399999,"4.78553253",154,"1785.14000000","1.77837524","0"]
for i in range(0, 500):
first_res.append(row)
second_res = []
with requests_mock.mock() as m:
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519862400000&symbol=BNBBTC', json=first_res)
m.get('https://api.binance.com/api/v1/klines?interval=1m&limit=500&startTime=1519892400000&symbol=BNBBTC', json=second_res)
client.get_historical_klines(
symbol="BNBBTC",
interval=Client.KLINE_INTERVAL_1MINUTE,
start_str="1st March 2018"
)
|
|
c663f6b6e31832fae682c2c527955b13682b701e
|
course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py
|
course_discovery/apps/course_metadata/migrations/0127_remove_courserun_learner_testimonials.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 17:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0126_course_has_ofac_restrictions'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='learner_testimonials',
),
]
|
Remove learner_testimonials column from course_metadata course run table
|
Remove learner_testimonials column from course_metadata course run table
|
Python
|
agpl-3.0
|
edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery
|
Remove learner_testimonials column from course_metadata course run table
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 17:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0126_course_has_ofac_restrictions'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='learner_testimonials',
),
]
|
<commit_before><commit_msg>Remove learner_testimonials column from course_metadata course run table<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 17:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0126_course_has_ofac_restrictions'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='learner_testimonials',
),
]
|
Remove learner_testimonials column from course_metadata course run table# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 17:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0126_course_has_ofac_restrictions'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='learner_testimonials',
),
]
|
<commit_before><commit_msg>Remove learner_testimonials column from course_metadata course run table<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-07 17:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('course_metadata', '0126_course_has_ofac_restrictions'),
]
operations = [
migrations.RemoveField(
model_name='courserun',
name='learner_testimonials',
),
]
|
|
b75e10f3235e9215458071279b67910627a95180
|
ceam/framework/celery_tasks.py
|
ceam/framework/celery_tasks.py
|
import os
from time import time
import logging
import pandas as pd
from celery import Celery
from billiard import current_process
app = Celery()
@app.task(autoretry_for=(Exception,), max_retries=2)
def worker(draw_number, component_config, branch_config, logging_directory):
worker = current_process().index
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)
logging.info('Starting job: {}'.format((draw_number, component_config, branch_config)))
run_configuration = component_config['configuration'].get('run_configuration', {})
results_directory = run_configuration['results_directory']
run_configuration['run_id'] = str(worker)+'_'+str(time())
if branch_config is not None:
run_configuration['run_key'] = dict(branch_config)
run_configuration['run_key']['draw'] = draw_number
component_config['configuration']['run_configuration'] = run_configuration
try:
from ceam.framework.engine import configure, run
from ceam.framework.components import prepare_component_configuration
from ceam.framework.util import collapse_nested_dict
configure(draw_number=draw_number, simulation_config=branch_config)
results = run(prepare_component_configuration(component_config))
results = pd.DataFrame(results, index=[draw_number]).to_json()
return results
except Exception as e:
logging.exception('Unhandled exception in worker')
raise
finally:
logging.info('Exiting job: {}'.format((draw_number, component_config, branch_config)))
|
Add celery-based job runner
|
Add celery-based job runner
|
Python
|
bsd-3-clause
|
ihmeuw/vivarium
|
Add celery-based job runner
|
import os
from time import time
import logging
import pandas as pd
from celery import Celery
from billiard import current_process
app = Celery()
@app.task(autoretry_for=(Exception,), max_retries=2)
def worker(draw_number, component_config, branch_config, logging_directory):
worker = current_process().index
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)
logging.info('Starting job: {}'.format((draw_number, component_config, branch_config)))
run_configuration = component_config['configuration'].get('run_configuration', {})
results_directory = run_configuration['results_directory']
run_configuration['run_id'] = str(worker)+'_'+str(time())
if branch_config is not None:
run_configuration['run_key'] = dict(branch_config)
run_configuration['run_key']['draw'] = draw_number
component_config['configuration']['run_configuration'] = run_configuration
try:
from ceam.framework.engine import configure, run
from ceam.framework.components import prepare_component_configuration
from ceam.framework.util import collapse_nested_dict
configure(draw_number=draw_number, simulation_config=branch_config)
results = run(prepare_component_configuration(component_config))
results = pd.DataFrame(results, index=[draw_number]).to_json()
return results
except Exception as e:
logging.exception('Unhandled exception in worker')
raise
finally:
logging.info('Exiting job: {}'.format((draw_number, component_config, branch_config)))
|
<commit_before><commit_msg>Add celery-based job runner<commit_after>
|
import os
from time import time
import logging
import pandas as pd
from celery import Celery
from billiard import current_process
app = Celery()
@app.task(autoretry_for=(Exception,), max_retries=2)
def worker(draw_number, component_config, branch_config, logging_directory):
worker = current_process().index
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)
logging.info('Starting job: {}'.format((draw_number, component_config, branch_config)))
run_configuration = component_config['configuration'].get('run_configuration', {})
results_directory = run_configuration['results_directory']
run_configuration['run_id'] = str(worker)+'_'+str(time())
if branch_config is not None:
run_configuration['run_key'] = dict(branch_config)
run_configuration['run_key']['draw'] = draw_number
component_config['configuration']['run_configuration'] = run_configuration
try:
from ceam.framework.engine import configure, run
from ceam.framework.components import prepare_component_configuration
from ceam.framework.util import collapse_nested_dict
configure(draw_number=draw_number, simulation_config=branch_config)
results = run(prepare_component_configuration(component_config))
results = pd.DataFrame(results, index=[draw_number]).to_json()
return results
except Exception as e:
logging.exception('Unhandled exception in worker')
raise
finally:
logging.info('Exiting job: {}'.format((draw_number, component_config, branch_config)))
|
Add celery-based job runnerimport os
from time import time
import logging
import pandas as pd
from celery import Celery
from billiard import current_process
app = Celery()
@app.task(autoretry_for=(Exception,), max_retries=2)
def worker(draw_number, component_config, branch_config, logging_directory):
worker = current_process().index
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)
logging.info('Starting job: {}'.format((draw_number, component_config, branch_config)))
run_configuration = component_config['configuration'].get('run_configuration', {})
results_directory = run_configuration['results_directory']
run_configuration['run_id'] = str(worker)+'_'+str(time())
if branch_config is not None:
run_configuration['run_key'] = dict(branch_config)
run_configuration['run_key']['draw'] = draw_number
component_config['configuration']['run_configuration'] = run_configuration
try:
from ceam.framework.engine import configure, run
from ceam.framework.components import prepare_component_configuration
from ceam.framework.util import collapse_nested_dict
configure(draw_number=draw_number, simulation_config=branch_config)
results = run(prepare_component_configuration(component_config))
results = pd.DataFrame(results, index=[draw_number]).to_json()
return results
except Exception as e:
logging.exception('Unhandled exception in worker')
raise
finally:
logging.info('Exiting job: {}'.format((draw_number, component_config, branch_config)))
|
<commit_before><commit_msg>Add celery-based job runner<commit_after>import os
from time import time
import logging
import pandas as pd
from celery import Celery
from billiard import current_process
app = Celery()
@app.task(autoretry_for=(Exception,), max_retries=2)
def worker(draw_number, component_config, branch_config, logging_directory):
worker = current_process().index
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', filename=os.path.join(logging_directory, str(worker)+'.log'), level=logging.DEBUG)
logging.info('Starting job: {}'.format((draw_number, component_config, branch_config)))
run_configuration = component_config['configuration'].get('run_configuration', {})
results_directory = run_configuration['results_directory']
run_configuration['run_id'] = str(worker)+'_'+str(time())
if branch_config is not None:
run_configuration['run_key'] = dict(branch_config)
run_configuration['run_key']['draw'] = draw_number
component_config['configuration']['run_configuration'] = run_configuration
try:
from ceam.framework.engine import configure, run
from ceam.framework.components import prepare_component_configuration
from ceam.framework.util import collapse_nested_dict
configure(draw_number=draw_number, simulation_config=branch_config)
results = run(prepare_component_configuration(component_config))
results = pd.DataFrame(results, index=[draw_number]).to_json()
return results
except Exception as e:
logging.exception('Unhandled exception in worker')
raise
finally:
logging.info('Exiting job: {}'.format((draw_number, component_config, branch_config)))
|
|
164f43f902b89b84b4f0d474f4d3e0a18924110d
|
selection_test.py
|
selection_test.py
|
import quicksort.quicksort
import random_selection.random_selection
import sys
import time
from random import randint
def main(max_len):
for n in [2**(n+1) for n in range(max_len)]:
arr = [randint(0, 2**max_len) for n in range(n)]
median = int((len(arr)+1)/2) - 1
current_time = time.time()
result = random_selection.random_selection.select(arr, median)
end_time = time.time() - current_time
sorted_arr = quicksort.quicksort.sort(arr)
if sorted_arr[median] == result:
print "Success! In %f" % end_time
else:
print "Failed"
return
if __name__ == '__main__':
arr_len = int(sys.argv[1])
main(arr_len)
|
Add test of randomized select algorithm
|
Add test of randomized select algorithm
Similar to the sorting algorithm tests, selection_test.py checks
the runtime and correctness of the randomized selection algorithm
for exponentially increasing n.
Because it checks for correctness with quicksort, it can't test for
very large n (sorting runtime grows a lot faster than selection
runtime, which is expected).
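For context, here is a minimal sketch of the kind of randomized selection routine the test above exercises. The real random_selection module is not shown in this record, so the function name, signature, and pivoting strategy below are assumptions for illustration only:

import random

def select(arr, k):
    # Hypothetical quickselect: returns the k-th smallest element
    # (0-indexed) of arr in expected O(n) time via random pivoting.
    items = list(arr)  # copy so the caller's list is left untouched
    while True:
        if len(items) == 1:
            return items[0]
        pivot = random.choice(items)
        lows = [x for x in items if x < pivot]
        pivots = [x for x in items if x == pivot]
        if k < len(lows):
            items = lows
        elif k < len(lows) + len(pivots):
            return pivot
        else:
            k -= len(lows) + len(pivots)
            items = [x for x in items if x > pivot]

Comparing such a routine against a fully sorted copy, as the test does, is the standard correctness oracle; it is the O(n log n) sort, not the expected-linear selection, that limits how large n can grow.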
|
Python
|
mit
|
timpel/stanford-algs,timpel/stanford-algs
|
Add test of randomized select algorithm
Similar to the sorting algorithm tests, selection_test.py checks
the runtime and correctness of the randomized selection algorithm
for exponentially increasing n.
Because it checks for correctness with quicksort, it can't test for
very large n (sorting runtime grows a lot faster than selection
runtime, which is expected).
|
import quicksort.quicksort
import random_selection.random_selection
import sys
import time
from random import randint
def main(max_len):
for n in [2**(n+1) for n in range(max_len)]:
arr = [randint(0, 2**max_len) for n in range(n)]
median = int((len(arr)+1)/2) - 1
current_time = time.time()
result = random_selection.random_selection.select(arr, median)
end_time = time.time() - current_time
sorted_arr = quicksort.quicksort.sort(arr)
if sorted_arr[median] == result:
print "Success! In %f" % end_time
else:
print "Failed"
return
if __name__ == '__main__':
arr_len = int(sys.argv[1])
main(arr_len)
|
<commit_before><commit_msg>Add test of randomized select algorithm
Similar to the sorting algorithm tests, selection_test.py checks
the runtime and correctness of the randomized selection algorithm
for exponentially increasing n.
Because it checks for correctness with quicksort, it can't test for
very large n (sorting runtime grows a lot faster than selection
runtime, which is expected).<commit_after>
|
import quicksort.quicksort
import random_selection.random_selection
import sys
import time
from random import randint
def main(max_len):
for n in [2**(n+1) for n in range(max_len)]:
arr = [randint(0, 2**max_len) for n in range(n)]
median = int((len(arr)+1)/2) - 1
current_time = time.time()
result = random_selection.random_selection.select(arr, median)
end_time = time.time() - current_time
sorted_arr = quicksort.quicksort.sort(arr)
if sorted_arr[median] == result:
print "Success! In %f" % end_time
else:
print "Failed"
return
if __name__ == '__main__':
arr_len = int(sys.argv[1])
main(arr_len)
|
Add test of randomized select algorithm
Similar to the sorting algorithm tests, selection_test.py checks
the runtime and correctness of the randomized selection algorithm
for exponentially increasing n.
Because it checks for correctness with quicksort, it can't test for
very large n (sorting runtime grows a lot faster than selection
runtime, which is expected).import quicksort.quicksort
import random_selection.random_selection
import sys
import time
from random import randint
def main(max_len):
for n in [2**(n+1) for n in range(max_len)]:
arr = [randint(0, 2**max_len) for n in range(n)]
median = int((len(arr)+1)/2) - 1
current_time = time.time()
result = random_selection.random_selection.select(arr, median)
end_time = time.time() - current_time
sorted_arr = quicksort.quicksort.sort(arr)
if sorted_arr[median] == result:
print "Success! In %f" % end_time
else:
print "Failed"
return
if __name__ == '__main__':
arr_len = int(sys.argv[1])
main(arr_len)
|
<commit_before><commit_msg>Add test of randomized select algorithm
Similar to the sorting algorithm tests, selection_test.py checks
the runtime and correctness of the randomized selection algorithm
for exponentially increasing n.
Because it checks for correctness with quicksort, it can't test for
very large n (sorting runtime grows a lot faster than selection
runtime, which is expected).<commit_after>import quicksort.quicksort
import random_selection.random_selection
import sys
import time
from random import randint
def main(max_len):
for n in [2**(n+1) for n in range(max_len)]:
arr = [randint(0, 2**max_len) for n in range(n)]
median = int((len(arr)+1)/2) - 1
current_time = time.time()
result = random_selection.random_selection.select(arr, median)
end_time = time.time() - current_time
sorted_arr = quicksort.quicksort.sort(arr)
if sorted_arr[median] == result:
print "Success! In %f" % end_time
else:
print "Failed"
return
if __name__ == '__main__':
arr_len = int(sys.argv[1])
main(arr_len)
|
|
82d34111295fdfa35d0e9815053498e935d415af
|
examples/store_datetimes.py
|
examples/store_datetimes.py
|
import h5py
import numpy as np
arr = np.array([np.datetime64('2019-09-22T17:38:30')])
with h5py.File('datetimes.h5', 'w') as f:
# Create dataset
f['data'] = arr.astype(h5py.opaque_dtype(arr.dtype))
# Read
print(f['data'][:])
|
Add example script to store & read datetime
|
Add example script to store & read datetime
|
Python
|
bsd-3-clause
|
h5py/h5py,h5py/h5py,h5py/h5py
|
Add example script to store & read datetime
|
import h5py
import numpy as np
arr = np.array([np.datetime64('2019-09-22T17:38:30')])
with h5py.File('datetimes.h5', 'w') as f:
# Create dataset
f['data'] = arr.astype(h5py.opaque_dtype(arr.dtype))
# Read
print(f['data'][:])
|
<commit_before><commit_msg>Add example script to store & read datetime<commit_after>
|
import h5py
import numpy as np
arr = np.array([np.datetime64('2019-09-22T17:38:30')])
with h5py.File('datetimes.h5', 'w') as f:
# Create dataset
f['data'] = arr.astype(h5py.opaque_dtype(arr.dtype))
# Read
print(f['data'][:])
|
Add example script to store & read datetimeimport h5py
import numpy as np
arr = np.array([np.datetime64('2019-09-22T17:38:30')])
with h5py.File('datetimes.h5', 'w') as f:
# Create dataset
f['data'] = arr.astype(h5py.opaque_dtype(arr.dtype))
# Read
print(f['data'][:])
|
<commit_before><commit_msg>Add example script to store & read datetime<commit_after>import h5py
import numpy as np
arr = np.array([np.datetime64('2019-09-22T17:38:30')])
with h5py.File('datetimes.h5', 'w') as f:
# Create dataset
f['data'] = arr.astype(h5py.opaque_dtype(arr.dtype))
# Read
print(f['data'][:])
|
|
52236b1ad285683d828b248e462a7b984d31e636
|
examples/world.py
|
examples/world.py
|
import ogr
import pylab
from numpy import asarray
from shapely.wkb import loads
source = ogr.Open("/var/gis/data/world/world_borders.shp")
borders = source.GetLayerByName("world_borders")
fig = pylab.figure(1, figsize=(4,2), dpi=300)
while 1:
feature = borders.GetNextFeature()
if not feature:
break
geom = loads(feature.GetGeometryRef().ExportToWkb())
a = asarray(geom)
pylab.plot(a[:,0], a[:,1])
pylab.show()
|
Add example of connecting OGR to matplotlib through shapely and numpy
|
Add example of connecting OGR to matplotlib through shapely and numpy
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@749 b426a367-1105-0410-b9ff-cdf4ab011145
|
Python
|
bsd-3-clause
|
jdmcbr/Shapely,mindw/shapely,jdmcbr/Shapely,mouadino/Shapely,abali96/Shapely,abali96/Shapely,mouadino/Shapely,mindw/shapely
|
Add example of connecting OGR to matplotlib through shapely and numpy
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@749 b426a367-1105-0410-b9ff-cdf4ab011145
|
import ogr
import pylab
from numpy import asarray
from shapely.wkb import loads
source = ogr.Open("/var/gis/data/world/world_borders.shp")
borders = source.GetLayerByName("world_borders")
fig = pylab.figure(1, figsize=(4,2), dpi=300)
while 1:
feature = borders.GetNextFeature()
if not feature:
break
geom = loads(feature.GetGeometryRef().ExportToWkb())
a = asarray(geom)
pylab.plot(a[:,0], a[:,1])
pylab.show()
|
<commit_before><commit_msg>Add example of connecting OGR to matplotlib through shapely and numpy
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@749 b426a367-1105-0410-b9ff-cdf4ab011145<commit_after>
|
import ogr
import pylab
from numpy import asarray
from shapely.wkb import loads
source = ogr.Open("/var/gis/data/world/world_borders.shp")
borders = source.GetLayerByName("world_borders")
fig = pylab.figure(1, figsize=(4,2), dpi=300)
while 1:
feature = borders.GetNextFeature()
if not feature:
break
geom = loads(feature.GetGeometryRef().ExportToWkb())
a = asarray(geom)
pylab.plot(a[:,0], a[:,1])
pylab.show()
|
Add example of connecting OGR to matplotlib through shapely and numpy
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@749 b426a367-1105-0410-b9ff-cdf4ab011145import ogr
import pylab
from numpy import asarray
from shapely.wkb import loads
source = ogr.Open("/var/gis/data/world/world_borders.shp")
borders = source.GetLayerByName("world_borders")
fig = pylab.figure(1, figsize=(4,2), dpi=300)
while 1:
feature = borders.GetNextFeature()
if not feature:
break
geom = loads(feature.GetGeometryRef().ExportToWkb())
a = asarray(geom)
pylab.plot(a[:,0], a[:,1])
pylab.show()
|
<commit_before><commit_msg>Add example of connecting OGR to matplotlib through shapely and numpy
git-svn-id: 30e8e193f18ae0331cc1220771e45549f871ece9@749 b426a367-1105-0410-b9ff-cdf4ab011145<commit_after>import ogr
import pylab
from numpy import asarray
from shapely.wkb import loads
source = ogr.Open("/var/gis/data/world/world_borders.shp")
borders = source.GetLayerByName("world_borders")
fig = pylab.figure(1, figsize=(4,2), dpi=300)
while 1:
feature = borders.GetNextFeature()
if not feature:
break
geom = loads(feature.GetGeometryRef().ExportToWkb())
a = asarray(geom)
pylab.plot(a[:,0], a[:,1])
pylab.show()
|
|
fd33fadc260cda2bd2395f027457f990ab05480b
|
registration/migrations/0008_auto_20160418_2250.py
|
registration/migrations/0008_auto_20160418_2250.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0007_auto_20160416_1217'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_status',
field=models.CharField(choices=[('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')], default='ready', max_length=10),
),
migrations.AlterField(
model_name='registration',
name='transaction_code',
field=models.CharField(blank=True, max_length=36),
),
]
|
Add migration for Registration model changes
|
Add migration for Registration model changes
|
Python
|
mit
|
pythonkr/pyconapac-2016,pythonkr/pyconapac-2016,pythonkr/pyconapac-2016
|
Add migration for Registration model changes
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0007_auto_20160416_1217'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_status',
field=models.CharField(choices=[('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')], default='ready', max_length=10),
),
migrations.AlterField(
model_name='registration',
name='transaction_code',
field=models.CharField(blank=True, max_length=36),
),
]
|
<commit_before><commit_msg>Add migration for Registration model changes<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0007_auto_20160416_1217'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_status',
field=models.CharField(choices=[('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')], default='ready', max_length=10),
),
migrations.AlterField(
model_name='registration',
name='transaction_code',
field=models.CharField(blank=True, max_length=36),
),
]
|
Add migration for Registration model changes# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0007_auto_20160416_1217'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_status',
field=models.CharField(choices=[('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')], default='ready', max_length=10),
),
migrations.AlterField(
model_name='registration',
name='transaction_code',
field=models.CharField(blank=True, max_length=36),
),
]
|
<commit_before><commit_msg>Add migration for Registration model changes<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-18 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0007_auto_20160416_1217'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='payment_status',
field=models.CharField(choices=[('ready', 'Ready'), ('paid', 'Paid'), ('deleted', 'Deleted')], default='ready', max_length=10),
),
migrations.AlterField(
model_name='registration',
name='transaction_code',
field=models.CharField(blank=True, max_length=36),
),
]
|
|
9f6f6b727458eb331d370443074a58d1efa6d755
|
kolibri/logger/migrations/0003_auto_20170531_1140.py
|
kolibri/logger/migrations/0003_auto_20170531_1140.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-31 18:40
from __future__ import unicode_literals
import kolibri.core.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0002_auto_20170518_1031'),
]
operations = [
migrations.AlterField(
model_name='usersessionlog',
name='last_interaction_timestamp',
field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),
),
]
|
Add migration for blank=True.
|
Add migration for blank=True.
|
Python
|
mit
|
mrpau/kolibri,benjaoming/kolibri,indirectlylit/kolibri,benjaoming/kolibri,christianmemije/kolibri,lyw07/kolibri,benjaoming/kolibri,learningequality/kolibri,rtibbles/kolibri,indirectlylit/kolibri,indirectlylit/kolibri,jonboiser/kolibri,MingDai/kolibri,DXCanas/kolibri,rtibbles/kolibri,benjaoming/kolibri,rtibbles/kolibri,indirectlylit/kolibri,lyw07/kolibri,jonboiser/kolibri,christianmemije/kolibri,mrpau/kolibri,lyw07/kolibri,MingDai/kolibri,DXCanas/kolibri,learningequality/kolibri,learningequality/kolibri,lyw07/kolibri,jonboiser/kolibri,DXCanas/kolibri,learningequality/kolibri,christianmemije/kolibri,jonboiser/kolibri,mrpau/kolibri,MingDai/kolibri,DXCanas/kolibri,rtibbles/kolibri,christianmemije/kolibri,MingDai/kolibri,mrpau/kolibri
|
Add migration for blank=True.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-31 18:40
from __future__ import unicode_literals
import kolibri.core.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0002_auto_20170518_1031'),
]
operations = [
migrations.AlterField(
model_name='usersessionlog',
name='last_interaction_timestamp',
field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration for blank=True.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-31 18:40
from __future__ import unicode_literals
import kolibri.core.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0002_auto_20170518_1031'),
]
operations = [
migrations.AlterField(
model_name='usersessionlog',
name='last_interaction_timestamp',
field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),
),
]
|
Add migration for blank=True.# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-31 18:40
from __future__ import unicode_literals
import kolibri.core.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0002_auto_20170518_1031'),
]
operations = [
migrations.AlterField(
model_name='usersessionlog',
name='last_interaction_timestamp',
field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add migration for blank=True.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-31 18:40
from __future__ import unicode_literals
import kolibri.core.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('logger', '0002_auto_20170518_1031'),
]
operations = [
migrations.AlterField(
model_name='usersessionlog',
name='last_interaction_timestamp',
field=kolibri.core.fields.DateTimeTzField(blank=True, null=True),
),
]
|
|
b04e3787de29d4bee68854e15a7e783cbe3c3bd0
|
pymks/tests/test_microstructure_generator.py
|
pymks/tests/test_microstructure_generator.py
|
import pytest
import numpy as np
from pymks.datasets import make_microstructure
@pytest.mark.xfail
def test_size_and_grain_size_failure():
make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))
@pytest.mark.xfail
def test_volume_fraction_failure():
make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))
@pytest.mark.xfail
def test_volume_fraction_with_n_phases_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.5, 0.5))
@pytest.mark.xfail
def test_percent_variance_exceeds_limit_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)
def test_volume_fraction():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5))
assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)
def test_percent_variance():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5),
percent_variance=.2)
print np.sum(X == 1) / float(X.size)
print np.sum(X == 2) / float(X.size)
assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)
if __name__ == '__main__':
test_volume_fraction()
test_percent_variance()
|
Add test for microstructure generator
|
Add test for microstructure generator
address #242
Add tests for microstructure generator to test new functionality. These
tests check that the correct volume fractions are
generated and that inappropriate sets of parameters fail.
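As a rough illustration of the volume-fraction bookkeeping these tests rely on, the snippet below checks observed phase fractions in a synthetic array; the shape, phase labels, and tolerance are assumptions for illustration, not the pymks API:

import numpy as np

# Hypothetical 3-phase microstructure with integer phase labels 0, 1, 2,
# drawn so that the target volume fractions are 0.3, 0.2 and 0.5.
X = np.random.choice([0, 1, 2], size=(1, 101, 101), p=[0.3, 0.2, 0.5])

for phase, target in enumerate([0.3, 0.2, 0.5]):
    # The fraction of voxels assigned to each phase should sit near its target.
    fraction = np.sum(X == phase) / float(X.size)
    assert np.allclose(fraction, target, atol=0.02)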
|
Python
|
mit
|
awhite40/pymks,davidbrough1/pymks,davidbrough1/pymks
|
Add test for microstructure generator
address #242
Add tests for microstructure generator to test new functionality. These
tests check that the correct volume fractions are
generated and that inappropriate sets of parameters fail.
|
import pytest
import numpy as np
from pymks.datasets import make_microstructure
@pytest.mark.xfail
def test_size_and_grain_size_failure():
make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))
@pytest.mark.xfail
def test_volume_fraction_failure():
make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))
@pytest.mark.xfail
def test_volume_fraction_with_n_phases_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.5, 0.5))
@pytest.mark.xfail
def test_percent_variance_exceeds_limit_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)
def test_volume_fraction():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5))
assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)
def test_percent_variance():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5),
percent_variance=.2)
print np.sum(X == 1) / float(X.size)
print np.sum(X == 2) / float(X.size)
assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)
if __name__ == '__main__':
test_volume_fraction()
test_percent_variance()
|
<commit_before><commit_msg>Add test for microstructure generator
address #242
Add tests for microstructure generator to test new functionality. These
tests check that the correct volume fractions are
generated and that inappropriate sets of parameters fail.<commit_after>
|
import pytest
import numpy as np
from pymks.datasets import make_microstructure
@pytest.mark.xfail
def test_size_and_grain_size_failure():
make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))
@pytest.mark.xfail
def test_volume_fraction_failure():
make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))
@pytest.mark.xfail
def test_volume_fraction_with_n_phases_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.5, 0.5))
@pytest.mark.xfail
def test_percent_variance_exceeds_limit_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)
def test_volume_fraction():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5))
assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)
def test_percent_variance():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5),
percent_variance=.2)
print np.sum(X == 1) / float(X.size)
print np.sum(X == 2) / float(X.size)
assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)
if __name__ == '__main__':
test_volume_fraction()
test_percent_variance()
|
Add test for microstructure generator
address #242
Add tests for microstructure generator to test new functionality. These
tests check that the correct volume fractions are
generated and that inappropriate sets of parameters fail.import pytest
import numpy as np
from pymks.datasets import make_microstructure
@pytest.mark.xfail
def test_size_and_grain_size_failure():
make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))
@pytest.mark.xfail
def test_volume_fraction_failure():
make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))
@pytest.mark.xfail
def test_volume_fraction_with_n_phases_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.5, 0.5))
@pytest.mark.xfail
def test_percent_variance_exceeds_limit_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)
def test_volume_fraction():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5))
assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)
def test_percent_variance():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5),
percent_variance=.2)
print np.sum(X == 1) / float(X.size)
print np.sum(X == 2) / float(X.size)
assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)
if __name__ == '__main__':
test_volume_fraction()
test_percent_variance()
|
<commit_before><commit_msg>Add test for microstructure generator
address #242
Add tests for microstructure generator to test new functionality. These
tests check that the correct volume fractions are
generated and that inappropriate sets of parameters fail.<commit_after>import pytest
import numpy as np
from pymks.datasets import make_microstructure
@pytest.mark.xfail
def test_size_and_grain_size_failure():
make_microstructure(n_samples=1, size=(7, 7), grain_size=(8, 1))
@pytest.mark.xfail
def test_volume_fraction_failure():
make_microstructure(n_samples=1, volume_fraction=(0.3, 0.6))
@pytest.mark.xfail
def test_volume_fraction_with_n_phases_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.5, 0.5))
@pytest.mark.xfail
def test_percent_variance_exceeds_limit_failure():
make_microstructure(n_samples=1, size=(7, 7), n_phases=3,
volume_fraction=(0.3, 0.3, 0.4), percent_variance=0.5)
def test_volume_fraction():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5))
assert np.allclose(np.sum(X == 1) / float(X.size), 0.2, rtol=1e-4)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.5, atol=1e-4)
def test_percent_variance():
X = make_microstructure(n_samples=1, n_phases=3,
volume_fraction=(0.3, 0.2, 0.5),
percent_variance=.2)
    print(np.sum(X == 1) / float(X.size))
    print(np.sum(X == 2) / float(X.size))
assert np.allclose(np.sum(X == 1) / float(X.size), 0.09, atol=1e-2)
assert np.allclose(np.sum(X == 2) / float(X.size), 0.57, atol=1e-2)
if __name__ == '__main__':
test_volume_fraction()
test_percent_variance()
|
|
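Note on the microstructure-generator tests above: the volume-fraction assertions amount to counting phase labels in the generated array. A minimal sketch of that check, assuming make_microstructure returns an integer-labelled numpy array, as the tests imply:

import numpy as np
from pymks.datasets import make_microstructure

X = make_microstructure(n_samples=1, n_phases=3, volume_fraction=(0.3, 0.2, 0.5))
# fraction of cells carrying each phase label; should track the requested mix
fractions = [np.sum(X == phase) / float(X.size) for phase in range(3)]
print(fractions)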
77e980157f51af421eceb7c7b7a84945d8d33a91
|
scripts/caffe_to_chainermodel.py
|
scripts/caffe_to_chainermodel.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os.path as osp
import caffe
import chainer.functions as F
import chainer.serializers as S
import fcn
from fcn.models import FCN8s
data_dir = fcn.get_data_dir()
caffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')
caffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')
chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')
net = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)
# TODO(pfnet): chainer CaffeFunction not support some layers
# from chainer.functions.caffe import CaffeFunction
# func = CaffeFunction(caffemodel)
model = FCN8s()
for name, param in net.params.items():
layer = getattr(model, name)
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W:', param[0].data.shape, layer.W.data.shape)
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b:', param[1].data.shape, layer.b.data.shape)
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_hdf5(chainermodel, model)
|
Convert caffemodel of FCN8s to chainer model
|
Convert caffemodel of FCN8s to chainer model
|
Python
|
mit
|
wkentaro/fcn
|
Convert caffemodel of FCN8s to chainer model
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os.path as osp
import caffe
import chainer.functions as F
import chainer.serializers as S
import fcn
from fcn.models import FCN8s
data_dir = fcn.get_data_dir()
caffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')
caffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')
chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')
net = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)
# TODO(pfnet): chainer CaffeFunction not support some layers
# from chainer.functions.caffe import CaffeFunction
# func = CaffeFunction(caffemodel)
model = FCN8s()
for name, param in net.params.items():
layer = getattr(model, name)
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W:', param[0].data.shape, layer.W.data.shape)
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b:', param[1].data.shape, layer.b.data.shape)
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_hdf5(chainermodel, model)
|
<commit_before><commit_msg>Convert caffemodel of FCN8s to chainer model<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os.path as osp
import caffe
import chainer.functions as F
import chainer.serializers as S
import fcn
from fcn.models import FCN8s
data_dir = fcn.get_data_dir()
caffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')
caffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')
chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')
net = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)
# TODO(pfnet): chainer CaffeFunction not support some layers
# from chainer.functions.caffe import CaffeFunction
# func = CaffeFunction(caffemodel)
model = FCN8s()
for name, param in net.params.items():
layer = getattr(model, name)
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W:', param[0].data.shape, layer.W.data.shape)
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b:', param[1].data.shape, layer.b.data.shape)
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_hdf5(chainermodel, model)
|
Convert caffemodel of FCN8s to chainer model#!/usr/bin/env python
from __future__ import print_function
import argparse
import os.path as osp
import caffe
import chainer.functions as F
import chainer.serializers as S
import fcn
from fcn.models import FCN8s
data_dir = fcn.get_data_dir()
caffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')
caffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')
chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')
net = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)
# TODO(pfnet): chainer CaffeFunction not support some layers
# from chainer.functions.caffe import CaffeFunction
# func = CaffeFunction(caffemodel)
model = FCN8s()
for name, param in net.params.items():
layer = getattr(model, name)
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W:', param[0].data.shape, layer.W.data.shape)
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b:', param[1].data.shape, layer.b.data.shape)
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_hdf5(chainermodel, model)
|
<commit_before><commit_msg>Convert caffemodel of FCN8s to chainer model<commit_after>#!/usr/bin/env python
from __future__ import print_function
import argparse
import os.path as osp
import caffe
import chainer.functions as F
import chainer.serializers as S
import fcn
from fcn.models import FCN8s
data_dir = fcn.get_data_dir()
caffemodel = osp.join(data_dir, 'voc-fcn8s/fcn8s-heavy-pascal.caffemodel')
caffe_prototxt = osp.join(data_dir, 'voc-fcn8s/deploy.prototxt')
chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')
net = caffe.Net(caffe_prototxt, caffemodel, caffe.TEST)
# TODO(pfnet): chainer CaffeFunction not support some layers
# from chainer.functions.caffe import CaffeFunction
# func = CaffeFunction(caffemodel)
model = FCN8s()
for name, param in net.params.items():
layer = getattr(model, name)
has_bias = True
if len(param) == 1:
has_bias = False
print('{0}:'.format(name))
# weight
print(' - W:', param[0].data.shape, layer.W.data.shape)
assert param[0].data.shape == layer.W.data.shape
layer.W.data = param[0].data
# bias
if has_bias:
print(' - b:', param[1].data.shape, layer.b.data.shape)
assert param[1].data.shape == layer.b.data.shape
layer.b.data = param[1].data
S.save_hdf5(chainermodel, model)
|
|
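Note on the conversion script above: a natural follow-up check is to load the saved file back through the same serializer. A minimal sketch, assuming the output path produced by the script:

import chainer.serializers as S
from fcn.models import FCN8s

model = FCN8s()
# repopulate a freshly constructed chainer model from the converted HDF5 file
S.load_hdf5('fcn8s.chainermodel', model)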
b0d699066799d0309e7af3f8892f56a6feaac778
|
new_tests.py
|
new_tests.py
|
from numpy import testing
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
def setUp(self):
self.lengths = (3, 2, 2,)
self.destinations = (
(5, 0,),
(4, 2,),
(6, 0.5),
(4, -2),
(5, -1),
)
self.theta = (pi, pi/2, 0,)
def test_init_all_arguments(self):
RobotArm(self.lengths, self.destinations, self.theta)
def test_init_without_theta(self):
RobotArm(self.lengths, self.destinations)
def test_wrong_lengths_type(self):
self.assertRaises(
AssertionError,
RobotArm,
np.array(self.lengths),
self.destinations,
self.theta)
def test_wrong_destinations_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
np.array(self.destinations),
self.theta)
def test_wrong_theta_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
self.destinations,
np.array(self.theta))
|
Write tests for new functionality; several destinations
|
Write tests for new functionality; several destinations
|
Python
|
mit
|
JakobGM/robotarm-optimization
|
Write tests for new functionality; several destinations
|
from numpy import testing
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
def setUp(self):
self.lengths = (3, 2, 2,)
self.destinations = (
(5, 0,),
(4, 2,),
(6, 0.5),
(4, -2),
(5, -1),
)
self.theta = (pi, pi/2, 0,)
def test_init_all_arguments(self):
RobotArm(self.lengths, self.destinations, self.theta)
def test_init_without_theta(self):
RobotArm(self.lengths, self.destinations)
def test_wrong_lengths_type(self):
self.assertRaises(
AssertionError,
RobotArm,
np.array(self.lengths),
self.destinations,
self.theta)
def test_wrong_destinations_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
np.array(self.destinations),
self.theta)
def test_wrong_theta_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
self.destinations,
np.array(self.theta))
|
<commit_before><commit_msg>Write tests for new functionality; several destinations<commit_after>
|
from numpy import testing
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
def setUp(self):
self.lengths = (3, 2, 2,)
self.destinations = (
(5, 0,),
(4, 2,),
(6, 0.5),
(4, -2),
(5, -1),
)
self.theta = (pi, pi/2, 0,)
def test_init_all_arguments(self):
RobotArm(self.lengths, self.destinations, self.theta)
def test_init_without_theta(self):
RobotArm(self.lengths, self.destinations)
def test_wrong_lengths_type(self):
self.assertRaises(
AssertionError,
RobotArm,
np.array(self.lengths),
self.destinations,
self.theta)
def test_wrong_destinations_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
np.array(self.destinations),
self.theta)
def test_wrong_theta_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
self.destinations,
np.array(self.theta))
|
Write tests for new functionality; several destinationsfrom numpy import testing
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
def setUp(self):
self.lengths = (3, 2, 2,)
self.destinations = (
(5, 0,),
(4, 2,),
(6, 0.5),
(4, -2),
(5, -1),
)
self.theta = (pi, pi/2, 0,)
def test_init_all_arguments(self):
RobotArm(self.lengths, self.destinations, self.theta)
def test_init_without_theta(self):
RobotArm(self.lengths, self.destinations)
def test_wrong_lengths_type(self):
self.assertRaises(
AssertionError,
RobotArm,
np.array(self.lengths),
self.destinations,
self.theta)
def test_wrong_destinations_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
np.array(self.destinations),
self.theta)
def test_wrong_theta_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
self.destinations,
np.array(self.theta))
|
<commit_before><commit_msg>Write tests for new functionality; several destinations<commit_after>from numpy import testing
import unittest
import numpy as np
from numpy import pi
from robot_arm import RobotArm
class TestRobotArm(unittest.TestCase):
def setUp(self):
self.lengths = (3, 2, 2,)
self.destinations = (
(5, 0,),
(4, 2,),
(6, 0.5),
(4, -2),
(5, -1),
)
self.theta = (pi, pi/2, 0,)
def test_init_all_arguments(self):
RobotArm(self.lengths, self.destinations, self.theta)
def test_init_without_theta(self):
RobotArm(self.lengths, self.destinations)
def test_wrong_lengths_type(self):
self.assertRaises(
AssertionError,
RobotArm,
np.array(self.lengths),
self.destinations,
self.theta)
def test_wrong_destinations_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
np.array(self.destinations),
self.theta)
def test_wrong_theta_type(self):
self.assertRaises(
AssertionError,
RobotArm,
self.lengths,
self.destinations,
np.array(self.theta))
|
|
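Note on the RobotArm tests above: the three type-failure tests only pass if the constructor rejects numpy arrays in favour of tuples. A minimal sketch of such a guard (the real constructor lives in robot_arm.py; this reimplementation is an assumption, not its actual code):

class RobotArmSketch(object):
    def __init__(self, lengths, destinations, theta=None):
        # the tests expect AssertionError when tuples are swapped for arrays
        assert isinstance(lengths, tuple)
        assert isinstance(destinations, tuple)
        assert theta is None or isinstance(theta, tuple)
        self.lengths = lengths
        self.destinations = destinations
        self.theta = theta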
50f698c2fdd90bc4b3e60a583c196381fc23e099
|
lltk-restful/base.py
|
lltk-restful/base.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import lltk
import lltk.generic
import lltk.caching
import lltk.exceptions
from flask import Flask
from flask import jsonify, request
__author__ = 'Markus Beuckelmann'
__author_email__ = 'email@markus-beuckelmann.de'
__version__ = '0.1.0'
DEBUG = True
CACHING = True
NAME = 'lltk-restful'
HOST = '127.0.0.1'
PORT = 5000
app = Flask(NAME)
if DEBUG:
app.debug = True
lltk.config['debug'] = True
if not CACHING:
lltk.caching.disable()
@app.route('/lltk/<string:language>/<string:method>/<string:word>', methods = ['GET'])
@app.route('/lltk/<string:language>/<string:method>/<path:extraargs>/<string:word>', methods = ['GET'])
def lltkapi(language, method, word, extraargs = tuple()):
''' Returns LLTK's results as a JSON document. '''
data = dict()
data['language'] = language
data['method'] = method
data['word'] = word
data['result'] = None
if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):
function = getattr(lltk.generic, method)
if not isinstance(extraargs, tuple):
extraargs = tuple(extraargs.split('/'))
kwargs = request.args.to_dict()
data['result'] = function(language, word, *extraargs, **kwargs)
else:
        # unknown method: return the request metadata with an HTTP 404 status
        return jsonify(data), 404
return jsonify(data)
if __name__ == '__main__':
app.run(
host = HOST,
port = PORT
)
|
Implement a rudimentary API for LLTK
|
Implement a rudimentary API for LLTK
|
Python
|
agpl-3.0
|
lltk/lltk-restful
|
Implement a rudimentary API for LLTK
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import lltk
import lltk.generic
import lltk.caching
import lltk.exceptions
from flask import Flask
from flask import jsonify, request
__author__ = 'Markus Beuckelmann'
__author_email__ = 'email@markus-beuckelmann.de'
__version__ = '0.1.0'
DEBUG = True
CACHING = True
NAME = 'lltk-restful'
HOST = '127.0.0.1'
PORT = 5000
app = Flask(NAME)
if DEBUG:
app.debug = True
lltk.config['debug'] = True
if not CACHING:
lltk.caching.disable()
@app.route('/lltk/<string:language>/<string:method>/<string:word>', methods = ['GET'])
@app.route('/lltk/<string:language>/<string:method>/<path:extraargs>/<string:word>', methods = ['GET'])
def lltkapi(language, method, word, extraargs = tuple()):
''' Returns LLTK's results as a JSON document. '''
data = dict()
data['language'] = language
data['method'] = method
data['word'] = word
data['result'] = None
if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):
function = getattr(lltk.generic, method)
if not isinstance(extraargs, tuple):
extraargs = tuple(extraargs.split('/'))
kwargs = request.args.to_dict()
data['result'] = function(language, word, *extraargs, **kwargs)
else:
        # unknown method: return the request metadata with an HTTP 404 status
        return jsonify(data), 404
return jsonify(data)
if __name__ == '__main__':
app.run(
host = HOST,
port = PORT
)
|
<commit_before><commit_msg>Implement a rudimentary API for LLTK<commit_after>
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import lltk
import lltk.generic
import lltk.caching
import lltk.exceptions
from flask import Flask
from flask import jsonify, request
__author__ = 'Markus Beuckelmann'
__author_email__ = 'email@markus-beuckelmann.de'
__version__ = '0.1.0'
DEBUG = True
CACHING = True
NAME = 'lltk-restful'
HOST = '127.0.0.1'
PORT = 5000
app = Flask(NAME)
if DEBUG:
app.debug = True
lltk.config['debug'] = True
if not CACHING:
lltk.caching.disable()
@app.route('/lltk/<string:language>/<string:method>/<string:word>', methods = ['GET'])
@app.route('/lltk/<string:language>/<string:method>/<path:extraargs>/<string:word>', methods = ['GET'])
def lltkapi(language, method, word, extraargs = tuple()):
''' Returns LLTK's results as a JSON document. '''
data = dict()
data['language'] = language
data['method'] = method
data['word'] = word
data['result'] = None
if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):
function = getattr(lltk.generic, method)
if not isinstance(extraargs, tuple):
extraargs = tuple(extraargs.split('/'))
kwargs = request.args.to_dict()
data['result'] = function(language, word, *extraargs, **kwargs)
else:
        # unknown method: return the request metadata with an HTTP 404 status
        return jsonify(data), 404
return jsonify(data)
if __name__ == '__main__':
app.run(
host = HOST,
port = PORT
)
|
Implement a rudimentary API for LLTK#!/usr/bin/python
# -*- coding: UTF-8 -*-
import lltk
import lltk.generic
import lltk.caching
import lltk.exceptions
from flask import Flask
from flask import jsonify, request
__author__ = 'Markus Beuckelmann'
__author_email__ = 'email@markus-beuckelmann.de'
__version__ = '0.1.0'
DEBUG = True
CACHING = True
NAME = 'lltk-restful'
HOST = '127.0.0.1'
PORT = 5000
app = Flask(NAME)
if DEBUG:
app.debug = True
lltk.config['debug'] = True
if not CACHING:
lltk.caching.disable()
@app.route('/lltk/<string:language>/<string:method>/<string:word>', methods = ['GET'])
@app.route('/lltk/<string:language>/<string:method>/<path:extraargs>/<string:word>', methods = ['GET'])
def lltkapi(language, method, word, extraargs = tuple()):
''' Returns LLTK's results as a JSON document. '''
data = dict()
data['language'] = language
data['method'] = method
data['word'] = word
data['result'] = None
if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):
function = getattr(lltk.generic, method)
if not isinstance(extraargs, tuple):
extraargs = tuple(extraargs.split('/'))
kwargs = request.args.to_dict()
data['result'] = function(language, word, *extraargs, **kwargs)
else:
        # unknown method: return the request metadata with an HTTP 404 status
        return jsonify(data), 404
return jsonify(data)
if __name__ == '__main__':
app.run(
host = HOST,
port = PORT
)
|
<commit_before><commit_msg>Implement a rudimentary API for LLTK<commit_after>#!/usr/bin/python
# -*- coding: UTF-8 -*-
import lltk
import lltk.generic
import lltk.caching
import lltk.exceptions
from flask import Flask
from flask import jsonify, request
__author__ = 'Markus Beuckelmann'
__author_email__ = 'email@markus-beuckelmann.de'
__version__ = '0.1.0'
DEBUG = True
CACHING = True
NAME = 'lltk-restful'
HOST = '127.0.0.1'
PORT = 5000
app = Flask(NAME)
if DEBUG:
app.debug = True
lltk.config['debug'] = True
if not CACHING:
lltk.caching.disable()
@app.route('/lltk/<string:language>/<string:method>/<string:word>', methods = ['GET'])
@app.route('/lltk/<string:language>/<string:method>/<path:extraargs>/<string:word>', methods = ['GET'])
def lltkapi(language, method, word, extraargs = tuple()):
''' Returns LLTK's results as a JSON document. '''
data = dict()
data['language'] = language
data['method'] = method
data['word'] = word
data['result'] = None
if hasattr(lltk.generic, method) and callable(getattr(lltk.generic, method)):
function = getattr(lltk.generic, method)
if not isinstance(extraargs, tuple):
extraargs = tuple(extraargs.split('/'))
kwargs = request.args.to_dict()
data['result'] = function(language, word, *extraargs, **kwargs)
else:
        # unknown method: return the request metadata with an HTTP 404 status
        return jsonify(data), 404
return jsonify(data)
if __name__ == '__main__':
app.run(
host = HOST,
port = PORT
)
|
|
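Note on the REST endpoint above: with the server running on 127.0.0.1:5000, each route maps straight onto an lltk.generic call. A minimal client sketch ('translate' and the query word are illustrative, not taken from the source):

import requests

# GET /lltk/<language>/<method>/<word>
r = requests.get('http://127.0.0.1:5000/lltk/de/translate/haus')
print(r.json())  # e.g. {'language': 'de', 'method': 'translate', 'word': 'haus', 'result': ...}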
4f404a71cb7ee912bca8184fe94c97d6cfba1186
|
preprocessing_tools/solid_rotation_y.py
|
preprocessing_tools/solid_rotation_y.py
|
'''
Rotates the protein by a solid angle on the plane xz
'''
import numpy
import os
from argparse import ArgumentParser
from move_prot_helper import (read_vertex, read_pqr, rotate_y,
modify_pqr)
def read_inputs():
"""
Parse command-line arguments to run move_protein.
User should provide:
-inMesh : str, mesh file you want to rotate.
-inpqr : str, pqr of the object you want to rotate.
    -angy   : float [degrees], rotation angle in the xz plane (about the y axis).
-name : str, output file name.
"""
parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')
parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,
help="mesh file you want to rotate")
parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,
help="pqr of the object you want to rotate")
parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,
help="rotation angle in the plane xz")
parser.add_argument('-n', '--name', dest='name', type=str, default='',
help="output file name")
return parser.parse_args()
args = read_inputs()
inMesh = args.im
inpqr = args.ip
angle_y = float(args.angy)*numpy.pi/180.
name = args.name
outMesh = inMesh + name
outpqr = inpqr + name
#Read mesh and pqr
#vert = read_vertex(inMesh+'.vert', float)
vert = numpy.loadtxt(inMesh+'.vert', dtype=float)
xq, q, Nq = read_pqr(inpqr+'.pqr', float)
xq_new = rotate_y(xq, angle_y)
vert_new = rotate_y(vert, angle_y)
ctr = numpy.average(vert_new, axis=0)
r_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))
idx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))
print ('Desired configuration:')
print ('\tProtein is centered, {}'.format(ctr))
print ('\tProtein r minimum is {}, located at {}'.format(r_min_last,
vert_new[idx_rmin_last, :]))
#### Save to file
numpy.savetxt(outMesh+'.vert', vert_new)
cmd = 'cp '+inMesh+'.face '+outMesh+'.face'
os.system(cmd)
modify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)
print ('\nWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')
|
Add script to rotate a solid angle in the xz plane
|
Add script to rotate a solid angle in the xz plane
|
Python
|
bsd-3-clause
|
barbagroup/pygbe,barbagroup/pygbe,barbagroup/pygbe
|
Add script to rotate a solid angle in the xz plane
|
'''
Rotates the protein by a solid angle on the plane xz
'''
import numpy
import os
from argparse import ArgumentParser
from move_prot_helper import (read_vertex, read_pqr, rotate_y,
modify_pqr)
def read_inputs():
"""
Parse command-line arguments to run move_protein.
User should provide:
-inMesh : str, mesh file you want to rotate.
-inpqr : str, pqr of the object you want to rotate.
    -angy   : float [degrees], rotation angle in the xz plane (about the y axis).
-name : str, output file name.
"""
parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')
parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,
help="mesh file you want to rotate")
parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,
help="pqr of the object you want to rotate")
parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,
help="rotation angle in the plane xz")
parser.add_argument('-n', '--name', dest='name', type=str, default='',
help="output file name")
return parser.parse_args()
args = read_inputs()
inMesh = args.im
inpqr = args.ip
angle_y = float(args.angy)*numpy.pi/180.
name = args.name
outMesh = inMesh + name
outpqr = inpqr + name
#Read mesh and pqr
#vert = read_vertex(inMesh+'.vert', float)
vert = numpy.loadtxt(inMesh+'.vert', dtype=float)
xq, q, Nq = read_pqr(inpqr+'.pqr', float)
xq_new = rotate_y(xq, angle_y)
vert_new = rotate_y(vert, angle_y)
ctr = numpy.average(vert_new, axis=0)
r_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))
idx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))
print ('Desired configuration:')
print ('\tProtein is centered, {}'.format(ctr))
print ('\tProtein r minimum is {}, located at {}'.format(r_min_last,
vert_new[idx_rmin_last, :]))
#### Save to file
numpy.savetxt(outMesh+'.vert', vert_new)
cmd = 'cp '+inMesh+'.face '+outMesh+'.face'
os.system(cmd)
modify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)
print ('\nWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')
|
<commit_before><commit_msg>Add script to rotate a solid angle in the xz plane<commit_after>
|
'''
Rotates the protein by a solid angle on the plane xz
'''
import numpy
import os
from argparse import ArgumentParser
from move_prot_helper import (read_vertex, read_pqr, rotate_y,
modify_pqr)
def read_inputs():
"""
Parse command-line arguments to run move_protein.
User should provide:
-inMesh : str, mesh file you want to rotate.
-inpqr : str, pqr of the object you want to rotate.
    -angy   : float [degrees], rotation angle in the xz plane (about the y axis).
-name : str, output file name.
"""
parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')
parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,
help="mesh file you want to rotate")
parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,
help="pqr of the object you want to rotate")
parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,
help="rotation angle in the plane xz")
parser.add_argument('-n', '--name', dest='name', type=str, default='',
help="output file name")
return parser.parse_args()
args = read_inputs()
inMesh = args.im
inpqr = args.ip
angle_y = float(args.angy)*numpy.pi/180.
name = args.name
outMesh = inMesh + name
outpqr = inpqr + name
#Read mesh and pqr
#vert = read_vertex(inMesh+'.vert', float)
vert = numpy.loadtxt(inMesh+'.vert', dtype=float)
xq, q, Nq = read_pqr(inpqr+'.pqr', float)
xq_new = rotate_y(xq, angle_y)
vert_new = rotate_y(vert, angle_y)
ctr = numpy.average(vert_new, axis=0)
r_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))
idx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))
print ('Desired configuration:')
print ('\tProtein is centered, {}'.format(ctr))
print ('\tProtein r minimum is {}, located at {}'.format(r_min_last,
vert_new[idx_rmin_last, :]))
#### Save to file
numpy.savetxt(outMesh+'.vert', vert_new)
cmd = 'cp '+inMesh+'.face '+outMesh+'.face'
os.system(cmd)
modify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)
print ('\nWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')
|
Add script to rotate a solid angle in the xz plane'''
Rotates the protein by a solid angle on the plane xz
'''
import numpy
import os
from argparse import ArgumentParser
from move_prot_helper import (read_vertex, read_pqr, rotate_y,
modify_pqr)
def read_inputs():
"""
Parse command-line arguments to run move_protein.
User should provide:
-inMesh : str, mesh file you want to rotate.
-inpqr : str, pqr of the object you want to rotate.
    -angy   : float [degrees], rotation angle in the xz plane (about the y axis).
-name : str, output file name.
"""
parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')
parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,
help="mesh file you want to rotate")
parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,
help="pqr of the object you want to rotate")
parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,
help="rotation angle in the plane xz")
parser.add_argument('-n', '--name', dest='name', type=str, default='',
help="output file name")
return parser.parse_args()
args = read_inputs()
inMesh = args.im
inpqr = args.ip
angle_y = float(args.angy)*numpy.pi/180.
name = args.name
outMesh = inMesh + name
outpqr = inpqr + name
#Read mesh and pqr
#vert = read_vertex(inMesh+'.vert', float)
vert = numpy.loadtxt(inMesh+'.vert', dtype=float)
xq, q, Nq = read_pqr(inpqr+'.pqr', float)
xq_new = rotate_y(xq, angle_y)
vert_new = rotate_y(vert, angle_y)
ctr = numpy.average(vert_new, axis=0)
r_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))
idx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))
print ('Desired configuration:')
print ('\tProtein is centered, {}'.format(ctr))
print ('\tProtein r minimum is {}, located at {}'.format(r_min_last,
vert_new[idx_rmin_last, :]))
#### Save to file
numpy.savetxt(outMesh+'.vert', vert_new)
cmd = 'cp '+inMesh+'.face '+outMesh+'.face'
os.system(cmd)
modify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)
print ('\nWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')
|
<commit_before><commit_msg>Add script to rotate a solid angle in the xz plane<commit_after>'''
Rotates the protein by a solid angle on the plane xz
'''
import numpy
import os
from argparse import ArgumentParser
from move_prot_helper import (read_vertex, read_pqr, rotate_y,
modify_pqr)
def read_inputs():
"""
Parse command-line arguments to run move_protein.
User should provide:
-inMesh : str, mesh file you want to rotate.
-inpqr : str, pqr of the object you want to rotate.
    -angy   : float [degrees], rotation angle in the xz plane (about the y axis).
-name : str, output file name.
"""
parser = ArgumentParser(description='Manage solid_rotation_y command line arguments')
parser.add_argument('-im', '--inMesh', dest='im', type=str, default=None,
help="mesh file you want to rotate")
parser.add_argument('-ip', '--inpqr', dest='ip', type=str, default=None,
help="pqr of the object you want to rotate")
parser.add_argument('-angy', '--angle_y', dest='angy', type=float, default=None,
help="rotation angle in the plane xz")
parser.add_argument('-n', '--name', dest='name', type=str, default='',
help="output file name")
return parser.parse_args()
args = read_inputs()
inMesh = args.im
inpqr = args.ip
angle_y = float(args.angy)*numpy.pi/180.
name = args.name
outMesh = inMesh + name
outpqr = inpqr + name
#Read mesh and pqr
#vert = read_vertex(inMesh+'.vert', float)
vert = numpy.loadtxt(inMesh+'.vert', dtype=float)
xq, q, Nq = read_pqr(inpqr+'.pqr', float)
xq_new = rotate_y(xq, angle_y)
vert_new = rotate_y(vert, angle_y)
ctr = numpy.average(vert_new, axis=0)
r_min_last = numpy.min(numpy.linalg.norm(vert_new, axis=1))
idx_rmin_last = numpy.argmin(numpy.linalg.norm(vert_new, axis=1))
print ('Desired configuration:')
print ('\tProtein is centered, {}'.format(ctr))
print ('\tProtein r minimum is {}, located at {}'.format(r_min_last,
vert_new[idx_rmin_last, :]))
#### Save to file
numpy.savetxt(outMesh+'.vert', vert_new)
cmd = 'cp '+inMesh+'.face '+outMesh+'.face'
os.system(cmd)
modify_pqr(inpqr+'.pqr', outpqr+'.pqr', xq_new)
print ('\nWritten to '+outMesh+'.vert(.face) and '+outpqr+'.pqr')
|
|
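Note on the rotation script above: a rotation 'in the xz plane' is a rotation about the y axis. A minimal sketch of what rotate_y presumably computes for an (N, 3) coordinate array (the real helper is in move_prot_helper; this reimplementation is an assumption):

import numpy

def rotate_y_sketch(xyz, angle):
    # x' = x*cos(a) + z*sin(a), y' = y, z' = -x*sin(a) + z*cos(a)
    c, s = numpy.cos(angle), numpy.sin(angle)
    R = numpy.array([[c, 0., s],
                     [0., 1., 0.],
                     [-s, 0., c]])
    return xyz.dot(R.T)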
116babc38e2e4023eb0b45eabc02050ed433e240
|
scripts/mod_info.py
|
scripts/mod_info.py
|
# mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys
with open(sys.argv[1], "rb") as f:
dat = f.read()
dlen = len(dat)
tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
name, wordlen, finetune, volume, repstart, replen = struct.unpack(
">22sH2B2H", dat[:30])
dat = dat[30:]
if wordlen == 0:
continue
samples_len += wordlen*2
print("Sample Data: %u" % samples_len)
songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)
patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))
mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'
totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
|
Include a helpful MOD analyser script
|
scripts: Include a helpful MOD analyser script
|
Python
|
unlicense
|
keirf/Amiga-Stuff,keirf/Amiga-Stuff
|
scripts: Include a helpful MOD analyser script
|
# mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys
with open(sys.argv[1], "rb") as f:
dat = f.read()
dlen = len(dat)
tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
name, wordlen, finetune, volume, repstart, replen = struct.unpack(
">22sH2B2H", dat[:30])
dat = dat[30:]
if wordlen == 0:
continue
samples_len += wordlen*2
print("Sample Data: %u" % samples_len)
songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)
patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))
mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'
totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
|
<commit_before><commit_msg>scripts: Include a helpful MOD analyser script<commit_after>
|
# mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys
with open(sys.argv[1], "rb") as f:
dat = f.read()
dlen = len(dat)
tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
name, wordlen, finetune, volume, repstart, replen = struct.unpack(
">22sH2B2H", dat[:30])
dat = dat[30:]
if wordlen == 0:
continue
samples_len += wordlen*2
print("Sample Data: %u" % samples_len)
songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)
patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))
mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'
totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
|
scripts: Include a helpful MOD analyser script# mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys
with open(sys.argv[1], "rb") as f:
dat = f.read()
dlen = len(dat)
tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
name, wordlen, finetune, volume, repstart, replen = struct.unpack(
">22sH2B2H", dat[:30])
dat = dat[30:]
if wordlen == 0:
continue
samples_len += wordlen*2
print("Sample Data: %u" % samples_len)
songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)
patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))
mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'
totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
|
<commit_before><commit_msg>scripts: Include a helpful MOD analyser script<commit_after># mod_info.py
#
# Display information about a Protracker module.
#
# Written & released by Keir Fraser <keir.xen@gmail.com>
#
# This is free and unencumbered software released into the public domain.
# See the file COPYING for more details, or visit <http://unlicense.org>.
import struct, sys
with open(sys.argv[1], "rb") as f:
dat = f.read()
dlen = len(dat)
tname, = struct.unpack("20s", dat[:20])
print("Name: '%s'" % tname.decode('utf-8'))
dat = dat[20:]
samples_len = 0
for i in range(31):
name, wordlen, finetune, volume, repstart, replen = struct.unpack(
">22sH2B2H", dat[:30])
dat = dat[30:]
if wordlen == 0:
continue
samples_len += wordlen*2
print("Sample Data: %u" % samples_len)
songlen, pad = struct.unpack("2B", dat[:2])
dat = dat[2:]
#assert pad == 127
assert songlen <= 128
print("Song Length: %u" % songlen)
patterns = list(struct.unpack("128B", dat[:128]))
dat = dat[128:]
patterns = patterns[:songlen]
nr_patterns = max(patterns)+1
print("Nr Patterns: %u (%u bytes)" % (nr_patterns, nr_patterns*1024))
mksig, = struct.unpack("4s", dat[:4])
dat = dat[4:]
assert mksig == b'M.K.'
totlen = 1084 + nr_patterns*1024 + samples_len
print("Total Bytes: %u (0x%x)" % (totlen, totlen))
assert totlen <= dlen
|
|
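Note on the MOD analyser above: the fixed Protracker layout it walks is 20 title bytes, 31 sample headers of 30 bytes each, the 2-byte song-length/pad pair, the 128-byte order table, and the 4-byte signature, i.e. 1084 header bytes in total. A minimal sketch that checks just the signature offset (the file name is illustrative):

import struct

with open('mysong.mod', 'rb') as f:
    dat = f.read()
# 20 + 31*30 + 2 + 128 = 1080; the 'M.K.' signature occupies bytes 1080-1083
print(struct.unpack('4s', dat[1080:1084])[0])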
07467664b699612e10b51bbeafdce79a9d1e0127
|
test/test_util.py
|
test/test_util.py
|
from __future__ import unicode_literals
try:
import io
StringIO = io.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest
import cudnnenv
class TestSafeTempDir(unittest.TestCase):
def test_safe_temp_dir(self):
with cudnnenv.safe_temp_dir() as path:
self.assertTrue(os.path.exists(path))
self.assertFalse(os.path.exists(path))
def test_safe_temp_dir_error(self):
try:
with cudnnenv.safe_temp_dir() as path:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(path))
class TestSafeDir(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_safe_dir(self):
path = os.path.join(self.path, 'd')
with cudnnenv.safe_dir(path) as p:
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(path))
def test_safe_dir_error(self):
path = os.path.join(self.path, 'd')
try:
with cudnnenv.safe_dir(path) as p:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(p))
self.assertFalse(os.path.exists(path))
class TestYesNo(unittest.TestCase):
def tearDown(self):
sys.stdin = sys.__stdin__
def test_yes(self):
sys.stdin = StringIO('y\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
def test_no(self):
sys.stdin = StringIO('n\n')
self.assertFalse(cudnnenv.yes_no_query('q'))
def test_invalid(self):
sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
|
Write unit test for utility functions
|
Write unit test for utility functions
|
Python
|
mit
|
unnonouno/cudnnenv
|
Write unit test for utility functions
|
from __future__ import unicode_literals
try:
import io
StringIO = io.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest
import cudnnenv
class TestSafeTempDir(unittest.TestCase):
def test_safe_temp_dir(self):
with cudnnenv.safe_temp_dir() as path:
self.assertTrue(os.path.exists(path))
self.assertFalse(os.path.exists(path))
def test_safe_temp_dir_error(self):
try:
with cudnnenv.safe_temp_dir() as path:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(path))
class TestSafeDir(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_safe_dir(self):
path = os.path.join(self.path, 'd')
with cudnnenv.safe_dir(path) as p:
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(path))
def test_safe_dir_error(self):
path = os.path.join(self.path, 'd')
try:
with cudnnenv.safe_dir(path) as p:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(p))
self.assertFalse(os.path.exists(path))
class TestYesNo(unittest.TestCase):
def tearDown(self):
sys.stdin = sys.__stdin__
def test_yes(self):
sys.stdin = StringIO('y\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
def test_no(self):
sys.stdin = StringIO('n\n')
self.assertFalse(cudnnenv.yes_no_query('q'))
def test_invalid(self):
sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
|
<commit_before><commit_msg>Write unit test for utility functions<commit_after>
|
from __future__ import unicode_literals
try:
import io
StringIO = io.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest
import cudnnenv
class TestSafeTempDir(unittest.TestCase):
def test_safe_temp_dir(self):
with cudnnenv.safe_temp_dir() as path:
self.assertTrue(os.path.exists(path))
self.assertFalse(os.path.exists(path))
def test_safe_temp_dir_error(self):
try:
with cudnnenv.safe_temp_dir() as path:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(path))
class TestSafeDir(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_safe_dir(self):
path = os.path.join(self.path, 'd')
with cudnnenv.safe_dir(path) as p:
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(path))
def test_safe_dir_error(self):
path = os.path.join(self.path, 'd')
try:
with cudnnenv.safe_dir(path) as p:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(p))
self.assertFalse(os.path.exists(path))
class TestYesNo(unittest.TestCase):
def tearDown(self):
sys.stdin = sys.__stdin__
def test_yes(self):
sys.stdin = StringIO('y\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
def test_no(self):
sys.stdin = StringIO('n\n')
self.assertFalse(cudnnenv.yes_no_query('q'))
def test_invalid(self):
sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
|
Write unit test for utility functionsfrom __future__ import unicode_literals
try:
import io
StringIO = io.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest
import cudnnenv
class TestSafeTempDir(unittest.TestCase):
def test_safe_temp_dir(self):
with cudnnenv.safe_temp_dir() as path:
self.assertTrue(os.path.exists(path))
self.assertFalse(os.path.exists(path))
def test_safe_temp_dir_error(self):
try:
with cudnnenv.safe_temp_dir() as path:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(path))
class TestSafeDir(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_safe_dir(self):
path = os.path.join(self.path, 'd')
with cudnnenv.safe_dir(path) as p:
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(path))
def test_safe_dir_error(self):
path = os.path.join(self.path, 'd')
try:
with cudnnenv.safe_dir(path) as p:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(p))
self.assertFalse(os.path.exists(path))
class TestYesNo(unittest.TestCase):
def tearDown(self):
sys.stdin = sys.__stdin__
def test_yes(self):
sys.stdin = StringIO('y\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
def test_no(self):
sys.stdin = StringIO('n\n')
self.assertFalse(cudnnenv.yes_no_query('q'))
def test_invalid(self):
sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
|
<commit_before><commit_msg>Write unit test for utility functions<commit_after>from __future__ import unicode_literals
try:
import io
StringIO = io.StringIO
except ImportError:
import StringIO
StringIO = StringIO.StringIO
import os
import shutil
import sys
import tempfile
import unittest
import cudnnenv
class TestSafeTempDir(unittest.TestCase):
def test_safe_temp_dir(self):
with cudnnenv.safe_temp_dir() as path:
self.assertTrue(os.path.exists(path))
self.assertFalse(os.path.exists(path))
def test_safe_temp_dir_error(self):
try:
with cudnnenv.safe_temp_dir() as path:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(path))
class TestSafeDir(unittest.TestCase):
def setUp(self):
self.path = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.path, ignore_errors=True)
def test_safe_dir(self):
path = os.path.join(self.path, 'd')
with cudnnenv.safe_dir(path) as p:
self.assertTrue(os.path.exists(p))
self.assertTrue(os.path.exists(path))
def test_safe_dir_error(self):
path = os.path.join(self.path, 'd')
try:
with cudnnenv.safe_dir(path) as p:
raise Exception
except Exception:
pass
self.assertFalse(os.path.exists(p))
self.assertFalse(os.path.exists(path))
class TestYesNo(unittest.TestCase):
def tearDown(self):
sys.stdin = sys.__stdin__
def test_yes(self):
sys.stdin = StringIO('y\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
def test_no(self):
sys.stdin = StringIO('n\n')
self.assertFalse(cudnnenv.yes_no_query('q'))
def test_invalid(self):
sys.stdin = StringIO('a\nb\nc\nd\ny\nn\n')
self.assertTrue(cudnnenv.yes_no_query('q'))
|
|
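Note on the cudnnenv tests above: they pin down the contract of safe_temp_dir -- the directory exists inside the with-block and is gone afterwards, even when an exception escapes. A minimal sketch of an implementation meeting that contract (the real code lives in cudnnenv; this is an assumption):

import contextlib
import shutil
import tempfile

@contextlib.contextmanager
def safe_temp_dir_sketch():
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        # runs on normal exit and on exceptions alike, as the tests require
        shutil.rmtree(path, ignore_errors=True)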
256648ad4effd9811d7c35ed6ef45de67f108926
|
tests/conftest.py
|
tests/conftest.py
|
import sys
def pytest_addoption(parser):
parser.addoption('--typing', action='store', default='typing')
def pytest_configure(config):
if config.option.typing == 'no':
sys.modules['typing'] = None
elif config.option.typing != 'typing':
sys.modules['typing'] = __import__(config.option.typing)
|
Add pytest option for specifying the typing module to use
|
Add pytest option for specifying the typing module to use
|
Python
|
mit
|
bintoro/overloading.py
|
Add pytest option for specifying the typing module to use
|
import sys
def pytest_addoption(parser):
parser.addoption('--typing', action='store', default='typing')
def pytest_configure(config):
if config.option.typing == 'no':
sys.modules['typing'] = None
elif config.option.typing != 'typing':
sys.modules['typing'] = __import__(config.option.typing)
|
<commit_before><commit_msg>Add pytest option for specifying the typing module to use<commit_after>
|
import sys
def pytest_addoption(parser):
parser.addoption('--typing', action='store', default='typing')
def pytest_configure(config):
if config.option.typing == 'no':
sys.modules['typing'] = None
elif config.option.typing != 'typing':
sys.modules['typing'] = __import__(config.option.typing)
|
Add pytest option for specifying the typing module to useimport sys
def pytest_addoption(parser):
parser.addoption('--typing', action='store', default='typing')
def pytest_configure(config):
if config.option.typing == 'no':
sys.modules['typing'] = None
elif config.option.typing != 'typing':
sys.modules['typing'] = __import__(config.option.typing)
|
<commit_before><commit_msg>Add pytest option for specifying the typing module to use<commit_after>import sys
def pytest_addoption(parser):
parser.addoption('--typing', action='store', default='typing')
def pytest_configure(config):
if config.option.typing == 'no':
sys.modules['typing'] = None
elif config.option.typing != 'typing':
sys.modules['typing'] = __import__(config.option.typing)
|
|
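Note on the conftest hook above: the option is read once at configure time, so a whole test run can be pointed at an alternative typing implementation (pytest --typing=some_module) or at none at all (pytest --typing=no, which makes 'import typing' raise ImportError). A minimal sketch of a test reacting to that:

import pytest

def test_typing_is_importable():
    typing = pytest.importorskip('typing')  # skipped under --typing=no
    assert hasattr(typing, 'Callable')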
1d8cbf94f127571358aee97677a09f7cea3bf3a7
|
p23serialize/util.py
|
p23serialize/util.py
|
from . import str_mode
if str_mode == 'bytes':
unicode_type = unicode
else: # str_mode == 'unicode'
unicode_type = str
def recursive_unicode(obj):
if isinstance(obj, bytes):
return obj.decode('latin1')
elif isinstance(obj, list):
return [recursive_unicode(_) for _ in obj]
else:
return obj
def recursive_bytes(obj):
if isinstance(obj, unicode_type):
return obj.encode('latin1')
elif isinstance(obj, list):
return [recursive_bytes(_) for _ in obj]
else:
return obj
|
Add helper functions for to/from bytes/unicode
|
Add helper functions for to/from bytes/unicode
|
Python
|
mit
|
rh314/p23serialize
|
Add helper functions for to/from bytes/unicode
|
from . import str_mode
if str_mode == 'bytes':
unicode_type = unicode
else: # str_mode == 'unicode'
unicode_type = str
def recursive_unicode(obj):
if isinstance(obj, bytes):
return obj.decode('latin1')
elif isinstance(obj, list):
return [recursive_unicode(_) for _ in obj]
else:
return obj
def recursive_bytes(obj):
if isinstance(obj, unicode_type):
return obj.encode('latin1')
elif isinstance(obj, list):
return [recursive_bytes(_) for _ in obj]
else:
return obj
|
<commit_before><commit_msg>Add helper functions for to/from bytes/unicode<commit_after>
|
from . import str_mode
if str_mode == 'bytes':
unicode_type = unicode
else: # str_mode == 'unicode'
unicode_type = str
def recursive_unicode(obj):
if isinstance(obj, bytes):
return obj.decode('latin1')
elif isinstance(obj, list):
return [recursive_unicode(_) for _ in obj]
else:
return obj
def recursive_bytes(obj):
if isinstance(obj, unicode_type):
return obj.encode('latin1')
elif isinstance(obj, list):
return [recursive_bytes(_) for _ in obj]
else:
return obj
|
Add helper functions for to/from bytes/unicodefrom . import str_mode
if str_mode == 'bytes':
unicode_type = unicode
else: # str_mode == 'unicode'
unicode_type = str
def recursive_unicode(obj):
if isinstance(obj, bytes):
return obj.decode('latin1')
elif isinstance(obj, list):
return [recursive_unicode(_) for _ in obj]
else:
return obj
def recursive_bytes(obj):
if isinstance(obj, unicode_type):
return obj.encode('latin1')
elif isinstance(obj, list):
return [recursive_bytes(_) for _ in obj]
else:
return obj
|
<commit_before><commit_msg>Add helper functions for to/from bytes/unicode<commit_after>from . import str_mode
if str_mode == 'bytes':
unicode_type = unicode
else: # str_mode == 'unicode'
unicode_type = str
def recursive_unicode(obj):
if isinstance(obj, bytes):
return obj.decode('latin1')
elif isinstance(obj, list):
return [recursive_unicode(_) for _ in obj]
else:
return obj
def recursive_bytes(obj):
if isinstance(obj, unicode_type):
return obj.encode('latin1')
elif isinstance(obj, list):
return [recursive_bytes(_) for _ in obj]
else:
return obj
|
|
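Note on the helpers above: both walk nested lists, convert strings via latin1, and pass every other object through untouched. A minimal usage sketch on Python 3, where str_mode resolves to 'unicode' (the import path is assumed from the file location):

from p23serialize.util import recursive_unicode, recursive_bytes

print(recursive_unicode([b'foo', [b'bar', 42]]))  # ['foo', ['bar', 42]]
print(recursive_bytes(['foo', ['bar', 42]]))      # [b'foo', [b'bar', 42]]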
01f21a16e4bcecccf51a565b51222ab18b79adb4
|
st2common/tests/unit/test_util_shell.py
|
st2common/tests/unit/test_util_shell.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.shell import quote_unix
from st2common.util.shell import quote_windows
class ShellUtilsTestCase(unittest2.TestCase):
def test_quote_unix(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
def test_quote_windows(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
"foo bar"
""",
"""
"foo1 bar1"
""",
"""
\\"foo\\"
""",
"""
"\\"foo\\" \\"bar\\""
""",
"""
"'foo bar'"
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_windows(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
|
Add tests for shell utils.
|
Add tests for shell utils.
|
Python
|
apache-2.0
|
tonybaloney/st2,pixelrebel/st2,Plexxi/st2,emedvedev/st2,pixelrebel/st2,grengojbo/st2,armab/st2,StackStorm/st2,StackStorm/st2,lakshmi-kannan/st2,Itxaka/st2,emedvedev/st2,punalpatel/st2,alfasin/st2,punalpatel/st2,dennybaa/st2,pinterb/st2,dennybaa/st2,StackStorm/st2,peak6/st2,armab/st2,tonybaloney/st2,Plexxi/st2,Plexxi/st2,dennybaa/st2,armab/st2,jtopjian/st2,emedvedev/st2,jtopjian/st2,Itxaka/st2,nzlosh/st2,Plexxi/st2,peak6/st2,StackStorm/st2,lakshmi-kannan/st2,Itxaka/st2,pixelrebel/st2,pinterb/st2,grengojbo/st2,alfasin/st2,jtopjian/st2,alfasin/st2,peak6/st2,punalpatel/st2,nzlosh/st2,nzlosh/st2,nzlosh/st2,grengojbo/st2,tonybaloney/st2,lakshmi-kannan/st2,pinterb/st2
|
Add tests for shell utils.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.shell import quote_unix
from st2common.util.shell import quote_windows
class ShellUtilsTestCase(unittest2.TestCase):
def test_quote_unix(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
def test_quote_windows(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
"foo bar"
""",
"""
"foo1 bar1"
""",
"""
\\"foo\\"
""",
"""
"\\"foo\\" \\"bar\\""
""",
"""
"'foo bar'"
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_windows(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
|
<commit_before><commit_msg>Add tests for shell utils.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.shell import quote_unix
from st2common.util.shell import quote_windows
class ShellUtilsTestCase(unittest2.TestCase):
def test_quote_unix(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
def test_quote_windows(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
"foo bar"
""",
"""
"foo1 bar1"
""",
"""
\\"foo\\"
""",
"""
"\\"foo\\" \\"bar\\""
""",
"""
"'foo bar'"
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_windows(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
|
Add tests for shell utils.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.shell import quote_unix
from st2common.util.shell import quote_windows
class ShellUtilsTestCase(unittest2.TestCase):
def test_quote_unix(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
def test_quote_windows(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
"foo bar"
""",
"""
"foo1 bar1"
""",
"""
\\"foo\\"
""",
"""
"\\"foo\\" \\"bar\\""
""",
"""
"'foo bar'"
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_windows(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
|
<commit_before><commit_msg>Add tests for shell utils.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.util.shell import quote_unix
from st2common.util.shell import quote_windows
class ShellUtilsTestCase(unittest2.TestCase):
def test_quote_unix(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
'foo bar'
""",
"""
'foo1 bar1'
""",
"""
'"foo"'
""",
"""
'"foo" "bar"'
""",
"""
''"'"'foo bar'"'"''
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_unix(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
def test_quote_windows(self):
arguments = [
'foo',
'foo bar',
'foo1 bar1',
'"foo"',
'"foo" "bar"',
"'foo bar'"
]
expected_values = [
"""
foo
""",
"""
"foo bar"
""",
"""
"foo1 bar1"
""",
"""
\\"foo\\"
""",
"""
"\\"foo\\" \\"bar\\""
""",
"""
"'foo bar'"
"""
]
for argument, expected_value in zip(arguments, expected_values):
actual_value = quote_windows(value=argument)
expected_value = expected_value.lstrip()
self.assertEqual(actual_value, expected_value.strip())
|
|
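The Unix expectations above can be sanity-checked with Python's standard-library shlex.quote, which applies the same single-quote escaping scheme (a minimal sketch, not the st2common implementation):
import shlex
for argument in ['foo', 'foo bar', '"foo"', "'foo bar'"]:
    # safe strings pass through untouched; everything else is wrapped in
    # single quotes, with embedded single quotes escaped as '"'"'
    print(shlex.quote(argument))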
b40512e834e88f24c20885cddb220188fce11339
|
accounts/migrations/0004_auto_20150227_2347.py
|
accounts/migrations/0004_auto_20150227_2347.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20150227_2158'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='email_on_comment_answer',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='email_on_message',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Nachrichten'),
preserve_default=True,
),
]
|
Add verbose names to UserProfile fields.
|
Add verbose names to UserProfile fields.
|
Python
|
bsd-3-clause
|
ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio,ugoertz/django-familio
|
Add verbose names to UserProfile fields.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20150227_2158'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='email_on_comment_answer',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='email_on_message',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Nachrichten'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add verbose names to UserProfile fields.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20150227_2158'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='email_on_comment_answer',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='email_on_message',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Nachrichten'),
preserve_default=True,
),
]
|
Add verbose names to UserProfile fields.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20150227_2158'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='email_on_comment_answer',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='email_on_message',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Nachrichten'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add verbose names to UserProfile fields.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20150227_2158'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='email_on_comment_answer',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Antwort auf meine Kommentare'),
preserve_default=True,
),
migrations.AlterField(
model_name='userprofile',
name='email_on_message',
field=models.BooleanField(default=False, verbose_name=b'Email-Benachrichtigung bei Nachrichten'),
preserve_default=True,
),
]
|
|
008625fef55f8f58ab80b883d34ae5d40e55c721
|
test_binheap.py
|
test_binheap.py
|
import pytest
from binheap import Binheap
def test_init_bh():
b = Binheap()
    assert b.binlist == []
c = Binheap([1, 2])
assert c.binlist == [1, 2]
|
Add initial test for binheap
|
Add initial test for binheap
|
Python
|
mit
|
constanthatz/data-structures
|
Add initial test for binheap
|
import pytest
from binheap import Binheap
def test_init_bh():
b = Binheap()
    assert b.binlist == []
c = Binheap([1, 2])
assert c.binlist == [1, 2]
|
<commit_before><commit_msg>Add initial test for binheap<commit_after>
|
import pytest
from binheap import Binheap
def test_init_bh():
b = Binheap()
    assert b.binlist == []
c = Binheap([1, 2])
assert c.binlist == [1, 2]
|
Add initial test for binheapimport pytest
from binheap import Binheap
def test_init_bh():
b = Binheap()
    assert b.binlist == []
c = Binheap([1, 2])
assert c.binlist == [1, 2]
|
<commit_before><commit_msg>Add initial test for binheap<commit_after>import pytest
from binheap import Binheap
def test_init_bh():
b = Binheap()
    assert b.binlist == []
c = Binheap([1, 2])
assert c.binlist == [1, 2]
|
|
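A subtlety behind the equality assertions above: `is` tests object identity, and every `[]` literal creates a fresh list, so an identity check against a list literal can never succeed; `==` is the correct comparison. A minimal illustration:
a, b = [], []
print(a is b, a == b)  # False True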
80e80bff7603e852710df6c9de613b1781877b2d
|
tests/python/typeinference/same_name.py
|
tests/python/typeinference/same_name.py
|
class A(object):
def method(self):
return 1
A().method() ## type int
class A(object):
def method(self):
return "test"
A().method() ## type str
|
Test case for two classes with the same name in one module.
|
Test case for two classes with the same name in one module.
|
Python
|
lgpl-2.1
|
retoo/pystructure,retoo/pystructure,retoo/pystructure,retoo/pystructure
|
Test case for two classes with the same name in one module.
|
class A(object):
def method(self):
return 1
A().method() ## type int
class A(object):
def method(self):
return "test"
A().method() ## type str
|
<commit_before><commit_msg>Test case for two classes with the same name in one module.<commit_after>
|
class A(object):
def method(self):
return 1
A().method() ## type int
class A(object):
def method(self):
return "test"
A().method() ## type str
|
Test case for two classes with the same name in one module.class A(object):
def method(self):
return 1
A().method() ## type int
class A(object):
def method(self):
return "test"
A().method() ## type str
|
<commit_before><commit_msg>Test case for two classes with the same name in one module.<commit_after>class A(object):
def method(self):
return 1
A().method() ## type int
class A(object):
def method(self):
return "test"
A().method() ## type str
|
|
542bc508b587a39a6e709d469c1cb166dee9eef4
|
app/grandchallenge/retina_api/renderers.py
|
app/grandchallenge/retina_api/renderers.py
|
import base64
from rest_framework import renderers
class Base64Renderer(renderers.BaseRenderer):
media_type = "image/png;base64"
format = "base64"
charset = "utf-8"
render_style = "text"
def render(self, data, media_type=None, renderer_context=None):
return base64.b64encode(data)
|
Add renderer for base64 content
|
Add renderer for base64 content
|
Python
|
apache-2.0
|
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
|
Add renderer for base64 content
|
import base64
from rest_framework import renderers
class Base64Renderer(renderers.BaseRenderer):
media_type = "image/png;base64"
format = "base64"
charset = "utf-8"
render_style = "text"
def render(self, data, media_type=None, renderer_context=None):
return base64.b64encode(data)
|
<commit_before><commit_msg>Add renderer for base64 content<commit_after>
|
import base64
from rest_framework import renderers
class Base64Renderer(renderers.BaseRenderer):
media_type = "image/png;base64"
format = "base64"
charset = "utf-8"
render_style = "text"
def render(self, data, media_type=None, renderer_context=None):
return base64.b64encode(data)
|
Add renderer for base64 contentimport base64
from rest_framework import renderers
class Base64Renderer(renderers.BaseRenderer):
media_type = "image/png;base64"
format = "base64"
charset = "utf-8"
render_style = "text"
def render(self, data, media_type=None, renderer_context=None):
return base64.b64encode(data)
|
<commit_before><commit_msg>Add renderer for base64 content<commit_after>import base64
from rest_framework import renderers
class Base64Renderer(renderers.BaseRenderer):
media_type = "image/png;base64"
format = "base64"
charset = "utf-8"
render_style = "text"
def render(self, data, media_type=None, renderer_context=None):
return base64.b64encode(data)
|
|
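A sketch of the renderer in use (the payload bytes below are hypothetical; in Django REST Framework the framework itself calls render() with the response data, assuming the view lists Base64Renderer in renderer_classes):
renderer = Base64Renderer()
payload = b"\x89PNG\r\n\x1a\n"   # hypothetical PNG file header
print(renderer.render(payload))  # b'iVBORw0KGgo=' -- base64-encoded bytes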
e8832b5f126f39672cfc48159148b490c1e3c6c7
|
tests/import_time.py
|
tests/import_time.py
|
import subprocess
import sys
import pytest
AVG_ITERATIONS = 15
# Maximum expected import time for rasa module when running on a Travis VM.
# Keep in mind the hardware configuration where tests are run:
# https://docs.travis-ci.com/user/reference/overview/
MAX_IMPORT_TIME_S = 0.3
def average_import_time(n, module):
total = 0
py_cmd_version = tuple(
int(part)
for part in subprocess.getoutput(
"python -c 'import sys; print(sys.version_info[:3])'"
)
.strip("()")
.split(",")
)
if py_cmd_version < (3, 7):
raise Exception(
"Can't use Python version {} for profiling (required: 3.7+).".format(
py_cmd_version
)
)
for _ in range(n):
lines = subprocess.getoutput(
'python -X importtime -c "import {}"'.format(module)
).splitlines()
parts = lines[-1].split("|")
if parts[-1].strip() != module:
raise Exception("Import time not found for {}.".format(module))
total += int(parts[1].strip()) / 1000000
return total / n
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Need 3.7+ for -X importtime")
def test_import_time():
import_time = average_import_time(AVG_ITERATIONS, "rasa")
assert import_time < MAX_IMPORT_TIME_S
|
Add test for import time.
|
Add test for import time.
|
Python
|
apache-2.0
|
RasaHQ/rasa_nlu,RasaHQ/rasa_nlu,RasaHQ/rasa_nlu
|
Add test for import time.
|
import subprocess
import sys
import pytest
AVG_ITERATIONS = 15
# Maximum expected import time for rasa module when running on a Travis VM.
# Keep in mind the hardware configuration where tests are run:
# https://docs.travis-ci.com/user/reference/overview/
MAX_IMPORT_TIME_S = 0.3
def average_import_time(n, module):
total = 0
py_cmd_version = tuple(
int(part)
for part in subprocess.getoutput(
"python -c 'import sys; print(sys.version_info[:3])'"
)
.strip("()")
.split(",")
)
if py_cmd_version < (3, 7):
raise Exception(
"Can't use Python version {} for profiling (required: 3.7+).".format(
py_cmd_version
)
)
for _ in range(n):
lines = subprocess.getoutput(
'python -X importtime -c "import {}"'.format(module)
).splitlines()
parts = lines[-1].split("|")
if parts[-1].strip() != module:
raise Exception("Import time not found for {}.".format(module))
total += int(parts[1].strip()) / 1000000
return total / n
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Need 3.7+ for -X importtime")
def test_import_time():
import_time = average_import_time(AVG_ITERATIONS, "rasa")
assert import_time < MAX_IMPORT_TIME_S
|
<commit_before><commit_msg>Add test for import time.<commit_after>
|
import subprocess
import sys
import pytest
AVG_ITERATIONS = 15
# Maximum expected import time for rasa module when running on a Travis VM.
# Keep in mind the hardware configuration where tests are run:
# https://docs.travis-ci.com/user/reference/overview/
MAX_IMPORT_TIME_S = 0.3
def average_import_time(n, module):
total = 0
py_cmd_version = tuple(
int(part)
for part in subprocess.getoutput(
"python -c 'import sys; print(sys.version_info[:3])'"
)
.strip("()")
.split(",")
)
if py_cmd_version < (3, 7):
raise Exception(
"Can't use Python version {} for profiling (required: 3.7+).".format(
py_cmd_version
)
)
for _ in range(n):
lines = subprocess.getoutput(
'python -X importtime -c "import {}"'.format(module)
).splitlines()
parts = lines[-1].split("|")
if parts[-1].strip() != module:
raise Exception("Import time not found for {}.".format(module))
total += int(parts[1].strip()) / 1000000
return total / n
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Need 3.7+ for -X importtime")
def test_import_time():
import_time = average_import_time(AVG_ITERATIONS, "rasa")
assert import_time < MAX_IMPORT_TIME_S
|
Add test for import time.import subprocess
import sys
import pytest
AVG_ITERATIONS = 15
# Maximum expected import time for rasa module when running on a Travis VM.
# Keep in mind the hardware configuration where tests are run:
# https://docs.travis-ci.com/user/reference/overview/
MAX_IMPORT_TIME_S = 0.3
def average_import_time(n, module):
total = 0
py_cmd_version = tuple(
int(part)
for part in subprocess.getoutput(
"python -c 'import sys; print(sys.version_info[:3])'"
)
.strip("()")
.split(",")
)
if py_cmd_version < (3, 7):
raise Exception(
"Can't use Python version {} for profiling (required: 3.7+).".format(
py_cmd_version
)
)
for _ in range(n):
lines = subprocess.getoutput(
'python -X importtime -c "import {}"'.format(module)
).splitlines()
parts = lines[-1].split("|")
if parts[-1].strip() != module:
raise Exception("Import time not found for {}.".format(module))
total += int(parts[1].strip()) / 1000000
return total / n
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Need 3.7+ for -X importtime")
def test_import_time():
import_time = average_import_time(AVG_ITERATIONS, "rasa")
assert import_time < MAX_IMPORT_TIME_S
|
<commit_before><commit_msg>Add test for import time.<commit_after>import subprocess
import sys
import pytest
AVG_ITERATIONS = 15
# Maximum expected import time for rasa module when running on a Travis VM.
# Keep in mind the hardware configuration where tests are run:
# https://docs.travis-ci.com/user/reference/overview/
MAX_IMPORT_TIME_S = 0.3
def average_import_time(n, module):
total = 0
py_cmd_version = tuple(
int(part)
for part in subprocess.getoutput(
"python -c 'import sys; print(sys.version_info[:3])'"
)
.strip("()")
.split(",")
)
if py_cmd_version < (3, 7):
raise Exception(
"Can't use Python version {} for profiling (required: 3.7+).".format(
py_cmd_version
)
)
for _ in range(n):
lines = subprocess.getoutput(
'python -X importtime -c "import {}"'.format(module)
).splitlines()
parts = lines[-1].split("|")
if parts[-1].strip() != module:
raise Exception("Import time not found for {}.".format(module))
total += int(parts[1].strip()) / 1000000
return total / n
@pytest.mark.skipif(sys.version_info < (3, 7), reason="Need 3.7+ for -X importtime")
def test_import_time():
import_time = average_import_time(AVG_ITERATIONS, "rasa")
assert import_time < MAX_IMPORT_TIME_S
|
|
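For a one-off look at the raw data the test parses: CPython's -X importtime writes a pipe-separated table to stderr, and the cumulative time for the requested module sits in the second column of the last line, in microseconds. A minimal sketch (assumes a `python` 3.7+ on PATH; numbers vary by machine):
import subprocess
lines = subprocess.getoutput('python -X importtime -c "import json"').splitlines()
# the last line looks like: 'import time:       125 |      1800 | json'
print(int(lines[-1].split("|")[1].strip()) / 1e6, "seconds")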
1a311415c007bebcb2c6f042816a86a7cc32ec36
|
src/foobar/tests/test_admin.py
|
src/foobar/tests/test_admin.py
|
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
class TestAdminViews(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create_superuser(
username='bananaman',
email='donkey@kong.com',
password='hunter2',
)
self.client.force_login(self.user)
def test_admin_views(self):
for model in apps.get_models():
if model in admin.site._registry:
app_label = model._meta.app_label
model_name = model._meta.model_name
admin_model = admin.site._registry[model]
fake_request = type('request', (), {'user': self.user})
# Add view
if admin_model.has_add_permission(fake_request):
name = 'admin:{app_label}_{model_name}_add'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Change view
if admin_model.has_change_permission(fake_request):
name = 'admin:{app_label}_{model_name}_changelist'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
|
Test admin views in order to catch stupid errors
|
Test admin views in order to catch stupid errors
|
Python
|
mit
|
uppsaladatavetare/foobar-api,uppsaladatavetare/foobar-api,uppsaladatavetare/foobar-api
|
Test admin views in order to catch stupid errors
|
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
class TestAdminViews(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create_superuser(
username='bananaman',
email='donkey@kong.com',
password='hunter2',
)
self.client.force_login(self.user)
def test_admin_views(self):
for model in apps.get_models():
if model in admin.site._registry:
app_label = model._meta.app_label
model_name = model._meta.model_name
admin_model = admin.site._registry[model]
fake_request = type('request', (), {'user': self.user})
# Add view
if admin_model.has_add_permission(fake_request):
name = 'admin:{app_label}_{model_name}_add'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Change view
if admin_model.has_change_permission(fake_request):
name = 'admin:{app_label}_{model_name}_changelist'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Test admin views in order to catch stupid errors<commit_after>
|
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
class TestAdminViews(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create_superuser(
username='bananaman',
email='donkey@kong.com',
password='hunter2',
)
self.client.force_login(self.user)
def test_admin_views(self):
for model in apps.get_models():
if model in admin.site._registry:
app_label = model._meta.app_label
model_name = model._meta.model_name
admin_model = admin.site._registry[model]
fake_request = type('request', (), {'user': self.user})
# Add view
if admin_model.has_add_permission(fake_request):
name = 'admin:{app_label}_{model_name}_add'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Change view
if admin_model.has_change_permission(fake_request):
name = 'admin:{app_label}_{model_name}_changelist'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
|
Test admin views in order to catch stupid errorsfrom django.apps import apps
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
class TestAdminViews(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create_superuser(
username='bananaman',
email='donkey@kong.com',
password='hunter2',
)
self.client.force_login(self.user)
def test_admin_views(self):
for model in apps.get_models():
if model in admin.site._registry:
app_label = model._meta.app_label
model_name = model._meta.model_name
admin_model = admin.site._registry[model]
fake_request = type('request', (), {'user': self.user})
# Add view
if admin_model.has_add_permission(fake_request):
name = 'admin:{app_label}_{model_name}_add'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Change view
if admin_model.has_change_permission(fake_request):
name = 'admin:{app_label}_{model_name}_changelist'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Test admin views in order to catch stupid errors<commit_after>from django.apps import apps
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
class TestAdminViews(TestCase):
def setUp(self):
User = get_user_model()
self.user = User.objects.create_superuser(
username='bananaman',
email='donkey@kong.com',
password='hunter2',
)
self.client.force_login(self.user)
def test_admin_views(self):
for model in apps.get_models():
if model in admin.site._registry:
app_label = model._meta.app_label
model_name = model._meta.model_name
admin_model = admin.site._registry[model]
fake_request = type('request', (), {'user': self.user})
# Add view
if admin_model.has_add_permission(fake_request):
name = 'admin:{app_label}_{model_name}_add'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# Change view
if admin_model.has_change_permission(fake_request):
name = 'admin:{app_label}_{model_name}_changelist'
url = reverse(name.format(
app_label=app_label,
model_name=model_name,
))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
|
|
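The `type('request', (), {'user': self.user})` idiom above builds a throwaway object exposing only the attribute the permission checks read. An equivalent, more explicit spelling (a sketch, not part of the commit):
class FakeRequest(object):
    def __init__(self, user):
        self.user = user  # the only attribute has_add/change_permission reads
fake_request = FakeRequest(user)  # user: the superuser created in setUp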
353df423343688f206c0f3628092608d1a95f5c2
|
mopidy/backends/__init__.py
|
mopidy/backends/__init__.py
|
import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 1
PLAYING = 2
STOPPED = 3
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
|
import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 'paused'
PLAYING = 'playing'
STOPPED = 'stopped'
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
|
Use textual defaults for status constants
|
Use textual defaults for status constants
|
Python
|
apache-2.0
|
SuperStarPL/mopidy,liamw9534/mopidy,glogiotatidis/mopidy,mopidy/mopidy,hkariti/mopidy,tkem/mopidy,hkariti/mopidy,jcass77/mopidy,bencevans/mopidy,pacificIT/mopidy,jodal/mopidy,jmarsik/mopidy,woutervanwijk/mopidy,kingosticks/mopidy,hkariti/mopidy,jodal/mopidy,mokieyue/mopidy,pacificIT/mopidy,mokieyue/mopidy,vrs01/mopidy,mopidy/mopidy,ali/mopidy,mopidy/mopidy,swak/mopidy,jmarsik/mopidy,bencevans/mopidy,mokieyue/mopidy,quartz55/mopidy,jodal/mopidy,kingosticks/mopidy,bencevans/mopidy,bencevans/mopidy,vrs01/mopidy,rawdlite/mopidy,glogiotatidis/mopidy,woutervanwijk/mopidy,ZenithDK/mopidy,tkem/mopidy,hkariti/mopidy,SuperStarPL/mopidy,SuperStarPL/mopidy,swak/mopidy,quartz55/mopidy,pacificIT/mopidy,diandiankan/mopidy,rawdlite/mopidy,ali/mopidy,quartz55/mopidy,pacificIT/mopidy,dbrgn/mopidy,dbrgn/mopidy,jcass77/mopidy,ali/mopidy,tkem/mopidy,priestd09/mopidy,diandiankan/mopidy,SuperStarPL/mopidy,rawdlite/mopidy,abarisain/mopidy,abarisain/mopidy,ZenithDK/mopidy,vrs01/mopidy,tkem/mopidy,priestd09/mopidy,mokieyue/mopidy,glogiotatidis/mopidy,ali/mopidy,liamw9534/mopidy,glogiotatidis/mopidy,swak/mopidy,diandiankan/mopidy,swak/mopidy,adamcik/mopidy,jmarsik/mopidy,ZenithDK/mopidy,adamcik/mopidy,bacontext/mopidy,priestd09/mopidy,vrs01/mopidy,ZenithDK/mopidy,adamcik/mopidy,kingosticks/mopidy,jcass77/mopidy,quartz55/mopidy,jmarsik/mopidy,bacontext/mopidy,dbrgn/mopidy,bacontext/mopidy,diandiankan/mopidy,rawdlite/mopidy,bacontext/mopidy,dbrgn/mopidy
|
import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 1
PLAYING = 2
STOPPED = 3
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
Use textual defaults for status constants
|
import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 'paused'
PLAYING = 'playing'
STOPPED = 'stopped'
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
|
<commit_before>import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 1
PLAYING = 2
STOPPED = 3
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
<commit_msg>Use textual defaults for status constants<commit_after>
|
import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 'paused'
PLAYING = 'playing'
STOPPED = 'stopped'
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
|
import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 1
PLAYING = 2
STOPPED = 3
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
Use textual defaults for status constantsimport logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 'paused'
PLAYING = 'playing'
STOPPED = 'stopped'
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
|
<commit_before>import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 1
PLAYING = 2
STOPPED = 3
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
<commit_msg>Use textual defaults for status constants<commit_after>import logging
import time
from mopidy.exceptions import MpdNotImplemented
from mopidy.models import Playlist
logger = logging.getLogger('backends.base')
class BaseBackend(object):
current_playlist = None
library = None
playback = None
stored_playlists = None
uri_handlers = []
class BasePlaybackController(object):
PAUSED = 'paused'
PLAYING = 'playing'
STOPPED = 'stopped'
def __init__(self, backend):
self.backend = backend
self.state = self.STOPPED
self.current_track = None
self.playlist_position = None
def play(self, id=None, position=None):
raise NotImplementedError
def next(self):
raise NotImplementedError
|
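A note on the design choice above: string-valued state constants are self-describing wherever they surface, so an MPD status response or a log line reads 'paused' rather than an opaque 1, and nothing is lost because the states are only ever compared for equality. A minimal illustration (hypothetical log call, not Mopidy code):
state = BasePlaybackController.STOPPED
logger.debug('playback state is now %s', state)  # logs 'stopped', not an int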
112dcd24d9888c8376afa36535870eb861982d9c
|
create_review.py
|
create_review.py
|
import sys
import subprocess
initial_layout = [
"---",
"layout: default",
"permalink: 'reviews/{}.html'",
"title: '{}'",
"---\n",
"# {}",
"---\n",
"## Idea\n\n",
"## Method\n\n",
"## Observations\n\n"
]
def main():
paper_name = sys.argv[1]
formatted_name = subprocess.check_output(
["filename-formatter", paper_name]).decode("utf-8").strip()
file_contents = "\n".join(initial_layout)
with open("_reviews/{}.md".format(formatted_name), 'w') as f:
f.write(file_contents.format(formatted_name, paper_name, paper_name))
if __name__ == '__main__':
main()
|
Add script to automate review markdown file creation
|
Add script to automate review markdown file creation
|
Python
|
cc0-1.0
|
v1n337/research-review-notes,v1n337/research-review-notes,v1n337/research-review-notes
|
Add script to automate review markdown file creation
|
import sys
import subprocess
initial_layout = [
"---",
"layout: default",
"permalink: 'reviews/{}.html'",
"title: '{}'",
"---\n",
"# {}",
"---\n",
"## Idea\n\n",
"## Method\n\n",
"## Observations\n\n"
]
def main():
paper_name = sys.argv[1]
formatted_name = subprocess.check_output(
["filename-formatter", paper_name]).decode("utf-8").strip()
file_contents = "\n".join(initial_layout)
with open("_reviews/{}.md".format(formatted_name), 'w') as f:
f.write(file_contents.format(formatted_name, paper_name, paper_name))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to automate review markdown file creation<commit_after>
|
import sys
import subprocess
initial_layout = [
"---",
"layout: default",
"permalink: 'reviews/{}.html'",
"title: '{}'",
"---\n",
"# {}",
"---\n",
"## Idea\n\n",
"## Method\n\n",
"## Observations\n\n"
]
def main():
paper_name = sys.argv[1]
formatted_name = subprocess.check_output(
["filename-formatter", paper_name]).decode("utf-8").strip()
file_contents = "\n".join(initial_layout)
with open("_reviews/{}.md".format(formatted_name), 'w') as f:
f.write(file_contents.format(formatted_name, paper_name, paper_name))
if __name__ == '__main__':
main()
|
Add script to automate review markdown file creationimport sys
import subprocess
initial_layout = [
"---",
"layout: default",
"permalink: 'reviews/{}.html'",
"title: '{}'",
"---\n",
"# {}",
"---\n",
"## Idea\n\n",
"## Method\n\n",
"## Observations\n\n"
]
def main():
paper_name = sys.argv[1]
formatted_name = subprocess.check_output(
["filename-formatter", paper_name]).decode("utf-8").strip()
file_contents = "\n".join(initial_layout)
with open("_reviews/{}.md".format(formatted_name), 'w') as f:
f.write(file_contents.format(formatted_name, paper_name, paper_name))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to automate review markdown file creation<commit_after>import sys
import subprocess
initial_layout = [
"---",
"layout: default",
"permalink: 'reviews/{}.html'",
"title: '{}'",
"---\n",
"# {}",
"---\n",
"## Idea\n\n",
"## Method\n\n",
"## Observations\n\n"
]
def main():
paper_name = sys.argv[1]
formatted_name = subprocess.check_output(
["filename-formatter", paper_name]).decode("utf-8").strip()
file_contents = "\n".join(initial_layout)
with open("_reviews/{}.md".format(formatted_name), 'w') as f:
f.write(file_contents.format(formatted_name, paper_name, paper_name))
if __name__ == '__main__':
main()
|
|
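The script shells out to an external `filename-formatter` helper whose rules are not shown; a self-contained stand-in with the likely intent (lower-case, hyphen-separated slugs -- an assumption, not the real helper) could look like:
import re
def format_filename(name):
    # slugify: lower-case, collapse runs of non-alphanumerics into single hyphens
    return re.sub(r'[^a-z0-9]+', '-', name.lower()).strip('-')
print(format_filename('Attention Is All You Need'))  # attention-is-all-you-need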
7594e2b4405be3fb8b5b07fc9b5fd79ecc5a5aec
|
py/maximum-binary-tree.py
|
py/maximum-binary-tree.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def constructMaximumBinaryTree(self, nums, start=None, end=None):
"""
:type nums: List[int]
:rtype: TreeNode
"""
if start is None and end is None:
start, end = 0, len(nums)
if start == end:
return None
m, mi = nums[start], start
for i in xrange(start, end):
if nums[i] > m:
m, mi = nums[i], i
ret = TreeNode(m)
ret.left = self.constructMaximumBinaryTree(nums, start, mi)
ret.right = self.constructMaximumBinaryTree(nums, mi + 1, end)
return ret
|
Add py solution for 654. Maximum Binary Tree
|
Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/
Approach 1:
1. Modify the method to accept (nums, start, end), and the method
will return the subtree of nums[start:end]
2. Find the maximum and split the array into left, right part
3. Recursively generate the left/right subtree using the same method
with start/end specified
4. Return the TreeNode with the val of the maximum value
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/
Approach 1:
1. Modify the method to accept (nums, start, end), and the method
will return the subtree of nums[start:end]
2. Find the maximum and split the array into left, right part
3. Recursively generate the left/right subtree using the same method
with start/end specified
4. Return the TreeNode with the val of the maximum value
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def constructMaximumBinaryTree(self, nums, start=None, end=None):
"""
:type nums: List[int]
:rtype: TreeNode
"""
if start is None and end is None:
start, end = 0, len(nums)
if start == end:
return None
m, mi = nums[start], start
for i in xrange(start, end):
if nums[i] > m:
m, mi = nums[i], i
ret = TreeNode(m)
ret.left = self.constructMaximumBinaryTree(nums, start, mi)
ret.right = self.constructMaximumBinaryTree(nums, mi + 1, end)
return ret
|
<commit_before><commit_msg>Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/
Approach 1:
1. Modify the method to accept (nums, start, end), and the method
will return the subtree of nums[start:end]
2. Find the maximum and split the array into left, right part
3. Recursively generate the left/right subtree using the same method
with start/end specified
4. Return the TreeNode with the val of the maximum value<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def constructMaximumBinaryTree(self, nums, start=None, end=None):
"""
:type nums: List[int]
:rtype: TreeNode
"""
if start is None and end is None:
start, end = 0, len(nums)
if start == end:
return None
m, mi = nums[start], start
for i in xrange(start, end):
if nums[i] > m:
m, mi = nums[i], i
ret = TreeNode(m)
ret.left = self.constructMaximumBinaryTree(nums, start, mi)
ret.right = self.constructMaximumBinaryTree(nums, mi + 1, end)
return ret
|
Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/
Approach 1:
1. Modify the method to accept (nums, start, end), and the method
will return the subtree of nums[start:end]
2. Find the maximum and split the array into left, right part
3. Recursively generate the left/right subtree using the same method
with start/end specified
4. Return the TreeNode with the val of the maximum value# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def constructMaximumBinaryTree(self, nums, start=None, end=None):
"""
:type nums: List[int]
:rtype: TreeNode
"""
if start is None and end is None:
start, end = 0, len(nums)
if start == end:
return None
m, mi = nums[start], start
for i in xrange(start, end):
if nums[i] > m:
m, mi = nums[i], i
ret = TreeNode(m)
ret.left = self.constructMaximumBinaryTree(nums, start, mi)
ret.right = self.constructMaximumBinaryTree(nums, mi + 1, end)
return ret
|
<commit_before><commit_msg>Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/
Approach 1:
1. Modify the method to accept (nums, start, end), and the method
will return the subtree of nums[start:end]
2. Find the maximum and split the array into left, right part
3. Recursively generate the left/right subtree using the same method
with start/end specified
4. Return the TreeNode with the val of the maximum value<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def constructMaximumBinaryTree(self, nums, start=None, end=None):
"""
:type nums: List[int]
:rtype: TreeNode
"""
if start is None and end is None:
start, end = 0, len(nums)
if start == end:
return None
m, mi = nums[start], start
for i in xrange(start, end):
if nums[i] > m:
m, mi = nums[i], i
ret = TreeNode(m)
ret.left = self.constructMaximumBinaryTree(nums, start, mi)
ret.right = self.constructMaximumBinaryTree(nums, mi + 1, end)
return ret
|
|
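A quick trace of the approach on the classic example input, with TreeNode defined inline since LeetCode normally supplies it (run under Python 2 to match the xrange above):
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
# 6 is the global maximum, so it becomes the root; [3, 2, 1] builds the
# left subtree and [0, 5] the right, each split by the same rule recursively.
root = Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5])
print([root.val, root.left.val, root.right.val])  # [6, 3, 5]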
01acf59474ac2ccd5612bba9b7fa925ebec1e5c5
|
py/valid-palindrome-ii.py
|
py/valid-palindrome-ii.py
|
class Solution(object):
def validPalindromeIdx(self, s, i, j, todelete):
while i < j:
if s[i] == s[j]:
i += 1
j -= 1
else:
return bool(todelete) and (self.validPalindromeIdx(s, i + 1, j, todelete - 1) or self.validPalindromeIdx(s, i, j - 1, todelete - 1))
return True
def validPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
return self.validPalindromeIdx(s, 0, len(s) - 1, 1)
|
Add py solution for 680. Valid Palindrome II
|
Add py solution for 680. Valid Palindrome II
680. Valid Palindrome II: https://leetcode.com/problems/valid-palindrome-ii/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 680. Valid Palindrome II
680. Valid Palindrome II: https://leetcode.com/problems/valid-palindrome-ii/
|
class Solution(object):
def validPalindromeIdx(self, s, i, j, todelete):
while i < j:
if s[i] == s[j]:
i += 1
j -= 1
else:
return bool(todelete) and (self.validPalindromeIdx(s, i + 1, j, todelete - 1) or self.validPalindromeIdx(s, i, j - 1, todelete - 1))
return True
def validPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
return self.validPalindromeIdx(s, 0, len(s) - 1, 1)
|
<commit_before><commit_msg>Add py solution for 680. Valid Palindrome II
680. Valid Palindrome II: https://leetcode.com/problems/valid-palindrome-ii/<commit_after>
|
class Solution(object):
def validPalindromeIdx(self, s, i, j, todelete):
while i < j:
if s[i] == s[j]:
i += 1
j -= 1
else:
return bool(todelete) and (self.validPalindromeIdx(s, i + 1, j, todelete - 1) or self.validPalindromeIdx(s, i, j - 1, todelete - 1))
return True
def validPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
return self.validPalindromeIdx(s, 0, len(s) - 1, 1)
|
Add py solution for 680. Valid Palindrome II
680. Valid Palindrome II: https://leetcode.com/problems/valid-palindrome-ii/class Solution(object):
def validPalindromeIdx(self, s, i, j, todelete):
while i < j:
if s[i] == s[j]:
i += 1
j -= 1
else:
return bool(todelete) and (self.validPalindromeIdx(s, i + 1, j, todelete - 1) or self.validPalindromeIdx(s, i, j - 1, todelete - 1))
return True
def validPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
return self.validPalindromeIdx(s, 0, len(s) - 1, 1)
|
<commit_before><commit_msg>Add py solution for 680. Valid Palindrome II
680. Valid Palindrome II: https://leetcode.com/problems/valid-palindrome-ii/<commit_after>class Solution(object):
def validPalindromeIdx(self, s, i, j, todelete):
while i < j:
if s[i] == s[j]:
i += 1
j -= 1
else:
return bool(todelete) and (self.validPalindromeIdx(s, i + 1, j, todelete - 1) or self.validPalindromeIdx(s, i, j - 1, todelete - 1))
return True
def validPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
return self.validPalindromeIdx(s, 0, len(s) - 1, 1)
|
|
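Three quick checks of the one-deletion budget above:
s = Solution()
print(s.validPalindrome("aba"))   # True  -- already a palindrome
print(s.validPalindrome("abca"))  # True  -- dropping 'b' or 'c' works
print(s.validPalindrome("abc"))   # False -- a single deletion is not enough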
01d848f77690bfafff1fdc28efb09b6c0e9e11bc
|
AutoUpdateProxy.py
|
AutoUpdateProxy.py
|
import json
import time
import traceback
import requests
from lxml import etree
def updateProxy():
    with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='r') as file:
        proxyInfo = json.load(file)
    while True:
        requestHeaders = {
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'DNT': '1',
            'Host': 'doub.io',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/56.0.2924.87 Safari/537.36',
        }
        resp = requests.get('https://doub.io/sszhfx/', headers=requestHeaders)
        if resp.status_code == 200:
            html = etree.HTML(resp.text)
            trs = html.findall('.//table[@width="100%"]/tbody/tr')
            if len(trs) > 0:
                skipped = True  # used to skip the table's header row
                for index, tr in enumerate(trs):
                    if skipped:
                        skipped = False
                        continue
                    tds = tr.findall('./td')
                    if len(tds) == 7 and tds[1].text is not None:
                        tmpIP = tds[1].text
                        existed = False  # marks whether this proxy is already configured
                        # walk the existing proxies to see whether the target is already there
                        for ind, ip in enumerate(proxyInfo['configs']):
                            if ip['server'] in tmpIP:
                                existed = True
                                # already present: update its settings in place
                                proxyInfo['configs'][ind]['server_port'] = tds[2].text
                                proxyInfo['configs'][ind]['password'] = tds[3].text
                                proxyInfo['configs'][ind]['method'] = tds[4].text
                                proxyInfo['configs'][ind]['remarks'] = 'doub.io-' + str(index)
                        if not existed:
                            # not present: build and append a new entry
                            proxy = {
                                "server": tmpIP,
                                "server_port": tds[2].text,
                                "password": tds[3].text,
                                "method": tds[4].text,
                                "remarks": 'doub.io-' + str(index),
                                "auth": False
                            }
                            proxyInfo['configs'].append(proxy)
                with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='w') as file:
                    print(proxyInfo)
                    json.dump(proxyInfo, file, ensure_ascii=False)
                break
        time.sleep(60)  # the page request failed; retry after one minute
if __name__ == '__main__':
    try:
        while True:
            updateProxy()
            time.sleep(3600)  # refresh the proxy list once an hour
    except Exception:
        print(traceback.format_exc())
|
Add a utility that fetches free proxies from a given site.
|
Add a utility that fetches free proxies from a given site.
|
Python
|
mit
|
i2it/XYWYCrawler
|
Add a utility that fetches free proxies from a given site.
|
import json
import time
import traceback
import requests
from lxml import etree
def updateProxy():
    with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='r') as file:
        proxyInfo = json.load(file)
    while True:
        requestHeaders = {
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'DNT': '1',
            'Host': 'doub.io',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/56.0.2924.87 Safari/537.36',
        }
        resp = requests.get('https://doub.io/sszhfx/', headers=requestHeaders)
        if resp.status_code == 200:
            html = etree.HTML(resp.text)
            trs = html.findall('.//table[@width="100%"]/tbody/tr')
            if len(trs) > 0:
                skipped = True  # used to skip the table's header row
                for index, tr in enumerate(trs):
                    if skipped:
                        skipped = False
                        continue
                    tds = tr.findall('./td')
                    if len(tds) == 7 and tds[1].text is not None:
                        tmpIP = tds[1].text
                        existed = False  # marks whether this proxy is already configured
                        # walk the existing proxies to see whether the target is already there
                        for ind, ip in enumerate(proxyInfo['configs']):
                            if ip['server'] in tmpIP:
                                existed = True
                                # already present: update its settings in place
                                proxyInfo['configs'][ind]['server_port'] = tds[2].text
                                proxyInfo['configs'][ind]['password'] = tds[3].text
                                proxyInfo['configs'][ind]['method'] = tds[4].text
                                proxyInfo['configs'][ind]['remarks'] = 'doub.io-' + str(index)
                        if not existed:
                            # not present: build and append a new entry
                            proxy = {
                                "server": tmpIP,
                                "server_port": tds[2].text,
                                "password": tds[3].text,
                                "method": tds[4].text,
                                "remarks": 'doub.io-' + str(index),
                                "auth": False
                            }
                            proxyInfo['configs'].append(proxy)
                with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='w') as file:
                    print(proxyInfo)
                    json.dump(proxyInfo, file, ensure_ascii=False)
                break
        time.sleep(60)  # the page request failed; retry after one minute
if __name__ == '__main__':
    try:
        while True:
            updateProxy()
            time.sleep(3600)  # refresh the proxy list once an hour
    except Exception:
        print(traceback.format_exc())
|
<commit_before><commit_msg>Add a utility that fetches free proxies from a given site.<commit_after>
|
import json
import time
import traceback
import requests
from lxml import etree
def updateProxy():
    with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='r') as file:
        proxyInfo = json.load(file)
    while True:
        requestHeaders = {
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'DNT': '1',
            'Host': 'doub.io',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/56.0.2924.87 Safari/537.36',
        }
        resp = requests.get('https://doub.io/sszhfx/', headers=requestHeaders)
        if resp.status_code == 200:
            html = etree.HTML(resp.text)
            trs = html.findall('.//table[@width="100%"]/tbody/tr')
            if len(trs) > 0:
                skipped = True  # used to skip the table's header row
                for index, tr in enumerate(trs):
                    if skipped:
                        skipped = False
                        continue
                    tds = tr.findall('./td')
                    if len(tds) == 7 and tds[1].text is not None:
                        tmpIP = tds[1].text
                        existed = False  # marks whether this proxy is already configured
                        # walk the existing proxies to see whether the target is already there
                        for ind, ip in enumerate(proxyInfo['configs']):
                            if ip['server'] in tmpIP:
                                existed = True
                                # already present: update its settings in place
                                proxyInfo['configs'][ind]['server_port'] = tds[2].text
                                proxyInfo['configs'][ind]['password'] = tds[3].text
                                proxyInfo['configs'][ind]['method'] = tds[4].text
                                proxyInfo['configs'][ind]['remarks'] = 'doub.io-' + str(index)
                        if not existed:
                            # not present: build and append a new entry
                            proxy = {
                                "server": tmpIP,
                                "server_port": tds[2].text,
                                "password": tds[3].text,
                                "method": tds[4].text,
                                "remarks": 'doub.io-' + str(index),
                                "auth": False
                            }
                            proxyInfo['configs'].append(proxy)
                with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='w') as file:
                    print(proxyInfo)
                    json.dump(proxyInfo, file, ensure_ascii=False)
                break
        time.sleep(60)  # the page request failed; retry after one minute
if __name__ == '__main__':
    try:
        while True:
            updateProxy()
            time.sleep(3600)  # refresh the proxy list once an hour
    except Exception:
        print(traceback.format_exc())
|
Add a utility that fetches free proxies from a given site.import json
import time
import traceback
import requests
from lxml import etree
def updateProxy():
    with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='r') as file:
        proxyInfo = json.load(file)
    while True:
        requestHeaders = {
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'DNT': '1',
            'Host': 'doub.io',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/56.0.2924.87 Safari/537.36',
        }
        resp = requests.get('https://doub.io/sszhfx/', headers=requestHeaders)
        if resp.status_code == 200:
            html = etree.HTML(resp.text)
            trs = html.findall('.//table[@width="100%"]/tbody/tr')
            if len(trs) > 0:
                skipped = True  # used to skip the table's header row
                for index, tr in enumerate(trs):
                    if skipped:
                        skipped = False
                        continue
                    tds = tr.findall('./td')
                    if len(tds) == 7 and tds[1].text is not None:
                        tmpIP = tds[1].text
                        existed = False  # marks whether this proxy is already configured
                        # walk the existing proxies to see whether the target is already there
                        for ind, ip in enumerate(proxyInfo['configs']):
                            if ip['server'] in tmpIP:
                                existed = True
                                # already present: update its settings in place
                                proxyInfo['configs'][ind]['server_port'] = tds[2].text
                                proxyInfo['configs'][ind]['password'] = tds[3].text
                                proxyInfo['configs'][ind]['method'] = tds[4].text
                                proxyInfo['configs'][ind]['remarks'] = 'doub.io-' + str(index)
                        if not existed:
                            # not present: build and append a new entry
                            proxy = {
                                "server": tmpIP,
                                "server_port": tds[2].text,
                                "password": tds[3].text,
                                "method": tds[4].text,
                                "remarks": 'doub.io-' + str(index),
                                "auth": False
                            }
                            proxyInfo['configs'].append(proxy)
                with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='w') as file:
                    print(proxyInfo)
                    json.dump(proxyInfo, file, ensure_ascii=False)
                break
        time.sleep(60)  # the page request failed; retry after one minute
if __name__ == '__main__':
    try:
        while True:
            updateProxy()
            time.sleep(3600)  # refresh the proxy list once an hour
    except Exception:
        print(traceback.format_exc())
|
<commit_before><commit_msg>Add a utility that fetches free proxies from a given site.<commit_after>import json
import time
import traceback
import requests
from lxml import etree
def updateProxy():
    with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='r') as file:
        proxyInfo = json.load(file)
    while True:
        requestHeaders = {
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'DNT': '1',
            'Host': 'doub.io',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/56.0.2924.87 Safari/537.36',
        }
        resp = requests.get('https://doub.io/sszhfx/', headers=requestHeaders)
        if resp.status_code == 200:
            html = etree.HTML(resp.text)
            trs = html.findall('.//table[@width="100%"]/tbody/tr')
            if len(trs) > 0:
                skipped = True  # used to skip the table's header row
                for index, tr in enumerate(trs):
                    if skipped:
                        skipped = False
                        continue
                    tds = tr.findall('./td')
                    if len(tds) == 7 and tds[1].text is not None:
                        tmpIP = tds[1].text
                        existed = False  # marks whether this proxy is already configured
                        # walk the existing proxies to see whether the target is already there
                        for ind, ip in enumerate(proxyInfo['configs']):
                            if ip['server'] in tmpIP:
                                existed = True
                                # already present: update its settings in place
                                proxyInfo['configs'][ind]['server_port'] = tds[2].text
                                proxyInfo['configs'][ind]['password'] = tds[3].text
                                proxyInfo['configs'][ind]['method'] = tds[4].text
                                proxyInfo['configs'][ind]['remarks'] = 'doub.io-' + str(index)
                        if not existed:
                            # not present: build and append a new entry
                            proxy = {
                                "server": tmpIP,
                                "server_port": tds[2].text,
                                "password": tds[3].text,
                                "method": tds[4].text,
                                "remarks": 'doub.io-' + str(index),
                                "auth": False
                            }
                            proxyInfo['configs'].append(proxy)
                with open(r'F:\liuming\Shadowsocks\gui-config.json', encoding='utf-8', mode='w') as file:
                    print(proxyInfo)
                    json.dump(proxyInfo, file, ensure_ascii=False)
                break
        time.sleep(60)  # the page request failed; retry after one minute
if __name__ == '__main__':
    try:
        while True:
            updateProxy()
            time.sleep(3600)  # refresh the proxy list once an hour
    except Exception:
        print(traceback.format_exc())
|
|
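The header-row skip in the scraper above can also be written without a flag by slicing the row list, which reads more directly (a sketch of the loop header only; enumerate's start keeps the remark indices unchanged):
for index, tr in enumerate(trs[1:], start=1):
    pass  # loop body unchanged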
27f0b6481675fcc07ec2ce09acbd2bd1ac8cf197
|
testing/test_main.py
|
testing/test_main.py
|
import os.path
mainfile = os.path.join(
os.path.dirname(__file__), "..", "setuptools_scm", "__main__.py")
with open(mainfile) as f:
code = compile(f.read(), "__main__.py", 'exec')
exec(code)
|
Test 'python -m setuptools_scm' invocation (__main__.py).
|
Test 'python -m setuptools_scm' invocation (__main__.py).
|
Python
|
mit
|
esben/setuptools_scm,pypa/setuptools_scm,RonnyPfannschmidt/setuptools_scm,pypa/setuptools_scm,RonnyPfannschmidt/setuptools_scm
|
Test 'python -m setuptools_scm' invocation (__main__.py).
|
import os.path
mainfile = os.path.join(
os.path.dirname(__file__), "..", "setuptools_scm", "__main__.py")
with open(mainfile) as f:
code = compile(f.read(), "__main__.py", 'exec')
exec(code)
|
<commit_before><commit_msg>Test 'python -m setuptools_scm' invocation (__main__.py).<commit_after>
|
import os.path
mainfile = os.path.join(
os.path.dirname(__file__), "..", "setuptools_scm", "__main__.py")
with open(mainfile) as f:
code = compile(f.read(), "__main__.py", 'exec')
exec(code)
|
Test 'python -m setuptools_scm' invocation (__main__.py).import os.path
mainfile = os.path.join(
os.path.dirname(__file__), "..", "setuptools_scm", "__main__.py")
with open(mainfile) as f:
code = compile(f.read(), "__main__.py", 'exec')
exec(code)
|
<commit_before><commit_msg>Test 'python -m setuptools_scm' invocation (__main__.py).<commit_after>import os.path
mainfile = os.path.join(
os.path.dirname(__file__), "..", "setuptools_scm", "__main__.py")
with open(mainfile) as f:
code = compile(f.read(), "__main__.py", 'exec')
exec(code)
|
|
2f3b3a34cfed77fea3c48ec17b8c2e40603bbcc0
|
app/src/visualizer.py
|
app/src/visualizer.py
|
'''
Created on Sep 6, 2017
@author: Hossein
This file handles all the visualizations. It assumes that the stats are ready, and it
contains the functions that generate the jpg's
'''
import os
from utils import get_visual_data_path
import numpy as np
import matplotlib.pyplot as plt
def generate_bar_chart(labels=(), values=(), file_name=None):
    '''
    generates a bar chart and saves it to a file.
    input
    :list : a list of labels for the chart. These could be the divisions of the organization
    :list : a list of values, one per label
    :str : a file_name to save the final result in
    '''
    if len(labels) != len(values):
        raise ValueError("The sizes of labels and values must be equal.")
    n = len(labels)
    x = np.arange(n)
    y = np.array(values)
    plt.bar(x, y, facecolor='#ff9999', edgecolor='white')
    #plt.bar(x, y, facecolor='#9999ff', edgecolor='white', )
    #plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
    for x1, y1 in zip(x, y):
        plt.text(x1, y1, '$%.0f' % y1, ha='center', va='bottom', fontsize=25)
    #plt.xlim(-.5, n )
    plt.ylim(0, 1.2 * max(values))
    plt.xticks(x, labels, fontsize=15)
    # no y axis ticks
    plt.yticks([])
    plt.title("Light The Night fundraising results", fontsize=20)
    # remove all the spines except the bottom one [trick here](https://stackoverflow.com/questions/18968024/how-to-remove-axis-in-pyplot-bar)
    for loc, spine in plt.gca().spines.items():
        if loc != 'bottom':
            spine.set_color('none')
    plt.savefig(file_name, dpi=200)
    #plt.show()
if __name__ == "__main__":
    file_name = os.path.join(get_visual_data_path(), 'test.jpg')
    v = [326, 5000, 20]
    l = ['Base Band', 'Radio', 'Indoor']
    generate_bar_chart(labels=l, values=v, file_name=file_name)
    print('Done!')
|
Add a library for data visualization and add a dummy function
|
Add a library for data visualization and add a dummy function
|
Python
|
apache-2.0
|
TeMedy/DaViL,TeMedy/DaViL
|
Add a library for data visualization and add a dummy function
|
'''
Created on Sep 6, 2017
@author: Hossein
This file handles all the visualizations. It assumes that the stats are ready and it
contains the function to generate the jpgs.
'''
import os
from utils import get_visual_data_path
import numpy as np
import matplotlib.pyplot as plt
def generate_bar_chart(labels=[], values=[], file_name=None):
'''
generates a bar chart and saves it to a file.
input
    :list : a list of labels for the chart. These could be the divisions of the organization
    :list : a list of values for each label
    :str : a file_name to save the final result in
    '''
    if len(labels) != len(values):
        raise ValueError("The number of labels and values must be equal.")
n = len(labels)
x = np.arange(n)
y = np.array(values)
plt.bar(x, y, facecolor='#ff9999', edgecolor='white', )
#plt.bar(x, y, facecolor='#9999ff', edgecolor='white', )
#plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
for x1,y1 in zip(x, y):
plt.text(x1, y1, '$%.0f' % y1, ha='center', va= 'bottom', fontsize = 25)
#plt.xlim(-.5, n )
plt.ylim(0, 1.2 * max(values) )
plt.xticks(x, labels, fontsize = 15)
# no y axis ticks
plt.yticks([])
plt.title("Light The Night fundraising results", fontsize = 20)
# remove all the spines except the bottom one [trick here](https://stackoverflow.com/questions/18968024/how-to-remove-axis-in-pyplot-bar)
for loc, spine in plt.axes().axes.spines.items():
if loc != 'bottom':
spine.set_color('none')
plt.savefig(file_name, dpi=200)
#plt.show()
if __name__ == "__main__":
file_name = os.path.join(get_visual_data_path(), 'test.jpg')
v = [326, 5000, 20]
l = ['Base Band', 'Radio', 'Indoor']
    generate_bar_chart(labels=l, values=v, file_name=file_name)
print('Done!')
|
<commit_before><commit_msg>Add a library for data visualization and add a dummy function<commit_after>
|
'''
Created on Sep 6, 2017
@author: Hossein
This file handles all the visualizations. It assumes that the stats are ready and it
contains the function to generate the jpgs.
'''
import os
from utils import get_visual_data_path
import numpy as np
import matplotlib.pyplot as plt
def generate_bar_chart(labels=[], values=[], file_name=None):
'''
generates a bar chart and saves it to a file.
input
    :list : a list of labels for the chart. These could be the divisions of the organization
    :list : a list of values for each label
    :str : a file_name to save the final result in
    '''
    if len(labels) != len(values):
        raise ValueError("The number of labels and values must be equal.")
n = len(labels)
x = np.arange(n)
y = np.array(values)
plt.bar(x, y, facecolor='#ff9999', edgecolor='white', )
#plt.bar(x, y, facecolor='#9999ff', edgecolor='white', )
#plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
for x1,y1 in zip(x, y):
plt.text(x1, y1, '$%.0f' % y1, ha='center', va= 'bottom', fontsize = 25)
#plt.xlim(-.5, n )
plt.ylim(0, 1.2 * max(values) )
plt.xticks(x, labels, fontsize = 15)
# no y axis ticks
plt.yticks([])
plt.title("Light The Night fundraising results", fontsize = 20)
# remove all the spines except the bottom one [trick here](https://stackoverflow.com/questions/18968024/how-to-remove-axis-in-pyplot-bar)
for loc, spine in plt.axes().axes.spines.items():
if loc != 'bottom':
spine.set_color('none')
plt.savefig(file_name, dpi=200)
#plt.show()
if __name__ == "__main__":
file_name = os.path.join(get_visual_data_path(), 'test.jpg')
v = [326, 5000, 20]
l = ['Base Band', 'Radio', 'Indoor']
    generate_bar_chart(labels=l, values=v, file_name=file_name)
print('Done!')
|
Add a library for data visualization and add a dummy function'''
Created on Sep 6, 2017
@author: Hossein
This file handles all the visualizations. It assumes that the stats are ready and it
contains the function to generate the jpgs.
'''
import os
from utils import get_visual_data_path
import numpy as np
import matplotlib.pyplot as plt
def generate_bar_chart(labels=[], values=[], file_name=None):
'''
generates a bar chart and saves it to a file.
input
    :list : a list of labels for the chart. These could be the divisions of the organization
    :list : a list of values for each label
    :str : a file_name to save the final result in
    '''
    if len(labels) != len(values):
        raise ValueError("The number of labels and values must be equal.")
n = len(labels)
x = np.arange(n)
y = np.array(values)
plt.bar(x, y, facecolor='#ff9999', edgecolor='white', )
#plt.bar(x, y, facecolor='#9999ff', edgecolor='white', )
#plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
for x1,y1 in zip(x, y):
plt.text(x1, y1, '$%.0f' % y1, ha='center', va= 'bottom', fontsize = 25)
#plt.xlim(-.5, n )
plt.ylim(0, 1.2 * max(values) )
plt.xticks(x, labels, fontsize = 15)
# no y axis ticks
plt.yticks([])
plt.title("Light The Night fundraising results", fontsize = 20)
# remove all the spines except the bottom one [trick here](https://stackoverflow.com/questions/18968024/how-to-remove-axis-in-pyplot-bar)
for loc, spine in plt.axes().axes.spines.items():
if loc != 'bottom':
spine.set_color('none')
plt.savefig(file_name, dpi=200)
#plt.show()
if __name__ == "__main__":
file_name = os.path.join(get_visual_data_path(), 'test.jpg')
v = [326, 5000, 20]
l = ['Base Band', 'Radio', 'Indoor']
    generate_bar_chart(labels=l, values=v, file_name=file_name)
print('Done!')
|
<commit_before><commit_msg>Add a library for data visualization and add a dummy function<commit_after>'''
Created on Sep 6, 2017
@author: Hossein
This file handles all the visualizations. It assumes that the stats are ready and it
contains the function to generate the jpgs.
'''
import os
from utils import get_visual_data_path
import numpy as np
import matplotlib.pyplot as plt
def generate_bar_chart(labels=[], values=[], file_name=None):
'''
generates a bar chart and saves it to a file.
input
    :list : a list of labels for the chart. These could be the divisions of the organization
    :list : a list of values for each label
    :str : a file_name to save the final result in
    '''
    if len(labels) != len(values):
        raise ValueError("The number of labels and values must be equal.")
n = len(labels)
x = np.arange(n)
y = np.array(values)
plt.bar(x, y, facecolor='#ff9999', edgecolor='white', )
#plt.bar(x, y, facecolor='#9999ff', edgecolor='white', )
#plt.bar(X, -Y2, facecolor='#ff9999', edgecolor='white')
for x1,y1 in zip(x, y):
plt.text(x1, y1, '$%.0f' % y1, ha='center', va= 'bottom', fontsize = 25)
#plt.xlim(-.5, n )
plt.ylim(0, 1.2 * max(values) )
plt.xticks(x, labels, fontsize = 15)
# no y axis ticks
plt.yticks([])
plt.title("Light The Night fundraising results", fontsize = 20)
# remove all the spines except the bottom one [trick here](https://stackoverflow.com/questions/18968024/how-to-remove-axis-in-pyplot-bar)
for loc, spine in plt.axes().axes.spines.items():
if loc != 'bottom':
spine.set_color('none')
plt.savefig(file_name, dpi=200)
#plt.show()
if __name__ == "__main__":
file_name = os.path.join(get_visual_data_path(), 'test.jpg')
v = [326, 5000, 20]
l = ['Base Band', 'Radio', 'Indoor']
    generate_bar_chart(labels=l, values=v, file_name=file_name)
print('Done!')
|
|
f57ac6c3c7935d44956fac1295e417e34039c703
|
bin/historical/migrations/move_trips_places_sections_stops_to_analysis_timeseries_db.py
|
bin/historical/migrations/move_trips_places_sections_stops_to_analysis_timeseries_db.py
|
import logging
import arrow
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
def convert_wrapper_to_entry(key, wrapper):
logging.debug("found user_id in wrapper %s" % wrapper["user_id"])
wrapper_entry = ecwe.Entry.create_entry(wrapper["user_id"], key, wrapper)
wrapper_entry["_id"] = wrapper["_id"]
return wrapper_entry
def convert_collection(collection, key):
result_cursor = collection.find()
logging.info("About to convert %s entries" % result_cursor.count())
for i, wrapper in enumerate(result_cursor):
entry = convert_wrapper_to_entry(key, wrapper)
if entry.get_id() != wrapper["_id"]:
            logging.warning("entry.id = %s, wrapper.id = %s" % (entry.get_id(), wrapper["_id"]))
if i % 10000 == 0:
print "converted %s -> %s" % (wrapper, entry)
edb.get_timeseries_db().insert(entry)
collection.remove(wrapper)
def move_ts_entries(key):
tdb = edb.get_timeseries_db()
atdb = edb.get_analysis_timeseries_db()
result_cursor = tdb.find({'metadata.key': key})
logging.info("About to convert %s entries" % result_cursor.count())
for i, entry_doc in enumerate(result_cursor):
if i % 10000 == 0:
print "moved %s from one ts to the other" % (entry_doc)
atdb.insert(entry_doc)
tdb.remove(entry_doc)
if __name__ == '__main__':
# No arguments - muahahahaha. Just going to copy known fields over.
convert_collection(edb.get_trip_new_db(), "segmentation/raw_trip")
convert_collection(edb.get_place_db(), "segmentation/raw_place")
convert_collection(edb.get_section_new_db(), "segmentation/raw_section")
convert_collection(edb.get_stop_db(), "segmentation/raw_stop")
move_ts_entries("analysis/smoothing")
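A pre-flight sketch that could run before the migration above; it only counts documents and writes nothing. The collection list mirrors the __main__ block, but the snippet itself is an assumption, not part of the commit.

# Hypothetical dry run: report how many entries each step would touch.
import emission.core.get_database as edb

pending = [
    (edb.get_trip_new_db(), None),
    (edb.get_place_db(), None),
    (edb.get_section_new_db(), None),
    (edb.get_stop_db(), None),
    (edb.get_timeseries_db(), {'metadata.key': 'analysis/smoothing'}),
]
for collection, query in pending:
    cursor = collection.find(query) if query is not None else collection.find()
    print("%s: %d entries to move" % (collection.name, cursor.count()))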
|
Add a new script to move existing entries to their correct location
|
Add a new script to move existing entries to their correct location
Now that we have moved it out of the usercache into its own class
|
Python
|
bsd-3-clause
|
e-mission/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,sunil07t/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server
|
Add a new script to move existing entries to their correct location
Now that we have moved it out of the usercache into its own class
|
import logging
import arrow
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
def convert_wrapper_to_entry(key, wrapper):
logging.debug("found user_id in wrapper %s" % wrapper["user_id"])
wrapper_entry = ecwe.Entry.create_entry(wrapper["user_id"], key, wrapper)
wrapper_entry["_id"] = wrapper["_id"]
return wrapper_entry
def convert_collection(collection, key):
result_cursor = collection.find()
logging.info("About to convert %s entries" % result_cursor.count())
for i, wrapper in enumerate(result_cursor):
entry = convert_wrapper_to_entry(key, wrapper)
if entry.get_id() != wrapper["_id"]:
            logging.warning("entry.id = %s, wrapper.id = %s" % (entry.get_id(), wrapper["_id"]))
if i % 10000 == 0:
print "converted %s -> %s" % (wrapper, entry)
edb.get_timeseries_db().insert(entry)
collection.remove(wrapper)
def move_ts_entries(key):
tdb = edb.get_timeseries_db()
atdb = edb.get_analysis_timeseries_db()
result_cursor = tdb.find({'metadata.key': key})
logging.info("About to convert %s entries" % result_cursor.count())
for i, entry_doc in enumerate(result_cursor):
if i % 10000 == 0:
print "moved %s from one ts to the other" % (entry_doc)
atdb.insert(entry_doc)
tdb.remove(entry_doc)
if __name__ == '__main__':
# No arguments - muahahahaha. Just going to copy known fields over.
convert_collection(edb.get_trip_new_db(), "segmentation/raw_trip")
convert_collection(edb.get_place_db(), "segmentation/raw_place")
convert_collection(edb.get_section_new_db(), "segmentation/raw_section")
convert_collection(edb.get_stop_db(), "segmentation/raw_stop")
move_ts_entries("analysis/smoothing")
|
<commit_before><commit_msg>Add a new script to move existing entries to their correct location
Now that we have moved it out of the usercache into its own class<commit_after>
|
import logging
import arrow
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
def convert_wrapper_to_entry(key, wrapper):
logging.debug("found user_id in wrapper %s" % wrapper["user_id"])
wrapper_entry = ecwe.Entry.create_entry(wrapper["user_id"], key, wrapper)
wrapper_entry["_id"] = wrapper["_id"]
return wrapper_entry
def convert_collection(collection, key):
result_cursor = collection.find()
logging.info("About to convert %s entries" % result_cursor.count())
for i, wrapper in enumerate(result_cursor):
entry = convert_wrapper_to_entry(key, wrapper)
if entry.get_id() != wrapper["_id"]:
            logging.warning("entry.id = %s, wrapper.id = %s" % (entry.get_id(), wrapper["_id"]))
if i % 10000 == 0:
print "converted %s -> %s" % (wrapper, entry)
edb.get_timeseries_db().insert(entry)
collection.remove(wrapper)
def move_ts_entries(key):
tdb = edb.get_timeseries_db()
atdb = edb.get_analysis_timeseries_db()
result_cursor = tdb.find({'metadata.key': key})
logging.info("About to convert %s entries" % result_cursor.count())
for i, entry_doc in enumerate(result_cursor):
if i % 10000 == 0:
print "moved %s from one ts to the other" % (entry_doc)
atdb.insert(entry_doc)
tdb.remove(entry_doc)
if __name__ == '__main__':
# No arguments - muahahahaha. Just going to copy known fields over.
convert_collection(edb.get_trip_new_db(), "segmentation/raw_trip")
convert_collection(edb.get_place_db(), "segmentation/raw_place")
convert_collection(edb.get_section_new_db(), "segmentation/raw_section")
convert_collection(edb.get_stop_db(), "segmentation/raw_stop")
move_ts_entries("analysis/smoothing")
|
Add a new script to move existing entries to their correct location
Now that we have moved it out of the usercache into its own classimport logging
import arrow
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
def convert_wrapper_to_entry(key, wrapper):
logging.debug("found user_id in wrapper %s" % wrapper["user_id"])
wrapper_entry = ecwe.Entry.create_entry(wrapper["user_id"], key, wrapper)
wrapper_entry["_id"] = wrapper["_id"]
return wrapper_entry
def convert_collection(collection, key):
result_cursor = collection.find()
logging.info("About to convert %s entries" % result_cursor.count())
for i, wrapper in enumerate(result_cursor):
entry = convert_wrapper_to_entry(key, wrapper)
if entry.get_id() != wrapper["_id"]:
            logging.warning("entry.id = %s, wrapper.id = %s" % (entry.get_id(), wrapper["_id"]))
if i % 10000 == 0:
print "converted %s -> %s" % (wrapper, entry)
edb.get_timeseries_db().insert(entry)
collection.remove(wrapper)
def move_ts_entries(key):
tdb = edb.get_timeseries_db()
atdb = edb.get_analysis_timeseries_db()
result_cursor = tdb.find({'metadata.key': key})
logging.info("About to convert %s entries" % result_cursor.count())
for i, entry_doc in enumerate(result_cursor):
if i % 10000 == 0:
print "moved %s from one ts to the other" % (entry_doc)
atdb.insert(entry_doc)
tdb.remove(entry_doc)
if __name__ == '__main__':
# No arguments - muahahahaha. Just going to copy known fields over.
convert_collection(edb.get_trip_new_db(), "segmentation/raw_trip")
convert_collection(edb.get_place_db(), "segmentation/raw_place")
convert_collection(edb.get_section_new_db(), "segmentation/raw_section")
convert_collection(edb.get_stop_db(), "segmentation/raw_stop")
move_ts_entries("analysis/smoothing")
|
<commit_before><commit_msg>Add a new script to move existing entries to their correct location
Now that we have moved it out of the usercache into its own class<commit_after>import logging
import arrow
import emission.core.get_database as edb
import emission.core.wrapper.entry as ecwe
def convert_wrapper_to_entry(key, wrapper):
logging.debug("found user_id in wrapper %s" % wrapper["user_id"])
wrapper_entry = ecwe.Entry.create_entry(wrapper["user_id"], key, wrapper)
wrapper_entry["_id"] = wrapper["_id"]
return wrapper_entry
def convert_collection(collection, key):
result_cursor = collection.find()
logging.info("About to convert %s entries" % result_cursor.count())
for i, wrapper in enumerate(result_cursor):
entry = convert_wrapper_to_entry(key, wrapper)
if entry.get_id() != wrapper["_id"]:
            logging.warning("entry.id = %s, wrapper.id = %s" % (entry.get_id(), wrapper["_id"]))
if i % 10000 == 0:
print "converted %s -> %s" % (wrapper, entry)
edb.get_timeseries_db().insert(entry)
collection.remove(wrapper)
def move_ts_entries(key):
tdb = edb.get_timeseries_db()
atdb = edb.get_analysis_timeseries_db()
result_cursor = tdb.find({'metadata.key': key})
logging.info("About to convert %s entries" % result_cursor.count())
for i, entry_doc in enumerate(result_cursor):
if i % 10000 == 0:
print "moved %s from one ts to the other" % (entry_doc)
atdb.insert(entry_doc)
tdb.remove(entry_doc)
if __name__ == '__main__':
# No arguments - muahahahaha. Just going to copy known fields over.
convert_collection(edb.get_trip_new_db(), "segmentation/raw_trip")
convert_collection(edb.get_place_db(), "segmentation/raw_place")
convert_collection(edb.get_section_new_db(), "segmentation/raw_section")
convert_collection(edb.get_stop_db(), "segmentation/raw_stop")
move_ts_entries("analysis/smoothing")
|
|
5159dcdbdb1b73a57a550b239b9a92ae7a98e73f
|
bindings/python/tests/runner.py
|
bindings/python/tests/runner.py
|
import summer
import unittest
class TestDataTypes (unittest.TestCase):
def testitem(self):
i = summer.ItemData ()
i['title'] = 'hello world'
self.assertEqual (i['title'], 'hello world') #first
# Make sure strings aren't freed
self.assertEqual (i['title'], 'hello world') #second
if __name__ == '__main__':
unittest.main ()
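For completeness, a sketch of driving this case programmatically instead of through unittest.main(); the assumption that the file is importable as runner from the tests directory is mine, not the commit's.

# Hypothetical programmatic invocation (not part of the commit).
import unittest
import runner

suite = unittest.TestLoader().loadTestsFromTestCase(runner.TestDataTypes)
unittest.TextTestRunner(verbosity=2).run(suite)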
|
Add test case for last commit (should probably be squashed)
|
python-bindings: Add test case for last commit (should probably be squashed)
|
Python
|
lgpl-2.1
|
ozamosi/summer,ozamosi/summer,ozamosi/summer,ozamosi/summer
|
python-bindings: Add test case for last commit (should probably be squashed)
|
import summer
import unittest
class TestDataTypes (unittest.TestCase):
def testitem(self):
i = summer.ItemData ()
i['title'] = 'hello world'
self.assertEqual (i['title'], 'hello world') #first
# Make sure strings aren't freed
self.assertEqual (i['title'], 'hello world') #second
if __name__ == '__main__':
unittest.main ()
|
<commit_before><commit_msg>python-bindings: Add test case for last commit (should probably be squashed)<commit_after>
|
import summer
import unittest
class TestDataTypes (unittest.TestCase):
def testitem(self):
i = summer.ItemData ()
i['title'] = 'hello world'
self.assertEqual (i['title'], 'hello world') #first
# Make sure strings aren't freed
self.assertEqual (i['title'], 'hello world') #second
if __name__ == '__main__':
unittest.main ()
|
python-bindings: Add test case for last commit (should probably be squashed)import summer
import unittest
class TestDataTypes (unittest.TestCase):
def testitem(self):
i = summer.ItemData ()
i['title'] = 'hello world'
self.assertEqual (i['title'], 'hello world') #first
# Make sure strings aren't freed
self.assertEqual (i['title'], 'hello world') #second
if __name__ == '__main__':
unittest.main ()
|
<commit_before><commit_msg>python-bindings: Add test case for last commit (should probably be squashed)<commit_after>import summer
import unittest
class TestDataTypes (unittest.TestCase):
def testitem(self):
i = summer.ItemData ()
i['title'] = 'hello world'
self.assertEqual (i['title'], 'hello world') #first
# Make sure strings aren't freed
self.assertEqual (i['title'], 'hello world') #second
if __name__ == '__main__':
unittest.main ()
|
|
665e0ef301ca91eaa715c236e1db7e5566959b57
|
impschedules/usermanagement.py
|
impschedules/usermanagement.py
|
from flask import Flask, render_template, flash, request, Markup, session, redirect, url_for, escape, Response, current_app, send_file
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
def login_required(fn):
@wraps(fn)
def decorated_function(*args, **kwargs):
if "username" not in session:
flash('You must log in to access that page.', 'error')
return redirect(url_for('index'))
return fn(*args, **kwargs)
return decorated_function
def check_login():
if ("username" in session):
return session["admin"]
else:
return None
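A hedged example of the decorator in use; the Flask app and the /admin endpoint are invented for illustration and do not appear in this module.

# Hypothetical route protected by login_required.
from flask import Flask
from impschedules.usermanagement import login_required

app = Flask(__name__)

@app.route('/admin')
@login_required
def admin_dashboard():
    return 'Visible only once "username" is in the session.'

Note that the decorator's redirect assumes an endpoint named index exists on the app, which this sketch does not define.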
|
Move user management to a new file
|
Move user management to a new file
|
Python
|
agpl-3.0
|
pwyf/IATI-Implementation-Schedules,pwyf/IATI-Implementation-Schedules,pwyf/IATI-Implementation-Schedules,pwyf/IATI-Implementation-Schedules
|
Move user management to a new file
|
from flask import Flask, render_template, flash, request, Markup, session, redirect, url_for, escape, Response, current_app, send_file
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
def login_required(fn):
@wraps(fn)
def decorated_function(*args, **kwargs):
if "username" not in session:
flash('You must log in to access that page.', 'error')
return redirect(url_for('index'))
return fn(*args, **kwargs)
return decorated_function
def check_login():
if ("username" in session):
return session["admin"]
else:
return None
|
<commit_before><commit_msg>Move user management to a new file<commit_after>
|
from flask import Flask, render_template, flash, request, Markup, session, redirect, url_for, escape, Response, current_app, send_file
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
def login_required(fn):
@wraps(fn)
def decorated_function(*args, **kwargs):
if "username" not in session:
flash('You must log in to access that page.', 'error')
return redirect(url_for('index'))
return fn(*args, **kwargs)
return decorated_function
def check_login():
if ("username" in session):
return session["admin"]
else:
return None
|
Move user management to a new filefrom flask import Flask, render_template, flash, request, Markup, session, redirect, url_for, escape, Response, current_app, send_file
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
def login_required(fn):
@wraps(fn)
def decorated_function(*args, **kwargs):
if "username" not in session:
flash('You must log in to access that page.', 'error')
return redirect(url_for('index'))
return fn(*args, **kwargs)
return decorated_function
def check_login():
if ("username" in session):
return session["admin"]
else:
return None
|
<commit_before><commit_msg>Move user management to a new file<commit_after>from flask import Flask, render_template, flash, request, Markup, session, redirect, url_for, escape, Response, current_app, send_file
from werkzeug.security import generate_password_hash, check_password_hash
from functools import wraps
def login_required(fn):
@wraps(fn)
def decorated_function(*args, **kwargs):
if "username" not in session:
flash('You must log in to access that page.', 'error')
return redirect(url_for('index'))
return fn(*args, **kwargs)
return decorated_function
def check_login():
if ("username" in session):
return session["admin"]
else:
return None
|
|
a7aa9301daeff4c93bd907f51114af68b36669df
|
redd/tests/test_api_data.py
|
redd/tests/test_api_data.py
|
#!/usr/bin/env python
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import simplejson as json
from redd.models import Dataset
from redd.tests import utils
class TestAPIData(TestCase):
def setUp(self):
settings.CELERY_ALWAYS_EAGER = True
self.solr = utils.get_test_solr()
self.upload = utils.get_test_upload()
self.dataset = utils.get_test_dataset(self.upload)
self.client = Client()
def test_get(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
list_result = body['objects'][0]
response = self.client.get('/api/1.0/data/%s/' % list_result['id'])
self.assertEqual(response.status_code, 200)
get_result = json.loads(response.content)
self.assertEqual(list_result, get_result)
def test_list(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
self.assertEqual(body['meta']['total_count'], 4)
self.assertEqual(body['objects'][0]['dataset'], '/api/1.0/dataset/%i/' % self.dataset.id)
self.assertIn('data', body['objects'][0])
self.assertIn('id', body['objects'][0])
self.assertIn('resource_uri', body['objects'][0])
self.assertIn('row', body['objects'][0])
def test_create(self):
pass
def test_search(self):
pass
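Since test_create and test_search are left as stubs, a sketch of how the search case might eventually look, assuming the endpoint grows a q parameter for full-text queries -- an assumption, not an API this commit defines.

# Hypothetical future body for test_search; the 'q' parameter is assumed.
def test_search(self):
    self.dataset.import_data()
    utils.wait()

    response = self.client.get('/api/1.0/data/?q=Tribune')

    self.assertEqual(response.status_code, 200)
    body = json.loads(response.content)
    self.assertLessEqual(body['meta']['total_count'], 4)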
|
Add test_get for api data (and missing file).
|
Add test_get for api data (and missing file).
|
Python
|
mit
|
ibrahimcesar/panda,PalmBeachPost/panda,datadesk/panda,datadesk/panda,newsapps/panda,datadesk/panda,PalmBeachPost/panda,PalmBeachPost/panda,pandaproject/panda,ibrahimcesar/panda,NUKnightLab/panda,pandaproject/panda,pandaproject/panda,newsapps/panda,NUKnightLab/panda,ibrahimcesar/panda,newsapps/panda,NUKnightLab/panda,datadesk/panda,ibrahimcesar/panda,ibrahimcesar/panda,pandaproject/panda,PalmBeachPost/panda,NUKnightLab/panda,pandaproject/panda,datadesk/panda,PalmBeachPost/panda,newsapps/panda
|
Add test_get for api data (and missing file).
|
#!/usr/bin/env python
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import simplejson as json
from redd.models import Dataset
from redd.tests import utils
class TestAPIData(TestCase):
def setUp(self):
settings.CELERY_ALWAYS_EAGER = True
self.solr = utils.get_test_solr()
self.upload = utils.get_test_upload()
self.dataset = utils.get_test_dataset(self.upload)
self.client = Client()
def test_get(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
list_result = body['objects'][0]
response = self.client.get('/api/1.0/data/%s/' % list_result['id'])
self.assertEqual(response.status_code, 200)
get_result = json.loads(response.content)
self.assertEqual(list_result, get_result)
def test_list(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
self.assertEqual(body['meta']['total_count'], 4)
self.assertEqual(body['objects'][0]['dataset'], '/api/1.0/dataset/%i/' % self.dataset.id)
self.assertIn('data', body['objects'][0])
self.assertIn('id', body['objects'][0])
self.assertIn('resource_uri', body['objects'][0])
self.assertIn('row', body['objects'][0])
def test_create(self):
pass
def test_search(self):
pass
|
<commit_before><commit_msg>Add test_get for api data (and missing file).<commit_after>
|
#!/usr/bin/env python
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import simplejson as json
from redd.models import Dataset
from redd.tests import utils
class TestAPIData(TestCase):
def setUp(self):
settings.CELERY_ALWAYS_EAGER = True
self.solr = utils.get_test_solr()
self.upload = utils.get_test_upload()
self.dataset = utils.get_test_dataset(self.upload)
self.client = Client()
def test_get(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
list_result = body['objects'][0]
response = self.client.get('/api/1.0/data/%s/' % list_result['id'])
self.assertEqual(response.status_code, 200)
get_result = json.loads(response.content)
self.assertEqual(list_result, get_result)
def test_list(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
self.assertEqual(body['meta']['total_count'], 4)
self.assertEqual(body['objects'][0]['dataset'], '/api/1.0/dataset/%i/' % self.dataset.id)
self.assertIn('data', body['objects'][0])
self.assertIn('id', body['objects'][0])
self.assertIn('resource_uri', body['objects'][0])
self.assertIn('row', body['objects'][0])
def test_create(self):
pass
def test_search(self):
pass
|
Add test_get for api data (and missing file).#!/usr/bin/env python
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import simplejson as json
from redd.models import Dataset
from redd.tests import utils
class TestAPIData(TestCase):
def setUp(self):
settings.CELERY_ALWAYS_EAGER = True
self.solr = utils.get_test_solr()
self.upload = utils.get_test_upload()
self.dataset = utils.get_test_dataset(self.upload)
self.client = Client()
def test_get(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
list_result = body['objects'][0]
response = self.client.get('/api/1.0/data/%s/' % list_result['id'])
self.assertEqual(response.status_code, 200)
get_result = json.loads(response.content)
self.assertEqual(list_result, get_result)
def test_list(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
self.assertEqual(body['meta']['total_count'], 4)
self.assertEqual(body['objects'][0]['dataset'], '/api/1.0/dataset/%i/' % self.dataset.id)
self.assertIn('data', body['objects'][0])
self.assertIn('id', body['objects'][0])
self.assertIn('resource_uri', body['objects'][0])
self.assertIn('row', body['objects'][0])
def test_create(self):
pass
def test_search(self):
pass
|
<commit_before><commit_msg>Add test_get for api data (and missing file).<commit_after>#!/usr/bin/env python
from django.conf import settings
from django.test import TestCase
from django.test.client import Client
from django.utils import simplejson as json
from redd.models import Dataset
from redd.tests import utils
class TestAPIData(TestCase):
def setUp(self):
settings.CELERY_ALWAYS_EAGER = True
self.solr = utils.get_test_solr()
self.upload = utils.get_test_upload()
self.dataset = utils.get_test_dataset(self.upload)
self.client = Client()
def test_get(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
list_result = body['objects'][0]
response = self.client.get('/api/1.0/data/%s/' % list_result['id'])
self.assertEqual(response.status_code, 200)
get_result = json.loads(response.content)
self.assertEqual(list_result, get_result)
def test_list(self):
self.dataset.import_data()
utils.wait()
response = self.client.get('/api/1.0/data/')
self.assertEqual(response.status_code, 200)
body = json.loads(response.content)
self.assertEqual(body['meta']['total_count'], 4)
self.assertEqual(body['objects'][0]['dataset'], '/api/1.0/dataset/%i/' % self.dataset.id)
self.assertIn('data', body['objects'][0])
self.assertIn('id', body['objects'][0])
self.assertIn('resource_uri', body['objects'][0])
self.assertIn('row', body['objects'][0])
def test_create(self):
pass
def test_search(self):
pass
|
|
9eab96f40634c81ed53d54d2c9df126a445dfd56
|
registry/spec_tools/util.py
|
registry/spec_tools/util.py
|
"""Utility functions not closely tied to other spec_tools types."""
# Copyright (c) 2018-2019 Collabora, Ltd.
# Copyright (c) 2013-2019 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getElemName(elem, default=None):
"""Get the name associated with an element, either a name child or name attribute."""
name_elem = elem.find('name')
if name_elem is not None:
return name_elem.text
# Fallback if there is no child.
return elem.get('name', default)
def getElemType(elem, default=None):
"""Get the type associated with an element, either a type child or type attribute."""
type_elem = elem.find('type')
if type_elem is not None:
return type_elem.text
# Fallback if there is no child.
return elem.get('type', default)
def findFirstWithPredicate(collection, pred):
"""Return the first element that satisfies the predicate, or None if none exist.
NOTE: Some places where this is used might be better served by changing to a dictionary.
"""
for elt in collection:
if pred(elt):
return elt
return None
def findNamedElem(elems, name):
"""Traverse a collection of elements with 'name' nodes or attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemName(elem) == name)
def findTypedElem(elems, typename):
"""Traverse a collection of elements with 'type' nodes or attributes, looking for and returning one with the right typename.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemType(elem) == typename)
def findNamedObject(collection, name):
"""Traverse a collection of elements with 'name' attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(collection, lambda elt: elt.name == name)
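Illustrative checks for the helpers above, using hand-built elements; the XML snippets are fabricated for the example.

# Exercising the helpers on fabricated elements (not part of the module).
import xml.etree.ElementTree as etree

member = etree.fromstring('<member><type>uint32_t</type><name>width</name></member>')
param = etree.fromstring('<param name="height" type="uint32_t"/>')

assert getElemName(member) == 'width'    # taken from the <name> child
assert getElemName(param) == 'height'    # falls back to the name attribute
assert getElemType(param) == 'uint32_t'  # falls back to the type attribute
assert findNamedElem([member, param], 'height') is param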
|
Add new local dependency of scripts.
|
Add new local dependency of scripts.
|
Python
|
apache-2.0
|
KhronosGroup/Vulkan-Headers,KhronosGroup/Vulkan-Headers,KhronosGroup/Vulkan-Headers
|
Add new local dependency of scripts.
|
"""Utility functions not closely tied to other spec_tools types."""
# Copyright (c) 2018-2019 Collabora, Ltd.
# Copyright (c) 2013-2019 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getElemName(elem, default=None):
"""Get the name associated with an element, either a name child or name attribute."""
name_elem = elem.find('name')
if name_elem is not None:
return name_elem.text
# Fallback if there is no child.
return elem.get('name', default)
def getElemType(elem, default=None):
"""Get the type associated with an element, either a type child or type attribute."""
type_elem = elem.find('type')
if type_elem is not None:
return type_elem.text
# Fallback if there is no child.
return elem.get('type', default)
def findFirstWithPredicate(collection, pred):
"""Return the first element that satisfies the predicate, or None if none exist.
NOTE: Some places where this is used might be better served by changing to a dictionary.
"""
for elt in collection:
if pred(elt):
return elt
return None
def findNamedElem(elems, name):
"""Traverse a collection of elements with 'name' nodes or attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemName(elem) == name)
def findTypedElem(elems, typename):
"""Traverse a collection of elements with 'type' nodes or attributes, looking for and returning one with the right typename.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemType(elem) == typename)
def findNamedObject(collection, name):
"""Traverse a collection of elements with 'name' attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(collection, lambda elt: elt.name == name)
|
<commit_before><commit_msg>Add new local dependency of scripts.<commit_after>
|
"""Utility functions not closely tied to other spec_tools types."""
# Copyright (c) 2018-2019 Collabora, Ltd.
# Copyright (c) 2013-2019 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getElemName(elem, default=None):
"""Get the name associated with an element, either a name child or name attribute."""
name_elem = elem.find('name')
if name_elem is not None:
return name_elem.text
# Fallback if there is no child.
return elem.get('name', default)
def getElemType(elem, default=None):
"""Get the type associated with an element, either a type child or type attribute."""
type_elem = elem.find('type')
if type_elem is not None:
return type_elem.text
# Fallback if there is no child.
return elem.get('type', default)
def findFirstWithPredicate(collection, pred):
"""Return the first element that satisfies the predicate, or None if none exist.
NOTE: Some places where this is used might be better served by changing to a dictionary.
"""
for elt in collection:
if pred(elt):
return elt
return None
def findNamedElem(elems, name):
"""Traverse a collection of elements with 'name' nodes or attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemName(elem) == name)
def findTypedElem(elems, typename):
"""Traverse a collection of elements with 'type' nodes or attributes, looking for and returning one with the right typename.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemType(elem) == typename)
def findNamedObject(collection, name):
"""Traverse a collection of elements with 'name' attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(collection, lambda elt: elt.name == name)
|
Add new local dependency of scripts."""Utility functions not closely tied to other spec_tools types."""
# Copyright (c) 2018-2019 Collabora, Ltd.
# Copyright (c) 2013-2019 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getElemName(elem, default=None):
"""Get the name associated with an element, either a name child or name attribute."""
name_elem = elem.find('name')
if name_elem is not None:
return name_elem.text
# Fallback if there is no child.
return elem.get('name', default)
def getElemType(elem, default=None):
"""Get the type associated with an element, either a type child or type attribute."""
type_elem = elem.find('type')
if type_elem is not None:
return type_elem.text
# Fallback if there is no child.
return elem.get('type', default)
def findFirstWithPredicate(collection, pred):
"""Return the first element that satisfies the predicate, or None if none exist.
NOTE: Some places where this is used might be better served by changing to a dictionary.
"""
for elt in collection:
if pred(elt):
return elt
return None
def findNamedElem(elems, name):
"""Traverse a collection of elements with 'name' nodes or attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemName(elem) == name)
def findTypedElem(elems, typename):
"""Traverse a collection of elements with 'type' nodes or attributes, looking for and returning one with the right typename.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemType(elem) == typename)
def findNamedObject(collection, name):
"""Traverse a collection of elements with 'name' attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(collection, lambda elt: elt.name == name)
|
<commit_before><commit_msg>Add new local dependency of scripts.<commit_after>"""Utility functions not closely tied to other spec_tools types."""
# Copyright (c) 2018-2019 Collabora, Ltd.
# Copyright (c) 2013-2019 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def getElemName(elem, default=None):
"""Get the name associated with an element, either a name child or name attribute."""
name_elem = elem.find('name')
if name_elem is not None:
return name_elem.text
# Fallback if there is no child.
return elem.get('name', default)
def getElemType(elem, default=None):
"""Get the type associated with an element, either a type child or type attribute."""
type_elem = elem.find('type')
if type_elem is not None:
return type_elem.text
# Fallback if there is no child.
return elem.get('type', default)
def findFirstWithPredicate(collection, pred):
"""Return the first element that satisfies the predicate, or None if none exist.
NOTE: Some places where this is used might be better served by changing to a dictionary.
"""
for elt in collection:
if pred(elt):
return elt
return None
def findNamedElem(elems, name):
"""Traverse a collection of elements with 'name' nodes or attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemName(elem) == name)
def findTypedElem(elems, typename):
"""Traverse a collection of elements with 'type' nodes or attributes, looking for and returning one with the right typename.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(elems, lambda elem: getElemType(elem) == typename)
def findNamedObject(collection, name):
"""Traverse a collection of elements with 'name' attributes, looking for and returning one with the right name.
NOTE: Many places where this is used might be better served by changing to a dictionary.
"""
return findFirstWithPredicate(collection, lambda elt: elt.name == name)
|
|
1d7165dc88be1aea9a233c8e794ee66b99f1c70e
|
tools/rsyslog-cdn.py
|
tools/rsyslog-cdn.py
|
#!/usr/bin/python -u
import sys
import redis
import csv
import posixpath
import datetime
import logging
import logging.handlers
from email.utils import parsedate
PRECISIONS = [
("hour", "%y-%m-%d-%H", datetime.timedelta(days=2)),
("daily", "%y-%m-%d", datetime.timedelta(days=32)),
]
logger = logging.getLogger("rsyslog-cdn")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SysLogHandler(address="/dev/log"))
store = redis.Redis()
def make_key(precision, when, key):
return "downloads:%s:%s:%s" % (
precision[0], when.strftime(precision[1]), key)
def incr(when, project, filename):
# Increment our rolling counts in Redis
for prec in PRECISIONS:
key = make_key(prec, when, project)
store.incr(key)
store.expireat(key, when + prec[2])
# Increment our filename based bucket in Redis
for prec in PRECISIONS:
key = make_key(prec, when, ":".join([project, filename]))
store.incr(key)
store.expireat(key, when + prec[2])
def process(line):
try:
row = list(csv.reader([line], delimiter=" "))[0]
path = row[7].split(" ", 1)[1]
except Exception:
return
# We only care about /packages/ urls
if not path.startswith("/packages/"):
return
# We need to get the Project and Filename
directory, filename = posixpath.split(path)
project = posixpath.basename(directory)
# We need to get the time this request occurred
rtime = datetime.datetime(*parsedate(row[5])[:6])
incr(rtime, project, filename)
if __name__ == "__main__":
line = sys.stdin.readline()
while line:
try:
process(line)
except Exception:
logger.exception("Error occured while processing '%s'", line)
raise
line = sys.stdin.readline()
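To make the key scheme concrete, an illustrative session with make_key and PRECISIONS from the script; the timestamp, project, and filename are fabricated.

# Illustrative only: the keys produced for one fabricated request.
import datetime

when = datetime.datetime(2013, 5, 17, 14, 0, 0)
print(make_key(PRECISIONS[0], when, "requests"))
# downloads:hour:13-05-17-14:requests
print(make_key(PRECISIONS[1], when, "requests:requests-1.2.3.tar.gz"))
# downloads:daily:13-05-17:requests:requests-1.2.3.tar.gz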
|
Add a script to parse incoming CDN log entries
|
Add a script to parse incoming CDN log entries
|
Python
|
bsd-3-clause
|
pydotorg/pypi,pydotorg/pypi,pydotorg/pypi,pydotorg/pypi
|
Add a script to parse incoming CDN log entries
|
#!/usr/bin/python -u
import sys
import redis
import csv
import posixpath
import datetime
import logging
import logging.handlers
from email.utils import parsedate
PRECISIONS = [
("hour", "%y-%m-%d-%H", datetime.timedelta(days=2)),
("daily", "%y-%m-%d", datetime.timedelta(days=32)),
]
logger = logging.getLogger("rsyslog-cdn")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SysLogHandler(address="/dev/log"))
store = redis.Redis()
def make_key(precision, when, key):
return "downloads:%s:%s:%s" % (
precision[0], when.strftime(precision[1]), key)
def incr(when, project, filename):
# Increment our rolling counts in Redis
for prec in PRECISIONS:
key = make_key(prec, when, project)
store.incr(key)
store.expireat(key, when + prec[2])
# Increment our filename based bucket in Redis
for prec in PRECISIONS:
key = make_key(prec, when, ":".join([project, filename]))
store.incr(key)
store.expireat(key, when + prec[2])
def process(line):
try:
row = list(csv.reader([line], delimiter=" "))[0]
path = row[7].split(" ", 1)[1]
except Exception:
return
# We only care about /packages/ urls
if not path.startswith("/packages/"):
return
# We need to get the Project and Filename
directory, filename = posixpath.split(path)
project = posixpath.basename(directory)
# We need to get the time this request occurred
rtime = datetime.datetime(*parsedate(row[5])[:6])
incr(rtime, project, filename)
if __name__ == "__main__":
line = sys.stdin.readline()
while line:
try:
process(line)
except Exception:
logger.exception("Error occured while processing '%s'", line)
raise
line = sys.stdin.readline()
|
<commit_before><commit_msg>Add a script to parse incoming CDN log entries<commit_after>
|
#!/usr/bin/python -u
import sys
import redis
import csv
import posixpath
import datetime
import logging
import logging.handlers
from email.utils import parsedate
PRECISIONS = [
("hour", "%y-%m-%d-%H", datetime.timedelta(days=2)),
("daily", "%y-%m-%d", datetime.timedelta(days=32)),
]
logger = logging.getLogger("rsyslog-cdn")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SysLogHandler(address="/dev/log"))
store = redis.Redis()
def make_key(precision, when, key):
return "downloads:%s:%s:%s" % (
precision[0], when.strftime(precision[1]), key)
def incr(when, project, filename):
# Increment our rolling counts in Redis
for prec in PRECISIONS:
key = make_key(prec, when, project)
store.incr(key)
store.expireat(key, when + prec[2])
# Increment our filename based bucket in Redis
for prec in PRECISIONS:
key = make_key(prec, when, ":".join([project, filename]))
store.incr(key)
store.expireat(key, when + prec[2])
def process(line):
try:
row = list(csv.reader([line], delimiter=" "))[0]
path = row[7].split(" ", 1)[1]
except Exception:
return
# We only care about /packages/ urls
if not path.startswith("/packages/"):
return
# We need to get the Project and Filename
directory, filename = posixpath.split(path)
project = posixpath.basename(directory)
# We need to get the time this request occurred
rtime = datetime.datetime(*parsedate(row[5])[:6])
incr(rtime, project, filename)
if __name__ == "__main__":
line = sys.stdin.readline()
while line:
try:
process(line)
except Exception:
logger.exception("Error occured while processing '%s'", line)
raise
line = sys.stdin.readline()
|
Add a script to parse incoming CDN log entries#!/usr/bin/python -u
import sys
import redis
import csv
import posixpath
import datetime
import logging
import logging.handlers
from email.utils import parsedate
PRECISIONS = [
("hour", "%y-%m-%d-%H", datetime.timedelta(days=2)),
("daily", "%y-%m-%d", datetime.timedelta(days=32)),
]
logger = logging.getLogger("rsyslog-cdn")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SysLogHandler(address="/dev/log"))
store = redis.Redis()
def make_key(precision, when, key):
return "downloads:%s:%s:%s" % (
precision[0], when.strftime(precision[1]), key)
def incr(when, project, filename):
# Increment our rolling counts in Redis
for prec in PRECISIONS:
key = make_key(prec, when, project)
store.incr(key)
store.expireat(key, when + prec[2])
# Increment our filename based bucket in Redis
for prec in PRECISIONS:
key = make_key(prec, when, ":".join([project, filename]))
store.incr(key)
store.expireat(key, when + prec[2])
def process(line):
try:
row = list(csv.reader([line], delimiter=" "))[0]
path = row[7].split(" ", 1)[1]
except Exception:
return
# We only care about /packages/ urls
if not path.startswith("/packages/"):
return
# We need to get the Project and Filename
directory, filename = posixpath.split(path)
project = posixpath.basename(directory)
# We need to get the time this request occurred
rtime = datetime.datetime(*parsedate(row[5])[:6])
incr(rtime, project, filename)
if __name__ == "__main__":
line = sys.stdin.readline()
while line:
try:
process(line)
except Exception:
logger.exception("Error occured while processing '%s'", line)
raise
line = sys.stdin.readline()
|
<commit_before><commit_msg>Add a script to parse incoming CDN log entries<commit_after>#!/usr/bin/python -u
import sys
import redis
import csv
import posixpath
import datetime
import logging
import logging.handlers
from email.utils import parsedate
PRECISIONS = [
("hour", "%y-%m-%d-%H", datetime.timedelta(days=2)),
("daily", "%y-%m-%d", datetime.timedelta(days=32)),
]
logger = logging.getLogger("rsyslog-cdn")
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.handlers.SysLogHandler(address="/dev/log"))
store = redis.Redis()
def make_key(precision, when, key):
return "downloads:%s:%s:%s" % (
precision[0], when.strftime(precision[1]), key)
def incr(when, project, filename):
# Increment our rolling counts in Redis
for prec in PRECISIONS:
key = make_key(prec, when, project)
store.incr(key)
store.expireat(key, when + prec[2])
# Increment our filename based bucket in Redis
for prec in PRECISIONS:
key = make_key(prec, when, ":".join([project, filename]))
store.incr(key)
store.expireat(key, when + prec[2])
def process(line):
try:
row = list(csv.reader([line], delimiter=" "))[0]
path = row[7].split(" ", 1)[1]
except Exception:
return
# We only care about /packages/ urls
if not path.startswith("/packages/"):
return
# We need to get the Project and Filename
directory, filename = posixpath.split(path)
project = posixpath.basename(directory)
# We need to get the time this request occurred
rtime = datetime.datetime(*parsedate(row[5])[:6])
incr(rtime, project, filename)
if __name__ == "__main__":
line = sys.stdin.readline()
while line:
try:
process(line)
except Exception:
logger.exception("Error occured while processing '%s'", line)
raise
line = sys.stdin.readline()
|
|
2d3d63adac30db637cedd0280679db1100c39525
|
tests/pool_connection_tests.py
|
tests/pool_connection_tests.py
|
"""
Tests for Connection class in the pool module
"""
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
import weakref
from queries import pool
class ConnectionTests(unittest.TestCase):
def setUp(self):
self.handle = mock.Mock()
self.handle.close = mock.Mock()
self.handle.closed = True
self.handle.isexecuting = mock.Mock(return_value=False)
self.connection = pool.Connection(self.handle)
self.connection.used_by = None
def test_handle_should_match(self):
self.assertEqual(self.handle, self.connection.handle)
def test_busy_isexecuting_is_false(self):
self.assertFalse(self.connection.busy)
def test_busy_isexecuting_is_true(self):
self.handle.isexecuting.return_value = True
self.assertTrue(self.connection.busy)
def test_busy_is_used(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.assertTrue(self.connection.busy)
def test_closed_is_true(self):
self.handle.closed = True
self.assertTrue(self.connection.closed)
def test_closed_is_false(self):
self.handle.closed = False
self.assertFalse(self.connection.closed)
def test_close_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.close)
def test_close_invokes_handle_close(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = None
self.connection.close()
        self.handle.close.assert_called_once_with()
def test_free_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.free)
def test_free_resets_used_by(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.connection.free()
self.assertIsNone(self.connection.used_by)
def test_id_value_matches(self):
self.assertEqual(id(self.handle), self.connection.id)
def test_lock_raises_when_busy(self):
self.connection.used_by = mock.Mock()
self.assertRaises(pool.ConnectionBusyError,
self.connection.lock, mock.Mock())
def test_lock_session_used_by(self):
session = mock.Mock()
self.connection.lock(session)
self.assertIn(self.connection.used_by,
weakref.getweakrefs(session))
|
Move pool.Connection tests into own file
|
Move pool.Connection tests into own file
|
Python
|
bsd-3-clause
|
gmr/queries,gmr/queries
|
Move pool.Connection tests into own file
|
"""
Tests for Connection class in the pool module
"""
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
import weakref
from queries import pool
class ConnectionTests(unittest.TestCase):
def setUp(self):
self.handle = mock.Mock()
self.handle.close = mock.Mock()
self.handle.closed = True
self.handle.isexecuting = mock.Mock(return_value=False)
self.connection = pool.Connection(self.handle)
self.connection.used_by = None
def test_handle_should_match(self):
self.assertEqual(self.handle, self.connection.handle)
def test_busy_isexecuting_is_false(self):
self.assertFalse(self.connection.busy)
def test_busy_isexecuting_is_true(self):
self.handle.isexecuting.return_value = True
self.assertTrue(self.connection.busy)
def test_busy_is_used(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.assertTrue(self.connection.busy)
def test_closed_is_true(self):
self.handle.closed = True
self.assertTrue(self.connection.closed)
def test_closed_is_false(self):
self.handle.closed = False
self.assertFalse(self.connection.closed)
def test_close_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.close)
def test_close_invokes_handle_close(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = None
self.connection.close()
        self.handle.close.assert_called_once_with()
def test_free_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.free)
def test_free_resets_used_by(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.connection.free()
self.assertIsNone(self.connection.used_by)
def test_id_value_matches(self):
self.assertEqual(id(self.handle), self.connection.id)
def test_lock_raises_when_busy(self):
self.connection.used_by = mock.Mock()
self.assertRaises(pool.ConnectionBusyError,
self.connection.lock, mock.Mock())
def test_lock_session_used_by(self):
session = mock.Mock()
self.connection.lock(session)
self.assertIn(self.connection.used_by,
weakref.getweakrefs(session))
|
<commit_before><commit_msg>Move pool.Connection tests into own file<commit_after>
|
"""
Tests for Connection class in the pool module
"""
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
import weakref
from queries import pool
class ConnectionTests(unittest.TestCase):
def setUp(self):
self.handle = mock.Mock()
self.handle.close = mock.Mock()
self.handle.closed = True
self.handle.isexecuting = mock.Mock(return_value=False)
self.connection = pool.Connection(self.handle)
self.connection.used_by = None
def test_handle_should_match(self):
self.assertEqual(self.handle, self.connection.handle)
def test_busy_isexecuting_is_false(self):
self.assertFalse(self.connection.busy)
def test_busy_isexecuting_is_true(self):
self.handle.isexecuting.return_value = True
self.assertTrue(self.connection.busy)
def test_busy_is_used(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.assertTrue(self.connection.busy)
def test_closed_is_true(self):
self.handle.closed = True
self.assertTrue(self.connection.closed)
def test_closed_is_false(self):
self.handle.closed = False
self.assertFalse(self.connection.closed)
def test_close_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.close)
def test_close_invokes_handle_close(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = None
self.connection.close()
self.handle.close.assert_called_once_with()
def test_free_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.free)
def test_free_resets_used_by(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.connection.free()
self.assertIsNone(self.connection.used_by)
def test_id_value_matches(self):
self.assertEqual(id(self.handle), self.connection.id)
def test_lock_raises_when_busy(self):
self.connection.used_by = mock.Mock()
self.assertRaises(pool.ConnectionBusyError,
self.connection.lock, mock.Mock())
def test_lock_session_used_by(self):
session = mock.Mock()
self.connection.lock(session)
self.assertIn(self.connection.used_by,
weakref.getweakrefs(session))
|
Move pool.Connection tests into own file"""
Tests for Connection class in the pool module
"""
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
import weakref
from queries import pool
class ConnectionTests(unittest.TestCase):
def setUp(self):
self.handle = mock.Mock()
self.handle.close = mock.Mock()
self.handle.closed = True
self.handle.isexecuting = mock.Mock(return_value=False)
self.connection = pool.Connection(self.handle)
self.connection.used_by = None
def test_handle_should_match(self):
self.assertEqual(self.handle, self.connection.handle)
def test_busy_isexecuting_is_false(self):
self.assertFalse(self.connection.busy)
def test_busy_isexecuting_is_true(self):
self.handle.isexecuting.return_value = True
self.assertTrue(self.connection.busy)
def test_busy_is_used(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.assertTrue(self.connection.busy)
def test_closed_is_true(self):
self.handle.closed = True
self.assertTrue(self.connection.closed)
def test_closed_is_false(self):
self.handle.closed = False
self.assertFalse(self.connection.closed)
def test_close_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.close)
def test_close_invokes_handle_close(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = None
self.connection.close()
self.handle.close.assert_called_once_with()
def test_free_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.free)
def test_free_resets_used_by(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.connection.free()
self.assertIsNone(self.connection.used_by)
def test_id_value_matches(self):
self.assertEqual(id(self.handle), self.connection.id)
def test_lock_raises_when_busy(self):
self.connection.used_by = mock.Mock()
self.assertRaises(pool.ConnectionBusyError,
self.connection.lock, mock.Mock())
def test_lock_session_used_by(self):
session = mock.Mock()
self.connection.lock(session)
self.assertIn(self.connection.used_by,
weakref.getweakrefs(session))
|
<commit_before><commit_msg>Move pool.Connection tests into own file<commit_after>"""
Tests for Connection class in the pool module
"""
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
import weakref
from queries import pool
class ConnectionTests(unittest.TestCase):
def setUp(self):
self.handle = mock.Mock()
self.handle.close = mock.Mock()
self.handle.closed = True
self.handle.isexecuting = mock.Mock(return_value=False)
self.connection = pool.Connection(self.handle)
self.connection.used_by = None
def test_handle_should_match(self):
self.assertEqual(self.handle, self.connection.handle)
def test_busy_isexecuting_is_false(self):
self.assertFalse(self.connection.busy)
def test_busy_isexecuting_is_true(self):
self.handle.isexecuting.return_value = True
self.assertTrue(self.connection.busy)
def test_busy_is_used(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.assertTrue(self.connection.busy)
def test_closed_is_true(self):
self.handle.closed = True
self.assertTrue(self.connection.closed)
def test_closed_is_false(self):
self.handle.closed = False
self.assertFalse(self.connection.closed)
def test_close_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.close)
def test_close_invokes_handle_close(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = None
self.connection.close()
self.handle.close.assert_called_once_with()
def test_free_raises_when_busy(self):
self.handle.isexecuting.return_value = True
self.assertRaises(pool.ConnectionBusyError, self.connection.free)
def test_free_resets_used_by(self):
self.handle.isexecuting.return_value = False
self.connection.used_by = mock.Mock()
self.connection.free()
self.assertIsNone(self.connection.used_by)
def test_id_value_matches(self):
self.assertEqual(id(self.handle), self.connection.id)
def test_lock_raises_when_busy(self):
self.connection.used_by = mock.Mock()
self.assertRaises(pool.ConnectionBusyError,
self.connection.lock, mock.Mock())
def test_lock_session_used_by(self):
session = mock.Mock()
self.connection.lock(session)
self.assertIn(self.connection.used_by,
weakref.getweakrefs(session))
|
|
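Read as a specification, the tests above pin down the Connection wrapper's whole contract. Below is a minimal sketch that would satisfy them; ConnectionBusyError's real definition and any extra behaviour of queries.pool.Connection are not shown in this record, so treat this as an illustration rather than the library's implementation. Note that assertCalledOnce() was corrected to assert_called_once_with() above, since calling a nonexistent assert method on a Mock silently passes.

import weakref

class ConnectionBusyError(Exception):
    """Raised when close/free/lock is attempted on a busy connection."""

class Connection(object):
    def __init__(self, handle):
        self.handle = handle
        self.used_by = None

    @property
    def busy(self):
        # Busy while the handle is executing or a session holds the lock
        return self.handle.isexecuting() or self.used_by is not None

    @property
    def closed(self):
        return self.handle.closed

    @property
    def id(self):
        return id(self.handle)

    def close(self):
        if self.busy:
            raise ConnectionBusyError()
        self.handle.close()

    def free(self):
        if self.handle.isexecuting():
            raise ConnectionBusyError()
        self.used_by = None

    def lock(self, session):
        if self.busy:
            raise ConnectionBusyError()
        # Hold only a weak reference so an abandoned session can be collected;
        # proxies appear in weakref.getweakrefs(session), as the last test expects
        self.used_by = weakref.proxy(session)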
67cc1656a7d075c1136c6e18b63167792b8747c3
|
assignment2/my_client.py
|
assignment2/my_client.py
|
import argparse
from ReplicatorClient import ReplicatorClient  # import the class, not the module
PORT = 3000
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host", help="the ip of the host")
args = parser.parse_args()
print("Client is connecting to Server at {}:{}...".format(args.host, PORT))
client = ReplicatorClient(host=args.host, port=PORT)
value = 'foo'
print("## PUT Request: value = " + value)
resp = client.put(value)
key = resp.data
print("## PUT Response: key = " + key)
print("## GET Request: key = " + key)
resp = client.get(key)
print("## GET Response: value = " + resp.data)
if __name__ == "__main__":
main()
|
Add external client for pushing values
|
Add external client for pushing values
|
Python
|
mit
|
rimpybharot/CMPE273
|
Add external client for pushing values
|
import argparse
from ReplicatorClient import ReplicatorClient  # import the class, not the module
PORT = 3000
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host", help="the ip of the host")
args = parser.parse_args()
print("Client is connecting to Server at {}:{}...".format(args.host, PORT))
client = ReplicatorClient(host=args.host, port=PORT)
value = 'foo'
print("## PUT Request: value = " + value)
resp = client.put(value)
key = resp.data
print("## PUT Response: key = " + key)
print("## GET Request: key = " + key)
resp = client.get(key)
print("## GET Response: value = " + resp.data)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add external client for pushing values<commit_after>
|
import argparse
from ReplicatorClient import ReplicatorClient  # import the class, not the module
PORT = 3000
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host", help="the ip of the host")
args = parser.parse_args()
print("Client is connecting to Server at {}:{}...".format(args.host, PORT))
client = ReplicatorClient(host=args.host, port=PORT)
value = 'foo'
print("## PUT Request: value = " + value)
resp = client.put(value)
key = resp.data
print("## PUT Response: key = " + key)
print("## GET Request: key = " + key)
resp = client.get(key)
print("## GET Response: value = " + resp.data)
if __name__ == "__main__":
main()
|
Add external client for pushing valuesimport argparse
from ReplicatorClient import ReplicatorClient  # import the class, not the module
PORT = 3000
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host", help="the ip of the host")
args = parser.parse_args()
print("Client is connecting to Server at {}:{}...".format(args.host, PORT))
client = ReplicatorClient(host=args.host, port=PORT)
value = 'foo'
print("## PUT Request: value = " + value)
resp = client.put(value)
key = resp.data
print("## PUT Response: key = " + key)
print("## GET Request: key = " + key)
resp = client.get(key)
print("## GET Response: value = " + resp.data)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add external client for pushing values<commit_after>import argparse
from ReplicatorClient import ReplicatorClient  # import the class, not the module
PORT = 3000
def main():
parser = argparse.ArgumentParser()
parser.add_argument("host", help="the ip of the host")
args = parser.parse_args()
print("Client is connecting to Server at {}:{}...".format(args.host, PORT))
client = ReplicatorClient(host=args.host, port=PORT)
value = 'foo'
print("## PUT Request: value = " + value)
resp = client.put(value)
key = resp.data
print("## PUT Response: key = " + key)
print("## GET Request: key = " + key)
resp = client.get(key)
print("## GET Response: value = " + resp.data)
if __name__ == "__main__":
main()
|
|
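With the corrected from-import above (the original code imported the module and then called it like a class), the put/get flow can be tried without a running server via an in-memory stand-in. Everything here (Response, FakeReplicatorClient, the hex-key scheme) is invented for illustration; the real ReplicatorClient's interface beyond put/get/.data is not shown in this record.

import uuid

class Response(object):
    def __init__(self, data):
        self.data = data

class FakeReplicatorClient(object):
    """Stores values in a dict and hands back generated keys."""
    def __init__(self, host, port):
        self.host, self.port = host, port
        self._store = {}

    def put(self, value):
        key = uuid.uuid4().hex
        self._store[key] = value
        return Response(key)

    def get(self, key):
        return Response(self._store[key])

client = FakeReplicatorClient(host="127.0.0.1", port=3000)
key = client.put("foo").data
assert client.get(key).data == "foo"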
09099ab106ae4c0695502e3632e4ac1c2f459566
|
apps/teams/bulk_actions.py
|
apps/teams/bulk_actions.py
|
from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
api_subtitles_approved.send(task.get_subtitle_version())
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
|
from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from subtitles.signals import subtitles_published
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
version = task.get_subtitle_version()
api_subtitles_approved.send(version)
if version.is_public():
subtitles_published.send(version.subtitle_language, version=version)
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
|
Send subtitles_published signal for bulk approvals
|
Send subtitles_published signal for bulk approvals
This fixes pculture/amara-enterprise#608
|
Python
|
agpl-3.0
|
pculture/unisubs,pculture/unisubs,wevoice/wesub,pculture/unisubs,wevoice/wesub,wevoice/wesub,pculture/unisubs,wevoice/wesub
|
from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
api_subtitles_approved.send(task.get_subtitle_version())
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
Send subtitles_published signal for bulk approvals
This fixes pculture/amara-enterprise#608
|
from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from subtitles.signals import subtitles_published
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
version = task.get_subtitle_version()
api_subtitles_approved.send(version)
if version.is_public():
subtitles_published.send(version.subtitle_language, version=version)
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
|
<commit_before>from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
api_subtitles_approved.send(task.get_subtitle_version())
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
<commit_msg>Send subtitles_published signal for bulk approvals
This fixes pculture/amara-enterprise#608<commit_after>
|
from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from subtitles.signals import subtitles_published
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
version = task.get_subtitle_version()
api_subtitles_approved.send(version)
if version.is_public():
subtitles_published.send(version.subtitle_language, version=version)
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
|
from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
api_subtitles_approved.send(task.get_subtitle_version())
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
Send subtitles_published signal for bulk approvals
This fixes pculture/amara-enterprise#608from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from subtitles.signals import subtitles_published
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
version = task.get_subtitle_version()
api_subtitles_approved.send(version)
if version.is_public():
subtitles_published.send(version.subtitle_language, version=version)
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
|
<commit_before>from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
api_subtitles_approved.send(task.get_subtitle_version())
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
<commit_msg>Send subtitles_published signal for bulk approvals
This fixes pculture/amara-enterprise#608<commit_after>from django.contrib.contenttypes.models import ContentType
from subtitles.models import SubtitleLanguage
from subtitles.signals import subtitles_published
from teams.signals import api_subtitles_approved
from utils.csv_parser import UnicodeReader
from videos.tasks import video_changed_tasks
def complete_approve_tasks(tasks):
lang_ct = ContentType.objects.get_for_model(SubtitleLanguage)
video_ids = set()
for task in tasks:
task.do_complete_approve(lang_ct=lang_ct)
version = task.get_subtitle_version()
api_subtitles_approved.send(version)
if version.is_public():
subtitles_published.send(version.subtitle_language, version=version)
video_ids.add(task.team_video.video_id)
for video_id in video_ids:
video_changed_tasks.delay(video_id)
def add_videos_from_csv(team, user, csv_file):
from .tasks import add_team_videos
videos = []
fields = ['project', 'url', 'title', 'description', 'duration', 'language', 'transcript']
num_fields = len(fields)
try:
reader = UnicodeReader(csv_file)
header = reader.next()
if len(header) != num_fields:
raise Exception()
except:
raise ValueError(u'CSV format is not valid')
for row in reader:
videos.append(dict(zip(fields, row)))
add_team_videos.delay(team.pk, user.pk, videos)
|
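The change sends subtitles_published with the SubtitleLanguage as sender and the version as a keyword argument. The signal's actual declaration lives in subtitles/signals.py, which this record does not show; a typical Django wiring consistent with that send() call would look roughly like this:

import django.dispatch

# Sketch of a custom signal declaration; the real module may differ
subtitles_published = django.dispatch.Signal()

def on_subtitles_published(sender, version=None, **kwargs):
    # sender is the SubtitleLanguage, mirroring
    # subtitles_published.send(version.subtitle_language, version=version)
    print("subtitles published: %r on %r" % (version, sender))

subtitles_published.connect(on_subtitles_published)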
dc038e9b0da446e4d8b3f3e5964d6d6b4ec82d8e
|
snowrunner/transfer_save.py
|
snowrunner/transfer_save.py
|
"""Automate Snowrunner save file renaming described in https://steamcommunity.com/sharedfiles/filedetails/?id=2530914231"""
import argparse
import csv
import os
import shutil
def load_csv(csv_path):
save_dict = {}
with open(csv_path) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",", quotechar="'")
for row in csv_reader:
if row[0] in save_dict:
save_dict[row[0]].append(row[1])
else:
save_dict[row[0]] = [row[1]]
return save_dict
def copy_file(input_dir, input_name, output_dir, output_name, dry_run):
input = os.path.join(input_dir, input_name)
output = os.path.join(output_dir, f"{output_name}.cfg")
print(f"Copy {input} to {output}")
if not dry_run:
shutil.copy(input, output)
def copy_rename_save(save_dict, input_dir, output_dir, dry_run):
for base_decoded_filename, hashed_filenames in save_dict.items():
if len(hashed_filenames) == 1:
copy_file(input_dir, hashed_filenames[0], output_dir, base_decoded_filename, dry_run)
elif 2 <= len(hashed_filenames) < 99:
for idx, hashed_filename in enumerate(hashed_filenames):
copy_file(input_dir, hashed_filename, output_dir, f"{base_decoded_filename}{idx+1:02}", dry_run)
else:
raise ValueError(f"Invalid number of hashed filenames ({len(hashed_filenames)}) for file {base_decoded_filename}")
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Rename Snowrunner save files from Windows Store to Steam convention")
parser.add_argument("--input-csv", help="CSV generated from https://www.save-editor.com/tools/wse_save_file_name_check_for_microsoft_store.html", required=True)
parser.add_argument("--input-save-directory", help="Snowrunner Windows Store savegame directory", required=True)
parser.add_argument("--output-save-directory", help="Snowrunner Steam savegame directory", required=True)
parser.add_argument("--dry-run", action="store_true", default=False, help="Enable dry run, do not copy files")
args = parser.parse_args()
save_dict = load_csv(args.input_csv)
#print(save_dict)
if not os.path.exists(args.input_save_directory) or not os.path.isdir(args.input_save_directory):
parser.error(f"Error accessing {args.input_save_directory}")
if not args.dry_run:
os.makedirs(args.output_save_directory, exist_ok=True)
copy_rename_save(save_dict, args.input_save_directory, args.output_save_directory, args.dry_run)
|
Add automated Snowrunner save file renaming
|
Add automated Snowrunner save file renaming
|
Python
|
mit
|
esabouraud/scripts,esabouraud/scripts
|
Add automated Snowrunner save file renaming
|
"""Automate Snowrunner save file renaming described in https://steamcommunity.com/sharedfiles/filedetails/?id=2530914231"""
import argparse
import csv
import os
import shutil
def load_csv(csv_path):
save_dict = {}
with open(csv_path) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",", quotechar="'")
for row in csv_reader:
if row[0] in save_dict:
save_dict[row[0]].append(row[1])
else:
save_dict[row[0]] = [row[1]]
return save_dict
def copy_file(input_dir, input_name, output_dir, output_name, dry_run):
input = os.path.join(input_dir, input_name)
output = os.path.join(output_dir, f"{output_name}.cfg")
print(f"Copy {input} to {output}")
if not dry_run:
shutil.copy(input, output)
def copy_rename_save(save_dict, input_dir, output_dir, dry_run):
for base_decoded_filename, hashed_filenames in save_dict.items():
if len(hashed_filenames) == 1:
copy_file(input_dir, hashed_filenames[0], output_dir, base_decoded_filename, dry_run)
elif 2 <= len(hashed_filenames) < 99:
for idx, hashed_filename in enumerate(hashed_filenames):
copy_file(input_dir, hashed_filename, output_dir, f"{base_decoded_filename}{idx+1:02}", dry_run)
else:
raise ValueError(f"Invalid number of hashed filenames ({len(hashed_filenames)}) for file {base_decoded_filename}")
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Rename Snowrunner save files from Windows Store to Steam convention")
parser.add_argument("--input-csv", help="CSV generated from https://www.save-editor.com/tools/wse_save_file_name_check_for_microsoft_store.html", required=True)
parser.add_argument("--input-save-directory", help="Snowrunner Windows Store savegame directory", required=True)
parser.add_argument("--output-save-directory", help="Snowrunner Steam savegame directory", required=True)
parser.add_argument("--dry-run", action="store_true", default=False, help="Enable dry run, do not copy files")
args = parser.parse_args()
save_dict = load_csv(args.input_csv)
#print(save_dict)
if not os.path.exists(args.input_save_directory) or not os.path.isdir(args.input_save_directory):
parser.error(f"Error accessing {args.input_save_directory}")
if not args.dry_run:
os.makedirs(args.output_save_directory, exist_ok=True)
copy_rename_save(save_dict, args.input_save_directory, args.output_save_directory, args.dry_run)
|
<commit_before><commit_msg>Add automated Snowrunner save file renaming<commit_after>
|
"""Automate Snowrunner save file renaming described in https://steamcommunity.com/sharedfiles/filedetails/?id=2530914231"""
import argparse
import csv
import os
import shutil
def load_csv(csv_path):
save_dict = {}
with open(csv_path) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",", quotechar="'")
for row in csv_reader:
if row[0] in save_dict:
save_dict[row[0]].append(row[1])
else:
save_dict[row[0]] = [row[1]]
return save_dict
def copy_file(input_dir, input_name, output_dir, output_name, dry_run):
input = os.path.join(input_dir, input_name)
output = os.path.join(output_dir, f"{output_name}.cfg")
print(f"Copy {input} to {output}")
if not dry_run:
shutil.copy(input, output)
def copy_rename_save(save_dict, input_dir, output_dir, dry_run):
for base_decoded_filename, hashed_filenames in save_dict.items():
if len(hashed_filenames) == 1:
copy_file(input_dir, hashed_filenames[0], output_dir, base_decoded_filename, dry_run)
elif 2 <= len(hashed_filenames) < 99:
for idx, hashed_filename in enumerate(hashed_filenames):
copy_file(input_dir, hashed_filename, output_dir, f"{base_decoded_filename}{idx+1:02}", dry_run)
else:
raise ValueError(f"Invalid number of hashed filenames ({len(hashed_filenames)}) for file {base_decoded_filename}")
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Rename Snowrunner save files from Windows Store to Steam convention")
parser.add_argument("--input-csv", help="CSV generated from https://www.save-editor.com/tools/wse_save_file_name_check_for_microsoft_store.html", required=True)
parser.add_argument("--input-save-directory", help="Snowrunner Windows Store savegame directory", required=True)
parser.add_argument("--output-save-directory", help="Snowrunner Steam savegame directory", required=True)
parser.add_argument("--dry-run", action="store_true", default=False, help="Enable dry run, do not copy files")
args = parser.parse_args()
save_dict = load_csv(args.input_csv)
#print(save_dict)
if not os.path.exists(args.input_save_directory) or not os.path.isdir(args.input_save_directory):
parser.error(f"Error accessing {args.input_save_directory}")
if not args.dry_run:
os.makedirs(args.output_save_directory, exist_ok=True)
copy_rename_save(save_dict, args.input_save_directory, args.output_save_directory, args.dry_run)
|
Add automated Snowrunner save file renaming"""Automate Snowrunner save file renaming described in https://steamcommunity.com/sharedfiles/filedetails/?id=2530914231"""
import argparse
import csv
import os
import shutil
def load_csv(csv_path):
save_dict = {}
with open(csv_path) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",", quotechar="'")
for row in csv_reader:
if row[0] in save_dict:
save_dict[row[0]].append(row[1])
else:
save_dict[row[0]] = [row[1]]
return save_dict
def copy_file(input_dir, input_name, output_dir, output_name, dry_run):
input = os.path.join(input_dir, input_name)
output = os.path.join(output_dir, f"{output_name}.cfg")
print(f"Copy {input} to {output}")
if not dry_run:
shutil.copy(input, output)
def copy_rename_save(save_dict, input_dir, output_dir, dry_run):
for base_decoded_filename, hashed_filenames in save_dict.items():
if len(hashed_filenames) == 1:
copy_file(input_dir, hashed_filenames[0], output_dir, base_decoded_filename, dry_run)
elif 2 <= len(hashed_filenames) < 99:
for idx, hashed_filename in enumerate(hashed_filenames):
copy_file(input_dir, hashed_filename, output_dir, f"{base_decoded_filename}{idx+1:02}", dry_run)
else:
raise ValueError(f"Invalid number of hashed filenames ({len(hashed_filenames)}) for file {base_decoded_filename}")
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Rename Snowrunner save files from Windows Store to Steam convention")
parser.add_argument("--input-csv", help="CSV generated from https://www.save-editor.com/tools/wse_save_file_name_check_for_microsoft_store.html", required=True)
parser.add_argument("--input-save-directory", help="Snowrunner Windows Store savegame directory", required=True)
parser.add_argument("--output-save-directory", help="Snowrunner Steam savegame directory", required=True)
parser.add_argument("--dry-run", action="store_true", default=False, help="Enable dry run, do not copy files")
args = parser.parse_args()
save_dict = load_csv(args.input_csv)
#print(save_dict)
if not os.path.exists(args.input_save_directory) or not os.path.isdir(args.input_save_directory):
parser.error(f"Error accessing {args.input_save_directory}")
if not args.dry_run:
os.makedirs(args.output_save_directory, exist_ok=True)
copy_rename_save(save_dict, args.input_save_directory, args.output_save_directory, args.dry_run)
|
<commit_before><commit_msg>Add automated Snowrunner save file renaming<commit_after>"""Automate Snowrunner save file renaming described in https://steamcommunity.com/sharedfiles/filedetails/?id=2530914231"""
import argparse
import csv
import os
import shutil
def load_csv(csv_path):
save_dict = {}
with open(csv_path) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=",", quotechar="'")
for row in csv_reader:
if row[0] in save_dict:
save_dict[row[0]].append(row[1])
else:
save_dict[row[0]] = [row[1]]
return save_dict
def copy_file(input_dir, input_name, output_dir, output_name, dry_run):
input = os.path.join(input_dir, input_name)
output = os.path.join(output_dir, f"{output_name}.cfg")
print(f"Copy {input} to {output}")
if not dry_run:
shutil.copy(input, output)
def copy_rename_save(save_dict, input_dir, output_dir, dry_run):
for base_decoded_filename, hashed_filenames in save_dict.items():
if len(hashed_filenames) == 1:
copy_file(input_dir, hashed_filenames[0], output_dir, base_decoded_filename, dry_run)
elif 2 <= len(hashed_filenames) < 99:
for idx, hashed_filename in enumerate(hashed_filenames):
copy_file(input_dir, hashed_filename, output_dir, f"{base_decoded_filename}{idx+1:02}", dry_run)
else:
raise ValueError(f"Invalid number of hashed filenames ({len(hashed_filenames)}) for file {base_decoded_filename}")
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Rename Snowrunner save files from Windows Store to Steam convention")
parser.add_argument("--input-csv", help="CSV generated from https://www.save-editor.com/tools/wse_save_file_name_check_for_microsoft_store.html", required=True)
parser.add_argument("--input-save-directory", help="Snowrunner Windows Store savegame directory", required=True)
parser.add_argument("--output-save-directory", help="Snowrunner Steam savegame directory", required=True)
parser.add_argument("--dry-run", action="store_true", default=False, help="Enable dry run, do not copy files")
args = parser.parse_args()
save_dict = load_csv(args.input_csv)
#print(save_dict)
if not os.path.exists(args.input_save_directory) or not os.path.isdir(args.input_save_directory):
parser.error(f"Error accessing {args.input_save_directory}")
if not args.dry_run:
os.makedirs(args.output_save_directory, exist_ok=True)
copy_rename_save(save_dict, args.input_save_directory, args.output_save_directory, args.dry_run)
|
|
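As a usage sketch, copy_rename_save can be exercised with dry_run=True so it only prints the planned copies. The decoded names and hashed filenames below are invented, not real Snowrunner saves.

# One decoded name mapped to a single hashed file, another mapped to two,
# which triggers the numbered-suffix branch.
save_dict = {
    "CompleteSave": ["a1b2c3d4"],               # -> CompleteSave.cfg
    "CommonSslSave": ["e5f6a7b8", "c9d0e1f2"],  # -> CommonSslSave01.cfg, CommonSslSave02.cfg
}
copy_rename_save(save_dict, "winstore_saves", "steam_saves", dry_run=True)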
3468794db82e020bc40c54a9399269169fbb37a8
|
CodeFights/concatenateArrays.py
|
CodeFights/concatenateArrays.py
|
#!/usr/local/bin/python
# Code Fights Concatenate Arrays Problem
def concatenateArrays(a, b):
return a + b
def main():
tests = [
[[2, 2, 1], [10, 11], [2, 2, 1, 10, 11]],
[[1, 2], [3, 1, 2], [1, 2, 3, 1, 2]],
[[1], [], [1]],
[
[2, 10, 3, 9, 5, 11, 11],
[4, 8, 1, 13, 3, 1, 14],
[2, 10, 3, 9, 5, 11, 11, 4, 8, 1, 13, 3, 1, 14]
],
[
[9, 6, 6, 9, 8, 14],
[3, 2, 2, 5, 3, 11, 12, 9, 7, 7],
[9, 6, 6, 9, 8, 14, 3, 2, 2, 5, 3, 11, 12, 9, 7, 7]
]
]
for t in tests:
res = concatenateArrays(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: concatenateArrays({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: concatenateArrays({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights concatenate arrays problem
|
Solve Code Fights concatenate arrays problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights concatenate arrays problem
|
#!/usr/local/bin/python
# Code Fights Concatenate Arrays Problem
def concatenateArrays(a, b):
return a + b
def main():
tests = [
[[2, 2, 1], [10, 11], [2, 2, 1, 10, 11]],
[[1, 2], [3, 1, 2], [1, 2, 3, 1, 2]],
[[1], [], [1]],
[
[2, 10, 3, 9, 5, 11, 11],
[4, 8, 1, 13, 3, 1, 14],
[2, 10, 3, 9, 5, 11, 11, 4, 8, 1, 13, 3, 1, 14]
],
[
[9, 6, 6, 9, 8, 14],
[3, 2, 2, 5, 3, 11, 12, 9, 7, 7],
[9, 6, 6, 9, 8, 14, 3, 2, 2, 5, 3, 11, 12, 9, 7, 7]
]
]
for t in tests:
res = concatenateArrays(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: concatenateArrays({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: concatenateArrays({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights concatenate arrays problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Concatenate Arrays Problem
def concatenateArrays(a, b):
return a + b
def main():
tests = [
[[2, 2, 1], [10, 11], [2, 2, 1, 10, 11]],
[[1, 2], [3, 1, 2], [1, 2, 3, 1, 2]],
[[1], [], [1]],
[
[2, 10, 3, 9, 5, 11, 11],
[4, 8, 1, 13, 3, 1, 14],
[2, 10, 3, 9, 5, 11, 11, 4, 8, 1, 13, 3, 1, 14]
],
[
[9, 6, 6, 9, 8, 14],
[3, 2, 2, 5, 3, 11, 12, 9, 7, 7],
[9, 6, 6, 9, 8, 14, 3, 2, 2, 5, 3, 11, 12, 9, 7, 7]
]
]
for t in tests:
res = concatenateArrays(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: concatenateArrays({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: concatenateArrays({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights concatenate arrays problem#!/usr/local/bin/python
# Code Fights Concatenate Arrays Problem
def concatenateArrays(a, b):
return a + b
def main():
tests = [
[[2, 2, 1], [10, 11], [2, 2, 1, 10, 11]],
[[1, 2], [3, 1, 2], [1, 2, 3, 1, 2]],
[[1], [], [1]],
[
[2, 10, 3, 9, 5, 11, 11],
[4, 8, 1, 13, 3, 1, 14],
[2, 10, 3, 9, 5, 11, 11, 4, 8, 1, 13, 3, 1, 14]
],
[
[9, 6, 6, 9, 8, 14],
[3, 2, 2, 5, 3, 11, 12, 9, 7, 7],
[9, 6, 6, 9, 8, 14, 3, 2, 2, 5, 3, 11, 12, 9, 7, 7]
]
]
for t in tests:
res = concatenateArrays(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: concatenateArrays({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: concatenateArrays({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights concatenate arrays problem<commit_after>#!/usr/local/bin/python
# Code Fights Concatenate Arrays Problem
def concatenateArrays(a, b):
return a + b
def main():
tests = [
[[2, 2, 1], [10, 11], [2, 2, 1, 10, 11]],
[[1, 2], [3, 1, 2], [1, 2, 3, 1, 2]],
[[1], [], [1]],
[
[2, 10, 3, 9, 5, 11, 11],
[4, 8, 1, 13, 3, 1, 14],
[2, 10, 3, 9, 5, 11, 11, 4, 8, 1, 13, 3, 1, 14]
],
[
[9, 6, 6, 9, 8, 14],
[3, 2, 2, 5, 3, 11, 12, 9, 7, 7],
[9, 6, 6, 9, 8, 14, 3, 2, 2, 5, 3, 11, 12, 9, 7, 7]
]
]
for t in tests:
res = concatenateArrays(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: concatenateArrays({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: concatenateArrays({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
|
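For comparison, a + b builds a new list eagerly; itertools.chain yields the same elements lazily, which matters only when the concatenation is merely iterated rather than stored.

import itertools
a, b = [2, 2, 1], [10, 11]
assert a + b == list(itertools.chain(a, b))  # same elements, no intermediate list until list() runs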
9116776bd62e6b7ae7a018fa2c2c0b3964c3fa7d
|
py/maximum-binary-tree.py
|
py/maximum-binary-tree.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findMax(self, start, end):
bit_length = (end - start).bit_length() - 1
d = 1 << bit_length
return max(self.SparseTable[bit_length][start], self.SparseTable[bit_length][end - d])
def do_constructMaximumBinaryTree(self, start, end):
if start == end:
return None
v, i = self.findMax(start, end)
ret = TreeNode(v)
ret.left = self.do_constructMaximumBinaryTree(start, i)
ret.right = self.do_constructMaximumBinaryTree(i + 1, end)
return ret
def constructMaximumBinaryTree(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
self.SparseTable = [[(v, i) for i, v in enumerate(nums)]]
l = len(nums)
t = 1
while t * 2 <= l:  # <= so a full power-of-two range has a matching table level
prevTable = self.SparseTable[-1]
self.SparseTable.append([max(prevTable[i], prevTable[i + t]) for i in xrange(l - t * 2 + 1)])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)
|
Add py solution for 654. Maximum Binary Tree
|
Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findMax(self, start, end):
bit_length = (end - start).bit_length() - 1
d = 1 << bit_length
return max(self.SparseTable[bit_length][start], self.SparseTable[bit_length][end - d])
def do_constructMaximumBinaryTree(self, start, end):
if start == end:
return None
v, i = self.findMax(start, end)
ret = TreeNode(v)
ret.left = self.do_constructMaximumBinaryTree(start, i)
ret.right = self.do_constructMaximumBinaryTree(i + 1, end)
return ret
def constructMaximumBinaryTree(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
self.SparseTable = [[(v, i) for i, v in enumerate(nums)]]
l = len(nums)
t = 1
while t * 2 <= l:  # <= so a full power-of-two range has a matching table level
prevTable = self.SparseTable[-1]
self.SparseTable.append([max(prevTable[i], prevTable[i + t]) for i in xrange(l - t * 2 + 1)])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)
|
<commit_before><commit_msg>Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findMax(self, start, end):
bit_length = (end - start).bit_length() - 1
d = 1 << bit_length
return max(self.SparseTable[bit_length][start], self.SparseTable[bit_length][end - d])
def do_constructMaximumBinaryTree(self, start, end):
if start == end:
return None
v, i = self.findMax(start, end)
ret = TreeNode(v)
ret.left = self.do_constructMaximumBinaryTree(start, i)
ret.right = self.do_constructMaximumBinaryTree(i + 1, end)
return ret
def constructMaximumBinaryTree(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
self.SparseTable = [[(v, i) for i, v in enumerate(nums)]]
l = len(nums)
t = 1
while t * 2 <= l:  # <= so a full power-of-two range has a matching table level
prevTable = self.SparseTable[-1]
self.SparseTable.append([max(prevTable[i], prevTable[i + t]) for i in xrange(l - t * 2 + 1)])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)
|
Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findMax(self, start, end):
bit_length = (end - start).bit_length() - 1
d = 1 << bit_length
return max(self.SparseTable[bit_length][start], self.SparseTable[bit_length][end - d])
def do_constructMaximumBinaryTree(self, start, end):
if start == end:
return None
v, i = self.findMax(start, end)
ret = TreeNode(v)
ret.left = self.do_constructMaximumBinaryTree(start, i)
ret.right = self.do_constructMaximumBinaryTree(i + 1, end)
return ret
def constructMaximumBinaryTree(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
self.SparseTable = [[(v, i) for i, v in enumerate(nums)]]
l = len(nums)
t = 1
while t * 2 <= l:  # <= so a full power-of-two range has a matching table level
prevTable = self.SparseTable[-1]
self.SparseTable.append([max(prevTable[i], prevTable[i + t]) for i in xrange(l - t * 2 + 1)])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)
|
<commit_before><commit_msg>Add py solution for 654. Maximum Binary Tree
654. Maximum Binary Tree: https://leetcode.com/problems/maximum-binary-tree/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def findMax(self, start, end):
bit_length = (end - start).bit_length() - 1
d = 1 << bit_length
return max(self.SparseTable[bit_length][start], self.SparseTable[bit_length][end - d])
def do_constructMaximumBinaryTree(self, start, end):
if start == end:
return None
v, i = self.findMax(start, end)
ret = TreeNode(v)
ret.left = self.do_constructMaximumBinaryTree(start, i)
ret.right = self.do_constructMaximumBinaryTree(i + 1, end)
return ret
def constructMaximumBinaryTree(self, nums):
"""
:type nums: List[int]
:rtype: TreeNode
"""
self.SparseTable = [[(v, i) for i, v in enumerate(nums)]]
l = len(nums)
t = 1
while t * 2 <= l:  # <= so a full power-of-two range has a matching table level
prevTable = self.SparseTable[-1]
self.SparseTable.append([max(prevTable[i], prevTable[i + t]) for i in xrange(l - t * 2 + 1)])
t *= 2
return self.do_constructMaximumBinaryTree(0, l)
|
|
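With the loop bound corrected to t * 2 <= l above (the strict < left the top table level unbuilt, so the initial full-range query crashed on power-of-two inputs), the sparse table answers each range-max query in O(1) after O(n log n) preprocessing. A quick Python 2 check (the solution uses xrange) against the LeetCode example [3,2,1,6,0,5], with TreeNode filled in from the commented stub:

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = Solution().constructMaximumBinaryTree([3, 2, 1, 6, 0, 5])
assert root.val == 6 and root.left.val == 3 and root.right.val == 5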
e9c851ac5a328ed43c61df29b9a3b6129e08c7c5
|
pylua/tests/test_while.py
|
pylua/tests/test_while.py
|
from .helpers import codetest
class TestWhile(object):
"""
tests for the lua if then else and various comparisons
"""
def test_simple_while(self):
ret = codetest("""
x = 0
while x < 10 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 10
def test_simple_while_false(self):
ret = codetest("""
x = 99
while x < 0 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 99
def test_complex_while(self):
ret = codetest("""
i = 0
x = 0
while i < 10 do
i = i + 1
x = x + 1
j = 5
while j > 0 do
j = j - 1
x = x + 1
end
end
return x
""")
assert ret.returnvalue == 60
|
Implement simple for loop and SUBVN
|
Implement simple for loop and SUBVN
|
Python
|
bsd-3-clause
|
fhahn/luna,fhahn/luna
|
Implement simple for loop and SUBVN
|
from .helpers import codetest
class TestWhile(object):
"""
tests for the lua if then else and various comparisons
"""
def test_simple_while(self):
ret = codetest("""
x = 0
while x < 10 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 10
def test_simple_while_false(self):
ret = codetest("""
x = 99
while x < 0 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 99
def test_complex_while(self):
ret = codetest("""
i = 0
x = 0
while i < 10 do
i = i + 1
x = x + 1
j = 5
while j > 0 do
j = j - 1
x = x + 1
end
end
return x
""")
assert ret.returnvalue == 60
|
<commit_before><commit_msg>Implement simple for loop and SUBVN<commit_after>
|
from .helpers import codetest
class TestWhile(object):
"""
tests for the lua if then else and various comparisons
"""
def test_simple_while(self):
ret = codetest("""
x = 0
while x < 10 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 10
def test_simple_while_false(self):
ret = codetest("""
x = 99
while x < 0 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 99
def test_complex_while(self):
ret = codetest("""
i = 0
x = 0
while i < 10 do
i = i + 1
x = x + 1
j = 5
while j > 0 do
j = j - 1
x = x + 1
end
end
return x
""")
assert ret.returnvalue == 60
|
Implement simple for loop and SUBVNfrom .helpers import codetest
class TestWhile(object):
"""
tests for the lua if then else and various comparisons
"""
def test_simple_while(self):
ret = codetest("""
x = 0
while x < 10 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 10
def test_simple_while_false(self):
ret = codetest("""
x = 99
while x < 0 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 99
def test_complex_while(self):
ret = codetest("""
i = 0
x = 0
while i < 10 do
i = i + 1
x = x + 1
j = 5
while j > 0 do
j = j - 1
x = x + 1
end
end
return x
""")
assert ret.returnvalue == 60
|
<commit_before><commit_msg>Implement simple for loop and SUBVN<commit_after>from .helpers import codetest
class TestWhile(object):
"""
tests for the lua if then else and various comparisons
"""
def test_simple_while(self):
ret = codetest("""
x = 0
while x < 10 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 10
def test_simple_while_false(self):
ret = codetest("""
x = 99
while x < 0 do
x = x + 1
end
return x
""")
assert ret.returnvalue == 99
def test_complex_while(self):
ret = codetest("""
i = 0
x = 0
while i < 10 do
i = i + 1
x = x + 1
j = 5
while j > 0 do
j = j - 1
x = x + 1
end
end
return x
""")
assert ret.returnvalue == 60
|
|
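The commit message also mentions a simple numeric for loop; a test in the same style might look like the sketch below. It is hypothetical (not part of the file shown) and leans on Lua's inclusive for i = 1, 10 semantics.

def test_simple_for(self):
    ret = codetest("""
    x = 0
    for i = 1, 10 do
        x = x + 1
    end
    return x
    """)
    assert ret.returnvalue == 10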
f659f773b7713fab2bb02a4a26eefc9e002145ca
|
test/tests/python-imports/container.py
|
test/tests/python-imports/container.py
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
Add "lzma" to "python-imports" test
|
Add "lzma" to "python-imports" test
|
Python
|
apache-2.0
|
chorrell/official-images,docker-flink/official-images,docker-library/official-images,nodejs-docker-bot/official-images,jperrin/official-images,docker-library/official-images,chorrell/official-images,pesho/docker-official-images,benbc/docker-official-images,mattrobenolt/official-images,docker-flink/official-images,31z4/official-images,jperrin/official-images,docker-flink/official-images,pesho/docker-official-images,docker-solr/official-images,thresheek/official-images,chorrell/official-images,jperrin/official-images,docker-flink/official-images,pesho/docker-official-images,pesho/docker-official-images,docker-solr/official-images,docker-library/official-images,infosiftr/stackbrew,thresheek/official-images,pesho/docker-official-images,neo-technology/docker-official-images,neo-technology/docker-official-images,benbc/docker-official-images,docker-solr/official-images,docker-library/official-images,mattrobenolt/official-images,infosiftr/stackbrew,infosiftr/stackbrew,infosiftr/stackbrew,docker-solr/official-images,docker-flink/official-images,mattrobenolt/official-images,dinogun/official-images,dinogun/official-images,nodejs-docker-bot/official-images,docker-flink/official-images,chorrell/official-images,thresheek/official-images,pesho/docker-official-images,thresheek/official-images,neo-technology/docker-official-images,dinogun/official-images,jperrin/official-images,benbc/docker-official-images,docker-library/official-images,pesho/docker-official-images,robfrank/official-images,davidl-zend/official-images,nodejs-docker-bot/official-images,chorrell/official-images,docker-flink/official-images,neo-technology/docker-official-images,31z4/official-images,31z4/official-images,davidl-zend/official-images,mattrobenolt/official-images,nodejs-docker-bot/official-images,docker-library/official-images,mattrobenolt/official-images,infosiftr/stackbrew,docker-solr/official-images,robfrank/official-images,thresheek/official-images,docker-solr/official-images,davidl-zend/official-images,dinogun/official-images,neo-technology/docker-official-images,robfrank/official-images,davidl-zend/official-images,emilevauge/official-images,docker-flink/official-images,mattrobenolt/official-images,dinogun/official-images,davidl-zend/official-images,docker-library/official-images,emilevauge/official-images,31z4/official-images,nodejs-docker-bot/official-images,robfrank/official-images,pesho/docker-official-images,dinogun/official-images,31z4/official-images,docker-library/official-images,emilevauge/official-images,docker-flink/official-images,31z4/official-images,docker-solr/official-images,davidl-zend/official-images,docker-solr/official-images,neo-technology/docker-official-images,dinogun/official-images,nodejs-docker-bot/official-images,docker-library/official-images,thresheek/official-images,jperrin/official-images,jperrin/official-images,benbc/docker-official-images,dinogun/official-images,jperrin/official-images,docker-library/official-images,infosiftr/stackbrew,neo-technology/docker-official-images,emilevauge/official-images,jperrin/official-images,mattrobenolt/official-images,emilevauge/official-images,emilevauge/official-images,chorrell/official-images,dinogun/official-images,mattrobenolt/official-images,dinogun/official-images,31z4/official-images,chorrell/official-images,mattrobenolt/official-images,neo-technology/docker-official-images,nodejs-docker-bot/official-images,benbc/docker-official-images,neo-technology/docker-official-images,docker-library/official-images,nodejs-docker-bot/official-images,jperrin/official-images,chorrell/official-images,pesho/docker-official-images,docker-flink/official-images,thresheek/official-images,thresheek/official-images,chorrell/official-images,nodejs-docker-bot/official-images,mattrobenolt/official-images,davidl-zend/official-images,docker-library/official-images,robfrank/official-images,dinogun/official-images,thresheek/official-images,emilevauge/official-images,infosiftr/stackbrew,pesho/docker-official-images,robfrank/official-images,nodejs-docker-bot/official-images,neo-technology/docker-official-images,thresheek/official-images,robfrank/official-images,chorrell/official-images,docker-flink/official-images,docker-library/official-images,davidl-zend/official-images,docker-solr/official-images,thresheek/official-images,neo-technology/docker-official-images,chorrell/official-images,mattrobenolt/official-images,docker-solr/official-images,neo-technology/docker-official-images,robfrank/official-images,docker-solr/official-images,infosiftr/stackbrew,infosiftr/stackbrew,benbc/docker-official-images,infosiftr/stackbrew,docker-library/official-images,robfrank/official-images,emilevauge/official-images,thresheek/official-images,jperrin/official-images,neo-technology/docker-official-images,emilevauge/official-images,infosiftr/stackbrew,emilevauge/official-images,jperrin/official-images,thresheek/official-images,davidl-zend/official-images,benbc/docker-official-images,neo-technology/docker-official-images,robfrank/official-images,chorrell/official-images,jperrin/official-images,infosiftr/stackbrew,docker-flink/official-images,31z4/official-images,docker-flink/official-images,docker-solr/official-images,31z4/official-images,mattrobenolt/official-images,robfrank/official-images,davidl-zend/official-images,31z4/official-images,31z4/official-images,davidl-zend/official-images,31z4/official-images,benbc/docker-official-images,emilevauge/official-images,dinogun/official-images,31z4/official-images,thresheek/official-images,jperrin/official-images,davidl-zend/official-images,docker-solr/official-images,robfrank/official-images,benbc/docker-official-images,mattrobenolt/official-images,pesho/docker-official-images
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
Add "lzma" to "python-imports" test
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
<commit_before>import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
<commit_msg>Add "lzma" to "python-imports" test<commit_after>
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
Add "lzma" to "python-imports" testimport curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
<commit_before>import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
<commit_msg>Add "lzma" to "python-imports" test<commit_after>import curses
import readline
import bz2
assert(bz2.decompress(bz2.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import platform
if platform.python_implementation() != 'PyPy' and platform.python_version_tuple()[0] != '2':
# PyPy and Python 2 don't support lzma
import lzma
assert(lzma.decompress(lzma.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
import zlib
assert(zlib.decompress(zlib.compress(b'IT WORKS IT WORKS IT WORKS')) == b'IT WORKS IT WORKS IT WORKS')
|
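The asserts in the record above double as a smoke test: each compression module must both import and round-trip a payload. A minimal generalization of that pattern, sketched here (the helper name and module list are illustrative, not part of the commit):

import importlib

def roundtrip_ok(module_name, payload=b'IT WORKS IT WORKS IT WORKS'):
    # True only if the module imports and compress/decompress round-trips the payload.
    try:
        mod = importlib.import_module(module_name)
    except ImportError:
        return False
    return mod.decompress(mod.compress(payload)) == payload

for name in ('bz2', 'zlib', 'lzma'):
    print(name, 'ok' if roundtrip_ok(name) else 'unavailable')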
4308a409ea970a59be5965cce2617c7cecf8b5b6
|
src/player.py
|
src/player.py
|
# card-fight-thingy - Simplistic battle card game... thingy
#
# The MIT License (MIT)
#
# Copyright (c) 2015 The Underscores
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Player():
def __init__(self):
self.health = 100
self.defense = 0
# TODO : Implement card class
# self.cards = Card.generateDeck()
def takeDamage(self, damage):
if self.defense > 0:
if damage > self.defense:
# Defense won't eat all damage
damage -= self.defense
self.defense = 0
else:
# Defense will eat all damage
self.defense -= damage
return
self.health -= damage
|
Add beginnings of Player class implementation
|
Add beginnings of Player class implementation
|
Python
|
mit
|
TheUnderscores/card-fight-thingy
|
Add beginnings of Player class implementation
|
# card-fight-thingy - Simplistic battle card game... thingy
#
# The MIT License (MIT)
#
# Copyright (c) 2015 The Underscores
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Player():
def __init__(self):
self.health = 100
self.defense = 0
# TODO : Implement card class
# self.cards = Card.generateDeck()
def takeDamage(self, damage):
if self.defense > 0:
if damage > self.defense:
# Defense won't eat all damage
damage -= self.defense
self.defense = 0
else:
# Defense will eat all damage
self.defense -= damage
return
self.health -= damage
|
<commit_before><commit_msg>Add beginnings of Player class implementation<commit_after>
|
# card-fight-thingy - Simplistic battle card game... thingy
#
# The MIT License (MIT)
#
# Copyright (c) 2015 The Underscores
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Player():
def __init__(self):
self.health = 100
self.defense = 0
# TODO : Implement card class
# self.cards = Card.generateDeck()
def takeDamage(self, damage):
if self.defense > 0:
if damage > self.defense:
# Defense won't eat all damage
damage -= self.defense
self.defense = 0
else:
# Defense will eat all damage
self.defense -= damage
return
self.health -= damage
|
Add beginnings of Player class implementation# card-fight-thingy - Simplistic battle card game... thingy
#
# The MIT License (MIT)
#
# Copyright (c) 2015 The Underscores
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Player():
def __init__(self):
self.health = 100
self.defense = 0
# TODO : Implement card class
# self.cards = Card.generateDeck()
def takeDamage(self, damage):
if self.defense > 0:
if damage > self.defense:
# Defense won't eat all damage
damage -= self.defense
self.defense = 0
else:
# Defense will eat all damage
self.defense -= damage
return
self.health -= damage
|
<commit_before><commit_msg>Add beginnings of Player class implementation<commit_after># card-fight-thingy - Simplistic battle card game... thingy
#
# The MIT License (MIT)
#
# Copyright (c) 2015 The Underscores
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
class Player():
def __init__(self):
self.health = 100
self.defense = 0
# TODO : Implement card class
# self.cards = Card.generateDeck()
def takeDamage(self, damage):
if self.defense > 0:
if damage > self.defense:
# Defense won't eat all damage
damage -= self.defense
self.defense = 0
else:
# Defense will eat all damage
self.defense -= damage
return
self.health -= damage
|
|
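For context on the record above, a short exercise of the two takeDamage branches, with hypothetical values (not from the repository):

p = Player()
p.defense = 30
p.takeDamage(10)   # damage <= defense: defense 30 -> 20, health unchanged
assert p.defense == 20 and p.health == 100
p.takeDamage(50)   # damage > defense: defense eats 20, remaining 30 hits health
assert p.defense == 0 and p.health == 70
p.takeDamage(5)    # no defense left, damage goes straight to health
assert p.health == 65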
5ed3bc5627b8dc3643f6a4d0d6ee782b701bdfd1
|
InterleavePdf.py
|
InterleavePdf.py
|
import PyPDF2
from formlayout import fedit
paths = [('Input', ''), ('Output', '')]
pathsRead = fedit(paths,
title="Interleave pdf",
comment="Enter the full path to the source pdf and a path to output the result."
)
# Full path to files should be specified, e.g. C:\Users\Sam\Documents\Input.pdf and C:\Users\Sam\Documents\Output.pdf
document = PyPDF2.PdfFileReader(pathsRead[0])
writer = PyPDF2.PdfFileWriter()
for page in document.pages:
writer.addPage(page)
writer.addBlankPage()
outputStream = open(pathsRead[1], 'wb')
writer.write(outputStream)
outputStream.close()
|
Add source file to repository.
|
Add source file to repository.
|
Python
|
mit
|
sproberts92/interleave-pdf
|
Add source file to repository.
|
import PyPDF2
from formlayout import fedit
paths = [('Input', ''), ('Output', '')]
pathsRead = fedit(paths,
title="Interleave pdf",
comment="Enter the full path to the source pdf and a path to output the result."
)
# Full path to files should be specified, e.g. C:\Users\Sam\Documents\Input.pdf and C:\Users\Sam\Documents\Output.pdf
document = PyPDF2.PdfFileReader(pathsRead[0])
writer = PyPDF2.PdfFileWriter()
for page in document.pages:
writer.addPage(page)
writer.addBlankPage()
outputStream = open(pathsRead[1], 'wb')
writer.write(outputStream)
outputStream.close()
|
<commit_before><commit_msg>Add source file to repository.<commit_after>
|
import PyPDF2
from formlayout import fedit
paths = [('Input', ''), ('Output', '')]
pathsRead = fedit(paths,
title="Interleave pdf",
comment="Enter the full path to the source pdf and a path to output the result."
)
# Full path to files should be specified, e.g. C:\Users\Sam\Documents\Input.pdf and C:\Users\Sam\Documents\Output.pdf
document = PyPDF2.PdfFileReader(pathsRead[0])
writer = PyPDF2.PdfFileWriter()
for page in document.pages:
writer.addPage(page)
writer.addBlankPage()
outputStream = open(pathsRead[1], 'wb')
writer.write(outputStream)
outputStream.close()
|
Add source file to repository.import PyPDF2
from formlayout import fedit
paths = [('Input', ''), ('Output', '')]
pathsRead = fedit(paths,
title="Interleave pdf",
comment="Enter the full path to the source pdf and a path to output the result."
)
# Full path to files should be specified, e.g. C:\Users\Sam\Documents\Input.pdf and C:\Users\Sam\Documents\Output.pdf
document = PyPDF2.PdfFileReader(pathsRead[0])
writer = PyPDF2.PdfFileWriter()
for page in document.pages:
writer.addPage(page)
writer.addBlankPage()
outputStream = open(pathsRead[1], 'wb')
writer.write(outputStream)
outputStream.close()
|
<commit_before><commit_msg>Add source file to repository.<commit_after>import PyPDF2
from formlayout import fedit
paths = [('Input', ''), ('Output', '')]
pathsRead = fedit(paths,
title="Interleave pdf",
comment="Enter the full path to the source pdf and a path to output the result."
)
# Full path to files should be specified, e.g. C:\Users\Sam\Documents\Input.pdf and C:\Users\Sam\Documents\Output.pdf
document = PyPDF2.PdfFileReader(pathsRead[0])
writer = PyPDF2.PdfFileWriter()
for page in document.pages:
writer.addPage(page)
writer.addBlankPage()
outputStream = open(pathsRead[1], 'wb')
writer.write(outputStream)
outputStream.close()
|
|
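The script in the record above targets the PyPDF2 1.x names (PdfFileReader, PdfFileWriter, addPage); later PyPDF2/pypdf releases renamed these, so the dependency needs to be pinned or the calls adjusted. A sketch of the same interleave loop with the output stream handled by a context manager, assuming the 1.x API the commit uses:

import PyPDF2

def interleave(in_path, out_path):
    # Append a blank page after every page of the source document.
    reader = PyPDF2.PdfFileReader(in_path)
    writer = PyPDF2.PdfFileWriter()
    for page in reader.pages:
        writer.addPage(page)
        writer.addBlankPage()
    with open(out_path, 'wb') as out_stream:
        writer.write(out_stream)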
b373bfeaaf9017b9c16b21adb495155e0345a05a
|
tests/testrepos/FoodRelations/combineCooccurrences.py
|
tests/testrepos/FoodRelations/combineCooccurrences.py
|
import argparse
import os
import codecs
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merges cooccurrence files down to a single file')
parser.add_argument('--inDir',type=str,required=True,help='Directory containing cooccurrence files. Expected to be tab-delimited with three columns (IDs in the first two columns and the cooccurrence count in the third)')
parser.add_argument('--outFile',type=str,required=True,help='File to output combined cooccurrence data')
args = parser.parse_args()
assert os.path.isdir(args.inDir)
counter = Counter()
for filename in os.listdir(args.inDir):
fullpath = os.path.join(args.inDir,filename)
with codecs.open(fullpath,'r','utf-8') as f:
for line in f:
a,b,count = line.strip().split('\t')
counter[(a,b)] += int(count)
keys = sorted(counter.keys())
with codecs.open(args.outFile,'w','utf-8') as outF:
for a,b in keys:
count = counter[(a,b)]
outF.write("%s\t%s\t%d\n" % (a,b,count))
print ("Complete.")
|
Add combine cooccurrence script for Food project
|
Add combine cooccurrence script for Food project
|
Python
|
mit
|
jakelever/pubrunner,jakelever/pubrunner
|
Add combine cooccurrence script for Food project
|
import argparse
import os
import codecs
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merges cooccurrence files down to a single file')
parser.add_argument('--inDir',type=str,required=True,help='Directory containing cooccurrence files. Expected to be tab-delimited with three columns (IDs in the first two columns and the cooccurrence count in the third)')
parser.add_argument('--outFile',type=str,required=True,help='File to output combined cooccurrence data')
args = parser.parse_args()
assert os.path.isdir(args.inDir)
counter = Counter()
for filename in os.listdir(args.inDir):
fullpath = os.path.join(args.inDir,filename)
with codecs.open(fullpath,'r','utf-8') as f:
for line in f:
a,b,count = line.strip().split('\t')
counter[(a,b)] += int(count)
keys = sorted(counter.keys())
with codecs.open(args.outFile,'w','utf-8') as outF:
for a,b in keys:
count = counter[(a,b)]
outF.write("%s\t%s\t%d\n" % (a,b,count))
print ("Complete.")
|
<commit_before><commit_msg>Add combine cooccurrence script for Food project<commit_after>
|
import argparse
import os
import codecs
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merges cooccurrence files down to a single file')
parser.add_argument('--inDir',type=str,required=True,help='Directory containing cooccurrence files. Expected to be tab-delimited with three columns (IDs in the first two columns and the cooccurrence count in the third)')
parser.add_argument('--outFile',type=str,required=True,help='File to output combined cooccurrence data')
args = parser.parse_args()
assert os.path.isdir(args.inDir)
counter = Counter()
for filename in os.listdir(args.inDir):
fullpath = os.path.join(args.inDir,filename)
with codecs.open(fullpath,'r','utf-8') as f:
for line in f:
a,b,count = line.strip().split('\t')
counter[(a,b)] += int(count)
keys = sorted(counter.keys())
with codecs.open(args.outFile,'w','utf-8') as outF:
for a,b in keys:
count = counter[(a,b)]
outF.write("%s\t%s\t%d\n" % (a,b,count))
print ("Complete.")
|
Add combine cooccurrence script for Food projectimport argparse
import os
import codecs
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merges cooccurrence files down to a single file')
parser.add_argument('--inDir',type=str,required=True,help='Directory containing cooccurrence files. Expected to be tab-delimited with three columns (IDs in the first two columns and the cooccurrence count in the third)')
parser.add_argument('--outFile',type=str,required=True,help='File to output combined cooccurrence data')
args = parser.parse_args()
assert os.path.isdir(args.inDir)
counter = Counter()
for filename in os.listdir(args.inDir):
fullpath = os.path.join(args.inDir,filename)
with codecs.open(fullpath,'r','utf-8') as f:
for line in f:
a,b,count = line.strip().split('\t')
counter[(a,b)] += int(count)
keys = sorted(counter.keys())
with codecs.open(args.outFile,'w','utf-8') as outF:
for a,b in keys:
count = counter[(a,b)]
outF.write("%s\t%s\t%d\n" % (a,b,count))
print ("Complete.")
|
<commit_before><commit_msg>Add combine cooccurrence script for Food project<commit_after>import argparse
import os
import codecs
from collections import Counter
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merges cooccurrence files down to a single file')
parser.add_argument('--inDir',type=str,required=True,help='Directory containing cooccurrence files. Expected to be tab-delimited with three columns (IDs in the first two columns and the cooccurrence count in the third)')
parser.add_argument('--outFile',type=str,required=True,help='File to output combined cooccurrence data')
args = parser.parse_args()
assert os.path.isdir(args.inDir)
counter = Counter()
for filename in os.listdir(args.inDir):
fullpath = os.path.join(args.inDir,filename)
with codecs.open(fullpath,'r','utf-8') as f:
for line in f:
a,b,count = line.strip().split('\t')
counter[(a,b)] += int(count)
keys = sorted(counter.keys())
with codecs.open(args.outFile,'w','utf-8') as outF:
for a,b in keys:
count = counter[(a,b)]
outF.write("%s\t%s\t%d\n" % (a,b,count))
print ("Complete.")
|
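The merge in the record above rests on Counter summing counts per (a, b) key across files. A tiny illustration of that aggregation with made-up pairs (hypothetical data, not from the project):

from collections import Counter

counter = Counter()
rows = [('apple', 'pie', 2), ('salt', 'pepper', 1), ('apple', 'pie', 3)]
for a, b, count in rows:
    counter[(a, b)] += count
for a, b in sorted(counter):
    print("%s\t%s\t%d" % (a, b, counter[(a, b)]))
# Output (tab-delimited): apple pie 5, then salt pepper 1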