| Column | Type |
|---|---|
| commit | string (40 characters) |
| old_file | string (4–118 characters) |
| new_file | string (4–118 characters) |
| old_contents | string (0–2.94k characters) |
| new_contents | string (1–4.43k characters) |
| subject | string (15–444 characters) |
| message | string (16–3.45k characters) |
| lang | string (1 distinct value) |
| license | string (13 distinct values) |
| repos | string (5–43.2k characters) |
| prompt | string (17–4.58k characters) |
| response | string (1–4.43k characters) |
| prompt_tagged | string (58–4.62k characters) |
| response_tagged | string (1–4.43k characters) |
| text | string (132–7.29k characters) |
| text_tagged | string (173–7.33k characters) |
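A minimal sketch of loading this dataset and inspecting the columns above with the Hugging Face `datasets` library; the dataset path used here is a placeholder, not the dataset's actual identifier:

```python
# Minimal loading sketch; "org/commit-dataset" is a placeholder path,
# not the real identifier of this dataset.
from datasets import load_dataset

ds = load_dataset("org/commit-dataset", split="train")

row = ds[0]
print(row["commit"])              # 40-character commit hash
print(row["new_file"])            # path of the file touched by the commit
print(row["subject"])             # first line of the commit message
print(row["new_contents"][:200])  # file contents after the commit
```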
51fff51aaaa5139b8cf4cc5b79dd22f568f47a90
|
dynd/tests/test_numpy_compat.py
|
dynd/tests/test_numpy_compat.py
|
import sys
import unittest
from dynd import nd, ndt

class TestNumpyCompat(unittest.TestCase):
    """
    Tests to validate interface intended to provide compatibility with the
    NumPy interface.
    """
    def setUp(self):
        if sys.byteorder == 'little':
            self.nonnative = '>'
        else:
            self.nonnative = '<'

    def test_array_attributes(self):
        a = nd.zeros(3, 5, ndt.int32)
        # Property "ndim"
        self.assertEqual(a.ndim, 2)
        # Property "shape"
        self.assertEqual(a.shape, (3, 5))
        # Property "strides"
        self.assertEqual(a.strides, (20, 4))
        # Property "dtype"
        self.assertEqual(a.dtype, ndt.int32)

if __name__ == '__main__':
    unittest.main()
|
Add a test for validating numpy compatibility choices
|
Add a test for validating numpy compatibility choices
|
Python
|
bsd-2-clause
|
ContinuumIO/dynd-python,cpcloud/dynd-python,insertinterestingnamehere/dynd-python,michaelpacer/dynd-python,insertinterestingnamehere/dynd-python,cpcloud/dynd-python,ContinuumIO/dynd-python,michaelpacer/dynd-python,pombredanne/dynd-python,cpcloud/dynd-python,mwiebe/dynd-python,ContinuumIO/dynd-python,pombredanne/dynd-python,ContinuumIO/dynd-python,cpcloud/dynd-python,pombredanne/dynd-python,mwiebe/dynd-python,izaid/dynd-python,mwiebe/dynd-python,izaid/dynd-python,pombredanne/dynd-python,izaid/dynd-python,michaelpacer/dynd-python,insertinterestingnamehere/dynd-python,insertinterestingnamehere/dynd-python,mwiebe/dynd-python,izaid/dynd-python,michaelpacer/dynd-python
|
Add a test for validating numpy compatibility choices
|
import sys
import unittest
from dynd import nd, ndt
class TestNumpyCompat(unittest.TestCase):
"""
Tests to validate interface intended to provide compatibility with the
NumPy interface.
"""
def setUp(self):
if sys.byteorder == 'little':
self.nonnative = '>'
else:
self.nonnative = '<'
def test_array_attributes(self):
a = nd.zeros(3, 5, ndt.int32)
# Property "ndim"
self.assertEqual(a.ndim, 2)
# Property "shape"
self.assertEqual(a.shape, (3, 5))
# Property "strides"
self.assertEqual(a.strides, (20, 4))
# Property "dtype"
self.assertEqual(a.dtype, ndt.int32)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for validating numpy compatibility choices<commit_after>
|
import sys
import unittest
from dynd import nd, ndt
class TestNumpyCompat(unittest.TestCase):
"""
Tests to validate interface intended to provide compatibility with the
NumPy interface.
"""
def setUp(self):
if sys.byteorder == 'little':
self.nonnative = '>'
else:
self.nonnative = '<'
def test_array_attributes(self):
a = nd.zeros(3, 5, ndt.int32)
# Property "ndim"
self.assertEqual(a.ndim, 2)
# Property "shape"
self.assertEqual(a.shape, (3, 5))
# Property "strides"
self.assertEqual(a.strides, (20, 4))
# Property "dtype"
self.assertEqual(a.dtype, ndt.int32)
if __name__ == '__main__':
unittest.main()
|
Add a test for validating numpy compatibility choicesimport sys
import unittest
from dynd import nd, ndt
class TestNumpyCompat(unittest.TestCase):
"""
Tests to validate interface intended to provide compatibility with the
NumPy interface.
"""
def setUp(self):
if sys.byteorder == 'little':
self.nonnative = '>'
else:
self.nonnative = '<'
def test_array_attributes(self):
a = nd.zeros(3, 5, ndt.int32)
# Property "ndim"
self.assertEqual(a.ndim, 2)
# Property "shape"
self.assertEqual(a.shape, (3, 5))
# Property "strides"
self.assertEqual(a.strides, (20, 4))
# Property "dtype"
self.assertEqual(a.dtype, ndt.int32)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for validating numpy compatibility choices<commit_after>import sys
import unittest
from dynd import nd, ndt
class TestNumpyCompat(unittest.TestCase):
"""
Tests to validate interface intended to provide compatibility with the
NumPy interface.
"""
def setUp(self):
if sys.byteorder == 'little':
self.nonnative = '>'
else:
self.nonnative = '<'
def test_array_attributes(self):
a = nd.zeros(3, 5, ndt.int32)
# Property "ndim"
self.assertEqual(a.ndim, 2)
# Property "shape"
self.assertEqual(a.shape, (3, 5))
# Property "strides"
self.assertEqual(a.strides, (20, 4))
# Property "dtype"
self.assertEqual(a.dtype, ndt.int32)
if __name__ == '__main__':
unittest.main()
|
|
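Judging from the row above, the prompt_tagged, response_tagged, and text_tagged fields wrap the commit message and file contents in <commit_before>, <commit_msg>, and <commit_after> markers, with text_tagged concatenating the tagged prompt and the new file contents. A sketch of how such a string could be assembled from the raw columns (the helper name and exact concatenation are inferred from this row, not a documented specification):

```python
# Sketch of how a tagged training example appears to be assembled from the
# raw columns; inferred from the row above, not a documented format.
def build_text_tagged(old_contents, message, new_contents):
    return (
        "<commit_before>" + old_contents
        + "<commit_msg>" + message
        + "<commit_after>" + new_contents
    )

example = build_text_tagged(
    old_contents="",  # empty for newly added files, as in the row above
    message="Add a test for validating numpy compatibility choices",
    new_contents="import sys\nimport unittest\n# ...",
)
print(example[:60])
```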
3d2698aa524d8b2ee617b6adcd6b346cbf71f071
|
test/buildbot/ensure_webcam_is_running.py
|
test/buildbot/ensure_webcam_is_running.py
|
#!/usr/bin/env python
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

"""Checks if a virtual webcam is running and starts it if not.

Returns a non-zero return code if the webcam could not be started.

Prerequisites:
* Python must have the psutil package installed.
* Windows: a scheduled task named 'ManyCam' must exist and be configured to
  launch ManyCam preconfigured to auto-play the test clip.
* Mac: ManyCam must be installed in the default location and be preconfigured
  to auto-play the test clip.
* Linux: The v4l2loopback must be loaded to the kernel already (with the
  devices=2 argument) and the v4l2_file_player application must be compiled and
  put in the location specified below.
"""

import psutil
import subprocess
import sys

WEBCAM_WIN = ['schtasks', '/run', '/tn', 'ManyCam']
WEBCAM_MAC = ['open', '/Applications/ManyCam/ManyCam.app']
WEBCAM_LINUX = (
    '$HOME/fake-webcam-driver/linux/v4l2_file_player/v4l2_file_player '
    '$HOME/webrtc_video_quality/reference_video.yuv 640 480 /dev/video1 &')


def IsWebCamRunning():
  if sys.platform == 'win32':
    process_name = 'ManyCam.exe'
  elif sys.platform.startswith('darwin'):
    process_name = 'ManyCam'
  elif sys.platform.startswith('linux'):
    process_name = 'v4l2_file_player'
  else:
    raise Exception('Unsupported platform: %s' % sys.platform)
  for p in psutil.get_process_list():
    if process_name == p.name:
      print 'Found a running virtual webcam (%s with PID %s)' % (p.name, p.pid)
      return True
  return False


def Main():
  if IsWebCamRunning():
    return 0
  try:
    if sys.platform == 'win32':
      subprocess.check_call(WEBCAM_WIN)
    elif sys.platform.startswith('darwin'):
      subprocess.check_call(WEBCAM_MAC)
    elif sys.platform.startswith('linux'):
      subprocess.check_call(WEBCAM_LINUX, shell=True)
    print 'Successfully launched virtual webcam.'
    return 0
  except Exception as e:
    print 'Failed to launch virtual webcam: %s' % e


if __name__ == '__main__':
  sys.exit(Main())
|
Add script to ensure virtual webcam is running.
|
Add script to ensure virtual webcam is running.
This script will check that a webcam is running and start it if it's
not currently running.
It's tailored to the way our buildbots are currently configured.
TEST=local execution on Windows, Mac and Linux.
BUG=none
R=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/1406005
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 73a4d5ab12efc6035a5c44625e1e772fd6c23ffc
|
Python
|
bsd-3-clause
|
sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc
|
Add script to ensure virtual webcam is running.
This script will check that a webcam is running and start it if it's
not currently running.
It's tailored to the way our buildbots are currently configured.
TEST=local execution on Windows, Mac and Linux.
BUG=none
R=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/1406005
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 73a4d5ab12efc6035a5c44625e1e772fd6c23ffc
|
#!/usr/bin/env python
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Checks if a virtual webcam is running and starts it if not.
Returns a non-zero return code if the webcam could not be started.
Prerequisites:
* Python must have the psutil package installed.
* Windows: a scheduled task named 'ManyCam' must exist and be configured to
launch ManyCam preconfigured to auto-play the test clip.
* Mac: ManyCam must be installed in the default location and be preconfigured
to auto-play the test clip.
* Linux: The v4l2loopback must be loaded to the kernel already (with the
devices=2 argument) and the v4l2_file_player application must be compiled and
put in the location specified below.
"""
import psutil
import subprocess
import sys
WEBCAM_WIN = ['schtasks', '/run', '/tn', 'ManyCam']
WEBCAM_MAC = ['open', '/Applications/ManyCam/ManyCam.app']
WEBCAM_LINUX = (
'$HOME/fake-webcam-driver/linux/v4l2_file_player/v4l2_file_player '
'$HOME/webrtc_video_quality/reference_video.yuv 640 480 /dev/video1 &')
def IsWebCamRunning():
if sys.platform == 'win32':
process_name = 'ManyCam.exe'
elif sys.platform.startswith('darwin'):
process_name = 'ManyCam'
elif sys.platform.startswith('linux'):
process_name = 'v4l2_file_player'
else:
raise Exception('Unsupported platform: %s' % sys.platform)
for p in psutil.get_process_list():
if process_name == p.name:
print 'Found a running virtual webcam (%s with PID %s)' % (p.name, p.pid)
return True
return False
def Main():
if IsWebCamRunning():
return 0
try:
if sys.platform == 'win32':
subprocess.check_call(WEBCAM_WIN)
elif sys.platform.startswith('darwin'):
subprocess.check_call(WEBCAM_MAC)
elif sys.platform.startswith('linux'):
subprocess.check_call(WEBCAM_LINUX, shell=True)
print 'Successfully launched virtual webcam.'
return 0
except Exception as e:
print 'Failed to launch virtual webcam: %s' % e
if __name__ == '__main__':
sys.exit(Main())
|
<commit_before><commit_msg>Add script to ensure virtual webcam is running.
This script will check that a webcam is running and start it if it's
not currently running.
It's tailored to the way our buildbots are currently configured.
TEST=local execution on Windows, Mac and Linux.
BUG=none
R=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/1406005
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 73a4d5ab12efc6035a5c44625e1e772fd6c23ffc<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Checks if a virtual webcam is running and starts it if not.
Returns a non-zero return code if the webcam could not be started.
Prerequisites:
* Python must have the psutil package installed.
* Windows: a scheduled task named 'ManyCam' must exist and be configured to
launch ManyCam preconfigured to auto-play the test clip.
* Mac: ManyCam must be installed in the default location and be preconfigured
to auto-play the test clip.
* Linux: The v4l2loopback must be loaded to the kernel already (with the
devices=2 argument) and the v4l2_file_player application must be compiled and
put in the location specified below.
"""
import psutil
import subprocess
import sys
WEBCAM_WIN = ['schtasks', '/run', '/tn', 'ManyCam']
WEBCAM_MAC = ['open', '/Applications/ManyCam/ManyCam.app']
WEBCAM_LINUX = (
'$HOME/fake-webcam-driver/linux/v4l2_file_player/v4l2_file_player '
'$HOME/webrtc_video_quality/reference_video.yuv 640 480 /dev/video1 &')
def IsWebCamRunning():
if sys.platform == 'win32':
process_name = 'ManyCam.exe'
elif sys.platform.startswith('darwin'):
process_name = 'ManyCam'
elif sys.platform.startswith('linux'):
process_name = 'v4l2_file_player'
else:
raise Exception('Unsupported platform: %s' % sys.platform)
for p in psutil.get_process_list():
if process_name == p.name:
print 'Found a running virtual webcam (%s with PID %s)' % (p.name, p.pid)
return True
return False
def Main():
if IsWebCamRunning():
return 0
try:
if sys.platform == 'win32':
subprocess.check_call(WEBCAM_WIN)
elif sys.platform.startswith('darwin'):
subprocess.check_call(WEBCAM_MAC)
elif sys.platform.startswith('linux'):
subprocess.check_call(WEBCAM_LINUX, shell=True)
print 'Successfully launched virtual webcam.'
return 0
except Exception as e:
print 'Failed to launch virtual webcam: %s' % e
if __name__ == '__main__':
sys.exit(Main())
|
Add script to ensure virtual webcam is running.
This script will check that a webcam is running and start it if it's
not currently running.
It's tailored to the way our buildbots are currently configured.
TEST=local execution on Windows, Mac and Linux.
BUG=none
R=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/1406005
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 73a4d5ab12efc6035a5c44625e1e772fd6c23ffc#!/usr/bin/env python
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Checks if a virtual webcam is running and starts it if not.
Returns a non-zero return code if the webcam could not be started.
Prerequisites:
* Python must have the psutil package installed.
* Windows: a scheduled task named 'ManyCam' must exist and be configured to
launch ManyCam preconfigured to auto-play the test clip.
* Mac: ManyCam must be installed in the default location and be preconfigured
to auto-play the test clip.
* Linux: The v4l2loopback must be loaded to the kernel already (with the
devices=2 argument) and the v4l2_file_player application must be compiled and
put in the location specified below.
"""
import psutil
import subprocess
import sys
WEBCAM_WIN = ['schtasks', '/run', '/tn', 'ManyCam']
WEBCAM_MAC = ['open', '/Applications/ManyCam/ManyCam.app']
WEBCAM_LINUX = (
'$HOME/fake-webcam-driver/linux/v4l2_file_player/v4l2_file_player '
'$HOME/webrtc_video_quality/reference_video.yuv 640 480 /dev/video1 &')
def IsWebCamRunning():
if sys.platform == 'win32':
process_name = 'ManyCam.exe'
elif sys.platform.startswith('darwin'):
process_name = 'ManyCam'
elif sys.platform.startswith('linux'):
process_name = 'v4l2_file_player'
else:
raise Exception('Unsupported platform: %s' % sys.platform)
for p in psutil.get_process_list():
if process_name == p.name:
print 'Found a running virtual webcam (%s with PID %s)' % (p.name, p.pid)
return True
return False
def Main():
if IsWebCamRunning():
return 0
try:
if sys.platform == 'win32':
subprocess.check_call(WEBCAM_WIN)
elif sys.platform.startswith('darwin'):
subprocess.check_call(WEBCAM_MAC)
elif sys.platform.startswith('linux'):
subprocess.check_call(WEBCAM_LINUX, shell=True)
print 'Successfully launched virtual webcam.'
return 0
except Exception as e:
print 'Failed to launch virtual webcam: %s' % e
if __name__ == '__main__':
sys.exit(Main())
|
<commit_before><commit_msg>Add script to ensure virtual webcam is running.
This script will check that a webcam is running and start it if it's
not currently running.
It's tailored to the way our buildbots are currently configured.
TEST=local execution on Windows, Mac and Linux.
BUG=none
R=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/1406005
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: 73a4d5ab12efc6035a5c44625e1e772fd6c23ffc<commit_after>#!/usr/bin/env python
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Checks if a virtual webcam is running and starts it if not.
Returns a non-zero return code if the webcam could not be started.
Prerequisites:
* Python must have the psutil package installed.
* Windows: a scheduled task named 'ManyCam' must exist and be configured to
launch ManyCam preconfigured to auto-play the test clip.
* Mac: ManyCam must be installed in the default location and be preconfigured
to auto-play the test clip.
* Linux: The v4l2loopback must be loaded to the kernel already (with the
devices=2 argument) and the v4l2_file_player application must be compiled and
put in the location specified below.
"""
import psutil
import subprocess
import sys
WEBCAM_WIN = ['schtasks', '/run', '/tn', 'ManyCam']
WEBCAM_MAC = ['open', '/Applications/ManyCam/ManyCam.app']
WEBCAM_LINUX = (
'$HOME/fake-webcam-driver/linux/v4l2_file_player/v4l2_file_player '
'$HOME/webrtc_video_quality/reference_video.yuv 640 480 /dev/video1 &')
def IsWebCamRunning():
if sys.platform == 'win32':
process_name = 'ManyCam.exe'
elif sys.platform.startswith('darwin'):
process_name = 'ManyCam'
elif sys.platform.startswith('linux'):
process_name = 'v4l2_file_player'
else:
raise Exception('Unsupported platform: %s' % sys.platform)
for p in psutil.get_process_list():
if process_name == p.name:
print 'Found a running virtual webcam (%s with PID %s)' % (p.name, p.pid)
return True
return False
def Main():
if IsWebCamRunning():
return 0
try:
if sys.platform == 'win32':
subprocess.check_call(WEBCAM_WIN)
elif sys.platform.startswith('darwin'):
subprocess.check_call(WEBCAM_MAC)
elif sys.platform.startswith('linux'):
subprocess.check_call(WEBCAM_LINUX, shell=True)
print 'Successfully launched virtual webcam.'
return 0
except Exception as e:
print 'Failed to launch virtual webcam: %s' % e
if __name__ == '__main__':
sys.exit(Main())
|
|
b046ee48f6a9276ed4cd5a17c5accf383a7f5cc5
|
tests/sentry/runner/commands/test_init.py
|
tests/sentry/runner/commands/test_init.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import os

from sentry.testutils import CliTestCase
from sentry.runner.commands.init import init


class InitTest(CliTestCase):
    def test_simple(self):
        with self.runner.isolated_filesystem():
            rv = self.runner.invoke(init, ['config'], obj={})
            assert rv.exit_code == 0, rv.output
            contents = os.listdir('config')
            assert set(contents) == {'sentry.conf.py', 'config.yml'}

            # Make sure the python file is valid
            ctx = {'__file__': 'sentry.conf.py'}
            execfile('config/sentry.conf.py', ctx)
            assert 'DEBUG' in ctx

            # Make sure the yaml file is valid
            from sentry.utils.yaml import safe_load
            with open('config/config.yml', 'rb') as fp:
                ctx = safe_load(fp)
            assert 'system.secret-key' in ctx
|
Add tests for init command
|
Add tests for init command
|
Python
|
bsd-3-clause
|
alexm92/sentry,zenefits/sentry,jean/sentry,nicholasserra/sentry,ifduyue/sentry,fotinakis/sentry,jean/sentry,jean/sentry,fotinakis/sentry,JamesMura/sentry,zenefits/sentry,JackDanger/sentry,looker/sentry,nicholasserra/sentry,gencer/sentry,daevaorn/sentry,mitsuhiko/sentry,alexm92/sentry,looker/sentry,daevaorn/sentry,ifduyue/sentry,gencer/sentry,mvaled/sentry,daevaorn/sentry,ifduyue/sentry,jean/sentry,JamesMura/sentry,mvaled/sentry,looker/sentry,ifduyue/sentry,fotinakis/sentry,BuildingLink/sentry,gencer/sentry,mvaled/sentry,BuildingLink/sentry,beeftornado/sentry,beeftornado/sentry,mvaled/sentry,beeftornado/sentry,jean/sentry,ifduyue/sentry,fotinakis/sentry,JackDanger/sentry,looker/sentry,JamesMura/sentry,zenefits/sentry,BuildingLink/sentry,zenefits/sentry,JamesMura/sentry,mvaled/sentry,JamesMura/sentry,gencer/sentry,mitsuhiko/sentry,gencer/sentry,zenefits/sentry,mvaled/sentry,daevaorn/sentry,BuildingLink/sentry,nicholasserra/sentry,alexm92/sentry,BuildingLink/sentry,JackDanger/sentry,looker/sentry
|
Add tests for init command
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from sentry.testutils import CliTestCase
from sentry.runner.commands.init import init
class InitTest(CliTestCase):
def test_simple(self):
with self.runner.isolated_filesystem():
rv = self.runner.invoke(init, ['config'], obj={})
assert rv.exit_code == 0, rv.output
contents = os.listdir('config')
assert set(contents) == {'sentry.conf.py', 'config.yml'}
# Make sure the python file is valid
ctx = {'__file__': 'sentry.conf.py'}
execfile('config/sentry.conf.py', ctx)
assert 'DEBUG' in ctx
# Make sure the yaml file is valid
from sentry.utils.yaml import safe_load
with open('config/config.yml', 'rb') as fp:
ctx = safe_load(fp)
assert 'system.secret-key' in ctx
|
<commit_before><commit_msg>Add tests for init command<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from sentry.testutils import CliTestCase
from sentry.runner.commands.init import init
class InitTest(CliTestCase):
def test_simple(self):
with self.runner.isolated_filesystem():
rv = self.runner.invoke(init, ['config'], obj={})
assert rv.exit_code == 0, rv.output
contents = os.listdir('config')
assert set(contents) == {'sentry.conf.py', 'config.yml'}
# Make sure the python file is valid
ctx = {'__file__': 'sentry.conf.py'}
execfile('config/sentry.conf.py', ctx)
assert 'DEBUG' in ctx
# Make sure the yaml file is valid
from sentry.utils.yaml import safe_load
with open('config/config.yml', 'rb') as fp:
ctx = safe_load(fp)
assert 'system.secret-key' in ctx
|
Add tests for init command# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from sentry.testutils import CliTestCase
from sentry.runner.commands.init import init
class InitTest(CliTestCase):
def test_simple(self):
with self.runner.isolated_filesystem():
rv = self.runner.invoke(init, ['config'], obj={})
assert rv.exit_code == 0, rv.output
contents = os.listdir('config')
assert set(contents) == {'sentry.conf.py', 'config.yml'}
# Make sure the python file is valid
ctx = {'__file__': 'sentry.conf.py'}
execfile('config/sentry.conf.py', ctx)
assert 'DEBUG' in ctx
# Make sure the yaml file is valid
from sentry.utils.yaml import safe_load
with open('config/config.yml', 'rb') as fp:
ctx = safe_load(fp)
assert 'system.secret-key' in ctx
|
<commit_before><commit_msg>Add tests for init command<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from sentry.testutils import CliTestCase
from sentry.runner.commands.init import init
class InitTest(CliTestCase):
def test_simple(self):
with self.runner.isolated_filesystem():
rv = self.runner.invoke(init, ['config'], obj={})
assert rv.exit_code == 0, rv.output
contents = os.listdir('config')
assert set(contents) == {'sentry.conf.py', 'config.yml'}
# Make sure the python file is valid
ctx = {'__file__': 'sentry.conf.py'}
execfile('config/sentry.conf.py', ctx)
assert 'DEBUG' in ctx
# Make sure the yaml file is valid
from sentry.utils.yaml import safe_load
with open('config/config.yml', 'rb') as fp:
ctx = safe_load(fp)
assert 'system.secret-key' in ctx
|
|
a216d01adaae04289443343e790cfa3317863e6e
|
1_boilerpipe_lib_scrapping.py
|
1_boilerpipe_lib_scrapping.py
|
# -*- coding: UTF-8 -*-
from boilerpipe.extract import Extractor
from bs4 import BeautifulSoup
from urllib2 import urlopen

URL='http://sportv.globo.com/site/eventos/mundial-de-motovelocidade/noticia/2016/06/em-duelo-eletrizante-rossi-vence-marquez-salom-e-homenageado.html'

soup = BeautifulSoup(urlopen(URL).read(), "lxml")
lista = soup.find_all("li", "noticia-plantao")

for li in lista:
    url = li.find("a")
    extractor = Extractor(extractor='ArticleExtractor', url=url["href"])
    print extractor.getText().encode('utf-8')
    print '-------------'

extractor = Extractor(extractor='ArticleExtractor', url=URL)
print extractor.getText().encode('utf-8')
|
Add web scrapping example to boilerpipe lib
|
Add web scrapping example to boilerpipe lib
|
Python
|
apache-2.0
|
fabriciojoc/redes-sociais-web,fabriciojoc/redes-sociais-web
|
Add web scrapping example to boilerpipe lib
|
# -*- coding: UTF-8 -*-
from boilerpipe.extract import Extractor
from bs4 import BeautifulSoup
from urllib2 import urlopen
URL='http://sportv.globo.com/site/eventos/mundial-de-motovelocidade/noticia/2016/06/em-duelo-eletrizante-rossi-vence-marquez-salom-e-homenageado.html'
soup = BeautifulSoup(urlopen(URL).read(), "lxml")
lista = soup.find_all("li", "noticia-plantao")
for li in lista:
url = li.find("a")
extractor = Extractor(extractor='ArticleExtractor', url=url["href"])
print extractor.getText().encode('utf-8')
print '-------------'
extractor = Extractor(extractor='ArticleExtractor', url=URL)
print extractor.getText().encode('utf-8')
|
<commit_before><commit_msg>Add web scrapping example to boilerpipe lib<commit_after>
|
# -*- coding: UTF-8 -*-
from boilerpipe.extract import Extractor
from bs4 import BeautifulSoup
from urllib2 import urlopen
URL='http://sportv.globo.com/site/eventos/mundial-de-motovelocidade/noticia/2016/06/em-duelo-eletrizante-rossi-vence-marquez-salom-e-homenageado.html'
soup = BeautifulSoup(urlopen(URL).read(), "lxml")
lista = soup.find_all("li", "noticia-plantao")
for li in lista:
url = li.find("a")
extractor = Extractor(extractor='ArticleExtractor', url=url["href"])
print extractor.getText().encode('utf-8')
print '-------------'
extractor = Extractor(extractor='ArticleExtractor', url=URL)
print extractor.getText().encode('utf-8')
|
Add web scrapping example to boilerpipe lib# -*- coding: UTF-8 -*-
from boilerpipe.extract import Extractor
from bs4 import BeautifulSoup
from urllib2 import urlopen
URL='http://sportv.globo.com/site/eventos/mundial-de-motovelocidade/noticia/2016/06/em-duelo-eletrizante-rossi-vence-marquez-salom-e-homenageado.html'
soup = BeautifulSoup(urlopen(URL).read(), "lxml")
lista = soup.find_all("li", "noticia-plantao")
for li in lista:
url = li.find("a")
extractor = Extractor(extractor='ArticleExtractor', url=url["href"])
print extractor.getText().encode('utf-8')
print '-------------'
extractor = Extractor(extractor='ArticleExtractor', url=URL)
print extractor.getText().encode('utf-8')
|
<commit_before><commit_msg>Add web scrapping example to boilerpipe lib<commit_after># -*- coding: UTF-8 -*-
from boilerpipe.extract import Extractor
from bs4 import BeautifulSoup
from urllib2 import urlopen
URL='http://sportv.globo.com/site/eventos/mundial-de-motovelocidade/noticia/2016/06/em-duelo-eletrizante-rossi-vence-marquez-salom-e-homenageado.html'
soup = BeautifulSoup(urlopen(URL).read(), "lxml")
lista = soup.find_all("li", "noticia-plantao")
for li in lista:
url = li.find("a")
extractor = Extractor(extractor='ArticleExtractor', url=url["href"])
print extractor.getText().encode('utf-8')
print '-------------'
extractor = Extractor(extractor='ArticleExtractor', url=URL)
print extractor.getText().encode('utf-8')
|
|
3335f93e42473d0f807b2f701c32c213cec857bc
|
app_traffic/migrations/0002_auto_20161015_1724.py
|
app_traffic/migrations/0002_auto_20161015_1724.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-15 15:24
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('app_traffic', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='apptraffic',
            name='trip_duration',
            field=models.PositiveSmallIntegerField(default=None, null=True, verbose_name='Duration'),
        ),
    ]
|
Add the missing app_traffic migration
|
Add the missing app_traffic migration
|
Python
|
agpl-3.0
|
laboiteproject/laboite-backend,laboiteproject/laboite-backend,laboiteproject/laboite-backend,bgaultier/laboitepro,bgaultier/laboitepro,bgaultier/laboitepro
|
Add the missing app_traffic migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-15 15:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_traffic', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='apptraffic',
name='trip_duration',
field=models.PositiveSmallIntegerField(default=None, null=True, verbose_name='Duration'),
),
]
|
<commit_before><commit_msg>Add the missing app_traffic migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-15 15:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_traffic', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='apptraffic',
name='trip_duration',
field=models.PositiveSmallIntegerField(default=None, null=True, verbose_name='Duration'),
),
]
|
Add the missing app_traffic migration# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-15 15:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_traffic', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='apptraffic',
name='trip_duration',
field=models.PositiveSmallIntegerField(default=None, null=True, verbose_name='Duration'),
),
]
|
<commit_before><commit_msg>Add the missing app_traffic migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-15 15:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_traffic', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='apptraffic',
name='trip_duration',
field=models.PositiveSmallIntegerField(default=None, null=True, verbose_name='Duration'),
),
]
|
|
4afa357984e50834427d7c2b6ed3563500aa04f9
|
get_items/load_items_into_db.py
|
get_items/load_items_into_db.py
|
#!/usr/bin/env python3

import csv

import get_db_connection


def main():
    with open('items.csv', 'r', newline='') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        with get_db_connection.connect() as conn:
            with conn.cursor() as cur:
                for row in csv_reader:
                    cur.execute(
                        'INSERT INTO items (wiki_link, canonical_name) VALUES (%s, %s)',
                        (row['wiki_link'], row['name']))


if __name__ == '__main__':
    main()
|
Load items into the database
|
Load items into the database
|
Python
|
apache-2.0
|
p99tunnel/p99tunnel,p99tunnel/p99tunnel
|
Load items into the database
|
#!/usr/bin/env python3
import csv
import get_db_connection
def main():
with open('items.csv', 'r', newline='') as csv_file:
csv_reader = csv.DictReader(csv_file)
with get_db_connection.connect() as conn:
with conn.cursor() as cur:
for row in csv_reader:
cur.execute(
'INSERT INTO items (wiki_link, canonical_name) VALUES (%s, %s)',
(row['wiki_link'], row['name']))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Load items into the database<commit_after>
|
#!/usr/bin/env python3
import csv
import get_db_connection
def main():
with open('items.csv', 'r', newline='') as csv_file:
csv_reader = csv.DictReader(csv_file)
with get_db_connection.connect() as conn:
with conn.cursor() as cur:
for row in csv_reader:
cur.execute(
'INSERT INTO items (wiki_link, canonical_name) VALUES (%s, %s)',
(row['wiki_link'], row['name']))
if __name__ == '__main__':
main()
|
Load items into the database#!/usr/bin/env python3
import csv
import get_db_connection
def main():
with open('items.csv', 'r', newline='') as csv_file:
csv_reader = csv.DictReader(csv_file)
with get_db_connection.connect() as conn:
with conn.cursor() as cur:
for row in csv_reader:
cur.execute(
'INSERT INTO items (wiki_link, canonical_name) VALUES (%s, %s)',
(row['wiki_link'], row['name']))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Load items into the database<commit_after>#!/usr/bin/env python3
import csv
import get_db_connection
def main():
with open('items.csv', 'r', newline='') as csv_file:
csv_reader = csv.DictReader(csv_file)
with get_db_connection.connect() as conn:
with conn.cursor() as cur:
for row in csv_reader:
cur.execute(
'INSERT INTO items (wiki_link, canonical_name) VALUES (%s, %s)',
(row['wiki_link'], row['name']))
if __name__ == '__main__':
main()
|
|
3f9d440d31e85f9edb509a7acd3256da820ce74f
|
pickle_rename.py
|
pickle_rename.py
|
import os


# Pickle files should be .p file extensions, not .txt. Add a migration script.
def rename_pickles():
    pickles = ['falsePositives.txt', 'whitelistedUsers.txt', 'blacklistedUsers.txt', 'ignoredPosts.txt',
               'autoIgnoredPosts.txt', 'users.txt', 'notifications.txt', 'whyData.txt', 'whyDataAllspam.txt',
               'latestMessages.txt', 'apiCalls.txt', 'bodyfetcherQueue.txt', 'bodyfetcherMaxIds.txt']

    # Check if each of these is a file, and if it exists, rename it to .p extension.
    for txt in pickles:
        try:
            if os.path.isfile(txt):
                os.rename(txt, (txt[:-4] + '.p'))
        except:
            raise RuntimeError("Could not migrate Pickle file from .txt extension to .p extension.")
|
Add pickle migration script for .txt to .p
|
Add pickle migration script for .txt to .p
|
Python
|
apache-2.0
|
Charcoal-SE/SmokeDetector,Charcoal-SE/SmokeDetector
|
Add pickle migration script for .txt to .p
|
import os
# Pickle files should be .p file extensions, not .txt. Add a migration script.
def rename_pickles():
pickles = ['falsePositives.txt', 'whitelistedUsers.txt', 'blacklistedUsers.txt', 'ignoredPosts.txt',
'autoIgnoredPosts.txt', 'users.txt', 'notifications.txt', 'whyData.txt', 'whyDataAllspam.txt',
'latestMessages.txt', 'apiCalls.txt', 'bodyfetcherQueue.txt', 'bodyfetcherMaxIds.txt']
# Check if each of these is a file, and if it exists, rename it to .p extension.
for txt in pickles:
try:
if os.path.isfile(txt):
os.rename(txt, (txt[:-4] + '.p'))
except:
raise RuntimeError("Could not migrate Pickle file from .txt extension to .p extension.")
|
<commit_before><commit_msg>Add pickle migration script for .txt to .p<commit_after>
|
import os
# Pickle files should be .p file extensions, not .txt. Add a migration script.
def rename_pickles():
pickles = ['falsePositives.txt', 'whitelistedUsers.txt', 'blacklistedUsers.txt', 'ignoredPosts.txt',
'autoIgnoredPosts.txt', 'users.txt', 'notifications.txt', 'whyData.txt', 'whyDataAllspam.txt',
'latestMessages.txt', 'apiCalls.txt', 'bodyfetcherQueue.txt', 'bodyfetcherMaxIds.txt']
# Check if each of these is a file, and if it exists, rename it to .p extension.
for txt in pickles:
try:
if os.path.isfile(txt):
os.rename(txt, (txt[:-4] + '.p'))
except:
raise RuntimeError("Could not migrate Pickle file from .txt extension to .p extension.")
|
Add pickle migration script for .txt to .pimport os
# Pickle files should be .p file extensions, not .txt. Add a migration script.
def rename_pickles():
pickles = ['falsePositives.txt', 'whitelistedUsers.txt', 'blacklistedUsers.txt', 'ignoredPosts.txt',
'autoIgnoredPosts.txt', 'users.txt', 'notifications.txt', 'whyData.txt', 'whyDataAllspam.txt',
'latestMessages.txt', 'apiCalls.txt', 'bodyfetcherQueue.txt', 'bodyfetcherMaxIds.txt']
# Check if each of these is a file, and if it exists, rename it to .p extension.
for txt in pickles:
try:
if os.path.isfile(txt):
os.rename(txt, (txt[:-4] + '.p'))
except:
raise RuntimeError("Could not migrate Pickle file from .txt extension to .p extension.")
|
<commit_before><commit_msg>Add pickle migration script for .txt to .p<commit_after>import os
# Pickle files should be .p file extensions, not .txt. Add a migration script.
def rename_pickles():
pickles = ['falsePositives.txt', 'whitelistedUsers.txt', 'blacklistedUsers.txt', 'ignoredPosts.txt',
'autoIgnoredPosts.txt', 'users.txt', 'notifications.txt', 'whyData.txt', 'whyDataAllspam.txt',
'latestMessages.txt', 'apiCalls.txt', 'bodyfetcherQueue.txt', 'bodyfetcherMaxIds.txt']
# Check if each of these is a file, and if it exists, rename it to .p extension.
for txt in pickles:
try:
if os.path.isfile(txt):
os.rename(txt, (txt[:-4] + '.p'))
except:
raise RuntimeError("Could not migrate Pickle file from .txt extension to .p extension.")
|
|
f3c0e901eaa5b56b8310bea61cc2246516097639
|
ideascube/conf/kb_servir_ben.py
|
ideascube/conf/kb_servir_ben.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .base import *  # noqa
from django.utils.translation import ugettext_lazy as _

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DEBUG', True))
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['.koombook.lan.', 'localhost', '127.0.0.1']

LANGUAGE_CODE = 'fr'
TIME_ZONE = None

# Ideas Box specifics
STORAGE_ROOT = '/media/hdd/ideascube/storage'

IDEASCUBE_NAME = 'Fondation SERVIR Bénin'
DOMAIN = 'koombook.lan'
BACKUP_FORMAT = 'gztar'

STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS if c['url'] in ['user_list', 'server:power', 'server:backup']]

HOME_CARDS = STAFF_HOME_CARDS + [
    {
        'id': 'blog',
    },
    {
        'id': 'mediacenter',
    },
    {
        'id': 'bsfcampus',
    },
    {
        'id': 'wikipedia',
    },
    {
        'id': 'khanacademy',
    },
    {
        'id': 'wikisource',
    },
    {
        'id': 'vikidia',
    },
    {
        'id': 'gutenberg',
    },
    {
        'id': 'cpassorcier',
    },
    {
        'id': 'ted',
    },
    {
        'id': 'software',
    },
    {
        'id': 'ubuntudoc',
    },
]

IDEASCUBE_BODY_ID = 'koombook'
|
Add conf file for KoomBook SERVIR
|
Add conf file for KoomBook SERVIR
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for KoomBook SERVIR
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .base import * # noqa
from django.utils.translation import ugettext_lazy as _
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DEBUG', True))
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['.koombook.lan.', 'localhost', '127.0.0.1']
LANGUAGE_CODE = 'fr'
TIME_ZONE = None
# Ideas Box specifics
STORAGE_ROOT = '/media/hdd/ideascube/storage'
IDEASCUBE_NAME = 'Fondation SERVIR Bénin'
DOMAIN = 'koombook.lan'
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS if c['url'] in ['user_list', 'server:power', 'server:backup']]
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'wikipedia',
},
{
'id': 'khanacademy',
},
{
'id': 'wikisource',
},
{
'id': 'vikidia',
},
{
'id': 'gutenberg',
},
{
'id': 'cpassorcier',
},
{
'id': 'ted',
},
{
'id': 'software',
},
{
'id': 'ubuntudoc',
},
]
IDEASCUBE_BODY_ID = 'koombook'
|
<commit_before><commit_msg>Add conf file for KoomBook SERVIR<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .base import * # noqa
from django.utils.translation import ugettext_lazy as _
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DEBUG', True))
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['.koombook.lan.', 'localhost', '127.0.0.1']
LANGUAGE_CODE = 'fr'
TIME_ZONE = None
# Ideas Box specifics
STORAGE_ROOT = '/media/hdd/ideascube/storage'
IDEASCUBE_NAME = 'Fondation SERVIR Bénin'
DOMAIN = 'koombook.lan'
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS if c['url'] in ['user_list', 'server:power', 'server:backup']]
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'wikipedia',
},
{
'id': 'khanacademy',
},
{
'id': 'wikisource',
},
{
'id': 'vikidia',
},
{
'id': 'gutenberg',
},
{
'id': 'cpassorcier',
},
{
'id': 'ted',
},
{
'id': 'software',
},
{
'id': 'ubuntudoc',
},
]
IDEASCUBE_BODY_ID = 'koombook'
|
Add conf file for KoomBook SERVIR# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .base import * # noqa
from django.utils.translation import ugettext_lazy as _
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DEBUG', True))
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['.koombook.lan.', 'localhost', '127.0.0.1']
LANGUAGE_CODE = 'fr'
TIME_ZONE = None
# Ideas Box specifics
STORAGE_ROOT = '/media/hdd/ideascube/storage'
IDEASCUBE_NAME = 'Fondation SERVIR Bénin'
DOMAIN = 'koombook.lan'
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS if c['url'] in ['user_list', 'server:power', 'server:backup']]
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'wikipedia',
},
{
'id': 'khanacademy',
},
{
'id': 'wikisource',
},
{
'id': 'vikidia',
},
{
'id': 'gutenberg',
},
{
'id': 'cpassorcier',
},
{
'id': 'ted',
},
{
'id': 'software',
},
{
'id': 'ubuntudoc',
},
]
IDEASCUBE_BODY_ID = 'koombook'
|
<commit_before><commit_msg>Add conf file for KoomBook SERVIR<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .base import * # noqa
from django.utils.translation import ugettext_lazy as _
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DEBUG', True))
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['.koombook.lan.', 'localhost', '127.0.0.1']
LANGUAGE_CODE = 'fr'
TIME_ZONE = None
# Ideas Box specifics
STORAGE_ROOT = '/media/hdd/ideascube/storage'
IDEASCUBE_NAME = 'Fondation SERVIR Bénin'
DOMAIN = 'koombook.lan'
BACKUP_FORMAT = 'gztar'
STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS if c['url'] in ['user_list', 'server:power', 'server:backup']]
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'bsfcampus',
},
{
'id': 'wikipedia',
},
{
'id': 'khanacademy',
},
{
'id': 'wikisource',
},
{
'id': 'vikidia',
},
{
'id': 'gutenberg',
},
{
'id': 'cpassorcier',
},
{
'id': 'ted',
},
{
'id': 'software',
},
{
'id': 'ubuntudoc',
},
]
IDEASCUBE_BODY_ID = 'koombook'
|
|
e00c9ba04abd8ceaa0999ca5ca3308455a53afc8
|
prepare_images.py
|
prepare_images.py
|
import cv2
path_dataset = "h:/_diplomaData/LISA Traffic Sign Dataset/signDatabasePublicFramesOnly/"
path_file = path_dataset + "vid6/frameAnnotations-MVI_0071.MOV_annotations/pedestrian_1323896918.avi_image0.png"
img = cv2.imread(path_file, cv2.IMREAD_UNCHANGED)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
Test openCV image open end convert RGB -> HSV
|
Test openCV image open end convert RGB -> HSV
|
Python
|
mit
|
dimmddr/roadSignsNN
|
Test openCV image open end convert RGB -> HSV
|
import cv2
path_dataset = "h:/_diplomaData/LISA Traffic Sign Dataset/signDatabasePublicFramesOnly/"
path_file = path_dataset + "vid6/frameAnnotations-MVI_0071.MOV_annotations/pedestrian_1323896918.avi_image0.png"
img = cv2.imread(path_file, cv2.IMREAD_UNCHANGED)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Test openCV image open end convert RGB -> HSV<commit_after>
|
import cv2
path_dataset = "h:/_diplomaData/LISA Traffic Sign Dataset/signDatabasePublicFramesOnly/"
path_file = path_dataset + "vid6/frameAnnotations-MVI_0071.MOV_annotations/pedestrian_1323896918.avi_image0.png"
img = cv2.imread(path_file, cv2.IMREAD_UNCHANGED)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
Test openCV image open end convert RGB -> HSVimport cv2
path_dataset = "h:/_diplomaData/LISA Traffic Sign Dataset/signDatabasePublicFramesOnly/"
path_file = path_dataset + "vid6/frameAnnotations-MVI_0071.MOV_annotations/pedestrian_1323896918.avi_image0.png"
img = cv2.imread(path_file, cv2.IMREAD_UNCHANGED)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Test openCV image open end convert RGB -> HSV<commit_after>import cv2
path_dataset = "h:/_diplomaData/LISA Traffic Sign Dataset/signDatabasePublicFramesOnly/"
path_file = path_dataset + "vid6/frameAnnotations-MVI_0071.MOV_annotations/pedestrian_1323896918.avi_image0.png"
img = cv2.imread(path_file, cv2.IMREAD_UNCHANGED)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
|
ab96ed2917af81266397870a80784d6f83cf9d36
|
examples/python/requestdata.py
|
examples/python/requestdata.py
|
from flask import request


def main():
    msg = "%s %s:\n---HEADERS---\n%s\n--BODY--\n%s\n-----\n" % (request.method, request.path, request.headers, request.get_data())
    return msg
|
Add a python example showing how to get at request data
|
Add a python example showing how to get at request data
|
Python
|
apache-2.0
|
ktrance/fission,ktrance/fission,life1347/fission,life1347/fission,ktrance/fission,life1347/fission,life1347/fission,platform9/fission,fission/fission,fission/fission,ktrance/fission,life1347/fission,ktrance/fission,life1347/fission,life1347/fission,life1347/fission,ktrance/fission,life1347/fission,fission/fission,platform9/fission,platform9/fission,platform9/fission,fission/fission
|
Add a python example showing how to get at request data
|
from flask import request
def main():
msg = "%s %s:\n---HEADERS---\n%s\n--BODY--\n%s\n-----\n" % (request.method, request.path, request.headers, request.get_data())
return msg
|
<commit_before><commit_msg>Add a python example showing how to get at request data<commit_after>
|
from flask import request
def main():
msg = "%s %s:\n---HEADERS---\n%s\n--BODY--\n%s\n-----\n" % (request.method, request.path, request.headers, request.get_data())
return msg
|
Add a python example showing how to get at request datafrom flask import request
def main():
msg = "%s %s:\n---HEADERS---\n%s\n--BODY--\n%s\n-----\n" % (request.method, request.path, request.headers, request.get_data())
return msg
|
<commit_before><commit_msg>Add a python example showing how to get at request data<commit_after>from flask import request
def main():
msg = "%s %s:\n---HEADERS---\n%s\n--BODY--\n%s\n-----\n" % (request.method, request.path, request.headers, request.get_data())
return msg
|
|
b1bea0ae2369a82a0689649fd2976b4bf23a9365
|
convert_column_to_timestamp.py
|
convert_column_to_timestamp.py
|
# Title: Convert a date string column to a UNIX timestamp
import graphlab as gl
from datetime import datetime

# Requires you to 'pip install python-dateutil==1.5'
from dateutil import parser


def str_to_timestamp(the_str):
    try:
        dt = parser.parse(the_str)
    except:
        return None

    # UNIX epoch is January 1, 1970
    return (dt - datetime(1970,1,1)).total_seconds()


# 02/29/2001 is invalid, so should be 'None' in output
sf = gl.SFrame({'date':['2000-08-21','2013-06-08 17:25:00.12753','02/29/2001'],'id':[1,2,3]})
sf['date'] = sf['date'].apply(str_to_timestamp)
|
Add first howto code snippet
|
Add first howto code snippet
|
Python
|
cc0-1.0
|
dato-code/how-to,nagyistoce/how-to-graphlab-create,srikris/how-to
|
Add first howto code snippet
|
# Title: Convert a date string column to a UNIX timestamp
import graphlab as gl
from datetime import datetime
# Requires you to 'pip install python-dateutil==1.5'
from dateutil import parser
def str_to_timestamp(the_str):
try:
dt = parser.parse(the_str)
except:
return None
# UNIX epoch is January 1, 1970
return (dt - datetime(1970,1,1)).total_seconds()
# 02/29/2001 is invalid, so should be 'None' in output
sf = gl.SFrame({'date':['2000-08-21','2013-06-08 17:25:00.12753','02/29/2001'],'id':[1,2,3]})
sf['date'] = sf['date'].apply(str_to_timestamp)
|
<commit_before><commit_msg>Add first howto code snippet<commit_after>
|
# Title: Convert a date string column to a UNIX timestamp
import graphlab as gl
from datetime import datetime
# Requires you to 'pip install python-dateutil==1.5'
from dateutil import parser
def str_to_timestamp(the_str):
try:
dt = parser.parse(the_str)
except:
return None
# UNIX epoch is January 1, 1970
return (dt - datetime(1970,1,1)).total_seconds()
# 02/29/2001 is invalid, so should be 'None' in output
sf = gl.SFrame({'date':['2000-08-21','2013-06-08 17:25:00.12753','02/29/2001'],'id':[1,2,3]})
sf['date'] = sf['date'].apply(str_to_timestamp)
|
Add first howto code snippet# Title: Convert a date string column to a UNIX timestamp
import graphlab as gl
from datetime import datetime
# Requires you to 'pip install python-dateutil==1.5'
from dateutil import parser
def str_to_timestamp(the_str):
try:
dt = parser.parse(the_str)
except:
return None
# UNIX epoch is January 1, 1970
return (dt - datetime(1970,1,1)).total_seconds()
# 02/29/2001 is invalid, so should be 'None' in output
sf = gl.SFrame({'date':['2000-08-21','2013-06-08 17:25:00.12753','02/29/2001'],'id':[1,2,3]})
sf['date'] = sf['date'].apply(str_to_timestamp)
|
<commit_before><commit_msg>Add first howto code snippet<commit_after># Title: Convert a date string column to a UNIX timestamp
import graphlab as gl
from datetime import datetime
# Requires you to 'pip install python-dateutil==1.5'
from dateutil import parser
def str_to_timestamp(the_str):
try:
dt = parser.parse(the_str)
except:
return None
# UNIX epoch is January 1, 1970
return (dt - datetime(1970,1,1)).total_seconds()
# 02/29/2001 is invalid, so should be 'None' in output
sf = gl.SFrame({'date':['2000-08-21','2013-06-08 17:25:00.12753','02/29/2001'],'id':[1,2,3]})
sf['date'] = sf['date'].apply(str_to_timestamp)
|
|
26f2c4b11125c4b859359f3011976376165544aa
|
src/ggrc/migrations/versions/20140930092005_53bb0f4f6ec8_switch_fulltext_record_properties.py
|
src/ggrc/migrations/versions/20140930092005_53bb0f4f6ec8_switch_fulltext_record_properties.py
|
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
Switch myisam to innodb for fulltext_record_properties
|
Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.
|
Python
|
apache-2.0
|
kr41/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,vladan-m/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,hasanalom/ggrc-core,vladan-m/ggrc-core,VinnieJohns/ggrc-core,vladan-m/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,vladan-m/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,hasanalom/ggrc-core,uskudnik/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,hyperNURb/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,vladan-m/ggrc-core,andrei-karalionak/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core
|
Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.
|
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
<commit_before><commit_msg>Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.<commit_after>
|
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
<commit_before><commit_msg>Switch myisam to innodb for fulltext_record_properties
This change was made because fulltext record properties table
crashed on grc-dev and grc-test. The issue was resolved with
REPAIR TABLE clause. Running the reindex script did not help.
Based on https://cloud.google.com/sql/faq#innodb myisam should
only be used for full text search, but we aren't using full
text search anymore.<commit_after>
"""Switch fulltext_record_properties to innodb
Revision ID: 53bb0f4f6ec8
Revises: 63fc392c91a
Create Date: 2014-09-30 09:20:05.884100
"""
# revision identifiers, used by Alembic.
revision = '53bb0f4f6ec8'
down_revision = '63fc392c91a'
from alembic import op
def upgrade():
op.drop_index('fulltext_record_properties_text_idx',
table_name='fulltext_record_properties')
op.execute("ALTER TABLE fulltext_record_properties ENGINE=InnoDB")
def downgrade():
op.execute("""
ALTER TABLE fulltext_record_properties
ENGINE=MyISAM,
ADD FULLTEXT INDEX `fulltext_record_properties_text_idx` (`content`)
""")
|
|
0c5a26d14481a660294f4ba7a2eeea02cef42a4d
|
scripts/migration/migrate_prereg_schema_multiple_choice_responses.py
|
scripts/migration/migrate_prereg_schema_multiple_choice_responses.py
|
"""
Small migration - Prereg challenge schema q5 mult choice responses have extra trailing space. This gets confusing when
user trying to update prereg draft via API.
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from scripts import utils as scripts_utils
from website.models import DraftRegistration, Node, MetaSchema
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def migrate_drafts_q5_metadata(schema):
"""
Finds Prereg Challenge draft registrations and corrects q5 response metadata
"""
drafts = DraftRegistration.find(Q('registration_schema', 'eq', schema))
total_drafts = drafts.count()
logger.info('Examining {} drafts for q5 metadata'.format(total_drafts))
draft_count = 0
for draft in drafts:
draft_count += 1
if draft.registration_metadata.get('q5', {}).get('value', {}):
draft.registration_metadata['q5']['value'] = draft.registration_metadata['q5']['value'].rstrip()
draft.save()
logger.info('{}/{} Migrated q5 response for {}'.format(draft_count, total_drafts, draft._id))
else:
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(draft_count, drafts.count(), draft._id))
def migrate_registrations_q5_metadata(schema):
"""
Finds Prereg Challenge registrations whose registered_meta includes q5 and corrects
"""
registrations = Node.find(Q('is_registration', 'eq', True) & Q('registered_schema', 'eq', schema))
total_reg = registrations.count()
logger.info('Examining {} registrations for q5 metadata'.format(total_reg))
reg_count = 0
for reg in registrations:
reg_count += 1
if reg.registered_meta.get(schema._id, {}).get('q5', {}).get('value', {}):
reg.registered_meta[schema._id]['q5']['value'] = reg.registered_meta[schema._id]['q5']['value'].rstrip()
reg.save()
logger.info('{}/{} Migrated q5 response for {}'.format(reg_count, total_reg, reg._id))
else:
# q5 is a required question, so should be answered, but just in case...
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(reg_count, total_reg, reg._id))
def main(dry=True):
init_app(set_backends=True, routes=False)
if not dry:
scripts_utils.add_file_logger(logger, __file__)
prereg = MetaSchema.find_one(
Q('name', 'eq', "Prereg Challenge"))
migrate_drafts_q5_metadata(prereg)
migrate_registrations_q5_metadata(prereg)
if __name__ == '__main__':
dry_run = '--dry' in sys.argv
with TokuTransaction():
main(dry=dry_run)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
|
Add migration to correct q5 responses for registrations and drafts using Prereg Challenge schema.
|
Add migration to correct q5 responses for registrations and drafts using Prereg Challenge schema.
|
Python
|
apache-2.0
|
emetsger/osf.io,chrisseto/osf.io,kwierman/osf.io,Johnetordoff/osf.io,DanielSBrown/osf.io,TomBaxter/osf.io,monikagrabowska/osf.io,monikagrabowska/osf.io,caseyrollins/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,wearpants/osf.io,acshi/osf.io,acshi/osf.io,mluo613/osf.io,sloria/osf.io,icereval/osf.io,baylee-d/osf.io,mluke93/osf.io,sloria/osf.io,cslzchen/osf.io,pattisdr/osf.io,mluke93/osf.io,mluo613/osf.io,felliott/osf.io,wearpants/osf.io,mluke93/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,kwierman/osf.io,amyshi188/osf.io,crcresearch/osf.io,mfraezz/osf.io,erinspace/osf.io,cwisecarver/osf.io,erinspace/osf.io,mluo613/osf.io,icereval/osf.io,monikagrabowska/osf.io,pattisdr/osf.io,pattisdr/osf.io,monikagrabowska/osf.io,binoculars/osf.io,Nesiehr/osf.io,chennan47/osf.io,cslzchen/osf.io,caneruguz/osf.io,baylee-d/osf.io,mfraezz/osf.io,rdhyee/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,amyshi188/osf.io,SSJohns/osf.io,SSJohns/osf.io,samchrisinger/osf.io,kwierman/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,leb2dg/osf.io,mattclark/osf.io,amyshi188/osf.io,emetsger/osf.io,laurenrevere/osf.io,DanielSBrown/osf.io,aaxelb/osf.io,chrisseto/osf.io,kwierman/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,Nesiehr/osf.io,crcresearch/osf.io,samchrisinger/osf.io,wearpants/osf.io,alexschiller/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,DanielSBrown/osf.io,laurenrevere/osf.io,icereval/osf.io,felliott/osf.io,rdhyee/osf.io,adlius/osf.io,leb2dg/osf.io,cwisecarver/osf.io,alexschiller/osf.io,chennan47/osf.io,abought/osf.io,saradbowman/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,binoculars/osf.io,felliott/osf.io,hmoco/osf.io,SSJohns/osf.io,TomBaxter/osf.io,DanielSBrown/osf.io,mattclark/osf.io,adlius/osf.io,chennan47/osf.io,caseyrollins/osf.io,alexschiller/osf.io,brianjgeiger/osf.io,acshi/osf.io,baylee-d/osf.io,caneruguz/osf.io,erinspace/osf.io,Nesiehr/osf.io,aaxelb/osf.io,amyshi188/osf.io,Johnetordoff/osf.io,mluke93/osf.io,alexschiller/osf.io,emetsger/osf.io,hmoco/osf.io,alexschiller/osf.io,acshi/osf.io,binoculars/osf.io,laurenrevere/osf.io,cwisecarver/osf.io,saradbowman/osf.io,SSJohns/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,leb2dg/osf.io,sloria/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,rdhyee/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,wearpants/osf.io,mluo613/osf.io,samchrisinger/osf.io,aaxelb/osf.io,acshi/osf.io,chrisseto/osf.io,abought/osf.io,rdhyee/osf.io,brianjgeiger/osf.io,abought/osf.io,mluo613/osf.io,abought/osf.io,emetsger/osf.io,mfraezz/osf.io,adlius/osf.io,Johnetordoff/osf.io,hmoco/osf.io,cwisecarver/osf.io,felliott/osf.io,samchrisinger/osf.io,adlius/osf.io,chrisseto/osf.io,caneruguz/osf.io,caseyrollins/osf.io
|
Add migration to correct q5 responses for registrations and drafts using Prereg Challenge schema.
|
"""
Small migration - Prereg challenge schema q5 mult choice responses have extra trailing space. This gets confusing when
user trying to update prereg draft via API.
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from scripts import utils as scripts_utils
from website.models import DraftRegistration, Node, MetaSchema
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def migrate_drafts_q5_metadata(schema):
"""
Finds Prereg Challenge draft registrations and corrects q5 response metadata
"""
drafts = DraftRegistration.find(Q('registration_schema', 'eq', schema))
total_drafts = drafts.count()
logger.info('Examining {} drafts for q5 metadata'.format(total_drafts))
draft_count = 0
for draft in drafts:
draft_count += 1
if draft.registration_metadata.get('q5', {}).get('value', {}):
draft.registration_metadata['q5']['value'] = draft.registration_metadata['q5']['value'].rstrip()
draft.save()
logger.info('{}/{} Migrated q5 response for {}'.format(draft_count, total_drafts, draft._id))
else:
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(draft_count, drafts.count(), draft._id))
def migrate_registrations_q5_metadata(schema):
"""
Finds Prereg Challenge registrations whose registered_meta includes q5 and corrects
"""
registrations = Node.find(Q('is_registration', 'eq', True) & Q('registered_schema', 'eq', schema))
total_reg = registrations.count()
logger.info('Examining {} registrations for q5 metadata'.format(total_reg))
reg_count = 0
for reg in registrations:
reg_count += 1
if reg.registered_meta.get(schema._id, {}).get('q5', {}).get('value', {}):
reg.registered_meta[schema._id]['q5']['value'] = reg.registered_meta[schema._id]['q5']['value'].rstrip()
reg.save()
logger.info('{}/{} Migrated q5 response for {}'.format(reg_count, total_reg, reg._id))
else:
# q5 is a required question, so should be answered, but just in case...
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(reg_count, total_reg, reg._id))
def main(dry=True):
init_app(set_backends=True, routes=False)
if not dry:
scripts_utils.add_file_logger(logger, __file__)
prereg = MetaSchema.find_one(
Q('name', 'eq', "Prereg Challenge"))
migrate_drafts_q5_metadata(prereg)
migrate_registrations_q5_metadata(prereg)
if __name__ == '__main__':
dry_run = '--dry' in sys.argv
with TokuTransaction():
main(dry=dry_run)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
|
<commit_before><commit_msg>Add migration to correct q5 responses for registrations and drafts using Prereg Challenge schema.<commit_after>
|
"""
Small migration - Prereg challenge schema q5 mult choice responses have extra trailing space. This gets confusing when
user trying to update prereg draft via API.
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from scripts import utils as scripts_utils
from website.models import DraftRegistration, Node, MetaSchema
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def migrate_drafts_q5_metadata(schema):
"""
Finds Prereg Challenge draft registrations and corrects q5 response metadata
"""
drafts = DraftRegistration.find(Q('registration_schema', 'eq', schema))
total_drafts = drafts.count()
logger.info('Examining {} drafts for q5 metadata'.format(total_drafts))
draft_count = 0
for draft in drafts:
draft_count += 1
if draft.registration_metadata.get('q5', {}).get('value', {}):
draft.registration_metadata['q5']['value'] = draft.registration_metadata['q5']['value'].rstrip()
draft.save()
logger.info('{}/{} Migrated q5 response for {}'.format(draft_count, total_drafts, draft._id))
else:
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(draft_count, drafts.count(), draft._id))
def migrate_registrations_q5_metadata(schema):
"""
Finds Prereg Challenge registrations whose registered_meta includes q5 and corrects
"""
registrations = Node.find(Q('is_registration', 'eq', True) & Q('registered_schema', 'eq', schema))
total_reg = registrations.count()
logger.info('Examining {} registrations for q5 metadata'.format(total_reg))
reg_count = 0
for reg in registrations:
reg_count += 1
if reg.registered_meta.get(schema._id, {}).get('q5', {}).get('value', {}):
reg.registered_meta[schema._id]['q5']['value'] = reg.registered_meta[schema._id]['q5']['value'].rstrip()
reg.save()
logger.info('{}/{} Migrated q5 response for {}'.format(reg_count, total_reg, reg._id))
else:
# q5 is a required question, so should be answered, but just in case...
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(reg_count, total_reg, reg._id))
def main(dry=True):
init_app(set_backends=True, routes=False)
if not dry:
scripts_utils.add_file_logger(logger, __file__)
prereg = MetaSchema.find_one(
Q('name', 'eq', "Prereg Challenge"))
migrate_drafts_q5_metadata(prereg)
migrate_registrations_q5_metadata(prereg)
if __name__ == '__main__':
dry_run = '--dry' in sys.argv
with TokuTransaction():
main(dry=dry_run)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
|
Add migration to correct q5 responses for registrations and drafts using Prereg Challenge schema.
"""
Small migration - Prereg challenge schema q5 mult choice responses have extra trailing space. This gets confusing when
user trying to update prereg draft via API.
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from scripts import utils as scripts_utils
from website.models import DraftRegistration, Node, MetaSchema
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def migrate_drafts_q5_metadata(schema):
"""
Finds Prereg Challenge draft registrations and corrects q5 response metadata
"""
drafts = DraftRegistration.find(Q('registration_schema', 'eq', schema))
total_drafts = drafts.count()
logger.info('Examining {} drafts for q5 metadata'.format(total_drafts))
draft_count = 0
for draft in drafts:
draft_count += 1
if draft.registration_metadata.get('q5', {}).get('value', {}):
draft.registration_metadata['q5']['value'] = draft.registration_metadata['q5']['value'].rstrip()
draft.save()
logger.info('{}/{} Migrated q5 response for {}'.format(draft_count, total_drafts, draft._id))
else:
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(draft_count, drafts.count(), draft._id))
def migrate_registrations_q5_metadata(schema):
"""
Finds Prereg Challenge registrations whose registered_meta includes q5 and corrects
"""
registrations = Node.find(Q('is_registration', 'eq', True) & Q('registered_schema', 'eq', schema))
total_reg = registrations.count()
logger.info('Examining {} registrations for q5 metadata'.format(total_reg))
reg_count = 0
for reg in registrations:
reg_count += 1
if reg.registered_meta.get(schema._id, {}).get('q5', {}).get('value', {}):
reg.registered_meta[schema._id]['q5']['value'] = reg.registered_meta[schema._id]['q5']['value'].rstrip()
reg.save()
logger.info('{}/{} Migrated q5 response for {}'.format(reg_count, total_reg, reg._id))
else:
# q5 is a required question, so should be answered, but just in case...
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(reg_count, total_reg, reg._id))
def main(dry=True):
init_app(set_backends=True, routes=False)
if not dry:
scripts_utils.add_file_logger(logger, __file__)
prereg = MetaSchema.find_one(
Q('name', 'eq', "Prereg Challenge"))
migrate_drafts_q5_metadata(prereg)
migrate_registrations_q5_metadata(prereg)
if __name__ == '__main__':
dry_run = '--dry' in sys.argv
with TokuTransaction():
main(dry=dry_run)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
|
<commit_before><commit_msg>Add migration to correct q5 responses for registrations and drafts using Prereg Challenge schema.<commit_after>"""
Small migration - Prereg challenge schema q5 mult choice responses have extra trailing space. This gets confusing when
user trying to update prereg draft via API.
"""
import sys
import logging
from modularodm import Q
from website.app import init_app
from scripts import utils as scripts_utils
from website.models import DraftRegistration, Node, MetaSchema
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def migrate_drafts_q5_metadata(schema):
"""
Finds Prereg Challenge draft registrations and corrects q5 response metadata
"""
drafts = DraftRegistration.find(Q('registration_schema', 'eq', schema))
total_drafts = drafts.count()
logger.info('Examining {} drafts for q5 metadata'.format(total_drafts))
draft_count = 0
for draft in drafts:
draft_count += 1
if draft.registration_metadata.get('q5', {}).get('value', {}):
draft.registration_metadata['q5']['value'] = draft.registration_metadata['q5']['value'].rstrip()
draft.save()
logger.info('{}/{} Migrated q5 response for {}'.format(draft_count, total_drafts, draft._id))
else:
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(draft_count, drafts.count(), draft._id))
def migrate_registrations_q5_metadata(schema):
"""
Finds Prereg Challenge registrations whose registered_meta includes q5 and corrects
"""
registrations = Node.find(Q('is_registration', 'eq', True) & Q('registered_schema', 'eq', schema))
total_reg = registrations.count()
logger.info('Examining {} registrations for q5 metadata'.format(total_reg))
reg_count = 0
for reg in registrations:
reg_count += 1
if reg.registered_meta.get(schema._id, {}).get('q5', {}).get('value', {}):
reg.registered_meta[schema._id]['q5']['value'] = reg.registered_meta[schema._id]['q5']['value'].rstrip()
reg.save()
logger.info('{}/{} Migrated q5 response for {}'.format(reg_count, total_reg, reg._id))
else:
# q5 is a required question, so should be answered, but just in case...
logger.info('{}/{} q5 not answered. No change needed for {}.'.format(reg_count, total_reg, reg._id))
def main(dry=True):
init_app(set_backends=True, routes=False)
if not dry:
scripts_utils.add_file_logger(logger, __file__)
prereg = MetaSchema.find_one(
Q('name', 'eq', "Prereg Challenge"))
migrate_drafts_q5_metadata(prereg)
migrate_registrations_q5_metadata(prereg)
if __name__ == '__main__':
dry_run = '--dry' in sys.argv
with TokuTransaction():
main(dry=dry_run)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
|
|
0cca921ff11a537449e37b81dd47467eb83a7a0c
|
telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py
|
telemetry/telemetry/core/backends/chrome/ios_browser_finder_unittest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
Add unit test for IosBrowserFinder.
|
Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
sahiljain/catapult,sahiljain/catapult,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,catapult-project/catapult,benschmaus/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,sahiljain/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult-csm,sahiljain/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult
|
Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for IosBrowserFinder.
This test checks if Chrome on iOS is running. It only
runs on iOS platforms.
BUG=None
Review URL: https://codereview.chromium.org/350583002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@279143 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import ios_browser_finder
from telemetry.unittest import test
class IosBrowserFinderUnitTest(unittest.TestCase):
# TODO(baxley): Currently the tests require a device with Chrome running.
# This should be stubbed out so it runs on any system, with no device
# dependencies.
@test.Enabled('ios')
def testFindIosChrome(self):
finder_options = browser_options.BrowserFinderOptions()
browsers = ios_browser_finder.FindAllAvailableBrowsers(finder_options)
self.assertTrue(browsers)
for browser in browsers:
self.assertEqual('ios-chrome', browser.browser_type)
if __name__ == '__main__':
unittest.main()
|
|
68878c516c497103586cb4de38b371f02ab6bee2
|
oneflow/profiles/api.py
|
oneflow/profiles/api.py
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
# NOTE: "user" won't work because it's a OneToOne field in DJango.
# We need "user_id". See http://stackoverflow.com/a/15609667/654755
user_id = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
Fix the `User` not being loaded client side.
|
Fix the `User` not being loaded client side.
|
Python
|
agpl-3.0
|
WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow,1flow/1flow,WillianPaiva/1flow,WillianPaiva/1flow,1flow/1flow,WillianPaiva/1flow,1flow/1flow
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
Fix the `User` not being loaded client side.
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
# NOTE: "user" won't work because it's a OneToOne field in DJango.
# We need "user_id". See http://stackoverflow.com/a/15609667/654755
user_id = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
<commit_before># -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
<commit_msg>Fix the `User` not being loaded client side.<commit_after>
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
# NOTE: "user" won't work because it's a OneToOne field in DJango.
# We need "user_id". See http://stackoverflow.com/a/15609667/654755
user_id = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
Fix the `User` not being loaded client side.
# -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
# NOTE: "user" won't work because it's a OneToOne field in DJango.
# We need "user_id". See http://stackoverflow.com/a/15609667/654755
user_id = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
<commit_before># -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
<commit_msg>Fix the `User` not being loaded client side.<commit_after># -*- coding: utf-8 -*-
import logging
from django.contrib.auth import get_user_model
from tastypie.resources import ModelResource
from tastypie import fields
from ..base.api import common_authentication, UserObjectsOnlyAuthorization
from .models import UserProfile
LOGGER = logging.getLogger(__name__)
User = get_user_model()
class EmberMeta:
# Ember-data expect the following 2 directives
always_return_data = True
allowed_methods = ('get', 'post', 'put', 'delete')
# These are specific to 1flow functionnals.
authentication = common_authentication
authorization = UserObjectsOnlyAuthorization()
class UserResource(ModelResource):
class Meta(EmberMeta):
queryset = User.objects.all()
resource_name = 'user'
class UserProfileResource(ModelResource):
# NOTE: "user" won't work because it's a OneToOne field in DJango.
# We need "user_id". See http://stackoverflow.com/a/15609667/654755
user_id = fields.ForeignKey(UserResource, 'user')
class Meta(EmberMeta):
queryset = UserProfile.objects.all()
resource_name = 'user_profile'
|
b1f772c788f0c652edbd8d7134c8318e4736b2fb
|
src/easy-plural-list-build.py
|
src/easy-plural-list-build.py
|
#!/usr/bin/env python3
import sys
import json
import os.path
def main(argv):
list = []
if os.path.isfile("easy-plural-list.json"):
f = open("easy-plural-list.json","r",encoding="utf-8")
list = json.load(f)
f.close()
if len(argv) == 3 :
if (argv[1] == "--add") or (argv[1] == "-a") :
if argv[2].replace(' ','') in list :
print("Already in list.")
else:
list.append(argv[2].replace(' ',''))
f = open("easy-plural-list.json","w",encoding="utf-8")
json.dump(list,f)
f.close()
if (len(argv) == 1) or ((len(argv)==2) and (argv[1] == "--generate" or argv[1] == "-g")) :
print("Maybe sometime...")
if __name__ == "__main__":
main(sys.argv)
|
Manage and generate a C list of plurals
|
Manage and generate a C list of plurals
|
Python
|
apache-2.0
|
DIG-/easygettext,DIG-/easygettext
|
Manage and generate a C list of plurals
|
#!/usr/bin/env python3
import sys
import json
import os.path
def main(argv):
list = []
if os.path.isfile("easy-plural-list.json"):
f = open("easy-plural-list.json","r",encoding="utf-8")
list = json.load(f)
f.close()
if len(argv) == 3 :
if (argv[1] == "--add") or (argv[1] == "-a") :
if argv[2].replace(' ','') in list :
print("Already in list.")
else:
list.append(argv[2].replace(' ',''))
f = open("easy-plural-list.json","w",encoding="utf-8")
json.dump(list,f)
f.close()
if (len(argv) == 1) or ((len(argv)==2) and (argv[1] == "--generate" or argv[1] == "-g")) :
print("Maybe sometime...")
if __name__ == "__main__":
main(sys.argv)
|
<commit_before><commit_msg>Manage and generate a C list of plurals<commit_after>
|
#!/usr/bin/env python3
import sys
import json
import os.path
def main(argv):
list = []
if os.path.isfile("easy-plural-list.json"):
f = open("easy-plural-list.json","r",encoding="utf-8")
list = json.load(f)
f.close()
if len(argv) == 3 :
if (argv[1] == "--add") or (argv[1] == "-a") :
if argv[2].replace(' ','') in list :
print("Already in list.")
else:
list.append(argv[2].replace(' ',''))
f = open("easy-plural-list.json","w",encoding="utf-8")
json.dump(list,f)
f.close()
if (len(argv) == 1) or ((len(argv)==2) and (argv[1] == "--generate" or argv[1] == "-g")) :
print("Maybe sometime...")
if __name__ == "__main__":
main(sys.argv)
|
Manage and generate a C list of plurals
#!/usr/bin/env python3
import sys
import json
import os.path
def main(argv):
list = []
if os.path.isfile("easy-plural-list.json"):
f = open("easy-plural-list.json","r",encoding="utf-8")
list = json.load(f)
f.close()
if len(argv) == 3 :
if (argv[1] == "--add") or (argv[1] == "-a") :
if argv[2].replace(' ','') in list :
print("Already in list.")
else:
list.append(argv[2].replace(' ',''))
f = open("easy-plural-list.json","w",encoding="utf-8")
json.dump(list,f)
f.close()
if (len(argv) == 1) or ((len(argv)==2) and (argv[1] == "--generate" or argv[1] == "-g")) :
print("Maybe sometime...")
if __name__ == "__main__":
main(sys.argv)
|
<commit_before><commit_msg>Manage and generate a C list of plurals<commit_after>#!/usr/bin/env python3
import sys
import json
import os.path
def main(argv):
list = []
if os.path.isfile("easy-plural-list.json"):
f = open("easy-plural-list.json","r",encoding="utf-8")
list = json.load(f)
f.close()
if len(argv) == 3 :
if (argv[1] == "--add") or (argv[1] == "-a") :
if argv[2].replace(' ','') in list :
print("Already in list.")
else:
list.append(argv[2].replace(' ',''))
f = open("easy-plural-list.json","w",encoding="utf-8")
json.dump(list,f)
f.close()
if (len(argv) == 1) or ((len(argv)==2) and (argv[1] == "--generate" or argv[1] == "-g")) :
print("Maybe sometime...")
if __name__ == "__main__":
main(sys.argv)
|
|
2e0e6239b690a55ed7f89c11b29621b6329a8983
|
thinc/tests/unit/test_pooling.py
|
thinc/tests/unit/test_pooling.py
|
import pytest
import numpy
from numpy.testing import assert_allclose
from ...neural._classes.model import Model
from ...neural.vecs2vec import MeanPooling, MaxPooling
from ...neural.vecs2vec import MinPooling
@pytest.fixture(params=[MeanPooling, MaxPooling, MinPooling])
def PoolClass(request):
return request.param
@pytest.fixture
def X(nB, nI):
return [numpy.zeros((nB, nI))+i for i in range(5)]
@pytest.fixture
def dY(X, nI):
return numpy.ones((len(X), nI))
@pytest.fixture
def model(PoolClass):
return PoolClass()
def test_pools_are_created_successfully(model):
assert hasattr(model, 'predict')
assert hasattr(model, 'begin_update')
assert isinstance(model, Model)
def test_pools_predict_shapes(model, X, nB, nI):
y = model.predict(X)
assert y.shape == (len(X), nI)
def test_pools_begin_update_shapes(model, X, nB, nI):
y, _ = model.begin_update(X)
assert y.shape == (len(X), nI)
def test_pools_finish_update_shapes(model, X, dY, nB, nI):
y, finish_update = model.begin_update(X)
gradient = finish_update(dY)
assert len(gradient) == len(X)
assert all([g.shape == x.shape for g, x in zip(gradient, X)])
def test_pools_predict_matches_finish_update(model, X):
y = model.predict(X)
y2, _ = model.begin_update(X)
assert_allclose(y, y2)
def test_zero_length_input_succeeds(model):
zero = numpy.ones((0, 10))
ones = numpy.ones((5, 10))
y = model.predict([zero, ones])
assert y.shape == (2, 10)
|
Add simple unit tests for max, min and mean pooling
|
Add simple unit tests for max, min and mean pooling
|
Python
|
mit
|
explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc
|
Add simple unit tests for max, min and mean pooling
|
import pytest
import numpy
from numpy.testing import assert_allclose
from ...neural._classes.model import Model
from ...neural.vecs2vec import MeanPooling, MaxPooling
from ...neural.vecs2vec import MinPooling
@pytest.fixture(params=[MeanPooling, MaxPooling, MinPooling])
def PoolClass(request):
return request.param
@pytest.fixture
def X(nB, nI):
return [numpy.zeros((nB, nI))+i for i in range(5)]
@pytest.fixture
def dY(X, nI):
return numpy.ones((len(X), nI))
@pytest.fixture
def model(PoolClass):
return PoolClass()
def test_pools_are_created_successfully(model):
assert hasattr(model, 'predict')
assert hasattr(model, 'begin_update')
assert isinstance(model, Model)
def test_pools_predict_shapes(model, X, nB, nI):
y = model.predict(X)
assert y.shape == (len(X), nI)
def test_pools_begin_update_shapes(model, X, nB, nI):
y, _ = model.begin_update(X)
assert y.shape == (len(X), nI)
def test_pools_finish_update_shapes(model, X, dY, nB, nI):
y, finish_update = model.begin_update(X)
gradient = finish_update(dY)
assert len(gradient) == len(X)
assert all([g.shape == x.shape for g, x in zip(gradient, X)])
def test_pools_predict_matches_finish_update(model, X):
y = model.predict(X)
y2, _ = model.begin_update(X)
assert_allclose(y, y2)
def test_zero_length_input_succeeds(model):
zero = numpy.ones((0, 10))
ones = numpy.ones((5, 10))
y = model.predict([zero, ones])
assert y.shape == (2, 10)
|
<commit_before><commit_msg>Add simple unit tests for max, min and mean pooling<commit_after>
|
import pytest
import numpy
from numpy.testing import assert_allclose
from ...neural._classes.model import Model
from ...neural.vecs2vec import MeanPooling, MaxPooling
from ...neural.vecs2vec import MinPooling
@pytest.fixture(params=[MeanPooling, MaxPooling, MinPooling])
def PoolClass(request):
return request.param
@pytest.fixture
def X(nB, nI):
return [numpy.zeros((nB, nI))+i for i in range(5)]
@pytest.fixture
def dY(X, nI):
return numpy.ones((len(X), nI))
@pytest.fixture
def model(PoolClass):
return PoolClass()
def test_pools_are_created_successfully(model):
assert hasattr(model, 'predict')
assert hasattr(model, 'begin_update')
assert isinstance(model, Model)
def test_pools_predict_shapes(model, X, nB, nI):
y = model.predict(X)
assert y.shape == (len(X), nI)
def test_pools_begin_update_shapes(model, X, nB, nI):
y, _ = model.begin_update(X)
assert y.shape == (len(X), nI)
def test_pools_finish_update_shapes(model, X, dY, nB, nI):
y, finish_update = model.begin_update(X)
gradient = finish_update(dY)
assert len(gradient) == len(X)
assert all([g.shape == x.shape for g, x in zip(gradient, X)])
def test_pools_predict_matches_finish_update(model, X):
y = model.predict(X)
y2, _ = model.begin_update(X)
assert_allclose(y, y2)
def test_zero_length_input_succeeds(model):
zero = numpy.ones((0, 10))
ones = numpy.ones((5, 10))
y = model.predict([zero, ones])
assert y.shape == (2, 10)
|
Add simple unit tests for max, min and mean pooling
import pytest
import numpy
from numpy.testing import assert_allclose
from ...neural._classes.model import Model
from ...neural.vecs2vec import MeanPooling, MaxPooling
from ...neural.vecs2vec import MinPooling
@pytest.fixture(params=[MeanPooling, MaxPooling, MinPooling])
def PoolClass(request):
return request.param
@pytest.fixture
def X(nB, nI):
return [numpy.zeros((nB, nI))+i for i in range(5)]
@pytest.fixture
def dY(X, nI):
return numpy.ones((len(X), nI))
@pytest.fixture
def model(PoolClass):
return PoolClass()
def test_pools_are_created_successfully(model):
assert hasattr(model, 'predict')
assert hasattr(model, 'begin_update')
assert isinstance(model, Model)
def test_pools_predict_shapes(model, X, nB, nI):
y = model.predict(X)
assert y.shape == (len(X), nI)
def test_pools_begin_update_shapes(model, X, nB, nI):
y, _ = model.begin_update(X)
assert y.shape == (len(X), nI)
def test_pools_finish_update_shapes(model, X, dY, nB, nI):
y, finish_update = model.begin_update(X)
gradient = finish_update(dY)
assert len(gradient) == len(X)
assert all([g.shape == x.shape for g, x in zip(gradient, X)])
def test_pools_predict_matches_finish_update(model, X):
y = model.predict(X)
y2, _ = model.begin_update(X)
assert_allclose(y, y2)
def test_zero_length_input_succeeds(model):
zero = numpy.ones((0, 10))
ones = numpy.ones((5, 10))
y = model.predict([zero, ones])
assert y.shape == (2, 10)
|
<commit_before><commit_msg>Add simple unit tests for max, min and mean pooling<commit_after>import pytest
import numpy
from numpy.testing import assert_allclose
from ...neural._classes.model import Model
from ...neural.vecs2vec import MeanPooling, MaxPooling
from ...neural.vecs2vec import MinPooling
@pytest.fixture(params=[MeanPooling, MaxPooling, MinPooling])
def PoolClass(request):
return request.param
@pytest.fixture
def X(nB, nI):
return [numpy.zeros((nB, nI))+i for i in range(5)]
@pytest.fixture
def dY(X, nI):
return numpy.ones((len(X), nI))
@pytest.fixture
def model(PoolClass):
return PoolClass()
def test_pools_are_created_successfully(model):
assert hasattr(model, 'predict')
assert hasattr(model, 'begin_update')
assert isinstance(model, Model)
def test_pools_predict_shapes(model, X, nB, nI):
y = model.predict(X)
assert y.shape == (len(X), nI)
def test_pools_begin_update_shapes(model, X, nB, nI):
y, _ = model.begin_update(X)
assert y.shape == (len(X), nI)
def test_pools_finish_update_shapes(model, X, dY, nB, nI):
y, finish_update = model.begin_update(X)
gradient = finish_update(dY)
assert len(gradient) == len(X)
assert all([g.shape == x.shape for g, x in zip(gradient, X)])
def test_pools_predict_matches_finish_update(model, X):
y = model.predict(X)
y2, _ = model.begin_update(X)
assert_allclose(y, y2)
def test_zero_length_input_succeeds(model):
zero = numpy.ones((0, 10))
ones = numpy.ones((5, 10))
y = model.predict([zero, ones])
assert y.shape == (2, 10)
|
|
4705eae5d233ea573da3482541fd52778cff88ef
|
corehq/apps/data_interfaces/migrations/0019_remove_old_rule_models.py
|
corehq/apps/data_interfaces/migrations/0019_remove_old_rule_models.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-11 15:24
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0018_check_for_rule_migration'),
]
operations = [
migrations.RemoveField(
model_name='automaticupdateaction',
name='rule',
),
migrations.RemoveField(
model_name='automaticupdaterulecriteria',
name='rule',
),
migrations.DeleteModel(
name='AutomaticUpdateAction',
),
migrations.DeleteModel(
name='AutomaticUpdateRuleCriteria',
),
]
|
Add migration to remove old rule models
|
Add migration to remove old rule models
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add migration to remove old rule models
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-11 15:24
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0018_check_for_rule_migration'),
]
operations = [
migrations.RemoveField(
model_name='automaticupdateaction',
name='rule',
),
migrations.RemoveField(
model_name='automaticupdaterulecriteria',
name='rule',
),
migrations.DeleteModel(
name='AutomaticUpdateAction',
),
migrations.DeleteModel(
name='AutomaticUpdateRuleCriteria',
),
]
|
<commit_before><commit_msg>Add migration to remove old rule models<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-11 15:24
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0018_check_for_rule_migration'),
]
operations = [
migrations.RemoveField(
model_name='automaticupdateaction',
name='rule',
),
migrations.RemoveField(
model_name='automaticupdaterulecriteria',
name='rule',
),
migrations.DeleteModel(
name='AutomaticUpdateAction',
),
migrations.DeleteModel(
name='AutomaticUpdateRuleCriteria',
),
]
|
Add migration to remove old rule models
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-11 15:24
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0018_check_for_rule_migration'),
]
operations = [
migrations.RemoveField(
model_name='automaticupdateaction',
name='rule',
),
migrations.RemoveField(
model_name='automaticupdaterulecriteria',
name='rule',
),
migrations.DeleteModel(
name='AutomaticUpdateAction',
),
migrations.DeleteModel(
name='AutomaticUpdateRuleCriteria',
),
]
|
<commit_before><commit_msg>Add migration to remove old rule models<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-09-11 15:24
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data_interfaces', '0018_check_for_rule_migration'),
]
operations = [
migrations.RemoveField(
model_name='automaticupdateaction',
name='rule',
),
migrations.RemoveField(
model_name='automaticupdaterulecriteria',
name='rule',
),
migrations.DeleteModel(
name='AutomaticUpdateAction',
),
migrations.DeleteModel(
name='AutomaticUpdateRuleCriteria',
),
]
|
|
52cc08dd2df39d8b64ac1a95b6861985ca7ac487
|
erpnext/manufacturing/doctype/bom_update_tool/test_bom_update_tool.py
|
erpnext/manufacturing/doctype/bom_update_tool/test_bom_update_tool.py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_records = frappe.get_test_records('BOM')
class TestBOMUpdateTool(unittest.TestCase):
def test_replace_bom(self):
current_bom = "BOM-_Test Item Home Desktop Manufactured-001"
bom_doc = frappe.copy_doc(test_records[0])
bom_doc.items[1].item_code = "_Test Item"
bom_doc.insert()
update_tool = frappe.get_doc("BOM Update Tool")
update_tool.current_bom = current_bom
update_tool.new_bom = bom_doc.name
update_tool.replace_bom()
self.assertFalse(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", current_bom))
self.assertTrue(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", bom_doc.name))
|
Test case added for replacing BOM
|
Test case added for replacing BOM
|
Python
|
agpl-3.0
|
gsnbng/erpnext,geekroot/erpnext,geekroot/erpnext,indictranstech/erpnext,indictranstech/erpnext,gsnbng/erpnext,indictranstech/erpnext,geekroot/erpnext,gsnbng/erpnext,gsnbng/erpnext,geekroot/erpnext,indictranstech/erpnext
|
Test case added for replacing BOM
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_records = frappe.get_test_records('BOM')
class TestBOMUpdateTool(unittest.TestCase):
def test_replace_bom(self):
current_bom = "BOM-_Test Item Home Desktop Manufactured-001"
bom_doc = frappe.copy_doc(test_records[0])
bom_doc.items[1].item_code = "_Test Item"
bom_doc.insert()
update_tool = frappe.get_doc("BOM Update Tool")
update_tool.current_bom = current_bom
update_tool.new_bom = bom_doc.name
update_tool.replace_bom()
self.assertFalse(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", current_bom))
self.assertTrue(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", bom_doc.name))
|
<commit_before><commit_msg>Test case added for replacing BOM<commit_after>
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_records = frappe.get_test_records('BOM')
class TestBOMUpdateTool(unittest.TestCase):
def test_replace_bom(self):
current_bom = "BOM-_Test Item Home Desktop Manufactured-001"
bom_doc = frappe.copy_doc(test_records[0])
bom_doc.items[1].item_code = "_Test Item"
bom_doc.insert()
update_tool = frappe.get_doc("BOM Update Tool")
update_tool.current_bom = current_bom
update_tool.new_bom = bom_doc.name
update_tool.replace_bom()
self.assertFalse(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", current_bom))
self.assertTrue(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", bom_doc.name))
|
Test case added for replacing BOM
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_records = frappe.get_test_records('BOM')
class TestBOMUpdateTool(unittest.TestCase):
def test_replace_bom(self):
current_bom = "BOM-_Test Item Home Desktop Manufactured-001"
bom_doc = frappe.copy_doc(test_records[0])
bom_doc.items[1].item_code = "_Test Item"
bom_doc.insert()
update_tool = frappe.get_doc("BOM Update Tool")
update_tool.current_bom = current_bom
update_tool.new_bom = bom_doc.name
update_tool.replace_bom()
self.assertFalse(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", current_bom))
self.assertTrue(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", bom_doc.name))
|
<commit_before><commit_msg>Test case added for replacing BOM<commit_after># Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import unittest
import frappe
test_records = frappe.get_test_records('BOM')
class TestBOMUpdateTool(unittest.TestCase):
def test_replace_bom(self):
current_bom = "BOM-_Test Item Home Desktop Manufactured-001"
bom_doc = frappe.copy_doc(test_records[0])
bom_doc.items[1].item_code = "_Test Item"
bom_doc.insert()
update_tool = frappe.get_doc("BOM Update Tool")
update_tool.current_bom = current_bom
update_tool.new_bom = bom_doc.name
update_tool.replace_bom()
self.assertFalse(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", current_bom))
self.assertTrue(frappe.db.sql("select name from `tabBOM Item` where bom_no=%s", bom_doc.name))
|
|
06d88903d5eac69d217959dedf946f31f940ea19
|
datasets/migrations/0019_auto_20170621_1528.py
|
datasets/migrations/0019_auto_20170621_1528.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-21 13:28
from __future__ import unicode_literals
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
def add_fs_examples(apps, schema_editor):
Sound = apps.get_model('datasets', 'Sound')
TaxonomyNode = apps.get_model('datasets', 'TaxonomyNode')
Taxonomy = apps.get_model('datasets', 'Taxonomy')
all_taxonomies = Taxonomy.objects.all()
# loop on all taxonomy
for taxonomy in all_taxonomies:
all_taxonomy_nodes = taxonomy.taxonomynode_set.all()
#loop for adding example Sound relations
for taxonomy_node in all_taxonomy_nodes:
for freesound_id in taxonomy.data[taxonomy_node.node_id]['positive_examples_FS']:
try:
sound = Sound.objects.get(freesound_id=freesound_id)
taxonomy_node.freesound_examples.add(sound)
except ObjectDoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('datasets', '0018_auto_20170620_1305'),
]
operations = [migrations.RunPython(add_fs_examples, migrations.RunPython.noop),
]
|
Add migration for freesound examples
|
Add migration for freesound examples
|
Python
|
agpl-3.0
|
MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets
|
Add migration for freesound examples
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-21 13:28
from __future__ import unicode_literals
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
def add_fs_examples(apps, schema_editor):
Sound = apps.get_model('datasets', 'Sound')
TaxonomyNode = apps.get_model('datasets', 'TaxonomyNode')
Taxonomy = apps.get_model('datasets', 'Taxonomy')
all_taxonomies = Taxonomy.objects.all()
# loop on all taxonomy
for taxonomy in all_taxonomies:
all_taxonomy_nodes = taxonomy.taxonomynode_set.all()
#loop for adding example Sound relations
for taxonomy_node in all_taxonomy_nodes:
for freesound_id in taxonomy.data[taxonomy_node.node_id]['positive_examples_FS']:
try:
sound = Sound.objects.get(freesound_id=freesound_id)
taxonomy_node.freesound_examples.add(sound)
except ObjectDoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('datasets', '0018_auto_20170620_1305'),
]
operations = [migrations.RunPython(add_fs_examples, migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Add migration for freesound examples<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-21 13:28
from __future__ import unicode_literals
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
def add_fs_examples(apps, schema_editor):
Sound = apps.get_model('datasets', 'Sound')
TaxonomyNode = apps.get_model('datasets', 'TaxonomyNode')
Taxonomy = apps.get_model('datasets', 'Taxonomy')
all_taxonomies = Taxonomy.objects.all()
# loop on all taxonomy
for taxonomy in all_taxonomies:
all_taxonomy_nodes = taxonomy.taxonomynode_set.all()
#loop for adding example Sound relations
for taxonomy_node in all_taxonomy_nodes:
for freesound_id in taxonomy.data[taxonomy_node.node_id]['positive_examples_FS']:
try:
sound = Sound.objects.get(freesound_id=freesound_id)
taxonomy_node.freesound_examples.add(sound)
except ObjectDoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('datasets', '0018_auto_20170620_1305'),
]
operations = [migrations.RunPython(add_fs_examples, migrations.RunPython.noop),
]
|
Add migration for freesound examples
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-21 13:28
from __future__ import unicode_literals
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
def add_fs_examples(apps, schema_editor):
Sound = apps.get_model('datasets', 'Sound')
TaxonomyNode = apps.get_model('datasets', 'TaxonomyNode')
Taxonomy = apps.get_model('datasets', 'Taxonomy')
all_taxonomies = Taxonomy.objects.all()
# loop on all taxonomy
for taxonomy in all_taxonomies:
all_taxonomy_nodes = taxonomy.taxonomynode_set.all()
#loop for adding example Sound relations
for taxonomy_node in all_taxonomy_nodes:
for freesound_id in taxonomy.data[taxonomy_node.node_id]['positive_examples_FS']:
try:
sound = Sound.objects.get(freesound_id=freesound_id)
taxonomy_node.freesound_examples.add(sound)
except ObjectDoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('datasets', '0018_auto_20170620_1305'),
]
operations = [migrations.RunPython(add_fs_examples, migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Add migration for freesound examples<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-21 13:28
from __future__ import unicode_literals
from django.db import migrations
from django.core.exceptions import ObjectDoesNotExist
def add_fs_examples(apps, schema_editor):
Sound = apps.get_model('datasets', 'Sound')
TaxonomyNode = apps.get_model('datasets', 'TaxonomyNode')
Taxonomy = apps.get_model('datasets', 'Taxonomy')
all_taxonomies = Taxonomy.objects.all()
# loop over all taxonomies
for taxonomy in all_taxonomies:
all_taxonomy_nodes = taxonomy.taxonomynode_set.all()
# loop for adding example Sound relations
for taxonomy_node in all_taxonomy_nodes:
for freesound_id in taxonomy.data[taxonomy_node.node_id]['positive_examples_FS']:
try:
sound = Sound.objects.get(freesound_id=freesound_id)
taxonomy_node.freesound_examples.add(sound)
except ObjectDoesNotExist:
continue
class Migration(migrations.Migration):
dependencies = [
('datasets', '0018_auto_20170620_1305'),
]
operations = [migrations.RunPython(add_fs_examples, migrations.RunPython.noop),
]
|
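A hedged aside on exercising the data migration above: the reverse step is a noop, so it is safe to apply and roll back; the manage.py invocation and the node_id below are illustrative assumptions, not part of the original commit.
# Apply the datasets migrations, which runs add_fs_examples forwards:
#   python manage.py migrate datasets
# Spot-check one node from a Django shell afterwards:
from datasets.models import TaxonomyNode
node = TaxonomyNode.objects.filter(node_id='/m/0dgw9r').first()  # hypothetical node_id
if node is not None:
    print(node.freesound_examples.count())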
|
d36f0e06533f08e471981f6afee56a78a0be87fc
|
scripts/merge_translations.py
|
scripts/merge_translations.py
|
import sys
import yaml
def main(base_file, new_file, overwrite_language):
old = yaml.load(file(base_file).read())
new = yaml.load(file(new_file).read())
assert len(overwrite_language) == 2
for o, n in zip(old, new):
if overwrite_language in n['text']:
o['text'][overwrite_language] = n['text'][overwrite_language]
if o['type'] == 'multiple_choice':
for oo, on in zip(o['options'], n['options']):
if 'details' in oo and overwrite_language in on['details']:
oo['details'][overwrite_language] = on['details'][overwrite_language]
sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, encoding='utf-8', width=10000))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
Add translation merging python script
|
Add translation merging python script
|
Python
|
mit
|
okfde/eucopyright,okfde/eucopyright,okfde/eucopyright
|
Add translation merging python script
|
import sys
import yaml
def main(base_file, new_file, overwrite_language):
old = yaml.load(file(base_file).read())
new = yaml.load(file(new_file).read())
assert len(overwrite_language) == 2
for o, n in zip(old, new):
if overwrite_language in n['text']:
o['text'][overwrite_language] = n['text'][overwrite_language]
if o['type'] == 'multiple_choice':
for oo, on in zip(o['options'], n['options']):
if 'details' in oo and overwrite_language in on['details']:
oo['details'][overwrite_language] = on['details'][overwrite_language]
sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, encoding='utf-8', width=10000))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
<commit_before><commit_msg>Add translation merging python script<commit_after>
|
import sys
import yaml
def main(base_file, new_file, overwrite_language):
old = yaml.load(file(base_file).read())
new = yaml.load(file(new_file).read())
assert len(overwrite_language) == 2
for o, n in zip(old, new):
if overwrite_language in n['text']:
o['text'][overwrite_language] = n['text'][overwrite_language]
if o['type'] == 'multiple_choice':
for oo, on in zip(o['options'], n['options']):
if 'details' in oo and overwrite_language in on['details']:
oo['details'][overwrite_language] = on['details'][overwrite_language]
sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, encoding='utf-8', width=10000))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
Add translation merging python scriptimport sys
import yaml
def main(base_file, new_file, overwrite_language):
old = yaml.load(file(base_file).read())
new = yaml.load(file(new_file).read())
assert len(overwrite_language) == 2
for o, n in zip(old, new):
if overwrite_language in n['text']:
o['text'][overwrite_language] = n['text'][overwrite_language]
if o['type'] == 'multiple_choice':
for oo, on in zip(o['options'], n['options']):
if 'details' in oo and overwrite_language in on['details']:
oo['details'][overwrite_language] = on['details'][overwrite_language]
sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, encoding='utf-8', width=10000))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
<commit_before><commit_msg>Add translation merging python script<commit_after>import sys
import yaml
def main(base_file, new_file, overwrite_language):
old = yaml.load(file(base_file).read())
new = yaml.load(file(new_file).read())
assert len(overwrite_language) == 2
for o, n in zip(old, new):
if overwrite_language in n['text']:
o['text'][overwrite_language] = n['text'][overwrite_language]
if o['type'] == 'multiple_choice':
for oo, on in zip(o['options'], n['options']):
if 'details' in oo and overwrite_language in on['details']:
oo['details'][overwrite_language] = on['details'][overwrite_language]
sys.stdout.write(yaml.safe_dump(old, allow_unicode=True, default_flow_style=False, encoding='utf-8', width=10000))
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
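A hedged usage note for the merge script above; the YAML file names and the 'de' language code are illustrative, and the merged document is written to stdout.
# Equivalent command line (paths are assumptions for illustration):
#   python scripts/merge_translations.py questions.yaml questions_de.yaml de > merged.yaml
# Assuming scripts/ is on sys.path, the same merge can be driven from Python:
import merge_translations
merge_translations.main('questions.yaml', 'questions_de.yaml', 'de')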
|
02cfc7a3dd310fd3da130f69a52ef76eaa8fc47c
|
square1.py
|
square1.py
|
# Step 1: Make all the "turtle" commands available to us.
import turtle
# Step 2: create a new turtle, we'll call him simon
simon = turtle.Turtle()
# Let's draw a square!
for loop in range(4):
simon.forward(200)
simon.left(90)
|
Add script for a square
|
Add script for a square
|
Python
|
mit
|
SimonDevon/simple-python-shapes
|
Add script for a square
|
# Step 1: Make all the "turtle" commands available to us.
import turtle
# Step 2: create a new turtle, we'll call him simon
simon = turtle.Turtle()
# Let's draw a square!
for loop in range(4):
simon.forward(200)
simon.left(90)
|
<commit_before><commit_msg>Add script for a square<commit_after>
|
# Step 1: Make all the "turtle" commands available to us.
import turtle
# Step 2: create a new turtle, we'll call him simon
simon = turtle.Turtle()
# Let's draw a square!
for loop in range(4):
simon.forward(200)
simon.left(90)
|
Add script for a square# Step 1: Make all the "turtle" commands available to us.
import turtle
# Step 2: create a new turtle, we'll call him simon
simon = turtle.Turtle()
# Let's draw a square!
for loop in range(4):
simon.forward(200)
simon.left(90)
|
<commit_before><commit_msg>Add script for a square<commit_after># Step 1: Make all the "turtle" commands available to us.
import turtle
# Step 2: create a new turtle, we'll call him simon
simon = turtle.Turtle()
# Let's draw a square!
for loop in range(4):
simon.forward(200)
simon.left(90)
|
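The loop above closes the square because four left turns of 90 degrees add up to one full 360-degree rotation; the same idea draws any regular polygon by turning through the exterior angle 360/n. A small, hedged variation (the pentagon values are illustrative):
import turtle

simon = turtle.Turtle()
sides = 5          # any n >= 3 works
length = 120
for _ in range(sides):
    simon.forward(length)
    simon.left(360 / sides)   # exterior angle, so the path closes after n sides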
|
07fbb086bc13bb00d4d9d538d0739547c08a7372
|
scripts/util/delete_flowpath.py
|
scripts/util/delete_flowpath.py
|
"""Utility script to delete a flowpath from the database and on-disk"""
import psycopg2
import sys
import os
def do_delete(huc12, fpath, scenario):
"""Delete a flowpath from the database and on disk
Args:
huc12 (str): The HUC12 that contains the flowpath
fpath (str): The flowpath within that HUC12 that needs removal
scenario (str): The IDEP scenario to remove this flowpath from
"""
pgconn = psycopg2.connect(database='idep', host='iemdb')
cursor = pgconn.cursor()
#Find the FID
cursor.execute("""SELECT fid from flowpaths WHERE huc_12 = %s and
fpath = %s and scenario = %s""", (huc12, fpath, scenario))
if cursor.rowcount == 0:
print("ERROR: Can't find FID for HUC12: %s FPATH: %s SCENARIO: %s" % (
huc12, fpath, scenario))
return
fid = cursor.fetchone()[0]
# Delete flowpath points
cursor.execute("""DELETE from flowpath_points where flowpath = %s
and scenario = %s""", (fid, scenario))
# Delete flowpath
cursor.execute("""DELETE from flowpaths where fid = %s
and scenario = %s""", (fid, scenario))
# Remove some files
for prefix in ['env', 'error', 'man', 'prj', 'run', 'slp', 'sol', 'wb']:
fn = "/i/%s/%s/%s/%s/%s_%s.%s" % (scenario, prefix, huc12[:8],
huc12[8:], huc12, fpath, prefix)
if os.path.isfile(fn):
print("REMOVE %s" % (fn,))
os.unlink(fn)
else:
print("MISSING %s" % (fn,))
cursor.close()
pgconn.commit()
def main():
"""Go Main Go"""
huc12 = sys.argv[1]
fpath = sys.argv[2]
scenario = sys.argv[3]
do_delete(huc12, fpath, scenario)
if __name__ == '__main__':
main()
|
Add util script for removal of flowpaths
|
Add util script for removal of flowpaths
|
Python
|
mit
|
akrherz/idep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/idep,akrherz/dep
|
Add util script for removal of flowpaths
|
"""Utility script to delete a flowpath from the database and on-disk"""
import psycopg2
import sys
import os
def do_delete(huc12, fpath, scenario):
"""Delete a flowpath from the database and on disk
Args:
huc12 (str): The HUC12 that contains the flowpath
fpath (str): The flowpath within that HUC12 that needs removal
scenario (str): The IDEP scenario to remove this flowpath from
"""
pgconn = psycopg2.connect(database='idep', host='iemdb')
cursor = pgconn.cursor()
#Find the FID
cursor.execute("""SELECT fid from flowpaths WHERE huc_12 = %s and
fpath = %s and scenario = %s""", (huc12, fpath, scenario))
if cursor.rowcount == 0:
print("ERROR: Can't find FID for HUC12: %s FPATH: %s SCENARIO: %s" % (
huc12, fpath, scenario))
return
fid = cursor.fetchone()[0]
# Delete flowpath points
cursor.execute("""DELETE from flowpath_points where flowpath = %s
and scenario = %s""", (fid, scenario))
# Delete flowpath
cursor.execute("""DELETE from flowpaths where fid = %s
and scenario = %s""", (fid, scenario))
# Remove some files
for prefix in ['env', 'error', 'man', 'prj', 'run', 'slp', 'sol', 'wb']:
fn = "/i/%s/%s/%s/%s/%s_%s.%s" % (scenario, prefix, huc12[:8],
huc12[8:], huc12, fpath, prefix)
if os.path.isfile(fn):
print("REMOVE %s" % (fn,))
os.unlink(fn)
else:
print("MISSING %s" % (fn,))
cursor.close()
pgconn.commit()
def main():
"""Go Main Go"""
huc12 = sys.argv[1]
fpath = sys.argv[2]
scenario = sys.argv[3]
do_delete(huc12, fpath, scenario)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add util script for removal of flowpaths<commit_after>
|
"""Utility script to delete a flowpath from the database and on-disk"""
import psycopg2
import sys
import os
def do_delete(huc12, fpath, scenario):
"""Delete a flowpath from the database and on disk
Args:
huc12 (str): The HUC12 that contains the flowpath
fpath (str): The flowpath within that HUC12 that needs removal
scenario (str): The IDEP scenario to remove this flowpath from
"""
pgconn = psycopg2.connect(database='idep', host='iemdb')
cursor = pgconn.cursor()
#Find the FID
cursor.execute("""SELECT fid from flowpaths WHERE huc_12 = %s and
fpath = %s and scenario = %s""", (huc12, fpath, scenario))
if cursor.rowcount == 0:
print("ERROR: Can't find FID for HUC12: %s FPATH: %s SCENARIO: %s" % (
huc12, fpath, scenario))
return
fid = cursor.fetchone()[0]
# Delete flowpath points
cursor.execute("""DELETE from flowpath_points where flowpath = %s
and scenario = %s""", (fid, scenario))
# Delete flowpath
cursor.execute("""DELETE from flowpaths where fid = %s
and scenario = %s""", (fid, scenario))
# Remove some files
for prefix in ['env', 'error', 'man', 'prj', 'run', 'slp', 'sol', 'wb']:
fn = "/i/%s/%s/%s/%s/%s_%s.%s" % (scenario, prefix, huc12[:8],
huc12[8:], huc12, fpath, prefix)
if os.path.isfile(fn):
print("REMOVE %s" % (fn,))
os.unlink(fn)
else:
print("MISSING %s" % (fn,))
cursor.close()
pgconn.commit()
def main():
"""Go Main Go"""
huc12 = sys.argv[1]
fpath = sys.argv[2]
scenario = sys.argv[3]
do_delete(huc12, fpath, scenario)
if __name__ == '__main__':
main()
|
Add util script for removal of flowpaths"""Utility script to delete a flowpath from the database and on-disk"""
import psycopg2
import sys
import os
def do_delete(huc12, fpath, scenario):
"""Delete a flowpath from the database and on disk
Args:
huc12 (str): The HUC12 that contains the flowpath
fpath (str): The flowpath within that HUC12 that needs removal
scenario (str): The IDEP scenario to remove this flowpath from
"""
pgconn = psycopg2.connect(database='idep', host='iemdb')
cursor = pgconn.cursor()
#Find the FID
cursor.execute("""SELECT fid from flowpaths WHERE huc_12 = %s and
fpath = %s and scenario = %s""", (huc12, fpath, scenario))
if cursor.rowcount == 0:
print("ERROR: Can't find FID for HUC12: %s FPATH: %s SCENARIO: %s" % (
huc12, fpath, scenario))
return
fid = cursor.fetchone()[0]
# Delete flowpath points
cursor.execute("""DELETE from flowpath_points where flowpath = %s
and scenario = %s""", (fid, scenario))
# Delete flowpath
cursor.execute("""DELETE from flowpaths where fid = %s
and scenario = %s""", (fid, scenario))
# Remove some files
for prefix in ['env', 'error', 'man', 'prj', 'run', 'slp', 'sol', 'wb']:
fn = "/i/%s/%s/%s/%s/%s_%s.%s" % (scenario, prefix, huc12[:8],
huc12[8:], huc12, fpath, prefix)
if os.path.isfile(fn):
print("REMOVE %s" % (fn,))
os.unlink(fn)
else:
print("MISSING %s" % (fn,))
cursor.close()
pgconn.commit()
def main():
"""Go Main Go"""
huc12 = sys.argv[1]
fpath = sys.argv[2]
scenario = sys.argv[3]
do_delete(huc12, fpath, scenario)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add util script for removal of flowpaths<commit_after>"""Utility script to delete a flowpath from the database and on-disk"""
import psycopg2
import sys
import os
def do_delete(huc12, fpath, scenario):
"""Delete a flowpath from the database and on disk
Args:
huc12 (str): The HUC12 that contains the flowpath
fpath (str): The flowpath within that HUC12 that needs removal
scenario (str): The IDEP scenario to remove this flowpath from
"""
pgconn = psycopg2.connect(database='idep', host='iemdb')
cursor = pgconn.cursor()
#Find the FID
cursor.execute("""SELECT fid from flowpaths WHERE huc_12 = %s and
fpath = %s and scenario = %s""", (huc12, fpath, scenario))
if cursor.rowcount == 0:
print("ERROR: Can't find FID for HUC12: %s FPATH: %s SCENARIO: %s" % (
huc12, fpath, scenario))
return
fid = cursor.fetchone()[0]
# Delete flowpath points
cursor.execute("""DELETE from flowpath_points where flowpath = %s
and scenario = %s""", (fid, scenario))
# Delete flowpath
cursor.execute("""DELETE from flowpaths where fid = %s
and scenario = %s""", (fid, scenario))
# Remove some files
for prefix in ['env', 'error', 'man', 'prj', 'run', 'slp', 'sol', 'wb']:
fn = "/i/%s/%s/%s/%s/%s_%s.%s" % (scenario, prefix, huc12[:8],
huc12[8:], huc12, fpath, prefix)
if os.path.isfile(fn):
print("REMOVE %s" % (fn,))
os.unlink(fn)
else:
print("MISSING %s" % (fn,))
cursor.close()
pgconn.commit()
def main():
"""Go Main Go"""
huc12 = sys.argv[1]
fpath = sys.argv[2]
scenario = sys.argv[3]
do_delete(huc12, fpath, scenario)
if __name__ == '__main__':
main()
|
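A hedged usage sketch for the utility above; the HUC12 code, flowpath number, and scenario are made-up values, and the 'iemdb' PostgreSQL host must be reachable for the delete to run.
# Command-line form (arguments are illustrative only):
#   python scripts/util/delete_flowpath.py 070801050306 23 0
# Or call the worker directly from Python, bypassing sys.argv:
from delete_flowpath import do_delete
do_delete('070801050306', '23', '0')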
|
c8a44d0689c325491ebbc1c05bab2672731cf58d
|
GcodeParser.py
|
GcodeParser.py
|
#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "d.armitage89@gmail.com"
####---- Imports ----####
from pygcode import Line, GCodeLinearMove
def bounding_box(gcode_file):
"""Take in file of gcode, return dict of max and min bounding values"""
raise NotImplementedError
def box_gcode(min_xy, max_xy):
"""Take in min/max coordinate tuples, return G0 commands to bound it"""
raise NotImplementedError
def mid_gcode(min_xy, max_xy):
"""Take in min/max coord tuples, return G0 to go to midpoint"""
raise NotImplementedError
|
CREATE module to parse gcode
|
CREATE module to parse gcode
|
Python
|
mit
|
RootAccessHackerspace/k40-laser-scripts,RootAccessHackerspace/k40-laser-scripts
|
CREATE module to parse gcode
|
#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "d.armitage89@gmail.com"
####---- Imports ----####
from pygcode import Line, GCodeLinearMove
def bounding_box(gcode_file):
"""Take in file of gcode, return dict of max and min bounding values"""
raise NotImplementedError
def box_gcode(min_xy, max_xy):
"""Take in min/max coordinate tuples, return G0 commands to bound it"""
raise NotImplementedError
def mid_gcode(min_xy, max_xy):
"""Take in min/max coord tuples, return G0 to go to midpoint"""
raise NotImplementedError
|
<commit_before><commit_msg>CREATE module to parse gcode<commit_after>
|
#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "d.armitage89@gmail.com"
####---- Imports ----####
from pygcode import Line, GCodeLinearMove
def bounding_box(gcode_file):
"""Take in file of gcode, return dict of max and min bounding values"""
raise NotImplementedError
def box_gcode(min_xy, max_xy):
"""Take in min/max coordinate tuples, return G0 commands to bound it"""
raise NotImplementedError
def mid_gcode(min_xy, max_xy):
"""Take in min/max coord tuples, return G0 to go to midpoint"""
raise NotImplementedError
|
CREATE module to parse gcode#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "d.armitage89@gmail.com"
####---- Imports ----####
from pygcode import Line, GCodeLinearMove
def bounding_box(gcode_file):
"""Take in file of gcode, return dict of max and min bounding values"""
raise NotImplementedError
def box_gcode(min_xy, max_xy):
"""Take in min/max coordinate tuples, return G0 commands to bound it"""
raise NotImplementedError
def mid_gcode(min_xy, max_xy):
"""Take in min/max coord tuples, return G0 to go to midpoint"""
raise NotImplementedError
|
<commit_before><commit_msg>CREATE module to parse gcode<commit_after>#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "d.armitage89@gmail.com"
####---- Imports ----####
from pygcode import Line, GCodeLinearMove
def bounding_box(gcode_file):
"""Take in file of gcode, return dict of max and min bounding values"""
raise NotImplementedError
def box_gcode(min_xy, max_xy):
"""Take in min/max coordinate tuples, return G0 commands to bound it"""
raise NotImplementedError
def mid_gcode(min_xy, max_xy):
"""Take in min/max coord tuples, return G0 to go to midpoint"""
raise NotImplementedError
|
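One possible shape for the two G0 helpers stubbed out above, sketched as plain G0 strings without committing to any particular pygcode call; this is an illustration of intent, not the eventual implementation.
def box_gcode(min_xy, max_xy):
    """Trace the bounding rectangle with rapid (G0) moves, ending back at the start."""
    (x0, y0), (x1, y1) = min_xy, max_xy
    corners = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
    return ["G0 X{:.3f} Y{:.3f}".format(x, y) for x, y in corners]

def mid_gcode(min_xy, max_xy):
    """Rapid-move to the midpoint of the bounding box."""
    (x0, y0), (x1, y1) = min_xy, max_xy
    return ["G0 X{:.3f} Y{:.3f}".format((x0 + x1) / 2.0, (y0 + y1) / 2.0)]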
|
2276d86ff451b5fd834ee9b6163c85b3a51c8a7e
|
dendrogenous/test/test_utils.py
|
dendrogenous/test/test_utils.py
|
import dendrogenous as dg
import dendrogenous.utils
from Bio import SeqRecord
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from dendrogenous.test.base import BaseTestCase
class TestReformatAccession(BaseTestCase):
def test_reformat_accession_method_for_too_long_accessions(self):
"""
Test reformat accession works as expected
"""
too_long = SeqRecord(\
Seq("X",
IUPAC.protein),
id="012345678901234567890123456789",
name="foo",
description="bar, baz")
truncated = dg.utils.reformat_accession(too_long)
self.assertEqual(len(truncated.id), 20)
self.assertEqual(truncated.id, "01234567890123456789")
def test_reformat_accession_method_for_problematic_characters(self):
"""
Test reformat accession works as expected
"""
bad_char = SeqRecord(\
Seq("X",
IUPAC.protein),
id="|blah,|t:()",
name="foo",
description="bar, baz")
fixed_chars = dg.utils.reformat_accession(bad_char)
self.assertEqual(fixed_chars.id, "_blah__t___")
|
Add unit tests for reformatting accession id
|
Add unit tests for reformatting accession id
|
Python
|
bsd-3-clause
|
fmaguire/dendrogenous
|
Add unit tests for reformatting accession id
|
import dendrogenous as dg
import dendrogenous.utils
from Bio import SeqRecord
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from dendrogenous.test.base import BaseTestCase
class TestReformatAccession(BaseTestCase):
def test_reformat_accession_method_for_too_long_accessions(self):
"""
Test reformat accession works as expected
"""
too_long = SeqRecord(\
Seq("X",
IUPAC.protein),
id="012345678901234567890123456789",
name="foo",
description="bar, baz")
truncated = dg.utils.reformat_accession(too_long)
self.assertEqual(len(truncated.id), 20)
self.assertEqual(truncated.id, "01234567890123456789")
def test_reformat_accession_method_for_problematic_characters(self):
"""
Test reformat accession works as expected
"""
bad_char = SeqRecord(\
Seq("X",
IUPAC.protein),
id="|blah,|t:()",
name="foo",
description="bar, baz")
fixed_chars = dg.utils.reformat_accession(bad_char)
self.assertEqual(fixed_chars.id, "_blah__t___")
|
<commit_before><commit_msg>Add unit tests for reformatting accession id<commit_after>
|
import dendrogenous as dg
import dendrogenous.utils
from Bio import SeqRecord
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from dendrogenous.test.base import BaseTestCase
class TestReformatAccession(BaseTestCase):
def test_reformat_accession_method_for_too_long_accessions(self):
"""
Test reformat accession works as expected
"""
too_long = SeqRecord(\
Seq("X",
IUPAC.protein),
id="012345678901234567890123456789",
name="foo",
description="bar, baz")
truncated = dg.utils.reformat_accession(too_long)
self.assertEqual(len(truncated.id), 20)
self.assertEqual(truncated.id, "01234567890123456789")
def test_reformat_accession_method_for_problematic_characters(self):
"""
Test reformat accession works as expected
"""
bad_char = SeqRecord(\
Seq("X",
IUPAC.protein),
id="|blah,|t:()",
name="foo",
description="bar, baz")
fixed_chars = dg.utils.reformat_accession(bad_char)
self.assertEqual(fixed_chars.id, "_blah__t___")
|
Add unit tests for reformatting accession idimport dendrogenous as dg
import dendrogenous.utils
from Bio import SeqRecord
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from dendrogenous.test.base import BaseTestCase
class TestReformatAccession(BaseTestCase):
def test_reformat_accession_method_for_too_long_accessions(self):
"""
Test reformat accession works as expected
"""
too_long = SeqRecord(\
Seq("X",
IUPAC.protein),
id="012345678901234567890123456789",
name="foo",
description="bar, baz")
truncated = dg.utils.reformat_accession(too_long)
self.assertEqual(len(truncated.id), 20)
self.assertEqual(truncated.id, "01234567890123456789")
def test_reformat_accession_method_for_problematic_characters(self):
"""
Test reformat accession works as expected
"""
bad_char = SeqRecord(\
Seq("X",
IUPAC.protein),
id="|blah,|t:()",
name="foo",
description="bar, baz")
fixed_chars = dg.utils.reformat_accession(bad_char)
self.assertEqual(fixed_chars.id, "_blah__t___")
|
<commit_before><commit_msg>Add unit tests for reformatting accession id<commit_after>import dendrogenous as dg
import dendrogenous.utils
from Bio import SeqRecord
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
from dendrogenous.test.base import BaseTestCase
class TestReformatAccession(BaseTestCase):
def test_reformat_accession_method_for_too_long_accessions(self):
"""
Test reformat accession works as expected
"""
too_long = SeqRecord(\
Seq("X",
IUPAC.protein),
id="012345678901234567890123456789",
name="foo",
description="bar, baz")
truncated = dg.utils.reformat_accession(too_long)
self.assertEqual(len(truncated.id), 20)
self.assertEqual(truncated.id, "01234567890123456789")
def test_reformat_accession_method_for_problematic_characters(self):
"""
Test reformat accession works as expected
"""
bad_char = SeqRecord(\
Seq("X",
IUPAC.protein),
id="|blah,|t:()",
name="foo",
description="bar, baz")
fixed_chars = dg.utils.reformat_accession(bad_char)
self.assertEqual(fixed_chars.id, "_blah__t___")
|
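The tests above pin down the expected behaviour of reformat_accession: truncate ids to 20 characters and map each of the characters |,:() to an underscore. A minimal sketch consistent with those assertions (the real dendrogenous implementation may differ):
def reformat_accession(seq_record, max_len=20, bad_chars='|,:()'):
    """Return the record with a length-limited, phylogenetics-safe id."""
    new_id = seq_record.id[:max_len]
    for char in bad_chars:
        new_id = new_id.replace(char, '_')
    seq_record.id = new_id
    return seq_record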
|
8d756379ae98576c61fee18e4964a98e2014e67d
|
hooks/variable_logger_hook.py
|
hooks/variable_logger_hook.py
|
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A hook to log all variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional
from absl import logging
import numpy as np
import tensorflow as tf
class VariableLoggerHook(tf.train.SessionRunHook):
"""A hook to log variables via a session run hook."""
def __init__(self, max_num_variable_values = None):
"""Initializes a VariableLoggerHook.
Args:
max_num_variable_values: If not None, at most max_num_variable_values will
be logged per variable.
"""
super(VariableLoggerHook, self).__init__()
self._max_num_variable_values = max_num_variable_values
def begin(self):
"""Captures all variables to be read out during the session run."""
self._variables_to_log = tf.contrib.framework.get_variables()
def before_run(self, run_context):
"""Adds the variables to the run args."""
return tf.train.SessionRunArgs(self._variables_to_log)
def after_run(self, run_context, run_values):
del run_context
original = np.get_printoptions()
np.set_printoptions(suppress=True)
for variable, variable_value in zip(self._variables_to_log,
run_values.results):
if not isinstance(variable_value, np.ndarray):
continue
variable_value = variable_value.ravel()
logging.info('%s.mean = %s', variable.op.name, np.mean(variable_value))
logging.info('%s.std = %s', variable.op.name, np.std(variable_value))
if self._max_num_variable_values:
variable_value = variable_value[:self._max_num_variable_values]
logging.info('%s = %s', variable.op.name, variable_value)
np.set_printoptions(**original)
|
Introduce a variable logging hook which will log the values of all variables during training via a session run hook.
|
Introduce a variable logging hook which will log the values of all variables during training via a session run hook.
PiperOrigin-RevId: 276103839
|
Python
|
apache-2.0
|
google-research/tensor2robot
|
Introduce a variable logging hook which will log the values of all variables during training via a session run hook.
PiperOrigin-RevId: 276103839
|
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A hook to log all variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional
from absl import logging
import numpy as np
import tensorflow as tf
class VariableLoggerHook(tf.train.SessionRunHook):
"""A hook to log variables via a session run hook."""
def __init__(self, max_num_variable_values = None):
"""Initializes a VariableLoggerHook.
Args:
max_num_variable_values: If not None, at most max_num_variable_values will
be logged per variable.
"""
super(VariableLoggerHook, self).__init__()
self._max_num_variable_values = max_num_variable_values
def begin(self):
"""Captures all variables to be read out during the session run."""
self._variables_to_log = tf.contrib.framework.get_variables()
def before_run(self, run_context):
"""Adds the variables to the run args."""
return tf.train.SessionRunArgs(self._variables_to_log)
def after_run(self, run_context, run_values):
del run_context
original = np.get_printoptions()
np.set_printoptions(suppress=True)
for variable, variable_value in zip(self._variables_to_log,
run_values.results):
if not isinstance(variable_value, np.ndarray):
continue
variable_value = variable_value.ravel()
logging.info('%s.mean = %s', variable.op.name, np.mean(variable_value))
logging.info('%s.std = %s', variable.op.name, np.std(variable_value))
if self._max_num_variable_values:
variable_value = variable_value[:self._max_num_variable_values]
logging.info('%s = %s', variable.op.name, variable_value)
np.set_printoptions(**original)
|
<commit_before><commit_msg>Introduce a variable logging hook which will log the values of all variables during training via a session run hook.
PiperOrigin-RevId: 276103839<commit_after>
|
# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A hook to log all variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional
from absl import logging
import numpy as np
import tensorflow as tf
class VariableLoggerHook(tf.train.SessionRunHook):
"""A hook to log variables via a session run hook."""
def __init__(self, max_num_variable_values = None):
"""Initializes a VariableLoggerHook.
Args:
max_num_variable_values: If not None, at most max_num_variable_values will
be logged per variable.
"""
super(VariableLoggerHook, self).__init__()
self._max_num_variable_values = max_num_variable_values
def begin(self):
"""Captures all variables to be read out during the session run."""
self._variables_to_log = tf.contrib.framework.get_variables()
def before_run(self, run_context):
"""Adds the variables to the run args."""
return tf.train.SessionRunArgs(self._variables_to_log)
def after_run(self, run_context, run_values):
del run_context
original = np.get_printoptions()
np.set_printoptions(suppress=True)
for variable, variable_value in zip(self._variables_to_log,
run_values.results):
if not isinstance(variable_value, np.ndarray):
continue
variable_value = variable_value.ravel()
logging.info('%s.mean = %s', variable.op.name, np.mean(variable_value))
logging.info('%s.std = %s', variable.op.name, np.std(variable_value))
if self._max_num_variable_values:
variable_value = variable_value[:self._max_num_variable_values]
logging.info('%s = %s', variable.op.name, variable_value)
np.set_printoptions(**original)
|
Introduce a variable logging hook which will log the values of all variables during training via a session run hook.
PiperOrigin-RevId: 276103839# coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A hook to log all variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional
from absl import logging
import numpy as np
import tensorflow as tf
class VariableLoggerHook(tf.train.SessionRunHook):
"""A hook to log variables via a session run hook."""
def __init__(self, max_num_variable_values = None):
"""Initializes a VariableLoggerHook.
Args:
max_num_variable_values: If not None, at most max_num_variable_values will
be logged per variable.
"""
super(VariableLoggerHook, self).__init__()
self._max_num_variable_values = max_num_variable_values
def begin(self):
"""Captures all variables to be read out during the session run."""
self._variables_to_log = tf.contrib.framework.get_variables()
def before_run(self, run_context):
"""Adds the variables to the run args."""
return tf.train.SessionRunArgs(self._variables_to_log)
def after_run(self, run_context, run_values):
del run_context
original = np.get_printoptions()
np.set_printoptions(suppress=True)
for variable, variable_value in zip(self._variables_to_log,
run_values.results):
if not isinstance(variable_value, np.ndarray):
continue
variable_value = variable_value.ravel()
logging.info('%s.mean = %s', variable.op.name, np.mean(variable_value))
logging.info('%s.std = %s', variable.op.name, np.std(variable_value))
if self._max_num_variable_values:
variable_value = variable_value[:self._max_num_variable_values]
logging.info('%s = %s', variable.op.name, variable_value)
np.set_printoptions(**original)
|
<commit_before><commit_msg>Introduce a variable logging hook which will log the values of all variables during training via a session run hook.
PiperOrigin-RevId: 276103839<commit_after># coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""A hook to log all variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional
from absl import logging
import numpy as np
import tensorflow as tf
class VariableLoggerHook(tf.train.SessionRunHook):
"""A hook to log variables via a session run hook."""
def __init__(self, max_num_variable_values = None):
"""Initializes a VariableLoggerHook.
Args:
max_num_variable_values: If not None, at most max_num_variable_values will
be logged per variable.
"""
super(VariableLoggerHook, self).__init__()
self._max_num_variable_values = max_num_variable_values
def begin(self):
"""Captures all variables to be read out during the session run."""
self._variables_to_log = tf.contrib.framework.get_variables()
def before_run(self, run_context):
"""Adds the variables to the run args."""
return tf.train.SessionRunArgs(self._variables_to_log)
def after_run(self, run_context, run_values):
del run_context
original = np.get_printoptions()
np.set_printoptions(suppress=True)
for variable, variable_value in zip(self._variables_to_log,
run_values.results):
if not isinstance(variable_value, np.ndarray):
continue
variable_value = variable_value.ravel()
logging.info('%s.mean = %s', variable.op.name, np.mean(variable_value))
logging.info('%s.std = %s', variable.op.name, np.std(variable_value))
if self._max_num_variable_values:
variable_value = variable_value[:self._max_num_variable_values]
logging.info('%s = %s', variable.op.name, variable_value)
np.set_printoptions(**original)
|
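A hedged example of wiring the hook above into a TF1-style Estimator run; the estimator, the input function, and the cap of 5 logged values per variable are illustrative assumptions.
hook = VariableLoggerHook(max_num_variable_values=5)
# estimator and train_input_fn are assumed to be defined elsewhere in the pipeline.
estimator.train(input_fn=train_input_fn, hooks=[hook], max_steps=1000)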
|
ed7ea9e4bea8f4722b57876044d3c35570376462
|
scripts/dummycanbms.py
|
scripts/dummycanbms.py
|
#!/usr/bin/env python
from dbus.mainloop.glib import DBusGMainLoop
import gobject
import argparse
import logging
import sys
import os
# our own packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '../ext/velib_python'))
from dbusdummyservice import DbusDummyService
from logger import setup_logging
# Argument parsing
parser = argparse.ArgumentParser(
description='dummy dbus service'
)
parser.add_argument("-n", "--name",
help="the D-Bus service you want me to claim",
type=str, default="com.victronenergy.battery.socketcan_can0")
args = parser.parse_args()
print(__file__ + " is starting up, use -h argument to see optional arguments")
logger = setup_logging(debug=True)
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)
s = DbusDummyService(
servicename=args.name,
deviceinstance=0,
paths={
'/Alarms/CellImbalance': {'initial': 0},
'/Alarms/HighChargeCurrent': {'initial': 0},
'/Alarms/HighChargeTemperature': {'initial': 0},
'/Alarms/HighDischargeCurrent': {'initial': 0},
'/Alarms/HighTemperature': {'initial': 0},
'/Alarms/HighVoltage': {'initial': 0},
'/Alarms/InternalFailure': {'initial': 0},
'/Alarms/LowChargeTemperature': {'initial': 0},
'/Alarms/LowTemperature': {'initial': 0},
'/Alarms/LowVoltage': {'initial': 0},
'/Soc': {'initial': 40},
'/Dc/0/Voltage': {'initial': 25},
'/Dc/0/Current': {'initial': 20},
'/Dc/0/Power': {'initial': 500},
'/Dc/0/Temperature': {'initial': 23.8},
'/Info/BatteryLowVoltage': {'initial': 23},
'/Info/MaxChargeCurrent': {'initial': 600},
'/Info/MaxChargeVoltage': {'initial': 28.4},
'/Info/MaxDischargeCurrent': {'initial': 600},
},
productname='ACME BMS battery',
connection='CAN-bus')
logger.info('Connected to dbus, and switching over to gobject.MainLoop() (= event based)')
mainloop = gobject.MainLoop()
mainloop.run()
|
Add dummy canbus-battery service for testing.
|
Add dummy canbus-battery service for testing.
|
Python
|
mit
|
victronenergy/dbus-systemcalc-py
|
Add dummy canbus-battery service for testing.
|
#!/usr/bin/env python
from dbus.mainloop.glib import DBusGMainLoop
import gobject
import argparse
import logging
import sys
import os
# our own packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '../ext/velib_python'))
from dbusdummyservice import DbusDummyService
from logger import setup_logging
# Argument parsing
parser = argparse.ArgumentParser(
description='dummy dbus service'
)
parser.add_argument("-n", "--name",
help="the D-Bus service you want me to claim",
type=str, default="com.victronenergy.battery.socketcan_can0")
args = parser.parse_args()
print(__file__ + " is starting up, use -h argument to see optional arguments")
logger = setup_logging(debug=True)
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)
s = DbusDummyService(
servicename=args.name,
deviceinstance=0,
paths={
'/Alarms/CellImbalance': {'initial': 0},
'/Alarms/HighChargeCurrent': {'initial': 0},
'/Alarms/HighChargeTemperature': {'initial': 0},
'/Alarms/HighDischargeCurrent': {'initial': 0},
'/Alarms/HighTemperature': {'initial': 0},
'/Alarms/HighVoltage': {'initial': 0},
'/Alarms/InternalFailure': {'initial': 0},
'/Alarms/LowChargeTemperature': {'initial': 0},
'/Alarms/LowTemperature': {'initial': 0},
'/Alarms/LowVoltage': {'initial': 0},
'/Soc': {'initial': 40},
'/Dc/0/Voltage': {'initial': 25},
'/Dc/0/Current': {'initial': 20},
'/Dc/0/Power': {'initial': 500},
'/Dc/0/Temperature': {'initial': 23.8},
'/Info/BatteryLowVoltage': {'initial': 23},
'/Info/MaxChargeCurrent': {'initial': 600},
'/Info/MaxChargeVoltage': {'initial': 28.4},
'/Info/MaxDischargeCurrent': {'initial': 600},
},
productname='ACME BMS battery',
connection='CAN-bus')
logger.info('Connected to dbus, and switching over to gobject.MainLoop() (= event based)')
mainloop = gobject.MainLoop()
mainloop.run()
|
<commit_before><commit_msg>Add dummy canbus-battery service for testing.<commit_after>
|
#!/usr/bin/env python
from dbus.mainloop.glib import DBusGMainLoop
import gobject
import argparse
import logging
import sys
import os
# our own packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '../ext/velib_python'))
from dbusdummyservice import DbusDummyService
from logger import setup_logging
# Argument parsing
parser = argparse.ArgumentParser(
description='dummy dbus service'
)
parser.add_argument("-n", "--name",
help="the D-Bus service you want me to claim",
type=str, default="com.victronenergy.battery.socketcan_can0")
args = parser.parse_args()
print(__file__ + " is starting up, use -h argument to see optional arguments")
logger = setup_logging(debug=True)
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)
s = DbusDummyService(
servicename=args.name,
deviceinstance=0,
paths={
'/Alarms/CellImbalance': {'initial': 0},
'/Alarms/HighChargeCurrent': {'initial': 0},
'/Alarms/HighChargeTemperature': {'initial': 0},
'/Alarms/HighDischargeCurrent': {'initial': 0},
'/Alarms/HighTemperature': {'initial': 0},
'/Alarms/HighVoltage': {'initial': 0},
'/Alarms/InternalFailure': {'initial': 0},
'/Alarms/LowChargeTemperature': {'initial': 0},
'/Alarms/LowTemperature': {'initial': 0},
'/Alarms/LowVoltage': {'initial': 0},
'/Soc': {'initial': 40},
'/Dc/0/Voltage': {'initial': 25},
'/Dc/0/Current': {'initial': 20},
'/Dc/0/Power': {'initial': 500},
'/Dc/0/Temperature': {'initial': 23.8},
'/Info/BatteryLowVoltage': {'initial': 23},
'/Info/MaxChargeCurrent': {'initial': 600},
'/Info/MaxChargeVoltage': {'initial': 28.4},
'/Info/MaxDischargeCurrent': {'initial': 600},
},
productname='ACME BMS battery',
connection='CAN-bus')
logger.info('Connected to dbus, and switching over to gobject.MainLoop() (= event based)')
mainloop = gobject.MainLoop()
mainloop.run()
|
Add dummy canbus-battery service for testing.#!/usr/bin/env python
from dbus.mainloop.glib import DBusGMainLoop
import gobject
import argparse
import logging
import sys
import os
# our own packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '../ext/velib_python'))
from dbusdummyservice import DbusDummyService
from logger import setup_logging
# Argument parsing
parser = argparse.ArgumentParser(
description='dummy dbus service'
)
parser.add_argument("-n", "--name",
help="the D-Bus service you want me to claim",
type=str, default="com.victronenergy.battery.socketcan_can0")
args = parser.parse_args()
print(__file__ + " is starting up, use -h argument to see optional arguments")
logger = setup_logging(debug=True)
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)
s = DbusDummyService(
servicename=args.name,
deviceinstance=0,
paths={
'/Alarms/CellImbalance': {'initial': 0},
'/Alarms/HighChargeCurrent': {'initial': 0},
'/Alarms/HighChargeTemperature': {'initial': 0},
'/Alarms/HighDischargeCurrent': {'initial': 0},
'/Alarms/HighTemperature': {'initial': 0},
'/Alarms/HighVoltage': {'initial': 0},
'/Alarms/InternalFailure': {'initial': 0},
'/Alarms/LowChargeTemperature': {'initial': 0},
'/Alarms/LowTemperature': {'initial': 0},
'/Alarms/LowVoltage': {'initial': 0},
'/Soc': {'initial': 40},
'/Dc/0/Voltage': {'initial': 25},
'/Dc/0/Current': {'initial': 20},
'/Dc/0/Power': {'initial': 500},
'/Dc/0/Temperature': {'initial': 23.8},
'/Info/BatteryLowVoltage': {'initial': 23},
'/Info/MaxChargeCurrent': {'initial': 600},
'/Info/MaxChargeVoltage': {'initial': 28.4},
'/Info/MaxDischargeCurrent': {'initial': 600},
},
productname='ACME BMS battery',
connection='CAN-bus')
logger.info('Connected to dbus, and switching over to gobject.MainLoop() (= event based)')
mainloop = gobject.MainLoop()
mainloop.run()
|
<commit_before><commit_msg>Add dummy canbus-battery service for testing.<commit_after>#!/usr/bin/env python
from dbus.mainloop.glib import DBusGMainLoop
import gobject
import argparse
import logging
import sys
import os
# our own packages
sys.path.insert(1, os.path.join(os.path.dirname(__file__), '../ext/velib_python'))
from dbusdummyservice import DbusDummyService
from logger import setup_logging
# Argument parsing
parser = argparse.ArgumentParser(
description='dummy dbus service'
)
parser.add_argument("-n", "--name",
help="the D-Bus service you want me to claim",
type=str, default="com.victronenergy.battery.socketcan_can0")
args = parser.parse_args()
print(__file__ + " is starting up, use -h argument to see optional arguments")
logger = setup_logging(debug=True)
# Have a mainloop, so we can send/receive asynchronous calls to and from dbus
DBusGMainLoop(set_as_default=True)
s = DbusDummyService(
servicename=args.name,
deviceinstance=0,
paths={
'/Alarms/CellImbalance': {'initial': 0},
'/Alarms/HighChargeCurrent': {'initial': 0},
'/Alarms/HighChargeTemperature': {'initial': 0},
'/Alarms/HighDischargeCurrent': {'initial': 0},
'/Alarms/HighTemperature': {'initial': 0},
'/Alarms/HighVoltage': {'initial': 0},
'/Alarms/InternalFailure': {'initial': 0},
'/Alarms/LowChargeTemperature': {'initial': 0},
'/Alarms/LowTemperature': {'initial': 0},
'/Alarms/LowVoltage': {'initial': 0},
'/Soc': {'initial': 40},
'/Dc/0/Voltage': {'initial': 25},
'/Dc/0/Current': {'initial': 20},
'/Dc/0/Power': {'initial': 500},
'/Dc/0/Temperature': {'initial': 23.8},
'/Info/BatteryLowVoltage': {'initial': 23},
'/Info/MaxChargeCurrent': {'initial': 600},
'/Info/MaxChargeVoltage': {'initial': 28.4},
'/Info/MaxDischargeCurrent': {'initial': 600},
},
productname='ACME BMS battery',
connection='CAN-bus')
logger.info('Connected to dbus, and switching over to gobject.MainLoop() (= event based)')
mainloop = gobject.MainLoop()
mainloop.run()
|
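A hedged note on running the dummy battery service above; it needs a reachable D-Bus (the Venus system bus or a session bus), and the alternative service name is an example only.
# Claim the default name, com.victronenergy.battery.socketcan_can0:
#   python scripts/dummycanbms.py
# Claim a second instance for another CAN interface (name is illustrative):
#   python scripts/dummycanbms.py -n com.victronenergy.battery.socketcan_can1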
|
a106e33b4d45a365aac29c87e3e657bac3539d53
|
tests/test_db.py
|
tests/test_db.py
|
"""Unit tests for ckanserviceprovider/db.py."""
import nose.tools
import ckanserviceprovider.db as db
def test_validate_error_with_none():
"""_validate_error() should return None if given None."""
assert db._validate_error(None) is None
def test_validate_error_with_string():
"""If given a string _validate_error() should return it wrapped in a dict.
"""
assert db._validate_error("Something went wrong") == {
"message": "Something went wrong"}
def test_validate_error_with_valid_dict():
"""If given a valid dict _validate_error() should return the same dict."""
job_dict = {"message": "Something went wrong"}
assert db._validate_error(job_dict) == job_dict
def test_validate_error_with_dict_with_invalid_error():
"""_validate_error() should raise if given a dict with an invalid message.
"""
job_dict = {"message": 42} # Error message is invalid: it's not a string.
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_dict_with_no_error_key():
"""_validate_error() should raise if given a dict with no "message" key."""
job_dict = {"foo": "bar"}
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_random_object():
"""_validate_error() should raise if given an object of the wrong type."""
class Foo(object):
pass
# An error object that is not None and is not string- or dict-like at all.
error_obj = Foo()
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, error_obj)
|
Add unit tests for new _validate_error() function
|
Add unit tests for new _validate_error() function
|
Python
|
agpl-3.0
|
ckan/ckan-service-provider,deniszgonjanin/ckan-service-provider,ESRC-CDRC/ckan-service-provider,datawagovau/ckan-service-provider
|
Add unit tests for new _validate_error() function
|
"""Unit tests for ckanserviceprovider/db.py."""
import nose.tools
import ckanserviceprovider.db as db
def test_validate_error_with_none():
"""_validate_error() should return None if given None."""
assert db._validate_error(None) is None
def test_validate_error_with_string():
"""If given a string _validate_error() should return it wrapped in a dict.
"""
assert db._validate_error("Something went wrong") == {
"message": "Something went wrong"}
def test_validate_error_with_valid_dict():
"""If given a valid dict _validate_error() should return the same dict."""
job_dict = {"message": "Something went wrong"}
assert db._validate_error(job_dict) == job_dict
def test_validate_error_with_dict_with_invalid_error():
"""_validate_error() should raise if given a dict with an invalid message.
"""
job_dict = {"message": 42} # Error message is invalid: it's not a string.
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_dict_with_no_error_key():
"""_validate_error() should raise if given a dict with no "message" key."""
job_dict = {"foo": "bar"}
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_random_object():
"""_validate_error() should raise if given an object of the wrong type."""
class Foo(object):
pass
# An error object that is not None and is not string- or dict-like at all.
error_obj = Foo()
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, error_obj)
|
<commit_before><commit_msg>Add unit tests for new _validate_error() function<commit_after>
|
"""Unit tests for ckanserviceprovider/db.py."""
import nose.tools
import ckanserviceprovider.db as db
def test_validate_error_with_none():
"""_validate_error() should return None if given None."""
assert db._validate_error(None) is None
def test_validate_error_with_string():
"""If given a string _validate_error() should return it wrapped in a dict.
"""
assert db._validate_error("Something went wrong") == {
"message": "Something went wrong"}
def test_validate_error_with_valid_dict():
"""If given a valid dict _validate_error() should return the same dict."""
job_dict = {"message": "Something went wrong"}
assert db._validate_error(job_dict) == job_dict
def test_validate_error_with_dict_with_invalid_error():
"""_validate_error() should raise if given a dict with an invalid message.
"""
job_dict = {"message": 42} # Error message is invalid: it's not a string.
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_dict_with_no_error_key():
"""_validate_error() should raise if given a dict with no "message" key."""
job_dict = {"foo": "bar"}
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_random_object():
"""_validate_error() should raise if given an object of the wrong type."""
class Foo(object):
pass
# An error object that is not None and is not string- or dict-like at all.
error_obj = Foo()
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, error_obj)
|
Add unit tests for new _validate_error() function"""Unit tests for ckanserviceprovider/db.py."""
import nose.tools
import ckanserviceprovider.db as db
def test_validate_error_with_none():
"""_validate_error() should return None if given None."""
assert db._validate_error(None) is None
def test_validate_error_with_string():
"""If given a string _validate_error() should return it wrapped in a dict.
"""
assert db._validate_error("Something went wrong") == {
"message": "Something went wrong"}
def test_validate_error_with_valid_dict():
"""If given a valid dict _validate_error() should return the same dict."""
job_dict = {"message": "Something went wrong"}
assert db._validate_error(job_dict) == job_dict
def test_validate_error_with_dict_with_invalid_error():
"""_validate_error() should raise if given a dict with an invalid message.
"""
job_dict = {"message": 42} # Error message is invalid: it's not a string.
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_dict_with_no_error_key():
"""_validate_error() should raise if given a dict with no "message" key."""
job_dict = {"foo": "bar"}
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_random_object():
"""_validate_error() should raise if given an object of the wrong type."""
class Foo(object):
pass
# An error object that is not None and is not string- or dict-like at all.
error_obj = Foo()
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, error_obj)
|
<commit_before><commit_msg>Add unit tests for new _validate_error() function<commit_after>"""Unit tests for ckanserviceprovider/db.py."""
import nose.tools
import ckanserviceprovider.db as db
def test_validate_error_with_none():
"""_validate_error() should return None if given None."""
assert db._validate_error(None) is None
def test_validate_error_with_string():
"""If given a string _validate_error() should return it wrapped in a dict.
"""
assert db._validate_error("Something went wrong") == {
"message": "Something went wrong"}
def test_validate_error_with_valid_dict():
"""If given a valid dict _validate_error() should return the same dict."""
job_dict = {"message": "Something went wrong"}
assert db._validate_error(job_dict) == job_dict
def test_validate_error_with_dict_with_invalid_error():
"""_validate_error() should raise if given a dict with an invalid message.
"""
job_dict = {"message": 42} # Error message is invalid: it's not a string.
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_dict_with_no_error_key():
"""_validate_error() should raise if given a dict with no "message" key."""
job_dict = {"foo": "bar"}
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, job_dict)
def test_validate_error_with_random_object():
"""_validate_error() should raise if given an object of the wrong type."""
class Foo(object):
pass
# An error object that is not None and is not string- or dict-like at all.
error_obj = Foo()
nose.tools.assert_raises(
db.InvalidErrorObjectError, db._validate_error, error_obj)
|
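The unit tests above fully describe the _validate_error contract; the sketch below satisfies them, though the actual ckanserviceprovider implementation may differ in details.
class InvalidErrorObjectError(Exception):
    pass

def _validate_error(error):
    """Normalize a job error to None or a dict whose 'message' value is a string."""
    if error is None:
        return None
    if isinstance(error, str):   # the real code may also accept unicode on Python 2
        return {'message': error}
    try:
        message = error['message']
    except (TypeError, KeyError):
        raise InvalidErrorObjectError(
            'error must be None, a string, or a dict with a "message" key')
    if not isinstance(message, str):
        raise InvalidErrorObjectError('error["message"] must be a string')
    return error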
|
eaf44fe741fc3fa0224e121d1017260eb8a457a1
|
src/data/0.1-kz-citibike-stations.py
|
src/data/0.1-kz-citibike-stations.py
|
#Get Citibike station location data
#url format: https://feeds.citibikenyc.com/stations/stations.json
import json
import urllib
import codecs
import pandas as pd
import numpy as np
citibike_serviceurl = "https://feeds.citibikenyc.com/stations/stations.json"
while True:
url = citibike_serviceurl
print 'Retrieving', url
data = urllib.urlopen(url).read()
try:
data_js = json.loads(str(data))
except:
data_js = None
print '==Failure to Retrieve=='
continue
#fhand = codecs.open('citibike_loc_json.js', 'w', "utf-8") #create a json file to inspect the format and content
#fhand.writelines(json.dumps(data_js, indent=4))
fname = 'citibike_station_info.csv'
fh_cb = open(fname, 'w+')
fh_cb.writelines('Station_id,Station_Name,Location,Latitude,Longitude,Total_Docks,Available_Docks,Available_Bikes,lastCommunicationTime,Status,Test_Station,Status_Key')
all_station_info = data_js['stationBeanList']
for station in all_station_info:
cb_station_id = station['id']
cb_station_name = station['stationName']
cb_location = station['stAddress1']
lat = station['latitude']
lon = station['longitude']
tot_docks = station["totalDocks"]
avb_docks = station['availableDocks']
avb_bikes = station['availableBikes']
last_commu_time = station['lastCommunicationTime']
status = station['statusValue'] # "In Service"
is_test_station = station['testStation']
status_key = station['statusKey']
print '{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key)
fh_cb.writelines('\n{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key))
station_count = len(all_station_info)
print '\nNumber of Citi Bike stations ', station_count
break
# create DataFrame using Pandas
#fh_cb = open(fname, 'r')
#df_cb = pd.read_csv(fname)
#df_cb.head(20)
|
Add Citi Bike Station Data
|
Add Citi Bike Station Data
python file
|
Python
|
mit
|
pichot/citibike-publicspace
|
Add Citi Bike Station Data
python file
|
#Get Citibike station location data
#url format: https://feeds.citibikenyc.com/stations/stations.json
import json
import urllib
import codecs
import pandas as pd
import numpy as np
citibike_serviceurl = "https://feeds.citibikenyc.com/stations/stations.json"
while True:
url = citibike_serviceurl
print 'Retrieving', url
data = urllib.urlopen(url).read()
try:
data_js = json.loads(str(data))
except:
data_js = None
print '==Failure to Retrieve=='
continue
#fhand = codecs.open('citibike_loc_json.js', 'w', "utf-8") #create a json file to inspect the format and content
#fhand.writelines(json.dumps(data_js, indent=4))
fname = 'citibike_station_info.csv'
fh_cb = open(fname, 'w+')
fh_cb.writelines('Station_id,Station_Name,Location,Latitude,Longitude,Total_Docks,Available_Docks,Available_Bikes,lastCommunicationTime,Status,Test_Station,Status_Key')
all_station_info = data_js['stationBeanList']
for station in all_station_info:
cb_station_id = station['id']
cb_station_name = station['stationName']
cb_location = station['stAddress1']
lat = station['latitude']
lon = station['longitude']
tot_docks = station["totalDocks"]
avb_docks = station['availableDocks']
avb_bikes = station['availableBikes']
last_commu_time = station['lastCommunicationTime']
status = station['statusValue'] # "In Service"
is_test_station = station['testStation']
status_key = station['statusKey']
print '{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key)
fh_cb.writelines('\n{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key))
station_count = len(all_station_info)
print '\nNumber of Citi Bike stations ', station_count
break
# create DataFrame using Pandas
#fh_cb = open(fname, 'r')
#df_cb = pd.read_csv(fname)
#df_cb.head(20)
|
<commit_before><commit_msg>Add Citi Bike Station Data
python file<commit_after>
|
#Get Citibike station location data
#url format: https://feeds.citibikenyc.com/stations/stations.json
import json
import urllib
import codecs
import pandas as pd
import numpy as np
citibike_serviceurl = "https://feeds.citibikenyc.com/stations/stations.json"
while True:
url = citibike_serviceurl
print 'Retrieving', url
data = urllib.urlopen(url).read()
try:
data_js = json.loads(str(data))
except:
data_js = None
print '==Failure to Retrieve=='
continue
#fhand = codecs.open('citibike_loc_json.js', 'w', "utf-8") #create a json file to inspect the format and content
#fhand.writelines(json.dumps(data_js, indent=4))
fname = 'citibike_station_info.csv'
fh_cb = open(fname, 'w+')
fh_cb.writelines('Station_id,Station_Name,Location,Latitude,Longitude,Total_Docks,Available_Docks,Available_Bikes,lastCommunicationTime,Status,Test_Station,Status_Key')
all_station_info = data_js['stationBeanList']
for station in all_station_info:
cb_station_id = station['id']
cb_station_name = station['stationName']
cb_location = station['stAddress1']
lat = station['latitude']
lon = station['longitude']
tot_docks = station["totalDocks"]
avb_docks = station['availableDocks']
avb_bikes = station['availableBikes']
last_commu_time = station['lastCommunicationTime']
status = station['statusValue'] # "In Service"
is_test_station = station['testStation']
status_key = station['statusKey']
print '{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key)
fh_cb.writelines('\n{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key))
station_count = len(all_station_info)
print '\nNumber of Citi Bike stations ', station_count
break
# create DataFrame using Pandas
#fh_cb = open(fname, 'r')
#df_cb = pd.read_csv(fname)
#df_cb.head(20)
|
Add Citi Bike Station Data
python file
#Get Citibike station location data
#url format: https://feeds.citibikenyc.com/stations/stations.json
import json
import urllib
import codecs
import pandas as pd
import numpy as np
citibike_serviceurl = "https://feeds.citibikenyc.com/stations/stations.json"
while True:
url = citibike_serviceurl
print 'Retrieving', url
data = urllib.urlopen(url).read()
try:
data_js = json.loads(str(data))
except:
data_js = None
print '==Failure to Retrieve=='
continue
#fhand = codecs.open('citibike_loc_json.js', 'w', "utf-8") #create a json file to inspect the format and content
#fhand.writelines(json.dumps(data_js, indent=4))
fname = 'citibike_station_info.csv'
fh_cb = open(fname, 'w+')
fh_cb.writelines('Station_id,Station_Name,Location,Latitude,Longitude,Total_Docks,Available_Docks,Available_Bikes,lastCommunicationTime,Status,Test_Station,Status_Key')
all_station_info = data_js['stationBeanList']
for station in all_station_info:
cb_station_id = station['id']
cb_station_name = station['stationName']
cb_location = station['stAddress1']
lat = station['latitude']
lon = station['longitude']
tot_docks = station["totalDocks"]
avb_docks = station['availableDocks']
avb_bikes = station['availableBikes']
last_commu_time = station['lastCommunicationTime']
status = station['statusValue'] # "In Service"
is_test_station = station['testStation']
status_key = station['statusKey']
print '{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key)
fh_cb.writelines('\n{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key))
station_count = len(all_station_info)
print '\nNumber of Citi Bike stations ', station_count
break
# create DataFrame using Pandas
#fh_cb = open(fname, 'r')
#df_cb = pd.read_csv(fname)
#df_cb.head(20)
|
<commit_before><commit_msg>Add Citi Bike Station Data
python file<commit_after>
#Get Citibike station location data
#url format: https://feeds.citibikenyc.com/stations/stations.json
import json
import urllib
import codecs
import pandas as pd
import numpy as np
citibike_serviceurl = "https://feeds.citibikenyc.com/stations/stations.json"
while True:
url = citibike_serviceurl
print 'Retrieving', url
data = urllib.urlopen(url).read()
try:
data_js = json.loads(str(data))
except:
data_js = None
print '==Failure to Retrieve=='
continue
#fhand = codecs.open('citibike_loc_json.js', 'w', "utf-8") #create a json file to inspect the format and content
#fhand.writelines(json.dumps(data_js, indent=4))
fname = 'citibike_station_info.csv'
fh_cb = open(fname, 'w+')
fh_cb.writelines('Station_id,Station_Name,Location,Latitude,Longitude,Total_Docks,Available_Docks,Available_Bikes,lastCommunicationTime,Status,Test_Station,Status_Key')
all_station_info = data_js['stationBeanList']
for station in all_station_info:
cb_station_id = station['id']
cb_station_name = station['stationName']
cb_location = station['stAddress1']
lat = station['latitude']
lon = station['longitude']
tot_docks = station["totalDocks"]
avb_docks = station['availableDocks']
avb_bikes = station['availableBikes']
last_commu_time = station['lastCommunicationTime']
status = station['statusValue'] # "In Service"
is_test_station = station['testStation']
status_key = station['statusKey']
print '{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key)
fh_cb.writelines('\n{},{},{},{},{},{},{},{},{},{},{},{}'.format(cb_station_id, cb_station_name,cb_location,lat,lon,tot_docks,avb_docks,avb_bikes,last_commu_time,status,is_test_station,status_key))
station_count = len(all_station_info)
print '\nNumber of Citi Bike stations ', station_count
break
# create DataFrame using Pandas
#fh_cb = open(fname, 'r')
#df_cb = pd.read_csv(fname)
#df_cb.head(20)
|
|
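For anyone adapting the Citi Bike snippet in this record to Python 3, here is a minimal stdlib-only sketch of the same fetch-and-export idea. It reuses the stations.json endpoint and JSON field names shown above; the output filename and the exact column subset are illustrative choices rather than part of the original commit, and csv.writer is used so station names containing commas stay intact.

import csv
import json
import urllib.request

CITIBIKE_URL = "https://feeds.citibikenyc.com/stations/stations.json"  # same endpoint as the record above

def fetch_stations(url=CITIBIKE_URL):
    # Download and decode the station feed in one shot (no retry loop here).
    with urllib.request.urlopen(url) as resp:
        payload = json.loads(resp.read().decode("utf-8"))
    return payload["stationBeanList"]

def write_station_csv(stations, path="citibike_station_info.csv"):
    # Field names follow the JSON keys used in the original script.
    fields = ["id", "stationName", "stAddress1", "latitude", "longitude",
              "totalDocks", "availableDocks", "availableBikes",
              "lastCommunicationTime", "statusValue", "testStation", "statusKey"]
    with open(path, "w", newline="", encoding="utf-8") as fh:
        writer = csv.writer(fh)
        writer.writerow(fields)
        for station in stations:
            writer.writerow([station.get(field) for field in fields])

if __name__ == "__main__":
    stations = fetch_stations()
    write_station_csv(stations)
    print("Number of Citi Bike stations:", len(stations))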
155249ba18980544844957e1dfed748ef51f6c53
|
tests/test_cli.py
|
tests/test_cli.py
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import os
import tempfile
import unittest
from pic2map.cli import valid_directory
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
def test_unreadable_directory(self):
"""Unreadable diretory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
|
Add valid directory test cases
|
Add valid directory test cases
|
Python
|
mit
|
jcollado/pic2map,jcollado/pic2map,jcollado/pic2map
|
Add valid directory test cases
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import os
import tempfile
import unittest
from pic2map.cli import valid_directory
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
def test_unreadable_directory(self):
"""Unreadable diretory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
|
<commit_before><commit_msg>Add valid directory test cases<commit_after>
|
# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import os
import tempfile
import unittest
from pic2map.cli import valid_directory
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
def test_unreadable_directory(self):
"""Unreadable diretory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
|
Add valid directory test cases# -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import os
import tempfile
import unittest
from pic2map.cli import valid_directory
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
def test_unreadable_directory(self):
"""Unreadable diretory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
|
<commit_before><commit_msg>Add valid directory test cases<commit_after># -*- coding: utf-8 -*-
"""Command Line Interface test cases."""
import argparse
import os
import tempfile
import unittest
from pic2map.cli import valid_directory
class ValidDirectoryTest(unittest.TestCase):
"""Valid directory test cases."""
def test_valid_directory(self):
"""Valid directory path."""
temp_directory = tempfile.mkdtemp()
try:
self.assertTrue(
valid_directory(temp_directory),
temp_directory,
)
finally:
os.rmdir(temp_directory)
def test_invalid_directory(self):
"""Invalid directory."""
with tempfile.NamedTemporaryFile() as temp_file:
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_file.name)
def test_unreadable_directory(self):
"""Unreadable diretory."""
temp_directory = tempfile.mkdtemp()
try:
os.chmod(temp_directory, 0)
with self.assertRaises(argparse.ArgumentTypeError):
valid_directory(temp_directory)
finally:
os.rmdir(temp_directory)
|
|
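The tests above rely on valid_directory behaving as an argparse type callable: it returns the path when the directory exists and is readable, and raises argparse.ArgumentTypeError otherwise. A small usage sketch, with a hypothetical argument name chosen for illustration rather than taken from pic2map's actual CLI:

import argparse

from pic2map.cli import valid_directory

parser = argparse.ArgumentParser(description="Illustrative parser, not pic2map's real one")
parser.add_argument("directory", type=valid_directory,
                    help="directory containing pictures to index")

# argparse converts the ArgumentTypeError into a usage error for the user.
args = parser.parse_args(["/tmp"])  # "/tmp" is just an example path
print(args.directory)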
67c776d209699197dd37833351c79d3599c6419c
|
tests/test_wkt.py
|
tests/test_wkt.py
|
from math import pi
import pytest
from shapely.geometry import LineString, Point
from shapely.wkt import dumps
@pytest.fixture(scope="module")
def pipi():
return Point((pi, -pi))
@pytest.fixture(scope="module")
def pipi4():
return Point((pi*4, -pi*4))
def test_wkt(pipi):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi.wkt == "POINT ({0:.16f} {1:.16f})".format(pi, -pi)
def test_wkt(pipi4):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi4.wkt == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps(pipi4):
assert dumps(pipi4) == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps_precision(pipi4):
assert dumps(pipi4, rounding_precision=4) == "POINT ({0:.4f} {1:.4f})".format(pi*4, -pi*4)
|
Test that .wkt and wkt.dumps produce same results
|
Test that .wkt and wkt.dumps produce same results
|
Python
|
bsd-3-clause
|
jdmcbr/Shapely,jdmcbr/Shapely
|
Test that .wkt and wkt.dumps produce same results
|
from math import pi
import pytest
from shapely.geometry import LineString, Point
from shapely.wkt import dumps
@pytest.fixture(scope="module")
def pipi():
return Point((pi, -pi))
@pytest.fixture(scope="module")
def pipi4():
return Point((pi*4, -pi*4))
def test_wkt(pipi):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi.wkt == "POINT ({0:.16f} {1:.16f})".format(pi, -pi)
def test_wkt(pipi4):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi4.wkt == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps(pipi4):
assert dumps(pipi4) == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps_precision(pipi4):
assert dumps(pipi4, rounding_precision=4) == "POINT ({0:.4f} {1:.4f})".format(pi*4, -pi*4)
|
<commit_before><commit_msg>Test that .wkt and wkt.dumps produce same results<commit_after>
|
from math import pi
import pytest
from shapely.geometry import LineString, Point
from shapely.wkt import dumps
@pytest.fixture(scope="module")
def pipi():
return Point((pi, -pi))
@pytest.fixture(scope="module")
def pipi4():
return Point((pi*4, -pi*4))
def test_wkt(pipi):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi.wkt == "POINT ({0:.16f} {1:.16f})".format(pi, -pi)
def test_wkt(pipi4):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi4.wkt == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps(pipi4):
assert dumps(pipi4) == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps_precision(pipi4):
assert dumps(pipi4, rounding_precision=4) == "POINT ({0:.4f} {1:.4f})".format(pi*4, -pi*4)
|
Test that .wkt and wkt.dumps produce same resultsfrom math import pi
import pytest
from shapely.geometry import LineString, Point
from shapely.wkt import dumps
@pytest.fixture(scope="module")
def pipi():
return Point((pi, -pi))
@pytest.fixture(scope="module")
def pipi4():
return Point((pi*4, -pi*4))
def test_wkt(pipi):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi.wkt == "POINT ({0:.16f} {1:.16f})".format(pi, -pi)
def test_wkt(pipi4):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi4.wkt == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps(pipi4):
assert dumps(pipi4) == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps_precision(pipi4):
assert dumps(pipi4, rounding_precision=4) == "POINT ({0:.4f} {1:.4f})".format(pi*4, -pi*4)
|
<commit_before><commit_msg>Test that .wkt and wkt.dumps produce same results<commit_after>from math import pi
import pytest
from shapely.geometry import LineString, Point
from shapely.wkt import dumps
@pytest.fixture(scope="module")
def pipi():
return Point((pi, -pi))
@pytest.fixture(scope="module")
def pipi4():
return Point((pi*4, -pi*4))
def test_wkt(pipi):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi.wkt == "POINT ({0:.16f} {1:.16f})".format(pi, -pi)
def test_wkt(pipi4):
""".wkt and wkt.dumps() both do not trim by default."""
assert pipi4.wkt == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps(pipi4):
assert dumps(pipi4) == "POINT ({0:.16f} {1:.16f})".format(pi*4, -pi*4)
def test_dumps_precision(pipi4):
assert dumps(pipi4, rounding_precision=4) == "POINT ({0:.4f} {1:.4f})".format(pi*4, -pi*4)
|
|
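The docstrings in this record point out that neither Point.wkt nor wkt.dumps() trims trailing zeros by default. A rough sketch of the contrast, assuming a Shapely release in which dumps() also accepts a trim keyword (the trim call is the only part not exercised by the tests above):

from shapely.geometry import Point
from shapely.wkt import dumps

p = Point(0.5, -0.5)
print(p.wkt)                           # untrimmed, full-precision output, as asserted in test_wkt
print(dumps(p, rounding_precision=4))  # "POINT (0.5000 -0.5000)", as in test_dumps_precision
print(dumps(p, trim=True))             # assumption: trims to "POINT (0.5 -0.5)" if the keyword is supported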
63a80b711e1be9a6047965b8d0061b676d8c50ed
|
cartridge/shop/management/commands/syncreshooks.py
|
cartridge/shop/management/commands/syncreshooks.py
|
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from mezzanine.conf import settings
from cartridge.shop.models import *
class Command(BaseCommand):
help = 'Sync reservations from external hook'
def handle(self, *args, **options):
p = ReservableProduct.objects.all()[0]
p.update_from_hook()
|
Add a manage command to sync reservations from external hook .. hard coded first product only atm
|
Add a manage command to sync reservations from external hook .. hard coded first product only atm
|
Python
|
bsd-2-clause
|
jaywink/cartridge-reservable,jaywink/cartridge-reservable,jaywink/cartridge-reservable
|
Add a manage command to sync reservations from external hook .. hard coded first product only atm
|
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from mezzanine.conf import settings
from cartridge.shop.models import *
class Command(BaseCommand):
help = 'Sync reservations from external hook'
def handle(self, *args, **options):
p = ReservableProduct.objects.all()[0]
p.update_from_hook()
|
<commit_before><commit_msg>Add a manage command to sync reservations from external hook .. hard coded first product only atm<commit_after>
|
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from mezzanine.conf import settings
from cartridge.shop.models import *
class Command(BaseCommand):
help = 'Sync reservations from external hook'
def handle(self, *args, **options):
p = ReservableProduct.objects.all()[0]
p.update_from_hook()
|
Add a manage command to sync reservations from external hook .. hard coded first product only atmfrom django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from mezzanine.conf import settings
from cartridge.shop.models import *
class Command(BaseCommand):
help = 'Sync reservations from external hook'
def handle(self, *args, **options):
p = ReservableProduct.objects.all()[0]
p.update_from_hook()
|
<commit_before><commit_msg>Add a manage command to sync reservations from external hook .. hard coded first product only atm<commit_after>from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from mezzanine.conf import settings
from cartridge.shop.models import *
class Command(BaseCommand):
help = 'Sync reservations from external hook'
def handle(self, *args, **options):
p = ReservableProduct.objects.all()[0]
p.update_from_hook()
|
|
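The commit message above notes that the sync is hard-coded to the first product. A hedged sketch of the obvious generalization, reusing only the queryset and update_from_hook() call the command already depends on (whether every product should be synced is a project decision, not something this record settles):

from django.core.management.base import BaseCommand

from cartridge.shop.models import ReservableProduct

class Command(BaseCommand):
    help = 'Sync reservations from external hook'

    def handle(self, *args, **options):
        # Iterate all reservable products instead of only the first one.
        for product in ReservableProduct.objects.all():
            product.update_from_hook()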
d26a880dcc49658fa369d11d4286a3e2c9b3d487
|
cloudbrain/run.py
|
cloudbrain/run.py
|
import argparse
import subprocess
import sys
import cloudbrain.publishers.sensor_publisher
def publish(args):
sys.argv = args or ['-h']
cloudbrain.publishers.sensor_publisher.main()
def subscribe(args):
return NotImplemented
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
publish_parser = subparsers.add_parser('publish',
help="Publish data stream - For example: cloudbrain publish --mock -n muse -i octopicorn")
publish_parser.set_defaults(func=publish)
subscribe_parser = subparsers.add_parser('subscribe',
help="Subscribe to data stream - For example: cloudbrain subscribe -n muse -i octopicorn")
subscribe_parser.set_defaults(func=subscribe)
args, unknown = parser.parse_known_args()
args.func(unknown)
def main():
parse_args()
if __name__ == "__main__":
main()
|
Add entrypoint script to support subcommands
|
Add entrypoint script to support subcommands
|
Python
|
agpl-3.0
|
marionleborgne/cloudbrain,singlerider/cloudbrain,alessiodm/cloudbrain,prescottprue/cloudbrain,octopicorn/cloudbrain,singlerider/cloudbrain,prescottprue/cloudbrain,andyh616/cloudbrain,alessiodm/cloudbrain,alessiodm/cloudbrain,marionleborgne/cloudbrain,singlerider/cloudbrain,lambdaloop/cloudbrain,prescottprue/cloudbrain,lambdaloop/cloudbrain,lambdaloop/cloudbrain,octopicorn/cloudbrain,prescottprue/cloudbrain,octopicorn/cloudbrain,cloudbrain/cloudbrain,octopicorn/cloudbrain,singlerider/cloudbrain,andyh616/cloudbrain,andyh616/cloudbrain,alessiodm/cloudbrain,andyh616/cloudbrain,lambdaloop/cloudbrain,cloudbrain/cloudbrain
|
Add entrypoint script to support subcommands
|
import argparse
import subprocess
import sys
import cloudbrain.publishers.sensor_publisher
def publish(args):
sys.argv = args or ['-h']
cloudbrain.publishers.sensor_publisher.main()
def subscribe(args):
return NotImplemented
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
publish_parser = subparsers.add_parser('publish',
help="Publish data stream - For example: cloudbrain publish --mock -n muse -i octopicorn")
publish_parser.set_defaults(func=publish)
subscribe_parser = subparsers.add_parser('subscribe',
help="Subscribe to data stream - For example: cloudbrain subscribe -n muse -i octopicorn")
subscribe_parser.set_defaults(func=subscribe)
args, unknown = parser.parse_known_args()
args.func(unknown)
def main():
parse_args()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add entrypoint script to support subcommands<commit_after>
|
import argparse
import subprocess
import sys
import cloudbrain.publishers.sensor_publisher
def publish(args):
sys.argv = args or ['-h']
cloudbrain.publishers.sensor_publisher.main()
def subscribe(args):
return NotImplemented
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
publish_parser = subparsers.add_parser('publish',
help="Publish data stream - For example: cloudbrain publish --mock -n muse -i octopicorn")
publish_parser.set_defaults(func=publish)
subscribe_parser = subparsers.add_parser('subscribe',
help="Subscribe to data stream - For example: cloudbrain subscribe -n muse -i octopicorn")
subscribe_parser.set_defaults(func=subscribe)
args, unknown = parser.parse_known_args()
args.func(unknown)
def main():
parse_args()
if __name__ == "__main__":
main()
|
Add entrypoint script to support subcommandsimport argparse
import subprocess
import sys
import cloudbrain.publishers.sensor_publisher
def publish(args):
sys.argv = args or ['-h']
cloudbrain.publishers.sensor_publisher.main()
def subscribe(args):
return NotImplemented
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
publish_parser = subparsers.add_parser('publish',
help="Publish data stream - For example: cloudbrain publish --mock -n muse -i octopicorn")
publish_parser.set_defaults(func=publish)
subscribe_parser = subparsers.add_parser('subscribe',
help="Subscribe to data stream - For example: cloudbrain subscribe -n muse -i octopicorn")
subscribe_parser.set_defaults(func=subscribe)
args, unknown = parser.parse_known_args()
args.func(unknown)
def main():
parse_args()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add entrypoint script to support subcommands<commit_after>import argparse
import subprocess
import sys
import cloudbrain.publishers.sensor_publisher
def publish(args):
sys.argv = args or ['-h']
cloudbrain.publishers.sensor_publisher.main()
def subscribe(args):
return NotImplemented
def parse_args():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
publish_parser = subparsers.add_parser('publish',
help="Publish data stream - For example: cloudbrain publish --mock -n muse -i octopicorn")
publish_parser.set_defaults(func=publish)
subscribe_parser = subparsers.add_parser('subscribe',
help="Subscribe to data stream - For example: cloudbrain subscribe -n muse -i octopicorn")
subscribe_parser.set_defaults(func=subscribe)
args, unknown = parser.parse_known_args()
args.func(unknown)
def main():
parse_args()
if __name__ == "__main__":
main()
|
|
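A short note on how the entry point above dispatches: parse_known_args() consumes only the subcommand, so for an invocation like cloudbrain publish --mock -n muse -i octopicorn the leftover list ['--mock', '-n', 'muse', '-i', 'octopicorn'] is handed to publish(), which rewrites sys.argv to that remainder (or ['-h'] when it is empty) before calling the sensor publisher's own main().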
fe82933732aed8c1a32834520f1ba244085fcb5d
|
tools/otter_ai_to_our_format.py
|
tools/otter_ai_to_our_format.py
|
import os
import sys
def main():
if len(sys.argv) != 2:
print("Usage: Pass the file name for the source transcript txt file.")
sys.exit(-1)
file = sys.argv[1]
out_file = os.path.expanduser(
os.path.join(
'~/Desktop',
os.path.basename(file)
)
)
print("Files:")
print("Reading source file: ", file)
print("Exported version at: ", out_file)
fin = open(file, 'r', encoding='utf-8')
fout = open(out_file, 'w', encoding='utf-8')
with fin, fout:
time = "0:00"
for line in fin:
if is_time(line):
time = get_time_text(line)
elif line and line.strip():
text = f"{time} {line.strip()}\n\n"
fout.write(text)
# print(text)
def is_time(line: str) -> bool:
if not line or not line.strip():
return False
parts = line.split(':')
if not parts:
return False
return all(p.strip().isnumeric() for p in parts)
def get_time_text(line: str) -> str:
if ':' not in line:
raise Exception(f"Text doesn't seem to be a time: {line}")
parts = line.split(':')
hour_text = "0"
min_text = "0"
sec_text = "0"
if len(parts) == 3:
hour_text = parts[0].strip()
min_text = parts[1].strip()
sec_text = parts[2].strip()
elif len(parts) == 2:
min_text = parts[0].strip()
sec_text = parts[1].strip()
elif len(parts) == 1:
sec_text = parts[0].strip()
return f"{hour_text.zfill(2)}:{min_text.zfill(2)}:{sec_text.zfill(2)}"
if __name__ == '__main__':
main()
|
Add a tool to convert from otter formats to what we use on the website.
|
Add a tool to convert from otter formats to what we use on the website.
|
Python
|
mit
|
mikeckennedy/python_bytes_show_notes
|
Add a tool to convert from otter formats to what we use on the website.
|
import os
import sys
def main():
if len(sys.argv) != 2:
print("Usage: Pass the file name for the source transcript txt file.")
sys.exit(-1)
file = sys.argv[1]
out_file = os.path.expanduser(
os.path.join(
'~/Desktop',
os.path.basename(file)
)
)
print("Files:")
print("Reading source file: ", file)
print("Exported version at: ", out_file)
fin = open(file, 'r', encoding='utf-8')
fout = open(out_file, 'w', encoding='utf-8')
with fin, fout:
time = "0:00"
for line in fin:
if is_time(line):
time = get_time_text(line)
elif line and line.strip():
text = f"{time} {line.strip()}\n\n"
fout.write(text)
# print(text)
def is_time(line: str) -> bool:
if not line or not line.strip():
return False
parts = line.split(':')
if not parts:
return False
return all(p.strip().isnumeric() for p in parts)
def get_time_text(line: str) -> str:
if ':' not in line:
raise Exception(f"Text doesn't seem to be a time: {line}")
parts = line.split(':')
hour_text = "0"
min_text = "0"
sec_text = "0"
if len(parts) == 3:
hour_text = parts[0].strip()
min_text = parts[1].strip()
sec_text = parts[2].strip()
elif len(parts) == 2:
min_text = parts[0].strip()
sec_text = parts[1].strip()
elif len(parts) == 1:
sec_text = parts[0].strip()
return f"{hour_text.zfill(2)}:{min_text.zfill(2)}:{sec_text.zfill(2)}"
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a tool to convert from otter formats to what we use on the website.<commit_after>
|
import os
import sys
def main():
if len(sys.argv) != 2:
print("Usage: Pass the file name for the source transcript txt file.")
sys.exit(-1)
file = sys.argv[1]
out_file = os.path.expanduser(
os.path.join(
'~/Desktop',
os.path.basename(file)
)
)
print("Files:")
print("Reading source file: ", file)
print("Exported version at: ", out_file)
fin = open(file, 'r', encoding='utf-8')
fout = open(out_file, 'w', encoding='utf-8')
with fin, fout:
time = "0:00"
for line in fin:
if is_time(line):
time = get_time_text(line)
elif line and line.strip():
text = f"{time} {line.strip()}\n\n"
fout.write(text)
# print(text)
def is_time(line: str) -> bool:
if not line or not line.strip():
return False
parts = line.split(':')
if not parts:
return False
return all(p.strip().isnumeric() for p in parts)
def get_time_text(line: str) -> str:
if ':' not in line:
raise Exception(f"Text doesn't seem to be a time: {line}")
parts = line.split(':')
hour_text = "0"
min_text = "0"
sec_text = "0"
if len(parts) == 3:
hour_text = parts[0].strip()
min_text = parts[1].strip()
sec_text = parts[2].strip()
elif len(parts) == 2:
min_text = parts[0].strip()
sec_text = parts[1].strip()
elif len(parts) == 1:
sec_text = parts[0].strip()
return f"{hour_text.zfill(2)}:{min_text.zfill(2)}:{sec_text.zfill(2)}"
if __name__ == '__main__':
main()
|
Add a tool to convert from otter formats to what we use on the website.import os
import sys
def main():
if len(sys.argv) != 2:
print("Usage: Pass the file name for the source transcript txt file.")
sys.exit(-1)
file = sys.argv[1]
out_file = os.path.expanduser(
os.path.join(
'~/Desktop',
os.path.basename(file)
)
)
print("Files:")
print("Reading source file: ", file)
print("Exported version at: ", out_file)
fin = open(file, 'r', encoding='utf-8')
fout = open(out_file, 'w', encoding='utf-8')
with fin, fout:
time = "0:00"
for line in fin:
if is_time(line):
time = get_time_text(line)
elif line and line.strip():
text = f"{time} {line.strip()}\n\n"
fout.write(text)
# print(text)
def is_time(line: str) -> bool:
if not line or not line.strip():
return False
parts = line.split(':')
if not parts:
return False
return all(p.strip().isnumeric() for p in parts)
def get_time_text(line: str) -> str:
if ':' not in line:
raise Exception(f"Text doesn't seem to be a time: {line}")
parts = line.split(':')
hour_text = "0"
min_text = "0"
sec_text = "0"
if len(parts) == 3:
hour_text = parts[0].strip()
min_text = parts[1].strip()
sec_text = parts[2].strip()
elif len(parts) == 2:
min_text = parts[0].strip()
sec_text = parts[1].strip()
elif len(parts) == 1:
sec_text = parts[0].strip()
return f"{hour_text.zfill(2)}:{min_text.zfill(2)}:{sec_text.zfill(2)}"
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a tool to convert from otter formats to what we use on the website.<commit_after>import os
import sys
def main():
if len(sys.argv) != 2:
print("Usage: Pass the file name for the source transcript txt file.")
sys.exit(-1)
file = sys.argv[1]
out_file = os.path.expanduser(
os.path.join(
'~/Desktop',
os.path.basename(file)
)
)
print("Files:")
print("Reading source file: ", file)
print("Exported version at: ", out_file)
fin = open(file, 'r', encoding='utf-8')
fout = open(out_file, 'w', encoding='utf-8')
with fin, fout:
time = "0:00"
for line in fin:
if is_time(line):
time = get_time_text(line)
elif line and line.strip():
text = f"{time} {line.strip()}\n\n"
fout.write(text)
# print(text)
def is_time(line: str) -> bool:
if not line or not line.strip():
return False
parts = line.split(':')
if not parts:
return False
return all(p.strip().isnumeric() for p in parts)
def get_time_text(line: str) -> str:
if ':' not in line:
raise Exception(f"Text doesn't seem to be a time: {line}")
parts = line.split(':')
hour_text = "0"
min_text = "0"
sec_text = "0"
if len(parts) == 3:
hour_text = parts[0].strip()
min_text = parts[1].strip()
sec_text = parts[2].strip()
elif len(parts) == 2:
min_text = parts[0].strip()
sec_text = parts[1].strip()
elif len(parts) == 1:
sec_text = parts[0].strip()
return f"{hour_text.zfill(2)}:{min_text.zfill(2)}:{sec_text.zfill(2)}"
if __name__ == '__main__':
main()
|
|
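For reference, the helpers in this record decide which lines are timestamps and normalize them to HH:MM:SS; a small illustration of their behaviour (example values only):

# is_time("12:34")          -> True   (every colon-separated part is numeric)
# is_time("Welcome back.")  -> False
# get_time_text("12:34")    -> "00:12:34"
# get_time_text("1:02:03")  -> "01:02:03"
# So an Otter block of "12:34" followed by "Welcome back." is written out as
# "00:12:34 Welcome back." plus a blank line, and the converted file lands on
# the Desktop under the same basename as the input file.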
a808ae6ff89290a65802769e925610ba123a99f7
|
tests/auto/keras/test_constraints.py
|
tests/auto/keras/test_constraints.py
|
import unittest
import numpy as np
from theano import tensor as T
class TestConstraints(unittest.TestCase):
def setUp(self):
self.some_values = [0.1,0.5,3,8,1e-7]
self.example_array = np.random.random((100,100))*100. - 50.
self.example_array[0,0] = 0. # 0 could possibly cause trouble
def test_maxnorm(self):
from keras.constraints import maxnorm
for m in self.some_values:
norm_instance = maxnorm(m)
normed = norm_instance(self.example_array)
assert(np.all(normed.eval() < m))
if __name__ == '__main__':
unittest.main()
|
Add a test for the max-norm constraint
|
Add a test for the max-norm constraint
|
Python
|
mit
|
jayhetee/keras,nzer0/keras,nebw/keras,sjuvekar/keras,Yingmin-Li/keras,cvfish/keras,nehz/keras,eulerreich/keras,harshhemani/keras,pthaike/keras,wubr2000/keras,zhmz90/keras,daviddiazvico/keras,Aureliu/keras,cheng6076/keras,dhruvparamhans/keras,dribnet/keras,tencrance/keras,DeepGnosis/keras,keras-team/keras,saurav111/keras,brainwater/keras,nt/keras,florentchandelier/keras,asampat3090/keras,jimgoo/keras,kuza55/keras,DLlearn/keras,why11002526/keras,xiaoda99/keras,JasonTam/keras,jonberliner/keras,kod3r/keras,bboalimoe/keras,wxs/keras,jasonyaw/keras,fmacias64/keras,rudaoshi/keras,jiumem/keras,vseledkin/keras,MagicSen/keras,ogrisel/keras,bottler/keras,jalexvig/keras,ypkang/keras,printedheart/keras,keskarnitish/keras,johmathe/keras,yingzha/keras,ekamioka/keras,amy12xx/keras,stephenbalaban/keras,3dconv/keras,untom/keras,llcao/keras,rlkelly/keras,chenych11/keras,xurantju/keras,abayowbo/keras,jhauswald/keras,dxj19831029/keras,mikekestemont/keras,rodrigob/keras,EderSantana/keras,OlafLee/keras,Cadene/keras,Smerity/keras,jbolinge/keras,hhaoyan/keras,ml-lab/keras,marchick209/keras,meanmee/keras,gamer13/keras,navyjeff/keras,iScienceLuvr/keras,LIBOTAO/keras,kfoss/keras,zxytim/keras,pjadzinsky/keras,iamtrask/keras,imcomking/Convolutional-GRU-keras-extension-,keras-team/keras,gavinmh/keras,zhangxujinsh/keras,dolaameng/keras,relh/keras,ashhher3/keras,kemaswill/keras,happyboy310/keras,danielforsyth/keras,zxsted/keras,ledbetdr/keras
|
Add a test for the max-norm constraint
|
import unittest
import numpy as np
from theano import tensor as T
class TestConstraints(unittest.TestCase):
def setUp(self):
self.some_values = [0.1,0.5,3,8,1e-7]
self.example_array = np.random.random((100,100))*100. - 50.
self.example_array[0,0] = 0. # 0 could possibly cause trouble
def test_maxnorm(self):
from keras.constraints import maxnorm
for m in self.some_values:
norm_instance = maxnorm(m)
normed = norm_instance(self.example_array)
assert(np.all(normed.eval() < m))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for the max-norm constraint<commit_after>
|
import unittest
import numpy as np
from theano import tensor as T
class TestConstraints(unittest.TestCase):
def setUp(self):
self.some_values = [0.1,0.5,3,8,1e-7]
self.example_array = np.random.random((100,100))*100. - 50.
self.example_array[0,0] = 0. # 0 could possibly cause trouble
def test_maxnorm(self):
from keras.constraints import maxnorm
for m in self.some_values:
norm_instance = maxnorm(m)
normed = norm_instance(self.example_array)
assert(np.all(normed.eval() < m))
if __name__ == '__main__':
unittest.main()
|
Add a test for the max-norm constraintimport unittest
import numpy as np
from theano import tensor as T
class TestConstraints(unittest.TestCase):
def setUp(self):
self.some_values = [0.1,0.5,3,8,1e-7]
self.example_array = np.random.random((100,100))*100. - 50.
self.example_array[0,0] = 0. # 0 could possibly cause trouble
def test_maxnorm(self):
from keras.constraints import maxnorm
for m in self.some_values:
norm_instance = maxnorm(m)
normed = norm_instance(self.example_array)
assert(np.all(normed.eval() < m))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for the max-norm constraint<commit_after>import unittest
import numpy as np
from theano import tensor as T
class TestConstraints(unittest.TestCase):
def setUp(self):
self.some_values = [0.1,0.5,3,8,1e-7]
self.example_array = np.random.random((100,100))*100. - 50.
self.example_array[0,0] = 0. # 0 could possibly cause trouble
def test_maxnorm(self):
from keras.constraints import maxnorm
for m in self.some_values:
norm_instance = maxnorm(m)
normed = norm_instance(self.example_array)
assert(np.all(normed.eval() < m))
if __name__ == '__main__':
unittest.main()
|
|
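The test above asserts on individual elements after applying maxnorm. As a rough NumPy illustration of what a column-wise max-norm constraint conventionally does (written from the standard definition, not copied from keras.constraints):

import numpy as np

def maxnorm_reference(weights, m, eps=1e-7):
    # Rescale any column whose L2 norm exceeds m; columns already within the
    # limit are left essentially unchanged.
    norms = np.sqrt(np.sum(np.square(weights), axis=0))
    desired = np.clip(norms, 0, m)
    return weights * (desired / (eps + norms))

example = np.random.random((100, 100)) * 100.0 - 50.0
constrained = maxnorm_reference(example, 3.0)
assert np.all(np.sqrt(np.sum(constrained ** 2, axis=0)) <= 3.0 + 1e-6)
assert np.all(np.abs(constrained) <= 3.0)  # hence the element-wise check in the test passes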
0851752e0b2a2234f43ca8632ffd23e10118feb5
|
scikits/learn/machine/manifold_learning/regression/tests/test_neighbors.py
|
scikits/learn/machine/manifold_learning/regression/tests/test_neighbors.py
|
import numpy as np
from ..neighbors import Neighbors
from numpy.testing import assert_array_equal
def test_neighbors_1D():
"""
Nearest Neighbors in a line.
"""
# some constants
n = 10
n_2 = n/2
samples = [[x] for x in range(0, n)]
labels = [0]*n_2 + [1]*n_2
zeros = np.zeros(n_2)
ones = np.ones(n_2)
# k = 1
nn = Neighbors(samples, labels=labels, k=1)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,n_2)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(n_2, 10)]),
ones)
# k = 3
nn = Neighbors(samples, labels=labels, k=3)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,5)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(5, 10)]),
ones)
|
Add tests for Nearest Neighbor Algorithm.
|
Add tests for Nearest Neighbor Algorithm.
TODO: a lot more tests are needed.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@369 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
Python
|
bsd-3-clause
|
iismd17/scikit-learn,JeanKossaifi/scikit-learn,fzalkow/scikit-learn,cauchycui/scikit-learn,florian-f/sklearn,abhishekkrthakur/scikit-learn,ZENGXH/scikit-learn,rrohan/scikit-learn,LohithBlaze/scikit-learn,yyjiang/scikit-learn,ilo10/scikit-learn,etkirsch/scikit-learn,luo66/scikit-learn,nmayorov/scikit-learn,LiaoPan/scikit-learn,stylianos-kampakis/scikit-learn,wazeerzulfikar/scikit-learn,YinongLong/scikit-learn,arabenjamin/scikit-learn,AnasGhrab/scikit-learn,PatrickOReilly/scikit-learn,equialgo/scikit-learn,plissonf/scikit-learn,meduz/scikit-learn,jpautom/scikit-learn,gclenaghan/scikit-learn,mhdella/scikit-learn,ldirer/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jkarnows/scikit-learn,anntzer/scikit-learn,zuku1985/scikit-learn,jlegendary/scikit-learn,ltiao/scikit-learn,petosegan/scikit-learn,thientu/scikit-learn,tawsifkhan/scikit-learn,loli/sklearn-ensembletrees,glennq/scikit-learn,rahuldhote/scikit-learn,abhishekgahlot/scikit-learn,jseabold/scikit-learn,aetilley/scikit-learn,YinongLong/scikit-learn,Clyde-fare/scikit-learn,kagayakidan/scikit-learn,jakirkham/scikit-learn,michigraber/scikit-learn,mhue/scikit-learn,shangwuhencc/scikit-learn,samuel1208/scikit-learn,jblackburne/scikit-learn,shikhardb/scikit-learn,vivekmishra1991/scikit-learn,ahoyosid/scikit-learn,shikhardb/scikit-learn,fabianp/scikit-learn,ZENGXH/scikit-learn,macks22/scikit-learn,JsNoNo/scikit-learn,pv/scikit-learn,maheshakya/scikit-learn,mehdidc/scikit-learn,spallavolu/scikit-learn,f3r/scikit-learn,alexeyum/scikit-learn,heli522/scikit-learn,idlead/scikit-learn,elkingtonmcb/scikit-learn,nomadcube/scikit-learn,rrohan/scikit-learn,raghavrv/scikit-learn,jzt5132/scikit-learn,mhdella/scikit-learn,zihua/scikit-learn,arjoly/scikit-learn,Lawrence-Liu/scikit-learn,carrillo/scikit-learn,ssaeger/scikit-learn,qifeigit/scikit-learn,huobaowangxi/scikit-learn,mwv/scikit-learn,andrewnc/scikit-learn,IshankGulati/scikit-learn,IndraVikas/scikit-learn,andrewnc/scikit-learn,jorik041/scikit-learn,jzt5132/scikit-learn,mikebenfield/scikit-learn,lenovor/scikit-learn,anirudhjayaraman/scikit-learn,0x0all/scikit-learn,voxlol/scikit-learn,ankurankan/scikit-learn,chrsrds/scikit-learn,pratapvardhan/scikit-learn,deepesch/scikit-learn,jblackburne/scikit-learn,jseabold/scikit-learn,thilbern/scikit-learn,nvoron23/scikit-learn,mugizico/scikit-learn,ishanic/scikit-learn,abhishekkrthakur/scikit-learn,ilyes14/scikit-learn,larsmans/scikit-learn,nomadcube/scikit-learn,BiaDarkia/scikit-learn,Barmaley-exe/scikit-learn,MohammedWasim/scikit-learn,ankurankan/scikit-learn,pv/scikit-learn,hainm/scikit-learn,HolgerPeters/scikit-learn,zorojean/scikit-learn,walterreade/scikit-learn,yyjiang/scikit-learn,robbymeals/scikit-learn,krez13/scikit-learn,kevin-intel/scikit-learn,Titan-C/scikit-learn,henridwyer/scikit-learn,mblondel/scikit-learn,equialgo/scikit-learn,aabadie/scikit-learn,kylerbrown/scikit-learn,RPGOne/scikit-learn,lucidfrontier45/scikit-learn,billy-inn/scikit-learn,Jimmy-Morzaria/scikit-learn,JPFrancoia/scikit-learn,wzbozon/scikit-learn,shahankhatch/scikit-learn,trankmichael/scikit-learn,dsullivan7/scikit-learn,tosolveit/scikit-learn,aabadie/scikit-learn,NunoEdgarGub1/scikit-learn,pythonvietnam/scikit-learn,gotomypc/scikit-learn,bigdataelephants/scikit-learn,nikitasingh981/scikit-learn,jereze/scikit-learn,wlamond/scikit-learn,jpautom/scikit-learn,ahoyosid/scikit-learn,PatrickChrist/scikit-learn,spallavolu/scikit-learn,Sentient07/scikit-learn,tomlof/scikit-learn,CVML/scikit-learn,vortex-ape/scikit-learn,btabibian/scikit-learn,wlamond/scikit-learn,kevin-intel/sci
kit-learn,RachitKansal/scikit-learn,massmutual/scikit-learn,bigdataelephants/scikit-learn,maheshakya/scikit-learn,michigraber/scikit-learn,liberatorqjw/scikit-learn,macks22/scikit-learn,rahuldhote/scikit-learn,loli/semisupervisedforests,raghavrv/scikit-learn,mlyundin/scikit-learn,tomlof/scikit-learn,ningchi/scikit-learn,florian-f/sklearn,hsiaoyi0504/scikit-learn,anntzer/scikit-learn,jseabold/scikit-learn,wlamond/scikit-learn,btabibian/scikit-learn,appapantula/scikit-learn,CforED/Machine-Learning,scikit-learn/scikit-learn,mhue/scikit-learn,etkirsch/scikit-learn,Fireblend/scikit-learn,themrmax/scikit-learn,rvraghav93/scikit-learn,AlexandreAbraham/scikit-learn,nmayorov/scikit-learn,sonnyhu/scikit-learn,btabibian/scikit-learn,loli/semisupervisedforests,fabioticconi/scikit-learn,HolgerPeters/scikit-learn,Vimos/scikit-learn,hitszxp/scikit-learn,liangz0707/scikit-learn,ningchi/scikit-learn,nesterione/scikit-learn,bthirion/scikit-learn,mehdidc/scikit-learn,imaculate/scikit-learn,victorbergelin/scikit-learn,anntzer/scikit-learn,466152112/scikit-learn,hlin117/scikit-learn,sgenoud/scikit-learn,cainiaocome/scikit-learn,walterreade/scikit-learn,Clyde-fare/scikit-learn,AlexandreAbraham/scikit-learn,pianomania/scikit-learn,pypot/scikit-learn,altairpearl/scikit-learn,pkruskal/scikit-learn,cainiaocome/scikit-learn,nrhine1/scikit-learn,Jimmy-Morzaria/scikit-learn,schets/scikit-learn,rishikksh20/scikit-learn,larsmans/scikit-learn,toastedcornflakes/scikit-learn,kashif/scikit-learn,ssaeger/scikit-learn,TomDLT/scikit-learn,DSLituiev/scikit-learn,bnaul/scikit-learn,alexsavio/scikit-learn,hugobowne/scikit-learn,hainm/scikit-learn,lbishal/scikit-learn,jorik041/scikit-learn,etkirsch/scikit-learn,sanketloke/scikit-learn,ankurankan/scikit-learn,Adai0808/scikit-learn,lesteve/scikit-learn,jaidevd/scikit-learn,arahuja/scikit-learn,JsNoNo/scikit-learn,NelisVerhoef/scikit-learn,ashhher3/scikit-learn,mjudsp/Tsallis,huzq/scikit-learn,Garrett-R/scikit-learn,mattilyra/scikit-learn,appapantula/scikit-learn,cybernet14/scikit-learn,alvarofierroclavero/scikit-learn,aewhatley/scikit-learn,alexsavio/scikit-learn,jorik041/scikit-learn,jereze/scikit-learn,potash/scikit-learn,ElDeveloper/scikit-learn,vibhorag/scikit-learn,alvarofierroclavero/scikit-learn,manashmndl/scikit-learn,OshynSong/scikit-learn,ningchi/scikit-learn,Achuth17/scikit-learn,Windy-Ground/scikit-learn,jjx02230808/project0223,CVML/scikit-learn,jayflo/scikit-learn,nikitasingh981/scikit-learn,jmschrei/scikit-learn,xavierwu/scikit-learn,saiwing-yeung/scikit-learn,RayMick/scikit-learn,quheng/scikit-learn,ChanChiChoi/scikit-learn,sanketloke/scikit-learn,RayMick/scikit-learn,trankmichael/scikit-learn,arabenjamin/scikit-learn,zorroblue/scikit-learn,iismd17/scikit-learn,AlexanderFabisch/scikit-learn,mattilyra/scikit-learn,evgchz/scikit-learn,shyamalschandra/scikit-learn,MatthieuBizien/scikit-learn,PrashntS/scikit-learn,lucidfrontier45/scikit-learn,treycausey/scikit-learn,shyamalschandra/scikit-learn,ndingwall/scikit-learn,ishanic/scikit-learn,JPFrancoia/scikit-learn,AlexanderFabisch/scikit-learn,LohithBlaze/scikit-learn,schets/scikit-learn,IssamLaradji/scikit-learn,hsuantien/scikit-learn,procoder317/scikit-learn,OshynSong/scikit-learn,ZenDevelopmentSystems/scikit-learn,bikong2/scikit-learn,Lawrence-Liu/scikit-learn,anirudhjayaraman/scikit-learn,nvoron23/scikit-learn,rajat1994/scikit-learn,ZENGXH/scikit-learn,IssamLaradji/scikit-learn,liangz0707/scikit-learn,sinhrks/scikit-learn,ZenDevelopmentSystems/scikit-learn,madjelan/scikit-learn,walterreade/scikit-learn,anirudhjayaraman/sc
ikit-learn,cdegroc/scikit-learn,yanlend/scikit-learn,pv/scikit-learn,0x0all/scikit-learn,treycausey/scikit-learn,nrhine1/scikit-learn,xwolf12/scikit-learn,larsmans/scikit-learn,Windy-Ground/scikit-learn,0asa/scikit-learn,Myasuka/scikit-learn,rahuldhote/scikit-learn,ChanderG/scikit-learn,samuel1208/scikit-learn,harshaneelhg/scikit-learn,rrohan/scikit-learn,AIML/scikit-learn,vermouthmjl/scikit-learn,ChanderG/scikit-learn,ndingwall/scikit-learn,ClimbsRocks/scikit-learn,kevin-intel/scikit-learn,MartinDelzant/scikit-learn,yanlend/scikit-learn,untom/scikit-learn,nomadcube/scikit-learn,mojoboss/scikit-learn,russel1237/scikit-learn,elkingtonmcb/scikit-learn,mhdella/scikit-learn,ElDeveloper/scikit-learn,poryfly/scikit-learn,mjgrav2001/scikit-learn,manashmndl/scikit-learn,abhishekgahlot/scikit-learn,evgchz/scikit-learn,sanketloke/scikit-learn,mojoboss/scikit-learn,terkkila/scikit-learn,fbagirov/scikit-learn,q1ang/scikit-learn,fengzhyuan/scikit-learn,fabianp/scikit-learn,PatrickChrist/scikit-learn,sonnyhu/scikit-learn,arabenjamin/scikit-learn,JeanKossaifi/scikit-learn,cwu2011/scikit-learn,trungnt13/scikit-learn,AIML/scikit-learn,3manuek/scikit-learn,andrewnc/scikit-learn,Windy-Ground/scikit-learn,vibhorag/scikit-learn,ChanChiChoi/scikit-learn,costypetrisor/scikit-learn,thilbern/scikit-learn,massmutual/scikit-learn,h2educ/scikit-learn,sgenoud/scikit-learn,anurag313/scikit-learn,samuel1208/scikit-learn,AlexanderFabisch/scikit-learn,nhejazi/scikit-learn,rahuldhote/scikit-learn,roxyboy/scikit-learn,murali-munna/scikit-learn,tmhm/scikit-learn,henridwyer/scikit-learn,schets/scikit-learn,jakobworldpeace/scikit-learn,glouppe/scikit-learn,lenovor/scikit-learn,kaichogami/scikit-learn,jakirkham/scikit-learn,gotomypc/scikit-learn,henrykironde/scikit-learn,Jimmy-Morzaria/scikit-learn,ominux/scikit-learn,fredhusser/scikit-learn,olologin/scikit-learn,ilo10/scikit-learn,ngoix/OCRF,Lawrence-Liu/scikit-learn,sergeyf/scikit-learn,MechCoder/scikit-learn,Achuth17/scikit-learn,jmschrei/scikit-learn,mjudsp/Tsallis,ssaeger/scikit-learn,cainiaocome/scikit-learn,carrillo/scikit-learn,bnaul/scikit-learn,MatthieuBizien/scikit-learn,waterponey/scikit-learn,shikhardb/scikit-learn,AlexRobson/scikit-learn,fyffyt/scikit-learn,vinayak-mehta/scikit-learn,spallavolu/scikit-learn,treycausey/scikit-learn,q1ang/scikit-learn,frank-tancf/scikit-learn,hdmetor/scikit-learn,belltailjp/scikit-learn,fabianp/scikit-learn,bhargav/scikit-learn,Garrett-R/scikit-learn,wanggang3333/scikit-learn,beepee14/scikit-learn,NelisVerhoef/scikit-learn,Aasmi/scikit-learn,eickenberg/scikit-learn,ngoix/OCRF,akionakamura/scikit-learn,loli/semisupervisedforests,fzalkow/scikit-learn,hsuantien/scikit-learn,devanshdalal/scikit-learn,lucidfrontier45/scikit-learn,rahul-c1/scikit-learn,waterponey/scikit-learn,loli/sklearn-ensembletrees,liangz0707/scikit-learn,icdishb/scikit-learn,mfjb/scikit-learn,dsullivan7/scikit-learn,hugobowne/scikit-learn,alexeyum/scikit-learn,altairpearl/scikit-learn,yyjiang/scikit-learn,Barmaley-exe/scikit-learn,victorbergelin/scikit-learn,roxyboy/scikit-learn,stylianos-kampakis/scikit-learn,anntzer/scikit-learn,jmetzen/scikit-learn,bnaul/scikit-learn,chrisburr/scikit-learn,xwolf12/scikit-learn,harshaneelhg/scikit-learn,cainiaocome/scikit-learn,equialgo/scikit-learn,harshaneelhg/scikit-learn,cybernet14/scikit-learn,voxlol/scikit-learn,abhishekgahlot/scikit-learn,Nyker510/scikit-learn,qifeigit/scikit-learn,terkkila/scikit-learn,alvarofierroclavero/scikit-learn,hugobowne/scikit-learn,MartinDelzant/scikit-learn,ankurankan/scikit-learn,gotomypc/scikit-l
earn,xzh86/scikit-learn,shangwuhencc/scikit-learn,xuewei4d/scikit-learn,liberatorqjw/scikit-learn,tdhopper/scikit-learn,fengzhyuan/scikit-learn,lesteve/scikit-learn,maheshakya/scikit-learn,anirudhjayaraman/scikit-learn,rahul-c1/scikit-learn,simon-pepin/scikit-learn,JosmanPS/scikit-learn,michigraber/scikit-learn,ivannz/scikit-learn,PatrickChrist/scikit-learn,jzt5132/scikit-learn,HolgerPeters/scikit-learn,pompiduskus/scikit-learn,vivekmishra1991/scikit-learn,sergeyf/scikit-learn,Adai0808/scikit-learn,xzh86/scikit-learn,kaichogami/scikit-learn,Nyker510/scikit-learn,idlead/scikit-learn,larsmans/scikit-learn,IshankGulati/scikit-learn,smartscheduling/scikit-learn-categorical-tree,fbagirov/scikit-learn,loli/sklearn-ensembletrees,466152112/scikit-learn,mxjl620/scikit-learn,wanggang3333/scikit-learn,mattgiguere/scikit-learn,NelisVerhoef/scikit-learn,pianomania/scikit-learn,petosegan/scikit-learn,altairpearl/scikit-learn,voxlol/scikit-learn,wzbozon/scikit-learn,shahankhatch/scikit-learn,0x0all/scikit-learn,quheng/scikit-learn,Akshay0724/scikit-learn,0asa/scikit-learn,pythonvietnam/scikit-learn,tomlof/scikit-learn,LiaoPan/scikit-learn,toastedcornflakes/scikit-learn,eickenberg/scikit-learn,cwu2011/scikit-learn,fzalkow/scikit-learn,rsivapr/scikit-learn,bhargav/scikit-learn,madjelan/scikit-learn,Titan-C/scikit-learn,zhenv5/scikit-learn,nesterione/scikit-learn,mwv/scikit-learn,plissonf/scikit-learn,lazywei/scikit-learn,massmutual/scikit-learn,jzt5132/scikit-learn,zaxtax/scikit-learn,mehdidc/scikit-learn,mfjb/scikit-learn,UNR-AERIAL/scikit-learn,YinongLong/scikit-learn,murali-munna/scikit-learn,pkruskal/scikit-learn,AIML/scikit-learn,abhishekkrthakur/scikit-learn,tosolveit/scikit-learn,yunfeilu/scikit-learn,bthirion/scikit-learn,huzq/scikit-learn,Srisai85/scikit-learn,ndingwall/scikit-learn,hsiaoyi0504/scikit-learn,lbishal/scikit-learn,Myasuka/scikit-learn,ephes/scikit-learn,jseabold/scikit-learn,larsmans/scikit-learn,lbishal/scikit-learn,fabioticconi/scikit-learn,untom/scikit-learn,yunfeilu/scikit-learn,sarahgrogan/scikit-learn,hrjn/scikit-learn,lesteve/scikit-learn,rsivapr/scikit-learn,ishanic/scikit-learn,fbagirov/scikit-learn,walterreade/scikit-learn,moutai/scikit-learn,OshynSong/scikit-learn,DonBeo/scikit-learn,saiwing-yeung/scikit-learn,CforED/Machine-Learning,glennq/scikit-learn,vortex-ape/scikit-learn,lin-credible/scikit-learn,cl4rke/scikit-learn,fyffyt/scikit-learn,Vimos/scikit-learn,AnasGhrab/scikit-learn,ldirer/scikit-learn,heli522/scikit-learn,jorik041/scikit-learn,olologin/scikit-learn,sumspr/scikit-learn,xzh86/scikit-learn,nrhine1/scikit-learn,ilyes14/scikit-learn,altairpearl/scikit-learn,Garrett-R/scikit-learn,bhargav/scikit-learn,wzbozon/scikit-learn,lin-credible/scikit-learn,huobaowangxi/scikit-learn,moutai/scikit-learn,olologin/scikit-learn,kashif/scikit-learn,poryfly/scikit-learn,nelson-liu/scikit-learn,mattgiguere/scikit-learn,evgchz/scikit-learn,dhruv13J/scikit-learn,ivannz/scikit-learn,mjgrav2001/scikit-learn,terkkila/scikit-learn,CforED/Machine-Learning,petosegan/scikit-learn,lin-credible/scikit-learn,JeanKossaifi/scikit-learn,mattilyra/scikit-learn,hdmetor/scikit-learn,potash/scikit-learn,mattgiguere/scikit-learn,thientu/scikit-learn,f3r/scikit-learn,jayflo/scikit-learn,ClimbsRocks/scikit-learn,rsivapr/scikit-learn,ycaihua/scikit-learn,manhhomienbienthuy/scikit-learn,stylianos-kampakis/scikit-learn,rohanp/scikit-learn,pypot/scikit-learn,vigilv/scikit-learn,joernhees/scikit-learn,Djabbz/scikit-learn,sergeyf/scikit-learn,krez13/scikit-learn,khkaminska/scikit-learn,plissonf/scikit-lear
n,jm-begon/scikit-learn,harshaneelhg/scikit-learn,kagayakidan/scikit-learn,gclenaghan/scikit-learn,terkkila/scikit-learn,trungnt13/scikit-learn,DonBeo/scikit-learn,dsullivan7/scikit-learn,olologin/scikit-learn,cauchycui/scikit-learn,JsNoNo/scikit-learn,joshloyal/scikit-learn,ngoix/OCRF,dsquareindia/scikit-learn,aminert/scikit-learn,sgenoud/scikit-learn,shusenl/scikit-learn,poryfly/scikit-learn,MohammedWasim/scikit-learn,lucidfrontier45/scikit-learn,shusenl/scikit-learn,mayblue9/scikit-learn,ChanChiChoi/scikit-learn,ominux/scikit-learn,zorroblue/scikit-learn,qifeigit/scikit-learn,Aasmi/scikit-learn,xwolf12/scikit-learn,amueller/scikit-learn,ndingwall/scikit-learn,victorbergelin/scikit-learn,PatrickChrist/scikit-learn,mblondel/scikit-learn,ashhher3/scikit-learn,B3AU/waveTree,CVML/scikit-learn,AnasGhrab/scikit-learn,Myasuka/scikit-learn,sarahgrogan/scikit-learn,glemaitre/scikit-learn,jm-begon/scikit-learn,RomainBrault/scikit-learn,zaxtax/scikit-learn,kylerbrown/scikit-learn,vivekmishra1991/scikit-learn,ogrisel/scikit-learn,djgagne/scikit-learn,mwv/scikit-learn,Sentient07/scikit-learn,MartinSavc/scikit-learn,RPGOne/scikit-learn,Obus/scikit-learn,untom/scikit-learn,mayblue9/scikit-learn,mikebenfield/scikit-learn,clemkoa/scikit-learn,lazywei/scikit-learn,themrmax/scikit-learn,kevin-intel/scikit-learn,ilo10/scikit-learn,dingocuster/scikit-learn,justincassidy/scikit-learn,rishikksh20/scikit-learn,huzq/scikit-learn,hitszxp/scikit-learn,vybstat/scikit-learn,dhruv13J/scikit-learn,fyffyt/scikit-learn,marcocaccin/scikit-learn,jm-begon/scikit-learn,samzhang111/scikit-learn,jorge2703/scikit-learn,ZENGXH/scikit-learn,ilyes14/scikit-learn,costypetrisor/scikit-learn,justincassidy/scikit-learn,hdmetor/scikit-learn,jjx02230808/project0223,lenovor/scikit-learn,wazeerzulfikar/scikit-learn,mrshu/scikit-learn,andaag/scikit-learn,ilyes14/scikit-learn,JosmanPS/scikit-learn,UNR-AERIAL/scikit-learn,sumspr/scikit-learn,hdmetor/scikit-learn,jakirkham/scikit-learn,vshtanko/scikit-learn,shenzebang/scikit-learn,depet/scikit-learn,jlegendary/scikit-learn,robbymeals/scikit-learn,nikitasingh981/scikit-learn,akionakamura/scikit-learn,jayflo/scikit-learn,Lawrence-Liu/scikit-learn,jereze/scikit-learn,jmschrei/scikit-learn,Titan-C/scikit-learn,costypetrisor/scikit-learn,ephes/scikit-learn,murali-munna/scikit-learn,Sentient07/scikit-learn,sinhrks/scikit-learn,kaichogami/scikit-learn,mrshu/scikit-learn,mjgrav2001/scikit-learn,eickenberg/scikit-learn,mxjl620/scikit-learn,themrmax/scikit-learn,maheshakya/scikit-learn,MartinSavc/scikit-learn,betatim/scikit-learn,toastedcornflakes/scikit-learn,mblondel/scikit-learn,dingocuster/scikit-learn,MartinDelzant/scikit-learn,joernhees/scikit-learn,hitszxp/scikit-learn,luo66/scikit-learn,jaidevd/scikit-learn,3manuek/scikit-learn,glemaitre/scikit-learn,manhhomienbienthuy/scikit-learn,zaxtax/scikit-learn,jmetzen/scikit-learn,ChanChiChoi/scikit-learn,Jimmy-Morzaria/scikit-learn,bthirion/scikit-learn,MartinSavc/scikit-learn,vshtanko/scikit-learn,fabioticconi/scikit-learn,rvraghav93/scikit-learn,IndraVikas/scikit-learn,mayblue9/scikit-learn,glouppe/scikit-learn,fengzhyuan/scikit-learn,RachitKansal/scikit-learn,xubenben/scikit-learn,cwu2011/scikit-learn,manhhomienbienthuy/scikit-learn,dhruv13J/scikit-learn,ZenDevelopmentSystems/scikit-learn,hlin117/scikit-learn,kashif/scikit-learn,yanlend/scikit-learn,bikong2/scikit-learn,pypot/scikit-learn,marcocaccin/scikit-learn,rishikksh20/scikit-learn,thilbern/scikit-learn,sgenoud/scikit-learn,billy-inn/scikit-learn,heli522/scikit-learn,davidgbe/scikit-learn,cl4r
ke/scikit-learn,joshloyal/scikit-learn,rohanp/scikit-learn,jakirkham/scikit-learn,ngoix/OCRF,frank-tancf/scikit-learn,JsNoNo/scikit-learn,vigilv/scikit-learn,f3r/scikit-learn,AlexRobson/scikit-learn,fyffyt/scikit-learn,ldirer/scikit-learn,roxyboy/scikit-learn,wazeerzulfikar/scikit-learn,jaidevd/scikit-learn,manhhomienbienthuy/scikit-learn,moutai/scikit-learn,depet/scikit-learn,akionakamura/scikit-learn,abhishekgahlot/scikit-learn,sinhrks/scikit-learn,hitszxp/scikit-learn,RPGOne/scikit-learn,davidgbe/scikit-learn,abimannans/scikit-learn,ltiao/scikit-learn,Fireblend/scikit-learn,raghavrv/scikit-learn,pratapvardhan/scikit-learn,aewhatley/scikit-learn,nmayorov/scikit-learn,adamgreenhall/scikit-learn,madjelan/scikit-learn,betatim/scikit-learn,manashmndl/scikit-learn,bhargav/scikit-learn,pompiduskus/scikit-learn,MatthieuBizien/scikit-learn,cybernet14/scikit-learn,rahul-c1/scikit-learn,herilalaina/scikit-learn,hsiaoyi0504/scikit-learn,joernhees/scikit-learn,maheshakya/scikit-learn,simon-pepin/scikit-learn,liangz0707/scikit-learn,mayblue9/scikit-learn,fzalkow/scikit-learn,pkruskal/scikit-learn,russel1237/scikit-learn,aminert/scikit-learn,pratapvardhan/scikit-learn,vivekmishra1991/scikit-learn,arahuja/scikit-learn,kylerbrown/scikit-learn,ClimbsRocks/scikit-learn,kmike/scikit-learn,pratapvardhan/scikit-learn,krez13/scikit-learn,LohithBlaze/scikit-learn,xzh86/scikit-learn,B3AU/waveTree,rexshihaoren/scikit-learn,xyguo/scikit-learn,yask123/scikit-learn,nikitasingh981/scikit-learn,AlexRobson/scikit-learn,jjx02230808/project0223,joernhees/scikit-learn,alvarofierroclavero/scikit-learn,Srisai85/scikit-learn,kjung/scikit-learn,kagayakidan/scikit-learn,huobaowangxi/scikit-learn,scikit-learn/scikit-learn,hrjn/scikit-learn,djgagne/scikit-learn,alexsavio/scikit-learn,B3AU/waveTree,Srisai85/scikit-learn,jaidevd/scikit-learn,theoryno3/scikit-learn,PrashntS/scikit-learn,shyamalschandra/scikit-learn,mugizico/scikit-learn,joshloyal/scikit-learn,NunoEdgarGub1/scikit-learn,fengzhyuan/scikit-learn,liberatorqjw/scikit-learn,imaculate/scikit-learn,wazeerzulfikar/scikit-learn,mikebenfield/scikit-learn,ycaihua/scikit-learn,carrillo/scikit-learn,Akshay0724/scikit-learn,RachitKansal/scikit-learn,marcocaccin/scikit-learn,ivannz/scikit-learn,procoder317/scikit-learn,potash/scikit-learn,alexsavio/scikit-learn,eickenberg/scikit-learn,mojoboss/scikit-learn,shusenl/scikit-learn,saiwing-yeung/scikit-learn,466152112/scikit-learn,mattgiguere/scikit-learn,ogrisel/scikit-learn,scikit-learn/scikit-learn,abhishekgahlot/scikit-learn,simon-pepin/scikit-learn,deepesch/scikit-learn,macks22/scikit-learn,manashmndl/scikit-learn,jereze/scikit-learn,DSLituiev/scikit-learn,alexeyum/scikit-learn,frank-tancf/scikit-learn,Garrett-R/scikit-learn,shenzebang/scikit-learn,AlexandreAbraham/scikit-learn,hsiaoyi0504/scikit-learn,jorge2703/scikit-learn,lbishal/scikit-learn,h2educ/scikit-learn,mlyundin/scikit-learn,mxjl620/scikit-learn,jpautom/scikit-learn,liyu1990/sklearn,shusenl/scikit-learn,heli522/scikit-learn,chrisburr/scikit-learn,kjung/scikit-learn,thientu/scikit-learn,fbagirov/scikit-learn,belltailjp/scikit-learn,glouppe/scikit-learn,kmike/scikit-learn,LiaoPan/scikit-learn,xuewei4d/scikit-learn,loli/sklearn-ensembletrees,nhejazi/scikit-learn,giorgiop/scikit-learn,mfjb/scikit-learn,jmschrei/scikit-learn,kjung/scikit-learn,NunoEdgarGub1/scikit-learn,macks22/scikit-learn,ElDeveloper/scikit-learn,Aasmi/scikit-learn,xiaoxiamii/scikit-learn,Obus/scikit-learn,kmike/scikit-learn,Fireblend/scikit-learn,schets/scikit-learn,liyu1990/sklearn,robbymeals/scikit-lea
rn,stylianos-kampakis/scikit-learn,shahankhatch/scikit-learn,devanshdalal/scikit-learn,RomainBrault/scikit-learn,mrshu/scikit-learn,eg-zhang/scikit-learn,aflaxman/scikit-learn,cybernet14/scikit-learn,lin-credible/scikit-learn,Adai0808/scikit-learn,belltailjp/scikit-learn,robin-lai/scikit-learn,giorgiop/scikit-learn,Vimos/scikit-learn,wlamond/scikit-learn,rsivapr/scikit-learn,Garrett-R/scikit-learn,ashhher3/scikit-learn,xyguo/scikit-learn,dingocuster/scikit-learn,thientu/scikit-learn,michigraber/scikit-learn,nesterione/scikit-learn,TomDLT/scikit-learn,deepesch/scikit-learn,rexshihaoren/scikit-learn,vinayak-mehta/scikit-learn,adamgreenhall/scikit-learn,OshynSong/scikit-learn,pnedunuri/scikit-learn,PatrickOReilly/scikit-learn,sarahgrogan/scikit-learn,Akshay0724/scikit-learn,zaxtax/scikit-learn,ChanderG/scikit-learn,xubenben/scikit-learn,NelisVerhoef/scikit-learn,mlyundin/scikit-learn,jpautom/scikit-learn,h2educ/scikit-learn,xyguo/scikit-learn,eg-zhang/scikit-learn,xavierwu/scikit-learn,simon-pepin/scikit-learn,xwolf12/scikit-learn,glemaitre/scikit-learn,MechCoder/scikit-learn,mwv/scikit-learn,aewhatley/scikit-learn,mugizico/scikit-learn,hainm/scikit-learn,adamgreenhall/scikit-learn,xiaoxiamii/scikit-learn,akionakamura/scikit-learn,fredhusser/scikit-learn,NunoEdgarGub1/scikit-learn,ElDeveloper/scikit-learn,mlyundin/scikit-learn,meduz/scikit-learn,mehdidc/scikit-learn,andaag/scikit-learn,lesteve/scikit-learn,Djabbz/scikit-learn,betatim/scikit-learn,florian-f/sklearn,robin-lai/scikit-learn,icdishb/scikit-learn,untom/scikit-learn,joshloyal/scikit-learn,raghavrv/scikit-learn,beepee14/scikit-learn,bigdataelephants/scikit-learn,robbymeals/scikit-learn,PatrickOReilly/scikit-learn,eg-zhang/scikit-learn,hsuantien/scikit-learn,amueller/scikit-learn,waterponey/scikit-learn,yunfeilu/scikit-learn,equialgo/scikit-learn,vibhorag/scikit-learn,AlexanderFabisch/scikit-learn,hugobowne/scikit-learn,TomDLT/scikit-learn,f3r/scikit-learn,xavierwu/scikit-learn,henridwyer/scikit-learn,ChanderG/scikit-learn,IshankGulati/scikit-learn,jlegendary/scikit-learn,PrashntS/scikit-learn,jlegendary/scikit-learn,samzhang111/scikit-learn,trankmichael/scikit-learn,abimannans/scikit-learn,0x0all/scikit-learn,toastedcornflakes/scikit-learn,ZenDevelopmentSystems/scikit-learn,trungnt13/scikit-learn,shangwuhencc/scikit-learn,cl4rke/scikit-learn,vibhorag/scikit-learn,ltiao/scikit-learn,nhejazi/scikit-learn,pianomania/scikit-learn,vybstat/scikit-learn,pnedunuri/scikit-learn,bikong2/scikit-learn,aminert/scikit-learn,tawsifkhan/scikit-learn,hlin117/scikit-learn,aetilley/scikit-learn,ogrisel/scikit-learn,mjudsp/Tsallis,3manuek/scikit-learn,tawsifkhan/scikit-learn,iismd17/scikit-learn,mblondel/scikit-learn,devanshdalal/scikit-learn,florian-f/sklearn,yanlend/scikit-learn,aflaxman/scikit-learn,rajat1994/scikit-learn,siutanwong/scikit-learn,r-mart/scikit-learn,RPGOne/scikit-learn,jmetzen/scikit-learn,cl4rke/scikit-learn,shikhardb/scikit-learn,IssamLaradji/scikit-learn,MohammedWasim/scikit-learn,ky822/scikit-learn,hrjn/scikit-learn,clemkoa/scikit-learn,kaichogami/scikit-learn,hainm/scikit-learn,arjoly/scikit-learn,quheng/scikit-learn,sonnyhu/scikit-learn,thilbern/scikit-learn,pompiduskus/scikit-learn,Adai0808/scikit-learn,r-mart/scikit-learn,JosmanPS/scikit-learn,AlexandreAbraham/scikit-learn,tosolveit/scikit-learn,RomainBrault/scikit-learn,JPFrancoia/scikit-learn,rahul-c1/scikit-learn,vigilv/scikit-learn,AIML/scikit-learn,quheng/scikit-learn,dingocuster/scikit-learn,justincassidy/scikit-learn,carrillo/scikit-learn,vortex-ape/scikit-learn,shangwuh
encc/scikit-learn,wanggang3333/scikit-learn,RomainBrault/scikit-learn,tdhopper/scikit-learn,bnaul/scikit-learn,andaag/scikit-learn,ky822/scikit-learn,yunfeilu/scikit-learn,Akshay0724/scikit-learn,rvraghav93/scikit-learn,andrewnc/scikit-learn,DonBeo/scikit-learn,plissonf/scikit-learn,ogrisel/scikit-learn,ngoix/OCRF,dhruv13J/scikit-learn,elkingtonmcb/scikit-learn,massmutual/scikit-learn,liyu1990/sklearn,ominux/scikit-learn,ycaihua/scikit-learn,sarahgrogan/scikit-learn,MohammedWasim/scikit-learn,chrisburr/scikit-learn,tomlof/scikit-learn,RayMick/scikit-learn,arabenjamin/scikit-learn,Clyde-fare/scikit-learn,mhdella/scikit-learn,nesterione/scikit-learn,tmhm/scikit-learn,idlead/scikit-learn,466152112/scikit-learn,waterponey/scikit-learn,devanshdalal/scikit-learn,zorojean/scikit-learn,russel1237/scikit-learn,lazywei/scikit-learn,yonglehou/scikit-learn,DSLituiev/scikit-learn,hrjn/scikit-learn,UNR-AERIAL/scikit-learn,tdhopper/scikit-learn,Nyker510/scikit-learn,mikebenfield/scikit-learn,henridwyer/scikit-learn,procoder317/scikit-learn,yonglehou/scikit-learn,dsquareindia/scikit-learn,Aasmi/scikit-learn,rexshihaoren/scikit-learn,giorgiop/scikit-learn,zorojean/scikit-learn,mhue/scikit-learn,xiaoxiamii/scikit-learn,h2educ/scikit-learn,robin-lai/scikit-learn,jorge2703/scikit-learn,rishikksh20/scikit-learn,murali-munna/scikit-learn,bikong2/scikit-learn,chrsrds/scikit-learn,davidgbe/scikit-learn,etkirsch/scikit-learn,eickenberg/scikit-learn,Clyde-fare/scikit-learn,bthirion/scikit-learn,eg-zhang/scikit-learn,huobaowangxi/scikit-learn,ssaeger/scikit-learn,pythonvietnam/scikit-learn,petosegan/scikit-learn,MechCoder/scikit-learn,Myasuka/scikit-learn,mrshu/scikit-learn,r-mart/scikit-learn,Nyker510/scikit-learn,xuewei4d/scikit-learn,arjoly/scikit-learn,zuku1985/scikit-learn,wanggang3333/scikit-learn,depet/scikit-learn,CforED/Machine-Learning,gclenaghan/scikit-learn,abimannans/scikit-learn,mrshu/scikit-learn,poryfly/scikit-learn,ClimbsRocks/scikit-learn,kjung/scikit-learn,evgchz/scikit-learn,Achuth17/scikit-learn,elkingtonmcb/scikit-learn,aabadie/scikit-learn,meduz/scikit-learn,Djabbz/scikit-learn,yonglehou/scikit-learn,victorbergelin/scikit-learn,ky822/scikit-learn,glemaitre/scikit-learn,siutanwong/scikit-learn,frank-tancf/scikit-learn,Fireblend/scikit-learn,sgenoud/scikit-learn,pianomania/scikit-learn,BiaDarkia/scikit-learn,jakobworldpeace/scikit-learn,wzbozon/scikit-learn,jkarnows/scikit-learn,herilalaina/scikit-learn,AnasGhrab/scikit-learn,sergeyf/scikit-learn,qifeigit/scikit-learn,xubenben/scikit-learn,nvoron23/scikit-learn,zihua/scikit-learn,q1ang/scikit-learn,sumspr/scikit-learn,UNR-AERIAL/scikit-learn,BiaDarkia/scikit-learn,clemkoa/scikit-learn,procoder317/scikit-learn,meduz/scikit-learn,aminert/scikit-learn,djgagne/scikit-learn,loli/sklearn-ensembletrees,samzhang111/scikit-learn,JPFrancoia/scikit-learn,Barmaley-exe/scikit-learn,mxjl620/scikit-learn,lucidfrontier45/scikit-learn,pnedunuri/scikit-learn,russel1237/scikit-learn,clemkoa/scikit-learn,costypetrisor/scikit-learn,jjx02230808/project0223,theoryno3/scikit-learn,chrisburr/scikit-learn,zhenv5/scikit-learn,aewhatley/scikit-learn,samzhang111/scikit-learn,PrashntS/scikit-learn,mojoboss/scikit-learn,pnedunuri/scikit-learn,chrsrds/scikit-learn,ephes/scikit-learn,IssamLaradji/scikit-learn,mattilyra/scikit-learn,vermouthmjl/scikit-learn,mjudsp/Tsallis,alexeyum/scikit-learn,shenzebang/scikit-learn,voxlol/scikit-learn,vermouthmjl/scikit-learn,mugizico/scikit-learn,IndraVikas/scikit-learn,imaculate/scikit-learn,mattilyra/scikit-learn,gotomypc/scikit-learn,tmhm/s
cikit-learn,rajat1994/scikit-learn,yask123/scikit-learn,ngoix/OCRF,ycaihua/scikit-learn,btabibian/scikit-learn,phdowling/scikit-learn,depet/scikit-learn,gclenaghan/scikit-learn,rohanp/scikit-learn,ilo10/scikit-learn,henrykironde/scikit-learn,fabioticconi/scikit-learn,fabianp/scikit-learn,AlexRobson/scikit-learn,dsquareindia/scikit-learn,nelson-liu/scikit-learn,evgchz/scikit-learn,djgagne/scikit-learn,BiaDarkia/scikit-learn,justincassidy/scikit-learn,espg/scikit-learn,theoryno3/scikit-learn,DSLituiev/scikit-learn,rexshihaoren/scikit-learn,huzq/scikit-learn,espg/scikit-learn,espg/scikit-learn,jblackburne/scikit-learn,LiaoPan/scikit-learn,Windy-Ground/scikit-learn,andaag/scikit-learn,ankurankan/scikit-learn,MatthieuBizien/scikit-learn,amueller/scikit-learn,phdowling/scikit-learn,tmhm/scikit-learn,siutanwong/scikit-learn,0asa/scikit-learn,phdowling/scikit-learn,madjelan/scikit-learn,scikit-learn/scikit-learn,hsuantien/scikit-learn,themrmax/scikit-learn,cdegroc/scikit-learn,herilalaina/scikit-learn,Sentient07/scikit-learn,CVML/scikit-learn,rohanp/scikit-learn,henrykironde/scikit-learn,trankmichael/scikit-learn,sonnyhu/scikit-learn,tdhopper/scikit-learn,zihua/scikit-learn,ashhher3/scikit-learn,appapantula/scikit-learn,zuku1985/scikit-learn,depet/scikit-learn,RayMick/scikit-learn,trungnt13/scikit-learn,saiwing-yeung/scikit-learn,zhenv5/scikit-learn,Djabbz/scikit-learn,xubenben/scikit-learn,aflaxman/scikit-learn,treycausey/scikit-learn,jayflo/scikit-learn,aetilley/scikit-learn,jakobworldpeace/scikit-learn,mhue/scikit-learn,shenzebang/scikit-learn,vshtanko/scikit-learn,amueller/scikit-learn,giorgiop/scikit-learn,TomDLT/scikit-learn,arahuja/scikit-learn,vigilv/scikit-learn,aetilley/scikit-learn,jm-begon/scikit-learn,vermouthmjl/scikit-learn,ningchi/scikit-learn,beepee14/scikit-learn,dsquareindia/scikit-learn,nelson-liu/scikit-learn,betatim/scikit-learn,lenovor/scikit-learn,zuku1985/scikit-learn,shahankhatch/scikit-learn,herilalaina/scikit-learn,xiaoxiamii/scikit-learn,belltailjp/scikit-learn,rsivapr/scikit-learn,sinhrks/scikit-learn,roxyboy/scikit-learn,cdegroc/scikit-learn,nomadcube/scikit-learn,anurag313/scikit-learn,ishanic/scikit-learn,spallavolu/scikit-learn,krez13/scikit-learn,zorroblue/scikit-learn,espg/scikit-learn,loli/semisupervisedforests,pypot/scikit-learn,arahuja/scikit-learn,davidgbe/scikit-learn,fredhusser/scikit-learn,Barmaley-exe/scikit-learn,vybstat/scikit-learn,aflaxman/scikit-learn,xyguo/scikit-learn,vinayak-mehta/scikit-learn,IshankGulati/scikit-learn,DonBeo/scikit-learn,icdishb/scikit-learn,Obus/scikit-learn,liberatorqjw/scikit-learn,arjoly/scikit-learn,khkaminska/scikit-learn,ltiao/scikit-learn,rrohan/scikit-learn,Titan-C/scikit-learn,liyu1990/sklearn,yyjiang/scikit-learn,ky822/scikit-learn,jorge2703/scikit-learn,xuewei4d/scikit-learn,glennq/scikit-learn,tosolveit/scikit-learn,MechCoder/scikit-learn,pythonvietnam/scikit-learn,deepesch/scikit-learn,yonglehou/scikit-learn,zorroblue/scikit-learn,cauchycui/scikit-learn,aabadie/scikit-learn,B3AU/waveTree,mjgrav2001/scikit-learn,siutanwong/scikit-learn,idlead/scikit-learn,MartinSavc/scikit-learn,adamgreenhall/scikit-learn,ominux/scikit-learn,Achuth17/scikit-learn,icdishb/scikit-learn,potash/scikit-learn,samuel1208/scikit-learn,3manuek/scikit-learn,0asa/scikit-learn,fredhusser/scikit-learn,glennq/scikit-learn,ahoyosid/scikit-learn,phdowling/scikit-learn,abhishekkrthakur/scikit-learn,anurag313/scikit-learn,nvoron23/scikit-learn,ephes/scikit-learn,rajat1994/scikit-learn,theoryno3/scikit-learn,yask123/scikit-learn,ivannz/scikit-learn,iis
md17/scikit-learn,vshtanko/scikit-learn,mfjb/scikit-learn,Obus/scikit-learn,ahoyosid/scikit-learn,cauchycui/scikit-learn,kagayakidan/scikit-learn,imaculate/scikit-learn,vinayak-mehta/scikit-learn,nhejazi/scikit-learn,abimannans/scikit-learn,yask123/scikit-learn,xavierwu/scikit-learn,kashif/scikit-learn,henrykironde/scikit-learn,vortex-ape/scikit-learn,hlin117/scikit-learn,IndraVikas/scikit-learn,billy-inn/scikit-learn,PatrickOReilly/scikit-learn,dsullivan7/scikit-learn,treycausey/scikit-learn,zhenv5/scikit-learn,jmetzen/scikit-learn,jkarnows/scikit-learn,kmike/scikit-learn,rvraghav93/scikit-learn,bigdataelephants/scikit-learn,ycaihua/scikit-learn,0asa/scikit-learn,chrsrds/scikit-learn,pompiduskus/scikit-learn,cdegroc/scikit-learn,nmayorov/scikit-learn,vybstat/scikit-learn,appapantula/scikit-learn,nelson-liu/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jakobworldpeace/scikit-learn,LohithBlaze/scikit-learn,RachitKansal/scikit-learn,tawsifkhan/scikit-learn,billy-inn/scikit-learn,marcocaccin/scikit-learn,anurag313/scikit-learn,MartinDelzant/scikit-learn,khkaminska/scikit-learn,JosmanPS/scikit-learn,khkaminska/scikit-learn,JeanKossaifi/scikit-learn,q1ang/scikit-learn,nrhine1/scikit-learn,zihua/scikit-learn,robin-lai/scikit-learn,mjudsp/Tsallis,HolgerPeters/scikit-learn,smartscheduling/scikit-learn-categorical-tree,sanketloke/scikit-learn,cwu2011/scikit-learn,lazywei/scikit-learn,hitszxp/scikit-learn,sumspr/scikit-learn,moutai/scikit-learn,pkruskal/scikit-learn,Vimos/scikit-learn,0x0all/scikit-learn,kylerbrown/scikit-learn,beepee14/scikit-learn,zorojean/scikit-learn,jblackburne/scikit-learn,luo66/scikit-learn,r-mart/scikit-learn,B3AU/waveTree,kmike/scikit-learn,jkarnows/scikit-learn,luo66/scikit-learn,ldirer/scikit-learn,Srisai85/scikit-learn,glouppe/scikit-learn,shyamalschandra/scikit-learn,pv/scikit-learn,YinongLong/scikit-learn,florian-f/sklearn
|
Add tests for Nearest Neighbor Algorithm.
TODO: a lot more tests are needed.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@369 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8
|
import numpy as np
from ..neighbors import Neighbors
from numpy.testing import assert_array_equal
def test_neighbors_1D():
"""
Nearest Neighbors in a line.
"""
# some constants
n = 10
n_2 = n/2
samples = [[x] for x in range(0, n)]
labels = [0]*n_2 + [1]*n_2
zeros = np.zeros(n_2)
ones = np.ones(n_2)
# k = 1
nn = Neighbors(samples, labels=labels, k=1)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,n_2)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(n_2, 10)]),
ones)
# k = 3
nn = Neighbors(samples, labels=labels, k=3)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,5)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(5, 10)]),
ones)
|
<commit_before><commit_msg>Add tests for Nearest Neighbor Algorithm.
TODO: a lot more tests are needed.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@369 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>
|
import numpy as np
from ..neighbors import Neighbors
from numpy.testing import assert_array_equal
def test_neighbors_1D():
"""
Nearest Neighbors in a line.
"""
# some constants
n = 10
n_2 = n/2
samples = [[x] for x in range(0, n)]
labels = [0]*n_2 + [1]*n_2
zeros = np.zeros(n_2)
ones = np.ones(n_2)
# k = 1
nn = Neighbors(samples, labels=labels, k=1)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,n_2)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(n_2, 10)]),
ones)
# k = 3
nn = Neighbors(samples, labels=labels, k=3)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,5)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(5, 10)]),
ones)
|
Add tests for Nearest Neighbor Algorithm.
TODO: a lot more tests are needed.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@369 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8import numpy as np
from ..neighbors import Neighbors
from numpy.testing import assert_array_equal
def test_neighbors_1D():
"""
Nearest Neighbors in a line.
"""
# some constants
n = 10
n_2 = n/2
samples = [[x] for x in range(0, n)]
labels = [0]*n_2 + [1]*n_2
zeros = np.zeros(n_2)
ones = np.ones(n_2)
# k = 1
nn = Neighbors(samples, labels=labels, k=1)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,n_2)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(n_2, 10)]),
ones)
# k = 3
nn = Neighbors(samples, labels=labels, k=3)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,5)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(5, 10)]),
ones)
|
<commit_before><commit_msg>Add tests for Nearest Neighbor Algorithm.
TODO: a lot more tests are needed.
git-svn-id: a2d1b0e147e530765aaf3e1662d4a98e2f63c719@369 22fbfee3-77ab-4535-9bad-27d1bd3bc7d8<commit_after>import numpy as np
from ..neighbors import Neighbors
from numpy.testing import assert_array_equal
def test_neighbors_1D():
"""
Nearest Neighbors in a line.
"""
# some constants
n = 10
n_2 = n/2
samples = [[x] for x in range(0, n)]
labels = [0]*n_2 + [1]*n_2
zeros = np.zeros(n_2)
ones = np.ones(n_2)
# k = 1
nn = Neighbors(samples, labels=labels, k=1)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,n_2)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(n_2, 10)]),
ones)
# k = 3
nn = Neighbors(samples, labels=labels, k=3)
assert_array_equal( nn.predict([ [i +0.01] for i in range(0,5)]),
zeros)
assert_array_equal( nn.predict([ [i-0.01] for i in range(5, 10)]),
ones)
|
|
4fe0ea924f4cd7ac1bff718c6fc99c0e6780c803
|
cache_bind.py
|
cache_bind.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
This structure keeps a cache of bindings so that we do not continuously open
connections to the server.
"""
from sslhttps import HTTPSConnectionSSLVersion
from stub.LoginCMSService_types import *
PROXY_SIZE=5
TRACE_FILE=None # To DEBUG: sys.stdout
ServiceSoap=ServiceLocator().getServiceSoap
_bind_cache = {}
_bind_list = []
def get_bind(server):
if not server.id in _bind_cache:
if len(_bind_list) > PROXY_SIZE:
del _bind_cache[_bind_list.pop(0)]
_bind_cache[server.id] = ServiceSoap(url=server.url, transport=HTTPSConnectionSSLVersion, tracefile=TRACE_FILE)
_bind_list.append(server.id)
return _bind_cache[server.id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Test for electronic invoice. [FIX] Change l10n_ar_wsfe to l10n_ar_wsafip_fe
|
[ADD] Test for electronic invoice. [FIX] Change l10n_ar_wsfe to l10n_ar_wsafip_fe
|
Python
|
agpl-3.0
|
odoo-l10n-ar/l10n_ar_wsafip
|
[ADD] Test for electronic invoice. [FIX] Change l10n_ar_wsfe to l10n_ar_wsafip_fe
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
This structure keeps a cache of bindings so that we do not continuously open
connections to the server.
"""
from sslhttps import HTTPSConnectionSSLVersion
from stub.LoginCMSService_types import *
PROXY_SIZE=5
TRACE_FILE=None # To DEBUG: sys.stdout
ServiceSoap=ServiceLocator().getServiceSoap
_bind_cache = {}
_bind_list = []
def get_bind(server):
if not server.id in _bind_cache:
if len(_bind_list) > PROXY_SIZE:
del _bind_cache[_bind_list.pop(0)]
_bind_cache[server.id] = ServiceSoap(url=server.url, transport=HTTPSConnectionSSLVersion, tracefile=TRACE_FILE)
_bind_list.append(server.id)
return _bind_cache[server.id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before><commit_msg>[ADD] Test for electronic invoice. [FIX] Change l10n_ar_wsfe to l10n_ar_wsafip_fe<commit_after>
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
This structure keeps a cache of bindings so that we do not continuously open
connections to the server.
"""
from sslhttps import HTTPSConnectionSSLVersion
from stub.LoginCMSService_types import *
PROXY_SIZE=5
TRACE_FILE=None # To DEBUG: sys.stdout
ServiceSoap=ServiceLocator().getServiceSoap
_bind_cache = {}
_bind_list = []
def get_bind(server):
if not server.id in _bind_cache:
if len(_bind_list) > PROXY_SIZE:
del _bind_cache[_bind_list.pop(0)]
_bind_cache[server.id] = ServiceSoap(url=server.url, transport=HTTPSConnectionSSLVersion, tracefile=TRACE_FILE)
_bind_list.append(server.id)
return _bind_cache[server.id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[ADD] Test for electronic invoice. [FIX] Change l10n_ar_wsfe to l10n_ar_wsafip_fe# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
This structure keeps a cache of bindings so that we do not continuously open
connections to the server.
"""
from sslhttps import HTTPSConnectionSSLVersion
from stub.LoginCMSService_types import *
PROXY_SIZE=5
TRACE_FILE=None # To DEBUG: sys.stdout
ServiceSoap=ServiceLocator().getServiceSoap
_bind_cache = {}
_bind_list = []
def get_bind(server):
if not server.id in _bind_cache:
if len(_bind_list) > PROXY_SIZE:
del _bind_cache[_bind_list.pop(0)]
_bind_cache[server.id] = ServiceSoap(url=server.url, transport=HTTPSConnectionSSLVersion, tracefile=TRACE_FILE)
_bind_list.append(server.id)
return _bind_cache[server.id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before><commit_msg>[ADD] Test for electronic invoice. [FIX] Change l10n_ar_wsfe to l10n_ar_wsafip_fe<commit_after># -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012 OpenERP - Team de Localización Argentina.
# https://launchpad.net/~openerp-l10n-ar-localization
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
This structure keeps a cache of bindings so that we do not continuously open
connections to the server.
"""
from sslhttps import HTTPSConnectionSSLVersion
from stub.LoginCMSService_types import *
PROXY_SIZE=5
TRACE_FILE=None # To DEBUG: sys.stdout
ServiceSoap=ServiceLocator().getServiceSoap
_bind_cache = {}
_bind_list = []
def get_bind(server):
if not server.id in _bind_cache:
if len(_bind_list) > PROXY_SIZE:
del _bind_cache[_bind_list.pop(0)]
_bind_cache[server.id] = ServiceSoap(url=server.url, transport=HTTPSConnectionSSLVersion, tracefile=TRACE_FILE)
_bind_list.append(server.id)
return _bind_cache[server.id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
|
d2f3829ef024a663ecd82255860b0165838f8086
|
BasicSequences/BlankingSweeps.py
|
BasicSequences/BlankingSweeps.py
|
"""
Sequences for optimizing gating timing.
"""
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
def sweep_gateDelay(qubit, sweepPts):
"""
Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90
sequence.
Parameters
---------
qubit : logical qubit to create sequences for
sweepPts : iterable to sweep the gate delay over.
"""
generator = qubit.physChan.generator
oldDelay = generator.gateDelay
for ct, delay in enumerate(sweepPts):
seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
generator.gateDelay = delay
compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct+1))
generator.gateDelay = oldDelay
|
Add a sequence module for optimizing gating
|
Add a sequence module for optimizing gating
--CAR
|
Python
|
apache-2.0
|
BBN-Q/QGL,BBN-Q/QGL
|
Add a sequence module for optimizing gating
--CAR
|
"""
Sequences for optimizing gating timing.
"""
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
def sweep_gateDelay(qubit, sweepPts):
"""
Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90
sequence.
Parameters
---------
qubit : logical qubit to create sequences for
sweepPts : iterable to sweep the gate delay over.
"""
generator = qubit.physChan.generator
oldDelay = generator.gateDelay
for ct, delay in enumerate(sweepPts):
seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
generator.gateDelay = delay
compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct+1))
generator.gateDelay = oldDelay
|
<commit_before><commit_msg>Add a sequence module for optimizing gating
--CAR<commit_after>
|
"""
Sequences for optimizing gating timing.
"""
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
def sweep_gateDelay(qubit, sweepPts):
"""
Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90
sequence.
Parameters
---------
qubit : logical qubit to create sequences for
sweepPts : iterable to sweep the gate delay over.
"""
generator = qubit.physChan.generator
oldDelay = generator.gateDelay
for ct, delay in enumerate(sweepPts):
seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
generator.gateDelay = delay
compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct+1))
generator.gateDelay = oldDelay
|
Add a sequence module for optimizing gating
--CAR"""
Sequences for optimizing gating timing.
"""
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
def sweep_gateDelay(qubit, sweepPts):
"""
Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90
sequence.
Parameters
---------
qubit : logical qubit to create sequences for
sweepPts : iterable to sweep the gate delay over.
"""
generator = qubit.physChan.generator
oldDelay = generator.gateDelay
for ct, delay in enumerate(sweepPts):
seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
generator.gateDelay = delay
compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct+1))
generator.gateDelay = oldDelay
|
<commit_before><commit_msg>Add a sequence module for optimizing gating
--CAR<commit_after>"""
Sequences for optimizing gating timing.
"""
from ..PulsePrimitives import *
from ..Compiler import compile_to_hardware
def sweep_gateDelay(qubit, sweepPts):
"""
Sweep the gate delay associated with a qubit channel using a simple Id, Id, X90, X90
sequence.
Parameters
---------
qubit : logical qubit to create sequences for
sweepPts : iterable to sweep the gate delay over.
"""
generator = qubit.physChan.generator
oldDelay = generator.gateDelay
for ct, delay in enumerate(sweepPts):
seqs = [[Id(qubit, length=120e-9), Id(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)],
[Id(qubit, length=120e-9), X90(qubit), MEAS(qubit)]]
generator.gateDelay = delay
compile_to_hardware(seqs, 'BlankingSweeps/GateDelay', suffix='_{}'.format(ct+1))
generator.gateDelay = oldDelay
|
|
1ce040e0642c6dcc888b3787f7448c65ba0318f8
|
logos_setup_data/__openerp__.py
|
logos_setup_data/__openerp__.py
|
# -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
# 'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ADD base location to logos as it is necessary for security rules
|
ADD base location to logos as it is necessary for security rules
|
Python
|
agpl-3.0
|
ingadhoc/odoo-personalizations,adhoc-dev/odoo-personalizations
|
# -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
# 'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:ADD base location to logos as it is necessary for security rules
|
# -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before># -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
# 'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:<commit_msg>ADD base location to logos as it is necessary for security rules<commit_after>
|
# -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
# 'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:ADD base location to logos as it is necessary for security rules# -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
<commit_before># -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
# 'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:<commit_msg>ADD base location to logos as it is necessary for security rules<commit_after># -*- coding: utf-8 -*-
{
'name': 'Logos Set Up Data',
'version': '1.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Logos Set Up Data
=====================
""",
'author': 'Ingenieria ADHOC',
'website': 'www.ingadhoc.com',
'images': [
],
'depends': [
'crm',
'purchase',
'sale',
'portal_sale_distributor',
'website_sale',
'base_location',
'price_security',
'product_price_currency',
'logos_product_attributes',
'product_catalog_aeroo_report',
],
'data': [
# To fix an error
'security/ir.model.access.csv',
'security/logos_security.xml',
'report_data.xml',
'product_view.xml',
'crm_view.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
0fd252360ee340ae402ab8c9f007a673b4f330b9
|
src/ggrc/migrations/versions/20160412134448_11cee57a4149_add_assessor_reminder_notification.py
|
src/ggrc/migrations/versions/20160412134448_11cee57a4149_add_assessor_reminder_notification.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add assessors reminder notification
Create Date: 2016-04-12 13:44:48.265193
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql import select
from alembic import op
# revision identifiers, used by Alembic.
revision = '11cee57a4149'
down_revision = '50c374901d42'
notifications_table = table(
'notifications',
column('id', sa.Integer),
column('notification_type_id', sa.Integer),
)
notification_types_table = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer)
)
def upgrade():
"""Inserts new notification type"""
op.bulk_insert(
notification_types_table,
[{
"name": "assessment_assessor_reminder",
"description": ("Notify all Assessors that they should take a look "
"at the assessment."),
"template": "",
"advance_notice": 0,
"instant": False
}]
)
def downgrade():
"""First removes notifications and then removes notification type"""
op.execute(
notifications_table.delete().where(
notifications_table.c.notification_type_id == select(
[notification_types_table.c.id]).where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
)
op.execute(
notification_types_table.delete().where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
|
Add new notification type for assessors reminder
|
Add new notification type for assessors reminder
|
Python
|
apache-2.0
|
edofic/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core
|
Add new notification type for assessors reminder
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add assessors reminder notification
Create Date: 2016-04-12 13:44:48.265193
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql import select
from alembic import op
# revision identifiers, used by Alembic.
revision = '11cee57a4149'
down_revision = '50c374901d42'
notifications_table = table(
'notifications',
column('id', sa.Integer),
column('notification_type_id', sa.Integer),
)
notification_types_table = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer)
)
def upgrade():
"""Inserts new notification type"""
op.bulk_insert(
notification_types_table,
[{
"name": "assessment_assessor_reminder",
"description": ("Notify all Assessors that they should take a look "
"at the assessment."),
"template": "",
"advance_notice": 0,
"instant": False
}]
)
def downgrade():
"""First removes notifications and then removes notification type"""
op.execute(
notifications_table.delete().where(
notifications_table.c.notification_type_id == select(
[notification_types_table.c.id]).where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
)
op.execute(
notification_types_table.delete().where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
|
<commit_before><commit_msg>Add new notification type for assessors reminder<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add assessors reminder notification
Create Date: 2016-04-12 13:44:48.265193
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql import select
from alembic import op
# revision identifiers, used by Alembic.
revision = '11cee57a4149'
down_revision = '50c374901d42'
notifications_table = table(
'notifications',
column('id', sa.Integer),
column('notification_type_id', sa.Integer),
)
notification_types_table = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer)
)
def upgrade():
"""Inserts new notification type"""
op.bulk_insert(
notification_types_table,
[{
"name": "assessment_assessor_reminder",
"description": ("Notify all Assessors that they should take a look "
"at the assessment."),
"template": "",
"advance_notice": 0,
"instant": False
}]
)
def downgrade():
"""First removes notifications and then removes notification type"""
op.execute(
notifications_table.delete().where(
notifications_table.c.notification_type_id == select(
[notification_types_table.c.id]).where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
)
op.execute(
notification_types_table.delete().where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
|
Add new notification type for assessors reminder# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add assessors reminder notification
Create Date: 2016-04-12 13:44:48.265193
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql import select
from alembic import op
# revision identifiers, used by Alembic.
revision = '11cee57a4149'
down_revision = '50c374901d42'
notifications_table = table(
'notifications',
column('id', sa.Integer),
column('notification_type_id', sa.Integer),
)
notification_types_table = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer)
)
def upgrade():
"""Inserts new notification type"""
op.bulk_insert(
notification_types_table,
[{
"name": "assessment_assessor_reminder",
"description": ("Notify all Assessors that they should take a look "
"at the assessment."),
"template": "",
"advance_notice": 0,
"instant": False
}]
)
def downgrade():
"""First removes notifications and then removes notification type"""
op.execute(
notifications_table.delete().where(
notifications_table.c.notification_type_id == select(
[notification_types_table.c.id]).where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
)
op.execute(
notification_types_table.delete().where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
|
<commit_before><commit_msg>Add new notification type for assessors reminder<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: urban@reciprocitylabs.com
# Maintained By: urban@reciprocitylabs.com
"""
Add assessors reminder notification
Create Date: 2016-04-12 13:44:48.265193
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql import select
from alembic import op
# revision identifiers, used by Alembic.
revision = '11cee57a4149'
down_revision = '50c374901d42'
notifications_table = table(
'notifications',
column('id', sa.Integer),
column('notification_type_id', sa.Integer),
)
notification_types_table = table(
'notification_types',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('template', sa.String),
column('instant', sa.Boolean),
column('advance_notice', sa.Integer)
)
def upgrade():
"""Inserts new notification type"""
op.bulk_insert(
notification_types_table,
[{
"name": "assessment_assessor_reminder",
"description": ("Notify all Assessors that they should take a look "
"at the assessment."),
"template": "",
"advance_notice": 0,
"instant": False
}]
)
def downgrade():
"""First removes notifications and then removes notification type"""
op.execute(
notifications_table.delete().where(
notifications_table.c.notification_type_id == select(
[notification_types_table.c.id]).where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
)
op.execute(
notification_types_table.delete().where(
notification_types_table.c.name == "assessment_assessor_reminder"
)
)
|
|
eedf9070aeca9335809b0c09597cbc67dc92e407
|
plugins/CoD5_WaW.py
|
plugins/CoD5_WaW.py
|
import os
from lib.base_plugin import BasePlugin
class CoDWaWPlugin(BasePlugin):
Name = "Call of Duty: World at War"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def restore(self, _):
_.restore_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def detect(self):
if os.path.isdir(os.path.join(os.environ['LOCALAPPDATA'], 'Activision', 'CoDWaW')):
return True
return False
|
Call of Duty: World at War plugin
|
Call of Duty: World at War plugin
|
Python
|
mit
|
Pr0Ger/SGSB
|
Call of Duty: World at War plugin
|
import os
from lib.base_plugin import BasePlugin
class CoDWaWPlugin(BasePlugin):
Name = "Call of Duty: World at War"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def restore(self, _):
_.restore_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def detect(self):
if os.path.isdir(os.path.join(os.environ['LOCALAPPDATA'], 'Activision', 'CoDWaW')):
return True
return False
|
<commit_before><commit_msg>Call of Duty: World at War plugin<commit_after>
|
import os
from lib.base_plugin import BasePlugin
class CoDWaWPlugin(BasePlugin):
Name = "Call of Duty: World at War"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def restore(self, _):
_.restore_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def detect(self):
if os.path.isdir(os.path.join(os.environ['LOCALAPPDATA'], 'Activision', 'CoDWaW')):
return True
return False
|
Call of Duty: World at War pluginimport os
from lib.base_plugin import BasePlugin
class CoDWaWPlugin(BasePlugin):
Name = "Call of Duty: World at War"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def restore(self, _):
_.restore_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def detect(self):
if os.path.isdir(os.path.join(os.environ['LOCALAPPDATA'], 'Activision', 'CoDWaW')):
return True
return False
|
<commit_before><commit_msg>Call of Duty: World at War plugin<commit_after>import os
from lib.base_plugin import BasePlugin
class CoDWaWPlugin(BasePlugin):
Name = "Call of Duty: World at War"
support_os = ["Windows"]
def backup(self, _):
_.add_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def restore(self, _):
_.restore_folder('Profile', os.path.join(os.environ['LOCALAPPDATA'], 'Activision'), 'CoDWaW')
def detect(self):
if os.path.isdir(os.path.join(os.environ['LOCALAPPDATA'], 'Activision', 'CoDWaW')):
return True
return False
|
|
9daa2c08587d21f677e5a0461aa3a5fc7bff113b
|
analysis/create-jacobians.py
|
analysis/create-jacobians.py
|
import climate
import database
import pandas as pd
def main(root, pattern='*'):
for trial in database.Experiment(root).trials_matching(pattern, load=False):
trial.load()
body = database.Movement(trial.df.copy())
body.make_body_relative()
body.add_velocities()
goal = database.Movement(trial.df.copy())
tgt = goal.target_trajectory
goal.recenter(tgt.x, tgt.y, tgt.z)
goal.add_velocities()
jac = pd.DataFrame([], index=trial.df.index)
for cb in body.marker_channel_columns:
for cg in body.marker_channel_columns:
jac['body-{}/goal-{}'.format(cb, cg)] = body[cb] / goal[cg]
break
if __name__ == '__main__':
climate.call(main)
|
Add script for computing jacobian, as a dataframe.
|
Add script for computing jacobian, as a dataframe.
|
Python
|
mit
|
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
|
Add script for computing jacobian, as a dataframe.
|
import climate
import database
import pandas as pd
def main(root, pattern='*'):
for trial in database.Experiment(root).trials_matching(pattern, load=False):
trial.load()
body = database.Movement(trial.df.copy())
body.make_body_relative()
body.add_velocities()
goal = database.Movement(trial.df.copy())
tgt = goal.target_trajectory
goal.recenter(tgt.x, tgt.y, tgt.z)
goal.add_velocities()
jac = pd.DataFrame([], index=trial.df.index)
for cb in body.marker_channel_columns:
for cg in body.marker_channel_columns:
jac['body-{}/goal-{}'.format(cb, cg)] = body[cb] / goal[cg]
break
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for computing jacobian, as a dataframe.<commit_after>
|
import climate
import database
import pandas as pd
def main(root, pattern='*'):
for trial in database.Experiment(root).trials_matching(pattern, load=False):
trial.load()
body = database.Movement(trial.df.copy())
body.make_body_relative()
body.add_velocities()
goal = database.Movement(trial.df.copy())
tgt = goal.target_trajectory
goal.recenter(tgt.x, tgt.y, tgt.z)
goal.add_velocities()
jac = pd.DataFrame([], index=trial.df.index)
for cb in body.marker_channel_columns:
for cg in body.marker_channel_columns:
jac['body-{}/goal-{}'.format(cb, cg)] = body[cb] / goal[cg]
break
if __name__ == '__main__':
climate.call(main)
|
Add script for computing jacobian, as a dataframe.import climate
import database
import pandas as pd
def main(root, pattern='*'):
for trial in database.Experiment(root).trials_matching(pattern, load=False):
trial.load()
body = database.Movement(trial.df.copy())
body.make_body_relative()
body.add_velocities()
goal = database.Movement(trial.df.copy())
tgt = goal.target_trajectory
goal.recenter(tgt.x, tgt.y, tgt.z)
goal.add_velocities()
jac = pd.DataFrame([], index=trial.df.index)
for cb in body.marker_channel_columns:
for cg in body.marker_channel_columns:
jac['body-{}/goal-{}'.format(cb, cg)] = body[cb] / goal[cg]
break
if __name__ == '__main__':
climate.call(main)
|
<commit_before><commit_msg>Add script for computing jacobian, as a dataframe.<commit_after>import climate
import database
import pandas as pd
def main(root, pattern='*'):
for trial in database.Experiment(root).trials_matching(pattern, load=False):
trial.load()
body = database.Movement(trial.df.copy())
body.make_body_relative()
body.add_velocities()
goal = database.Movement(trial.df.copy())
tgt = goal.target_trajectory
goal.recenter(tgt.x, tgt.y, tgt.z)
goal.add_velocities()
jac = pd.DataFrame([], index=trial.df.index)
for cb in body.marker_channel_columns:
for cg in body.marker_channel_columns:
jac['body-{}/goal-{}'.format(cb, cg)] = body[cb] / goal[cg]
break
if __name__ == '__main__':
climate.call(main)
|
|
54c03cfa54f44fd9a839701a9ffcaa9c742d1dd8
|
pycom/oslolog.py
|
pycom/oslolog.py
|
# coding: utf-8
"""
Use the library 'oslo.log' to configure the logging.
Applications should use oslo.log’s configuration functions to register
logging-related configuration options and configure the root and other default
loggers.
(1) Call register_options() before parsing command line options.
(2) Call set_defaults() before configuring logging.
(3) Call setup() to configure logging for the application.
## Example
import sys
from oslo_log.log import register_options, set_defaults, setup
def set_log(conf, project, args=None, version="unknown", default_log_levels=None):
# Register the command line and configuration options used by oslo.log.
register_options(conf)
# Set default values for the configuration options used by oslo.log.
set_defaults(default_log_levels=default_log_levels)
# Parse the command line options.
args = args if args else sys.argv[1:]
conf(args, project=project, version=version)
# Setup logging for the current application.
setup(conf, project, version)
"""
|
Add the usage of `oslo.log`.
|
Add the usage of `oslo.log`.
|
Python
|
mit
|
xgfone/xutils,xgfone/pycom
|
Add the usage of `oslo.log`.
|
# coding: utf-8
"""
Use the library 'oslo.log' to configure the logging.
Applications should use oslo.log’s configuration functions to register
logging-related configuration options and configure the root and other default
loggers.
(1) Call register_options() before parsing command line options.
(2) Call set_defaults() before configuring logging.
(3) Call setup() to configure logging for the application.
## Example
import sys
from oslo_log.log import register_options, set_defaults, setup
def set_log(conf, project, args=None, version="unknown", default_log_levels=None):
# Register the command line and configuration options used by oslo.log.
register_options(conf)
# Set default values for the configuration options used by oslo.log.
set_defaults(default_log_levels=default_log_levels)
# Parse the command line options.
args = args if args else sys.argv[1:]
conf(args, project=project, version=version)
# Setup logging for the current application.
setup(conf, project, version)
"""
|
<commit_before><commit_msg>Add the usage of `oslo.log`.<commit_after>
|
# coding: utf-8
"""
Use the library 'oslo.log' to configure the logging.
Applications should use oslo.log’s configuration functions to register
logging-related configuration options and configure the root and other default
loggers.
(1) Call register_options() before parsing command line options.
(2) Call set_defaults() before configuring logging.
(3) Call setup() to configure logging for the application.
## Example
import sys
from oslo_log.log import register_options, set_defaults, setup
def set_log(conf, project, args=None, version="unknown", default_log_levels=None):
# Register the command line and configuration options used by oslo.log.
register_options(conf)
# Set default values for the configuration options used by oslo.log.
set_defaults(default_log_levels=default_log_levels)
# Parse the command line options.
args = args if args else sys.argv[1:]
conf(args, project=project, version=version)
# Setup logging for the current application.
setup(conf, project, version)
"""
|
Add the usage of `oslo.log`.# coding: utf-8
"""
Use the library 'oslo.log' to configure the logging.
Applications should use oslo.log’s configuration functions to register
logging-related configuration options and configure the root and other default
loggers.
(1) Call register_options() before parsing command line options.
(2) Call set_defaults() before configuring logging.
(3) Call setup() to configure logging for the application.
## Example
import sys
from oslo_log.log import register_options, set_defaults, setup
def set_log(conf, project, args=None, version="unknown", default_log_levels=None):
# Register the command line and configuration options used by oslo.log.
register_options(conf)
# Set default values for the configuration options used by oslo.log.
set_defaults(default_log_levels=default_log_levels)
# Parse the command line options.
args = args if args else sys.argv[1:]
conf(args, project=project, version=version)
# Setup logging for the current application.
setup(conf, project, version)
"""
|
<commit_before><commit_msg>Add the usage of `oslo.log`.<commit_after># coding: utf-8
"""
Use the library 'oslo.log' to configure the logging.
Applications should use oslo.log’s configuration functions to register
logging-related configuration options and configure the root and other default
loggers.
(1) Call register_options() before parsing command line options.
(2) Call set_defaults() before configuring logging.
(3) Call setup() to configure logging for the application.
## Example
import sys
from oslo_log.log import register_options, set_defaults, setup
def set_log(conf, project, args=None, version="unknown", default_log_levels=None):
# Register the command line and configuration options used by oslo.log.
register_options(conf)
# Set default values for the configuration options used by oslo.log.
set_defaults(default_log_levels=default_log_levels)
# Parse the command line options.
args = args if args else sys.argv[1:]
conf(args, project=project, version=version)
# Setup logging for the current application.
setup(conf, project, version)
"""
|
|
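A minimal sketch of how the set_log() helper documented in the record above might be driven end to end, assuming oslo.config's global ConfigOpts object; the project name, log level, and argv handling here are illustrative and not taken from the pycom repository:

from oslo_config import cfg
from oslo_log import log as logging

CONF = cfg.CONF  # global ConfigOpts instance shipped with oslo.config

def configure_logging(argv):
    # (1) register oslo.log's options before parsing the command line
    logging.register_options(CONF)
    # (2) adjust defaults before logging is configured
    logging.set_defaults(default_log_levels=['requests=WARN'])
    # parse command line arguments / config files
    CONF(argv, project='myapp', version='0.1')
    # (3) configure handlers for this application
    logging.setup(CONF, 'myapp')
    return logging.getLogger(__name__)

# usage: LOG = configure_logging(sys.argv[1:]); LOG.info('logging configured')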
68273e1826ca19e508b616713093c37e4e18381c
|
test/geocoders/openmapquest.py
|
test/geocoders/openmapquest.py
|
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3)
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
|
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase, env
import unittest
@unittest.skipUnless( # pylint: disable=R0904,C0111
bool(env.get('OPENMAPQUEST_APIKEY')),
"No OPENMAPQUEST_APIKEY env variable set"
)
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3, api_key=env['OPENMAPQUEST_APIKEY'])
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
|
Make OpenMapQuest tests conditional on environment variable
|
Make OpenMapQuest tests conditional on environment variable
|
Python
|
mit
|
geopy/geopy,Vimos/geopy,mthh/geopy,magnushiie/geopy,Vimos/geopy,magnushiie/geopy,jmb/geopy,mthh/geopy
|
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3)
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
Make OpenMapQuest tests conditional on environment variable
|
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase, env
import unittest
@unittest.skipUnless( # pylint: disable=R0904,C0111
bool(env.get('OPENMAPQUEST_APIKEY')),
"No OPENMAPQUEST_APIKEY env variable set"
)
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3, api_key=env['OPENMAPQUEST_APIKEY'])
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
|
<commit_before>
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3)
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
<commit_msg>Make OpenMapQuest tests conditional on environment variable<commit_after>
|
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase, env
import unittest
@unittest.skipUnless( # pylint: disable=R0904,C0111
bool(env.get('OPENMAPQUEST_APIKEY')),
"No OPENMAPQUEST_APIKEY env variable set"
)
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3, api_key=env['OPENMAPQUEST_APIKEY'])
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
|
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3)
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
Make OpenMapQuest tests conditional on environment variable
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase, env
import unittest
@unittest.skipUnless( # pylint: disable=R0904,C0111
bool(env.get('OPENMAPQUEST_APIKEY')),
"No OPENMAPQUEST_APIKEY env variable set"
)
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3, api_key=env['OPENMAPQUEST_APIKEY'])
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
|
<commit_before>
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3)
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
<commit_msg>Make OpenMapQuest tests conditional on environment variable<commit_after>
from geopy.compat import u
from geopy.geocoders import OpenMapQuest
from test.geocoders.util import GeocoderTestBase, env
import unittest
@unittest.skipUnless( # pylint: disable=R0904,C0111
bool(env.get('OPENMAPQUEST_APIKEY')),
"No OPENMAPQUEST_APIKEY env variable set"
)
class OpenMapQuestTestCase(GeocoderTestBase): # pylint: disable=R0904,C0111
@classmethod
def setUpClass(cls):
cls.geocoder = OpenMapQuest(scheme='http', timeout=3, api_key=env['OPENMAPQUEST_APIKEY'])
cls.delta = 0.04
def test_user_agent_custom(self):
geocoder = OpenMapQuest(
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
def test_geocode(self):
"""
OpenMapQuest.geocode
"""
self.geocode_run(
{"query": "435 north michigan ave, chicago il 60611 usa"},
{"latitude": 41.890, "longitude": -87.624},
)
def test_unicode_name(self):
"""
OpenMapQuest.geocode unicode
"""
self.geocode_run(
{"query": u("\u6545\u5bab")},
{"latitude": 39.916, "longitude": 116.390},
)
|
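The decorator pattern introduced in this record is generic: an entire test class can be gated on a credential being present in the environment. A small illustrative sketch, with the module and variable names invented rather than taken from geopy:

import os
import unittest

@unittest.skipUnless(bool(os.environ.get('SOME_APIKEY')),
                     "No SOME_APIKEY env variable set")
class ApiKeyedTestCase(unittest.TestCase):
    # the whole class is reported as skipped when the key is absent
    def test_key_is_available(self):
        self.assertTrue(os.environ['SOME_APIKEY'])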
7a0e8dfaa49b31876b4210b56bced77142ce7f39
|
set1/challenge-5.py
|
set1/challenge-5.py
|
from __future__ import division
import math
import base64
def repeating_xor_encrypt(text, key):
padded_key = key * int(math.ceil(len(text) / len(key)))
encrypted = [chr(ord(a) ^ ord(b)) for a, b in zip(text, padded_key)]
return base64.b16encode(''.join(encrypted))
if __name__ == '__main__':
text = ("Burning 'em, if you ain't quick and nimble\n"
"I go crazy when I hear a cymbal")
key = "ICE"
print repeating_xor_encrypt(text, key).lower()
|
Add solution to challenge 5.
|
Add solution to challenge 5.
|
Python
|
mit
|
ericnorris/cryptopals-solutions
|
Add solution to challenge 5.
|
from __future__ import division
import math
import base64
def repeating_xor_encrypt(text, key):
padded_key = key * int(math.ceil(len(text) / len(key)))
encrypted = [chr(ord(a) ^ ord(b)) for a, b in zip(text, padded_key)]
return base64.b16encode(''.join(encrypted))
if __name__ == '__main__':
text = ("Burning 'em, if you ain't quick and nimble\n"
"I go crazy when I hear a cymbal")
key = "ICE"
print repeating_xor_encrypt(text, key).lower()
|
<commit_before><commit_msg>Add solution to challenge 5.<commit_after>
|
from __future__ import division
import math
import base64
def repeating_xor_encrypt(text, key):
padded_key = key * int(math.ceil(len(text) / len(key)))
encrypted = [chr(ord(a) ^ ord(b)) for a, b in zip(text, padded_key)]
return base64.b16encode(''.join(encrypted))
if __name__ == '__main__':
text = ("Burning 'em, if you ain't quick and nimble\n"
"I go crazy when I hear a cymbal")
key = "ICE"
print repeating_xor_encrypt(text, key).lower()
|
Add solution to challenge 5.from __future__ import division
import math
import base64
def repeating_xor_encrypt(text, key):
padded_key = key * int(math.ceil(len(text) / len(key)))
encrypted = [chr(ord(a) ^ ord(b)) for a, b in zip(text, padded_key)]
return base64.b16encode(''.join(encrypted))
if __name__ == '__main__':
text = ("Burning 'em, if you ain't quick and nimble\n"
"I go crazy when I hear a cymbal")
key = "ICE"
print repeating_xor_encrypt(text, key).lower()
|
<commit_before><commit_msg>Add solution to challenge 5.<commit_after>from __future__ import division
import math
import base64
def repeating_xor_encrypt(text, key):
padded_key = key * int(math.ceil(len(text) / len(key)))
encrypted = [chr(ord(a) ^ ord(b)) for a, b in zip(text, padded_key)]
return base64.b16encode(''.join(encrypted))
if __name__ == '__main__':
text = ("Burning 'em, if you ain't quick and nimble\n"
"I go crazy when I hear a cymbal")
key = "ICE"
print repeating_xor_encrypt(text, key).lower()
|
|
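Because XOR is its own inverse, the encrypt routine in this record doubles as the decrypt routine once the hex encoding is undone. A short round-trip sketch, not part of the original solution, that runs under both Python 2 and 3:

import math

def repeating_xor(data, key):
    # pad the key to the length of the data, then XOR character-wise
    padded = key * int(math.ceil(len(data) / float(len(key))))
    return ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(data, padded))

message = "Burning 'em, if you ain't quick and nimble"
cipher = repeating_xor(message, "ICE")
assert repeating_xor(cipher, "ICE") == message  # applying the key again restores the plaintext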
2e4cdae4fd866149185a5c16a7e0e37b9b87150c
|
tests/thread/thread_qstr1.py
|
tests/thread/thread_qstr1.py
|
# test concurrent interning of strings
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# function to check the interned string
def check(s, val):
assert type(s) == str
assert int(s) == val
# main thread function
def th(base, n):
for i in range(n):
# this will intern the string and check it
exec("check('%u', %u)" % (base + i, base + i))
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (i * 1000, 1000))
# busy wait for threads to finish
while n_finished < n_thread:
pass
print('pass')
|
Add test for concurrent interning of strings.
|
tests/thread: Add test for concurrent interning of strings.
Qstr code accesses global state and needs to be made thread safe.
|
Python
|
mit
|
emfcamp/micropython,selste/micropython,mhoffma/micropython,AriZuu/micropython,cwyark/micropython,hiway/micropython,swegener/micropython,henriknelson/micropython,torwag/micropython,kerneltask/micropython,tuc-osg/micropython,micropython/micropython-esp32,tobbad/micropython,oopy/micropython,ganshun666/micropython,matthewelse/micropython,dmazzella/micropython,deshipu/micropython,alex-march/micropython,TDAbboud/micropython,SHA2017-badge/micropython-esp32,jmarcelino/pycom-micropython,redbear/micropython,turbinenreiter/micropython,bvernoux/micropython,ryannathans/micropython,AriZuu/micropython,cwyark/micropython,infinnovation/micropython,chrisdearman/micropython,hosaka/micropython,dxxb/micropython,puuu/micropython,pramasoul/micropython,pfalcon/micropython,swegener/micropython,micropython/micropython-esp32,MrSurly/micropython-esp32,tralamazza/micropython,pfalcon/micropython,tobbad/micropython,hiway/micropython,AriZuu/micropython,tobbad/micropython,turbinenreiter/micropython,turbinenreiter/micropython,pramasoul/micropython,ganshun666/micropython,alex-march/micropython,hosaka/micropython,pfalcon/micropython,matthewelse/micropython,PappaPeppar/micropython,swegener/micropython,HenrikSolver/micropython,alex-robbins/micropython,kerneltask/micropython,AriZuu/micropython,adafruit/micropython,bvernoux/micropython,deshipu/micropython,dxxb/micropython,matthewelse/micropython,infinnovation/micropython,MrSurly/micropython,MrSurly/micropython,blazewicz/micropython,alex-robbins/micropython,oopy/micropython,SHA2017-badge/micropython-esp32,henriknelson/micropython,HenrikSolver/micropython,puuu/micropython,infinnovation/micropython,MrSurly/micropython,MrSurly/micropython,infinnovation/micropython,blazewicz/micropython,tralamazza/micropython,jmarcelino/pycom-micropython,toolmacher/micropython,redbear/micropython,torwag/micropython,puuu/micropython,Timmenem/micropython,SHA2017-badge/micropython-esp32,turbinenreiter/micropython,ryannathans/micropython,Peetz0r/micropython-esp32,cwyark/micropython,turbinenreiter/micropython,tobbad/micropython,ganshun666/micropython,tralamazza/micropython,MrSurly/micropython,toolmacher/micropython,hosaka/micropython,selste/micropython,TDAbboud/micropython,PappaPeppar/micropython,micropython/micropython-esp32,dxxb/micropython,emfcamp/micropython,pozetroninc/micropython,trezor/micropython,Timmenem/micropython,infinnovation/micropython,chrisdearman/micropython,torwag/micropython,alex-robbins/micropython,MrSurly/micropython-esp32,MrSurly/micropython-esp32,Peetz0r/micropython-esp32,adafruit/circuitpython,oopy/micropython,hiway/micropython,adafruit/circuitpython,alex-robbins/micropython,mhoffma/micropython,pramasoul/micropython,PappaPeppar/micropython,tuc-osg/micropython,blazewicz/micropython,adafruit/circuitpython,jmarcelino/pycom-micropython,bvernoux/micropython,Timmenem/micropython,adafruit/micropython,MrSurly/micropython-esp32,lowRISC/micropython,swegener/micropython,selste/micropython,pfalcon/micropython,lowRISC/micropython,jmarcelino/pycom-micropython,oopy/micropython,PappaPeppar/micropython,adafruit/circuitpython,toolmacher/micropython,deshipu/micropython,jmarcelino/pycom-micropython,lowRISC/micropython,micropython/micropython-esp32,pozetroninc/micropython,bvernoux/micropython,adafruit/circuitpython,Timmenem/micropython,AriZuu/micropython,emfcamp/micropython,cwyark/micropython,toolmacher/micropython,lowRISC/micropython,trezor/micropython,alex-march/micropython,henriknelson/micropython,cwyark/micropython,henriknelson/micropython,TDAbboud/micropython,trezor/micropython,ryannathans/micropyt
hon,TDAbboud/micropython,HenrikSolver/micropython,ryannathans/micropython,redbear/micropython,adafruit/circuitpython,adafruit/micropython,pozetroninc/micropython,matthewelse/micropython,matthewelse/micropython,bvernoux/micropython,dxxb/micropython,SHA2017-badge/micropython-esp32,chrisdearman/micropython,kerneltask/micropython,selste/micropython,redbear/micropython,chrisdearman/micropython,emfcamp/micropython,henriknelson/micropython,selste/micropython,blazewicz/micropython,SHA2017-badge/micropython-esp32,Peetz0r/micropython-esp32,trezor/micropython,alex-march/micropython,pramasoul/micropython,torwag/micropython,MrSurly/micropython-esp32,ryannathans/micropython,adafruit/micropython,PappaPeppar/micropython,trezor/micropython,chrisdearman/micropython,Peetz0r/micropython-esp32,torwag/micropython,dmazzella/micropython,lowRISC/micropython,ganshun666/micropython,pfalcon/micropython,kerneltask/micropython,mhoffma/micropython,mhoffma/micropython,HenrikSolver/micropython,alex-march/micropython,puuu/micropython,tuc-osg/micropython,pramasoul/micropython,deshipu/micropython,tralamazza/micropython,alex-robbins/micropython,Timmenem/micropython,mhoffma/micropython,Peetz0r/micropython-esp32,micropython/micropython-esp32,blazewicz/micropython,redbear/micropython,hiway/micropython,dxxb/micropython,swegener/micropython,tuc-osg/micropython,adafruit/micropython,matthewelse/micropython,kerneltask/micropython,TDAbboud/micropython,deshipu/micropython,dmazzella/micropython,hosaka/micropython,toolmacher/micropython,hiway/micropython,emfcamp/micropython,oopy/micropython,pozetroninc/micropython,HenrikSolver/micropython,pozetroninc/micropython,tobbad/micropython,hosaka/micropython,puuu/micropython,dmazzella/micropython,tuc-osg/micropython,ganshun666/micropython
|
tests/thread: Add test for concurrent interning of strings.
Qstr code accesses global state and needs to be made thread safe.
|
# test concurrent interning of strings
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# function to check the interned string
def check(s, val):
assert type(s) == str
assert int(s) == val
# main thread function
def th(base, n):
for i in range(n):
# this will intern the string and check it
exec("check('%u', %u)" % (base + i, base + i))
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (i * 1000, 1000))
# busy wait for threads to finish
while n_finished < n_thread:
pass
print('pass')
|
<commit_before><commit_msg>tests/thread: Add test for concurrent interning of strings.
Qstr code accesses global state and needs to be made thread safe.<commit_after>
|
# test concurrent interning of strings
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# function to check the interned string
def check(s, val):
assert type(s) == str
assert int(s) == val
# main thread function
def th(base, n):
for i in range(n):
# this will intern the string and check it
exec("check('%u', %u)" % (base + i, base + i))
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (i * 1000, 1000))
# busy wait for threads to finish
while n_finished < n_thread:
pass
print('pass')
|
tests/thread: Add test for concurrent interning of strings.
Qstr code accesses global state and needs to be made thread safe.# test concurrent interning of strings
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# function to check the interned string
def check(s, val):
assert type(s) == str
assert int(s) == val
# main thread function
def th(base, n):
for i in range(n):
# this will intern the string and check it
exec("check('%u', %u)" % (base + i, base + i))
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (i * 1000, 1000))
# busy wait for threads to finish
while n_finished < n_thread:
pass
print('pass')
|
<commit_before><commit_msg>tests/thread: Add test for concurrent interning of strings.
Qstr code accesses global state and needs to be made thread safe.<commit_after># test concurrent interning of strings
#
# MIT license; Copyright (c) 2016 Damien P. George on behalf of Pycom Ltd
import _thread
# function to check the interned string
def check(s, val):
assert type(s) == str
assert int(s) == val
# main thread function
def th(base, n):
for i in range(n):
# this will intern the string and check it
exec("check('%u', %u)" % (base + i, base + i))
with lock:
global n_finished
n_finished += 1
lock = _thread.allocate_lock()
n_thread = 4
n_finished = 0
# spawn threads
for i in range(n_thread):
_thread.start_new_thread(th, (i * 1000, 1000))
# busy wait for threads to finish
while n_finished < n_thread:
pass
print('pass')
|
|
9877879c9a8d22a658f6c5f41b79930ff5385f05
|
plugins/parking.py
|
plugins/parking.py
|
from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
for lot in root.findall('ph'):
bot.privmsg(target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=int(lot.find('belegung').text),
max=int(lot.find('kapazitaet').text)
))
|
from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
"""Sort XML by element longname"""
root[:] = sorted(root, key=lambda key: key.findtext("longname"))
for lot in root.findall('ph'):
bot.privmsg(
target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=(
int(lot.find('kapazitaet').text) -
int(lot.find('belegung').text)
),
max=int(lot.find('kapazitaet').text)
)
)
|
Sort by name, fix vacancy (values in api are occupancy)
|
Sort by name, fix vacancy (values in api are occupancy)
|
Python
|
mit
|
mape2k/Bytebot,Bytespeicher/Bytebot
|
from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
for lot in root.findall('ph'):
bot.privmsg(target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=int(lot.find('belegung').text),
max=int(lot.find('kapazitaet').text)
))
Sort by name, fix vacancy (values in api are occupancy)
|
from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
"""Sort XML by element longname"""
root[:] = sorted(root, key=lambda key: key.findtext("longname"))
for lot in root.findall('ph'):
bot.privmsg(
target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=(
int(lot.find('kapazitaet').text) -
int(lot.find('belegung').text)
),
max=int(lot.find('kapazitaet').text)
)
)
|
<commit_before>from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
for lot in root.findall('ph'):
bot.privmsg(target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=int(lot.find('belegung').text),
max=int(lot.find('kapazitaet').text)
))
<commit_msg>Sort by name, fix vacancy (values in api are occupancy)<commit_after>
|
from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
"""Sort XML by element longname"""
root[:] = sorted(root, key=lambda key: key.findtext("longname"))
for lot in root.findall('ph'):
bot.privmsg(
target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=(
int(lot.find('kapazitaet').text) -
int(lot.find('belegung').text)
),
max=int(lot.find('kapazitaet').text)
)
)
|
from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
for lot in root.findall('ph'):
bot.privmsg(target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=int(lot.find('belegung').text),
max=int(lot.find('kapazitaet').text)
))
Sort by name, fix vacancy (values in api are occupancy)from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
"""Sort XML by element longname"""
root[:] = sorted(root, key=lambda key: key.findtext("longname"))
for lot in root.findall('ph'):
bot.privmsg(
target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=(
int(lot.find('kapazitaet').text) -
int(lot.find('belegung').text)
),
max=int(lot.find('kapazitaet').text)
)
)
|
<commit_before>from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
for lot in root.findall('ph'):
bot.privmsg(target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=int(lot.find('belegung').text),
max=int(lot.find('kapazitaet').text)
))
<commit_msg>Sort by name, fix vacancy (values in api are occupancy)<commit_after>from irc3.plugins.command import command
from bytebot_config import BYTEBOT_PLUGIN_CONFIG
from irc3 import asyncio
import aiohttp
import xml.etree.ElementTree as ET
@command(permission="view")
@asyncio.coroutine
def parking(bot, mask, target, args):
"""Show the current parking lot status
%%parking
"""
config = BYTEBOT_PLUGIN_CONFIG['parking']
if config['url'] == "parking_url":
return "I don't have your parking url!"
bot.privmsg(target, 'Parkhausbelegung:')
with aiohttp.Timeout(10):
with aiohttp.ClientSession(loop=bot.loop) as session:
resp = yield from session.get(config['url'])
if resp.status != 200:
bot.privmsg(target, "Error while retrieving parking data")
raise Exception()
r = yield from resp.read()
root = ET.fromstring(r)
"""Sort XML by element longname"""
root[:] = sorted(root, key=lambda key: key.findtext("longname"))
for lot in root.findall('ph'):
bot.privmsg(
target,
" {name:32}{use:3} von {max:3} frei".format(
name=lot.find('longname').text,
use=(
int(lot.find('kapazitaet').text) -
int(lot.find('belegung').text)
),
max=int(lot.find('kapazitaet').text)
)
)
|
219253f59314e0adf57deb238c9d888bbb984b23
|
app/util/danger.py
|
app/util/danger.py
|
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, SignatureExpired, BadTimeSignature
import os
def gen_auth_token(id,expiration=10000):
"""Generate auth token"""
s = Serializer(os.environ['API_KEY'],expires_in=expiration)
return s.dumps({'id':id})
def verify_auth_token(token):
"""Verify auth token"""
s = Serializer(os.environ['API_KEY'])
    # check the token and report which signature check failed
    try:
        user = s.loads(token)
except SignatureExpired:
return False, "SignatureExpired bad token"
except BadTimeSignature:
return False, "BadSignature bad token"
return True, user
|
Create util file for gen and verifying auth tokens
|
Create util file for gen and verifying auth tokens
|
Python
|
mit
|
tforrest/soda-automation,tforrest/soda-automation
|
Create util file for gen and verifying auth tokens
|
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, SignatureExpired, BadTimeSignature
import os
def gen_auth_token(id,expiration=10000):
"""Generate auth token"""
s = Serializer(os.environ['API_KEY'],expires_in=expiration)
return s.dumps({'id':id})
def verify_auth_token(token):
"""Verify auth token"""
s = Serializer(os.environ['API_KEY'])
    # check the token and report which signature check failed
    try:
        user = s.loads(token)
except SignatureExpired:
return False, "SignatureExpired bad token"
except BadTimeSignature:
return False, "BadSignature bad token"
return True, user
|
<commit_before><commit_msg>Create util file for gen and verifying auth tokens<commit_after>
|
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, SignatureExpired, BadTimeSignature
import os
def gen_auth_token(id,expiration=10000):
"""Generate auth token"""
s = Serializer(os.environ['API_KEY'],expires_in=expiration)
return s.dumps({'id':id})
def verify_auth_token(token):
"""Verify auth token"""
s = Serializer(os.environ['API_KEY'])
    # check the token and report which signature check failed
    try:
        user = s.loads(token)
except SignatureExpired:
return False, "SignatureExpired bad token"
except BadTimeSignature:
return False, "BadSignature bad token"
return True, user
|
Create util file for gen and verifying auth tokensfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer, SignatureExpired, BadTimeSignature
import os
def gen_auth_token(id,expiration=10000):
"""Generate auth token"""
s = Serializer(os.environ['API_KEY'],expires_in=expiration)
return s.dumps({'id':id})
def verify_auth_token(token):
"""Verify auth token"""
s = Serializer(os.environ['API_KEY'])
    # check the token and report which signature check failed
    try:
        user = s.loads(token)
except SignatureExpired:
return False, "SignatureExpired bad token"
except BadTimeSignature:
return False, "BadSignature bad token"
return True, user
|
<commit_before><commit_msg>Create util file for gen and verifying auth tokens<commit_after>from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, SignatureExpired, BadTimeSignature
import os
def gen_auth_token(id,expiration=10000):
"""Generate auth token"""
s = Serializer(os.environ['API_KEY'],expires_in=expiration)
return s.dumps({'id':id})
def verify_auth_token(token):
"""Verify auth token"""
s = Serializer(os.environ['API_KEY'])
    # check the token and report which signature check failed
    try:
        user = s.loads(token)
except SignatureExpired:
return False, "SignatureExpired bad token"
except BadTimeSignature:
return False, "BadSignature bad token"
return True, user
|
|
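An illustrative round trip for the helpers in this record, assuming an itsdangerous release that still ships TimedJSONWebSignatureSerializer (it was dropped in newer 2.x releases) and an API_KEY environment variable; the secret and payload below are made up:

import os
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer

os.environ.setdefault('API_KEY', 'not-a-real-secret')  # demo-only assumption

s = Serializer(os.environ['API_KEY'], expires_in=3600)
token = s.dumps({'id': 42})      # what gen_auth_token() would hand back
print(s.loads(token))            # -> {'id': 42} while the signature is still fresh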
224f977a60fc94893228f9372386de8ca0551fde
|
artifacts/firmware_password_status.py
|
artifacts/firmware_password_status.py
|
import subprocess
factoid = 'firmware_password_status'
def fact():
'''Returns whether or not a firmware password is set'''
result = 'None'
try:
proc = subprocess.Popen(
['/usr/sbin/firmwarepasswd', '-check'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, _ = proc.communicate()
except (IOError, OSError):
stdout = None
if stdout:
result = True if stdout.split()[-1] == 'Yes' else False
return {factoid: result}
if __name__ == '__main__':
print '<result>%s</result>' % fact()[factoid]
|
Add firmware password status artifact
|
Add firmware password status artifact
|
Python
|
apache-2.0
|
chilcote/unearth,chilcote/unearth
|
Add firmware password status artifact
|
import subprocess
factoid = 'firmware_password_status'
def fact():
'''Returns whether or not a firmware password is set'''
result = 'None'
try:
proc = subprocess.Popen(
['/usr/sbin/firmwarepasswd', '-check'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, _ = proc.communicate()
except (IOError, OSError):
stdout = None
if stdout:
result = True if stdout.split()[-1] == 'Yes' else False
return {factoid: result}
if __name__ == '__main__':
print '<result>%s</result>' % fact()[factoid]
|
<commit_before><commit_msg>Add firmware password status artifact<commit_after>
|
import subprocess
factoid = 'firmware_password_status'
def fact():
'''Returns whether or not a firmware password is set'''
result = 'None'
try:
proc = subprocess.Popen(
['/usr/sbin/firmwarepasswd', '-check'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, _ = proc.communicate()
except (IOError, OSError):
stdout = None
if stdout:
result = True if stdout.split()[-1] == 'Yes' else False
return {factoid: result}
if __name__ == '__main__':
print '<result>%s</result>' % fact()[factoid]
|
Add firmware password status artifactimport subprocess
factoid = 'firmware_password_status'
def fact():
'''Returns whether or not a firmware password is set'''
result = 'None'
try:
proc = subprocess.Popen(
['/usr/sbin/firmwarepasswd', '-check'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, _ = proc.communicate()
except (IOError, OSError):
stdout = None
if stdout:
result = True if stdout.split()[-1] == 'Yes' else False
return {factoid: result}
if __name__ == '__main__':
print '<result>%s</result>' % fact()[factoid]
|
<commit_before><commit_msg>Add firmware password status artifact<commit_after>import subprocess
factoid = 'firmware_password_status'
def fact():
'''Returns whether or not a firmware password is set'''
result = 'None'
try:
proc = subprocess.Popen(
['/usr/sbin/firmwarepasswd', '-check'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, _ = proc.communicate()
except (IOError, OSError):
stdout = None
if stdout:
result = True if stdout.split()[-1] == 'Yes' else False
return {factoid: result}
if __name__ == '__main__':
print '<result>%s</result>' % fact()[factoid]
|
|
eb7b230246f5756c4d164eba110e4d323bad5941
|
load_db.py
|
load_db.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import re
import sqlite3
from collections import namedtuple
import sys
Record = namedtuple('Record', ['name', 'day_of_birth',
'month_of_birth', 'series', 'photo_url', 'important'])
def new_record(csv_line):
character_id, name, full_birthday, series, photo, is_important = csv_line
m = re.match('(\d+).(\d+).+', full_birthday)
if not m:
return
day_of_birth, month_of_birth = m.groups()
if day_of_birth == '??' or month_of_birth == '??':
return
return Record(name, int(day_of_birth), int(month_of_birth), series, photo, bool(is_important))
if __name__ == '__main__':
records = []
with open(sys.argv[1], 'r') as fp:
reader = csv.reader(fp, delimiter=';')
reader.next()
for line in reader:
record = new_record(line)
if record:
records.append(record)
conn = sqlite3.connect(sys.argv[2])
with conn:
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS birthdays(name TEXT, day INTEGER, "
"month INTEGER, series TEXT, photo VARCHAR(255), "
"important BOOLEAN, PRIMARY KEY(name, day, month))")
cursor.executemany("INSERT OR REPLACE INTO birthdays (name, day, month, series, photo, important) "
"VALUES (?, ?, ?, ?, ?, ?)", records)
|
Load data from modified CSV file.
|
Load data from modified CSV file.
|
Python
|
mit
|
lensvol/anibirthday
|
Load data from modified CSV file.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import re
import sqlite3
from collections import namedtuple
import sys
Record = namedtuple('Record', ['name', 'day_of_birth',
'month_of_birth', 'series', 'photo_url', 'important'])
def new_record(csv_line):
character_id, name, full_birthday, series, photo, is_important = csv_line
m = re.match('(\d+).(\d+).+', full_birthday)
if not m:
return
day_of_birth, month_of_birth = m.groups()
if day_of_birth == '??' or month_of_birth == '??':
return
return Record(name, int(day_of_birth), int(month_of_birth), series, photo, bool(is_important))
if __name__ == '__main__':
records = []
with open(sys.argv[1], 'r') as fp:
reader = csv.reader(fp, delimiter=';')
reader.next()
for line in reader:
record = new_record(line)
if record:
records.append(record)
conn = sqlite3.connect(sys.argv[2])
with conn:
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS birthdays(name TEXT, day INTEGER, "
"month INTEGER, series TEXT, photo VARCHAR(255), "
"important BOOLEAN, PRIMARY KEY(name, day, month))")
cursor.executemany("INSERT OR REPLACE INTO birthdays (name, day, month, series, photo, important) "
"VALUES (?, ?, ?, ?, ?, ?)", records)
|
<commit_before><commit_msg>Load data from modified CSV file.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import re
import sqlite3
from collections import namedtuple
import sys
Record = namedtuple('Record', ['name', 'day_of_birth',
'month_of_birth', 'series', 'photo_url', 'important'])
def new_record(csv_line):
character_id, name, full_birthday, series, photo, is_important = csv_line
m = re.match('(\d+).(\d+).+', full_birthday)
if not m:
return
day_of_birth, month_of_birth = m.groups()
if day_of_birth == '??' or month_of_birth == '??':
return
return Record(name, int(day_of_birth), int(month_of_birth), series, photo, bool(is_important))
if __name__ == '__main__':
records = []
with open(sys.argv[1], 'r') as fp:
reader = csv.reader(fp, delimiter=';')
reader.next()
for line in reader:
record = new_record(line)
if record:
records.append(record)
conn = sqlite3.connect(sys.argv[2])
with conn:
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS birthdays(name TEXT, day INTEGER, "
"month INTEGER, series TEXT, photo VARCHAR(255), "
"important BOOLEAN, PRIMARY KEY(name, day, month))")
cursor.executemany("INSERT OR REPLACE INTO birthdays (name, day, month, series, photo, important) "
"VALUES (?, ?, ?, ?, ?, ?)", records)
|
Load data from modified CSV file.#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import re
import sqlite3
from collections import namedtuple
import sys
Record = namedtuple('Record', ['name', 'day_of_birth',
'month_of_birth', 'series', 'photo_url', 'important'])
def new_record(csv_line):
character_id, name, full_birthday, series, photo, is_important = csv_line
m = re.match('(\d+).(\d+).+', full_birthday)
if not m:
return
day_of_birth, month_of_birth = m.groups()
if day_of_birth == '??' or month_of_birth == '??':
return
return Record(name, int(day_of_birth), int(month_of_birth), series, photo, bool(is_important))
if __name__ == '__main__':
records = []
with open(sys.argv[1], 'r') as fp:
reader = csv.reader(fp, delimiter=';')
reader.next()
for line in reader:
record = new_record(line)
if record:
records.append(record)
conn = sqlite3.connect(sys.argv[2])
with conn:
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS birthdays(name TEXT, day INTEGER, "
"month INTEGER, series TEXT, photo VARCHAR(255), "
"important BOOLEAN, PRIMARY KEY(name, day, month))")
cursor.executemany("INSERT OR REPLACE INTO birthdays (name, day, month, series, photo, important) "
"VALUES (?, ?, ?, ?, ?, ?)", records)
|
<commit_before><commit_msg>Load data from modified CSV file.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import re
import sqlite3
from collections import namedtuple
import sys
Record = namedtuple('Record', ['name', 'day_of_birth',
'month_of_birth', 'series', 'photo_url', 'important'])
def new_record(csv_line):
character_id, name, full_birthday, series, photo, is_important = csv_line
m = re.match('(\d+).(\d+).+', full_birthday)
if not m:
return
day_of_birth, month_of_birth = m.groups()
if day_of_birth == '??' or month_of_birth == '??':
return
return Record(name, int(day_of_birth), int(month_of_birth), series, photo, bool(is_important))
if __name__ == '__main__':
records = []
with open(sys.argv[1], 'r') as fp:
reader = csv.reader(fp, delimiter=';')
reader.next()
for line in reader:
record = new_record(line)
if record:
records.append(record)
conn = sqlite3.connect(sys.argv[2])
with conn:
cursor = conn.cursor()
cursor.execute("CREATE TABLE IF NOT EXISTS birthdays(name TEXT, day INTEGER, "
"month INTEGER, series TEXT, photo VARCHAR(255), "
"important BOOLEAN, PRIMARY KEY(name, day, month))")
cursor.executemany("INSERT OR REPLACE INTO birthdays (name, day, month, series, photo, important) "
"VALUES (?, ?, ?, ?, ?, ?)", records)
|
|
afbca5518cd7da60ddbb01d07a457a59c037c1b3
|
amiconfig/lib/mountdaemon.py
|
amiconfig/lib/mountdaemon.py
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import time
from amiconfig.lib import daemon
class MountDaemon(daemon.Daemon):
"""
Attempt to mount a device in the background
"""
def __init__(self, device, mount_point, wait_time):
self.device = device
self.mount_point = mount_point
self.wait_time = wait_time
def start(self):
count = 0
while count < self.wait_time:
if not os.path.exists(self.device):
time.sleep(60)
else:
break
count += 1
subprocess.call(["mount", self.device, self.mount_point])
|
Add missing library for daemonization
|
Add missing library for daemonization
|
Python
|
apache-2.0
|
sassoftware/amiconfig,sassoftware/amiconfig
|
Add missing library for daemonization
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import time
from amiconfig.lib import daemon
class MountDaemon(daemon.Daemon):
"""
Attempt to mount a device in the background
"""
def __init__(self, device, mount_point, wait_time):
self.device = device
self.mount_point = mount_point
self.wait_time = wait_time
def start(self):
count = 0
while count < self.wait_time:
if not os.path.exists(self.device):
time.sleep(60)
else:
break
count += 1
subprocess.call(["mount", self.device, self.mount_point])
|
<commit_before><commit_msg>Add missing library for daemonization<commit_after>
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import time
from amiconfig.lib import daemon
class MountDaemon(daemon.Daemon):
"""
Attempt to mount a device in the background
"""
def __init__(self, device, mount_point, wait_time):
self.device = device
self.mount_point = mount_point
self.wait_time = wait_time
def start(self):
count = 0
while count < self.wait_time:
if not os.path.exists(self.device):
time.sleep(60)
else:
break
count += 1
subprocess.call(["mount", self.device, self.mount_point])
|
Add missing library for daemonization#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import time
from amiconfig.lib import daemon
class MountDaemon(daemon.Daemon):
"""
Attempt to mount a device in the background
"""
def __init__(self, device, mount_point, wait_time):
self.device = device
self.mount_point = mount_point
self.wait_time = wait_time
def start(self):
count = 0
while count < self.wait_time:
if not os.path.exists(self.device):
time.sleep(60)
else:
break
count += 1
subprocess.call(["mount", self.device, self.mount_point])
|
<commit_before><commit_msg>Add missing library for daemonization<commit_after>#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import time
from amiconfig.lib import daemon
class MountDaemon(daemon.Daemon):
"""
Attempt to mount a device in the background
"""
def __init__(self, device, mount_point, wait_time):
self.device = device
self.mount_point = mount_point
self.wait_time = wait_time
def start(self):
count = 0
while count < self.wait_time:
if not os.path.exists(self.device):
time.sleep(60)
else:
break
count += 1
subprocess.call(["mount", self.device, self.mount_point])
|
|
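The wait-then-mount loop in the record above can also be written as a standalone helper; this is a sketch with invented device, mount point, and timeout values, and with the Daemon base class deliberately left out:

import os
import subprocess
import time

def mount_when_present(device='/dev/xvdf', mount_point='/mnt/data',
                       attempts=10, poll_seconds=60):
    # poll until the block device shows up, then hand off to mount(8)
    for _ in range(attempts):
        if os.path.exists(device):
            break
        time.sleep(poll_seconds)
    return subprocess.call(["mount", device, mount_point])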
8068428fb06a2f3dba2e3f73684f26b9838c71e4
|
apps/core/tests/test_models.py
|
apps/core/tests/test_models.py
|
from django.test import TestCase
from apps.data.factories import EntryFactory
from .. import models
class SpeciesTestCase(TestCase):
def test_can_create_species(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
description = 'lorem ipsum'
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
description=description,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(species.description, description)
self.assertEqual(qs.count(), 1)
def test_can_create_species_without_description(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(qs.count(), 1)
def test_model_representation(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(str(species), name)
|
Add unit tests for the Species model
|
Add unit tests for the Species model
|
Python
|
bsd-3-clause
|
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
|
Add unit tests for the Species model
|
from django.test import TestCase
from apps.data.factories import EntryFactory
from .. import models
class SpeciesTestCase(TestCase):
def test_can_create_species(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
description = 'lorem ipsum'
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
description=description,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(species.description, description)
self.assertEqual(qs.count(), 1)
def test_can_create_species_without_description(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(qs.count(), 1)
def test_model_representation(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(str(species), name)
|
<commit_before><commit_msg>Add unit tests for the Species model<commit_after>
|
from django.test import TestCase
from apps.data.factories import EntryFactory
from .. import models
class SpeciesTestCase(TestCase):
def test_can_create_species(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
description = 'lorem ipsum'
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
description=description,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(species.description, description)
self.assertEqual(qs.count(), 1)
def test_can_create_species_without_description(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(qs.count(), 1)
def test_model_representation(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(str(species), name)
|
Add unit tests for the Species modelfrom django.test import TestCase
from apps.data.factories import EntryFactory
from .. import models
class SpeciesTestCase(TestCase):
def test_can_create_species(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
description = 'lorem ipsum'
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
description=description,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(species.description, description)
self.assertEqual(qs.count(), 1)
def test_can_create_species_without_description(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(qs.count(), 1)
def test_model_representation(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(str(species), name)
|
<commit_before><commit_msg>Add unit tests for the Species model<commit_after>from django.test import TestCase
from apps.data.factories import EntryFactory
from .. import models
class SpeciesTestCase(TestCase):
def test_can_create_species(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
description = 'lorem ipsum'
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
description=description,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(species.description, description)
self.assertEqual(qs.count(), 1)
def test_can_create_species_without_description(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
qs = models.Species.objects.all()
self.assertEqual(qs.count(), 0)
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(species.name, name)
self.assertEqual(species.reference.id, reference.id)
self.assertEqual(qs.count(), 1)
def test_model_representation(self):
name = 'Saccharomyces cerevisiae'
reference = EntryFactory()
species = models.Species.objects.create(
name=name,
reference=reference,
)
self.assertEqual(str(species), name)
|
|
1e716c71395d2df5e34fe403115d0dab630f36cf
|
tests/config/test_defaults.py
|
tests/config/test_defaults.py
|
from __future__ import absolute_import, unicode_literals
from mopidy import config
def test_core_schema_has_cache_dir():
assert 'cache_dir' in config._core_schema
assert isinstance(config._core_schema['cache_dir'], config.Path)
def test_core_schema_has_config_dir():
assert 'config_dir' in config._core_schema
assert isinstance(config._core_schema['config_dir'], config.Path)
def test_core_schema_has_data_dir():
assert 'data_dir' in config._core_schema
assert isinstance(config._core_schema['data_dir'], config.Path)
def test_core_schema_has_max_tracklist_length():
assert 'max_tracklist_length' in config._core_schema
max_tracklist_length_schema = config._core_schema['max_tracklist_length']
assert isinstance(max_tracklist_length_schema, config.Integer)
assert max_tracklist_length_schema._minimum == 1
assert max_tracklist_length_schema._maximum == 10000
|
Add tests for defaults in core schema config
|
tests: Add tests for defaults in core schema config
Tests check if default core schema has cache_dir, config_dir, data_dir and max_tracklist_length and if they have proper type
|
Python
|
apache-2.0
|
jmarsik/mopidy,bencevans/mopidy,adamcik/mopidy,ZenithDK/mopidy,vrs01/mopidy,kingosticks/mopidy,diandiankan/mopidy,kingosticks/mopidy,ZenithDK/mopidy,bacontext/mopidy,jcass77/mopidy,ali/mopidy,SuperStarPL/mopidy,SuperStarPL/mopidy,bencevans/mopidy,adamcik/mopidy,ali/mopidy,vrs01/mopidy,hkariti/mopidy,jmarsik/mopidy,kingosticks/mopidy,jodal/mopidy,hkariti/mopidy,ali/mopidy,quartz55/mopidy,hkariti/mopidy,swak/mopidy,mopidy/mopidy,pacificIT/mopidy,ZenithDK/mopidy,jodal/mopidy,swak/mopidy,ali/mopidy,pacificIT/mopidy,diandiankan/mopidy,mokieyue/mopidy,mopidy/mopidy,swak/mopidy,vrs01/mopidy,mokieyue/mopidy,jodal/mopidy,bacontext/mopidy,mokieyue/mopidy,hkariti/mopidy,SuperStarPL/mopidy,adamcik/mopidy,jmarsik/mopidy,ZenithDK/mopidy,tkem/mopidy,tkem/mopidy,bacontext/mopidy,pacificIT/mopidy,vrs01/mopidy,pacificIT/mopidy,quartz55/mopidy,swak/mopidy,mokieyue/mopidy,SuperStarPL/mopidy,diandiankan/mopidy,diandiankan/mopidy,bacontext/mopidy,mopidy/mopidy,quartz55/mopidy,tkem/mopidy,jcass77/mopidy,quartz55/mopidy,tkem/mopidy,jmarsik/mopidy,bencevans/mopidy,jcass77/mopidy,bencevans/mopidy
|
tests: Add tests for defaults in core schema config
Tests check if default core schema has cache_dir, config_dir, data_dir and max_tracklist_length and if they have proper type
|
from __future__ import absolute_import, unicode_literals
from mopidy import config
def test_core_schema_has_cache_dir():
assert 'cache_dir' in config._core_schema
assert isinstance(config._core_schema['cache_dir'], config.Path)
def test_core_schema_has_config_dir():
assert 'config_dir' in config._core_schema
assert isinstance(config._core_schema['config_dir'], config.Path)
def test_core_schema_has_data_dir():
assert 'data_dir' in config._core_schema
assert isinstance(config._core_schema['data_dir'], config.Path)
def test_core_schema_has_max_tracklist_length():
assert 'max_tracklist_length' in config._core_schema
max_tracklist_length_schema = config._core_schema['max_tracklist_length']
assert isinstance(max_tracklist_length_schema, config.Integer)
assert max_tracklist_length_schema._minimum == 1
assert max_tracklist_length_schema._maximum == 10000
|
<commit_before><commit_msg>tests: Add tests for defaults in core schema config
Tests check if default core schema has cache_dir, config_dir, data_dir and max_tracklist_length and if they have proper type<commit_after>
|
from __future__ import absolute_import, unicode_literals
from mopidy import config
def test_core_schema_has_cache_dir():
assert 'cache_dir' in config._core_schema
assert isinstance(config._core_schema['cache_dir'], config.Path)
def test_core_schema_has_config_dir():
assert 'config_dir' in config._core_schema
assert isinstance(config._core_schema['config_dir'], config.Path)
def test_core_schema_has_data_dir():
assert 'data_dir' in config._core_schema
assert isinstance(config._core_schema['data_dir'], config.Path)
def test_core_schema_has_max_tracklist_length():
assert 'max_tracklist_length' in config._core_schema
max_tracklist_length_schema = config._core_schema['max_tracklist_length']
assert isinstance(max_tracklist_length_schema, config.Integer)
assert max_tracklist_length_schema._minimum == 1
assert max_tracklist_length_schema._maximum == 10000
|
tests: Add tests for defaults in core schema config
Tests check if default core schema has cache_dir, config_dir, data_dir and max_tracklist_length and if they have proper typefrom __future__ import absolute_import, unicode_literals
from mopidy import config
def test_core_schema_has_cache_dir():
assert 'cache_dir' in config._core_schema
assert isinstance(config._core_schema['cache_dir'], config.Path)
def test_core_schema_has_config_dir():
assert 'config_dir' in config._core_schema
assert isinstance(config._core_schema['config_dir'], config.Path)
def test_core_schema_has_data_dir():
assert 'data_dir' in config._core_schema
assert isinstance(config._core_schema['data_dir'], config.Path)
def test_core_schema_has_max_tracklist_length():
assert 'max_tracklist_length' in config._core_schema
max_tracklist_length_schema = config._core_schema['max_tracklist_length']
assert isinstance(max_tracklist_length_schema, config.Integer)
assert max_tracklist_length_schema._minimum == 1
assert max_tracklist_length_schema._maximum == 10000
|
<commit_before><commit_msg>tests: Add tests for defaults in core schema config
Tests check if default core schema has cache_dir, config_dir, data_dir and max_tracklist_length and if they have proper type<commit_after>from __future__ import absolute_import, unicode_literals
from mopidy import config
def test_core_schema_has_cache_dir():
assert 'cache_dir' in config._core_schema
assert isinstance(config._core_schema['cache_dir'], config.Path)
def test_core_schema_has_config_dir():
assert 'config_dir' in config._core_schema
assert isinstance(config._core_schema['config_dir'], config.Path)
def test_core_schema_has_data_dir():
assert 'data_dir' in config._core_schema
assert isinstance(config._core_schema['data_dir'], config.Path)
def test_core_schema_has_max_tracklist_length():
assert 'max_tracklist_length' in config._core_schema
max_tracklist_length_schema = config._core_schema['max_tracklist_length']
assert isinstance(max_tracklist_length_schema, config.Integer)
assert max_tracklist_length_schema._minimum == 1
assert max_tracklist_length_schema._maximum == 10000
|
|
8021593349e27df111429d7a8ac9d443d9b1877e
|
demo/amqp_clock.py
|
demo/amqp_clock.py
|
#!/usr/bin/env python
"""
AMQP Clock
Fires off simple messages at one-minute intervals to a topic
exchange named 'clock', with the topic of the message being
the local time as 'year.month.date.dow.hour.minute',
for example: '2007.11.26.1.12.33', where the dow (day of week)
is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab).
A consumer could then bind a queue to the routing key '#.0'
for example to get a message at the beginning of each hour.
2007-11-26 Barry Pederson <bp@barryp.org>
"""
from datetime import datetime
from optparse import OptionParser
from time import sleep
import amqplib.client_0_8 as amqp
Message = amqp.Message
EXCHANGE_NAME = 'clock'
TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern
def main():
parser = OptionParser()
parser.add_option('--host', dest='host',
help='AMQP server to connect to (default: %default)',
default='localhost')
parser.add_option('-u', '--userid', dest='userid',
help='AMQP userid to authenticate as (default: %default)',
default='guest')
parser.add_option('-p', '--password', dest='password',
help='AMQP password to authenticate with (default: %default)',
default='guest')
parser.add_option('--ssl', dest='ssl', action='store_true',
help='Enable SSL with AMQP server (default: not enabled)',
default=False)
options, args = parser.parse_args()
conn = amqp.Connection(options.host, options.userid, options.password)
ch = conn.channel()
ch.access_request('/data', write=True, active=True)
ch.exchange_declare(EXCHANGE_NAME, type='topic')
# Make sure our first message is close to the beginning
# of a minute
now = datetime.now()
if now.second > 0:
sleep(60 - now.second)
while True:
now = datetime.now()
msg = Message(timestamp=now)
topic = now.strftime(TOPIC_PATTERN)
ch.basic_publish(msg, EXCHANGE_NAME, routing_key=topic)
# Don't know how long the basic_publish took, so
# grab the time again.
now = datetime.now()
sleep(60 - now.second)
ch.close()
conn.close()
if __name__ == '__main__':
main()
|
Add another demo program, one that spits out messages at regular intervals.
|
Add another demo program, one that spits out messages
at regular intervals.
|
Python
|
lgpl-2.1
|
JustinTulloss/harelib,arnaudsj/py-amqplib,JustinTulloss/harelib
|
Add another demo program, one that spits out messages
at regular intervals.
|
#!/usr/bin/env python
"""
AMQP Clock
Fires off simple messages at one-minute intervals to a topic
exchange named 'clock', with the topic of the message being
the local time as 'year.month.date.dow.hour.minute',
for example: '2007.11.26.1.12.33', where the dow (day of week)
is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab).
A consumer could then bind a queue to the routing key '#.0'
for example to get a message at the beginning of each hour.
2007-11-26 Barry Pederson <bp@barryp.org>
"""
from datetime import datetime
from optparse import OptionParser
from time import sleep
import amqplib.client_0_8 as amqp
Message = amqp.Message
EXCHANGE_NAME = 'clock'
TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern
def main():
parser = OptionParser()
parser.add_option('--host', dest='host',
help='AMQP server to connect to (default: %default)',
default='localhost')
parser.add_option('-u', '--userid', dest='userid',
help='AMQP userid to authenticate as (default: %default)',
default='guest')
parser.add_option('-p', '--password', dest='password',
help='AMQP password to authenticate with (default: %default)',
default='guest')
parser.add_option('--ssl', dest='ssl', action='store_true',
help='Enable SSL with AMQP server (default: not enabled)',
default=False)
options, args = parser.parse_args()
conn = amqp.Connection(options.host, options.userid, options.password)
ch = conn.channel()
ch.access_request('/data', write=True, active=True)
ch.exchange_declare(EXCHANGE_NAME, type='topic')
# Make sure our first message is close to the beginning
# of a minute
now = datetime.now()
if now.second > 0:
sleep(60 - now.second)
while True:
now = datetime.now()
msg = Message(timestamp=now)
topic = now.strftime(TOPIC_PATTERN)
ch.basic_publish(msg, EXCHANGE_NAME, routing_key=topic)
# Don't know how long the basic_publish took, so
# grab the time again.
now = datetime.now()
sleep(60 - now.second)
ch.close()
conn.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add another demo program, one that spits out messages
at regular intervals.<commit_after>
|
#!/usr/bin/env python
"""
AMQP Clock
Fires off simple messages at one-minute intervals to a topic
exchange named 'clock', with the topic of the message being
the local time as 'year.month.date.dow.hour.minute',
for example: '2007.11.26.1.12.33', where the dow (day of week)
is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab).
A consumer could then bind a queue to the routing key '#.0'
for example to get a message at the beginning of each hour.
2007-11-26 Barry Pederson <bp@barryp.org>
"""
from datetime import datetime
from optparse import OptionParser
from time import sleep
import amqplib.client_0_8 as amqp
Message = amqp.Message
EXCHANGE_NAME = 'clock'
TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern
def main():
parser = OptionParser()
parser.add_option('--host', dest='host',
help='AMQP server to connect to (default: %default)',
default='localhost')
parser.add_option('-u', '--userid', dest='userid',
help='AMQP userid to authenticate as (default: %default)',
default='guest')
parser.add_option('-p', '--password', dest='password',
help='AMQP password to authenticate with (default: %default)',
default='guest')
parser.add_option('--ssl', dest='ssl', action='store_true',
help='Enable SSL with AMQP server (default: not enabled)',
default=False)
options, args = parser.parse_args()
conn = amqp.Connection(options.host, options.userid, options.password)
ch = conn.channel()
ch.access_request('/data', write=True, active=True)
ch.exchange_declare(EXCHANGE_NAME, type='topic')
# Make sure our first message is close to the beginning
# of a minute
now = datetime.now()
if now.second > 0:
sleep(60 - now.second)
while True:
now = datetime.now()
msg = Message(timestamp=now)
topic = now.strftime(TOPIC_PATTERN)
ch.basic_publish(msg, EXCHANGE_NAME, routing_key=topic)
# Don't know how long the basic_publish took, so
# grab the time again.
now = datetime.now()
sleep(60 - now.second)
ch.close()
conn.close()
if __name__ == '__main__':
main()
|
Add another demo program, one that spits out messages
at regular intervals.#!/usr/bin/env python
"""
AMQP Clock
Fires off simple messages at one-minute intervals to a topic
exchange named 'clock', with the topic of the message being
the local time as 'year.month.date.dow.hour.minute',
for example: '2007.11.26.1.12.33', where the dow (day of week)
is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab).
A consumer could then bind a queue to the routing key '#.0'
for example to get a message at the beginning of each hour.
2007-11-26 Barry Pederson <bp@barryp.org>
"""
from datetime import datetime
from optparse import OptionParser
from time import sleep
import amqplib.client_0_8 as amqp
Message = amqp.Message
EXCHANGE_NAME = 'clock'
TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern
def main():
parser = OptionParser()
parser.add_option('--host', dest='host',
help='AMQP server to connect to (default: %default)',
default='localhost')
parser.add_option('-u', '--userid', dest='userid',
help='AMQP userid to authenticate as (default: %default)',
default='guest')
parser.add_option('-p', '--password', dest='password',
help='AMQP password to authenticate with (default: %default)',
default='guest')
parser.add_option('--ssl', dest='ssl', action='store_true',
help='Enable SSL with AMQP server (default: not enabled)',
default=False)
options, args = parser.parse_args()
conn = amqp.Connection(options.host, options.userid, options.password)
ch = conn.channel()
ch.access_request('/data', write=True, active=True)
ch.exchange_declare(EXCHANGE_NAME, type='topic')
# Make sure our first message is close to the beginning
# of a minute
now = datetime.now()
if now.second > 0:
sleep(60 - now.second)
while True:
now = datetime.now()
msg = Message(timestamp=now)
topic = now.strftime(TOPIC_PATTERN)
ch.basic_publish(msg, EXCHANGE_NAME, routing_key=topic)
# Don't know how long the basic_publish took, so
# grab the time again.
now = datetime.now()
sleep(60 - now.second)
ch.close()
conn.close()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add another demo program, one that spits out messages
at regular intervals.<commit_after>#!/usr/bin/env python
"""
AMQP Clock
Fires off simple messages at one-minute intervals to a topic
exchange named 'clock', with the topic of the message being
the local time as 'year.month.date.dow.hour.minute',
for example: '2007.11.26.1.12.33', where the dow (day of week)
is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab).
A consumer could then bind a queue to the routing key '#.0'
for example to get a message at the beginning of each hour.
2007-11-26 Barry Pederson <bp@barryp.org>
"""
from datetime import datetime
from optparse import OptionParser
from time import sleep
import amqplib.client_0_8 as amqp
Message = amqp.Message
EXCHANGE_NAME = 'clock'
TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern
def main():
parser = OptionParser()
parser.add_option('--host', dest='host',
help='AMQP server to connect to (default: %default)',
default='localhost')
parser.add_option('-u', '--userid', dest='userid',
help='AMQP userid to authenticate as (default: %default)',
default='guest')
parser.add_option('-p', '--password', dest='password',
help='AMQP password to authenticate with (default: %default)',
default='guest')
parser.add_option('--ssl', dest='ssl', action='store_true',
help='Enable SSL with AMQP server (default: not enabled)',
default=False)
options, args = parser.parse_args()
conn = amqp.Connection(options.host, options.userid, options.password)
ch = conn.channel()
ch.access_request('/data', write=True, active=True)
ch.exchange_declare(EXCHANGE_NAME, type='topic')
# Make sure our first message is close to the beginning
# of a minute
now = datetime.now()
if now.second > 0:
sleep(60 - now.second)
while True:
now = datetime.now()
msg = Message(timestamp=now)
topic = now.strftime(TOPIC_PATTERN)
ch.basic_publish(msg, EXCHANGE_NAME, routing_key=topic)
# Don't know how long the basic_publish took, so
# grab the time again.
now = datetime.now()
sleep(60 - now.second)
ch.close()
conn.close()
if __name__ == '__main__':
main()
|
|
ea0f9f97f4a0a8338bed30724ab92a8acc4b6efa
|
tests/panels/test_cache.py
|
tests/panels/test_cache.py
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
self.assertEqual(len(self.panel.calls), 3)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
# Verify that the cache has a valid clear method.
cache.cache.clear()
self.assertEqual(len(self.panel.calls), 5)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
|
Add a test that verifies the cache has a clear method.
|
Add a test that verifies the cache has a clear method.
|
Python
|
bsd-3-clause
|
peap/django-debug-toolbar,seperman/django-debug-toolbar,stored/django-debug-toolbar,megcunningham/django-debug-toolbar,calvinpy/django-debug-toolbar,spookylukey/django-debug-toolbar,guilhermetavares/django-debug-toolbar,megcunningham/django-debug-toolbar,tim-schilling/django-debug-toolbar,jazzband/django-debug-toolbar,ChristosChristofidis/django-debug-toolbar,barseghyanartur/django-debug-toolbar,pevzi/django-debug-toolbar,Endika/django-debug-toolbar,guilhermetavares/django-debug-toolbar,pevzi/django-debug-toolbar,calvinpy/django-debug-toolbar,spookylukey/django-debug-toolbar,megcunningham/django-debug-toolbar,tim-schilling/django-debug-toolbar,barseghyanartur/django-debug-toolbar,Endika/django-debug-toolbar,peap/django-debug-toolbar,tim-schilling/django-debug-toolbar,stored/django-debug-toolbar,sidja/django-debug-toolbar,spookylukey/django-debug-toolbar,jazzband/django-debug-toolbar,django-debug-toolbar/django-debug-toolbar,peap/django-debug-toolbar,seperman/django-debug-toolbar,calvinpy/django-debug-toolbar,django-debug-toolbar/django-debug-toolbar,django-debug-toolbar/django-debug-toolbar,barseghyanartur/django-debug-toolbar,jazzband/django-debug-toolbar,pevzi/django-debug-toolbar,stored/django-debug-toolbar,seperman/django-debug-toolbar,guilhermetavares/django-debug-toolbar,Endika/django-debug-toolbar,sidja/django-debug-toolbar,ChristosChristofidis/django-debug-toolbar,sidja/django-debug-toolbar,ChristosChristofidis/django-debug-toolbar
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
self.assertEqual(len(self.panel.calls), 3)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
Add a test that verifies the cache has a clear method.
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
# Verify that the cache has a valid clear method.
cache.cache.clear()
self.assertEqual(len(self.panel.calls), 5)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
|
<commit_before># coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
self.assertEqual(len(self.panel.calls), 3)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
<commit_msg>Add a test that verifies the cache has a clear method.<commit_after>
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
# Verify that the cache has a valid clear method.
cache.cache.clear()
self.assertEqual(len(self.panel.calls), 5)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
|
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
self.assertEqual(len(self.panel.calls), 3)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
Add a test that verifies the cache has a clear method.# coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
# Verify that the cache has a valid clear method.
cache.cache.clear()
self.assertEqual(len(self.panel.calls), 5)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
|
<commit_before># coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
self.assertEqual(len(self.panel.calls), 3)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
<commit_msg>Add a test that verifies the cache has a clear method.<commit_after># coding: utf-8
from __future__ import absolute_import, unicode_literals
import django
from django.core import cache
from django.utils.unittest import skipIf
from ..base import BaseTestCase
class CachePanelTestCase(BaseTestCase):
def setUp(self):
super(CachePanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('CachePanel')
self.panel.enable_instrumentation()
def tearDown(self):
self.panel.disable_instrumentation()
super(CachePanelTestCase, self).tearDown()
def test_recording(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.cache.get('foo')
cache.cache.delete('foo')
# Verify that the cache has a valid clear method.
cache.cache.clear()
self.assertEqual(len(self.panel.calls), 5)
@skipIf(django.VERSION < (1, 7), "Caches was added in Django 1.7")
def test_recording_caches(self):
self.assertEqual(len(self.panel.calls), 0)
cache.cache.set('foo', 'bar')
cache.caches[cache.DEFAULT_CACHE_ALIAS].get('foo')
self.assertEqual(len(self.panel.calls), 2)
|
61af21c1081e36aa6f90fc909d719e5ec0d44bd8
|
dotbriefs/tests/test_clean.py
|
dotbriefs/tests/test_clean.py
|
import unittest
from clean import CleanSecret
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = []
self.secrets.append(CleanSecret('passwd',
r'password(\s*)=(\s*)(?#UpToHash)',
r'password\1=\2(?#Key)',
'Mutt passwords',
True))
def test_state(self):
state = self.secrets[0].__getstate__()
other_secret = CleanSecret('', '', '')
other_secret.__setstate__(state)
self.assertEqual(self.secrets[0].__dict__, other_secret.__dict__)
if __name__ == '__main__':
unittest.main()
|
Add first test case for clean
|
Add first test case for clean
|
Python
|
bsd-3-clause
|
oohlaf/dotsecrets
|
Add first test case for clean
|
import unittest
from clean import CleanSecret
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = []
self.secrets.append(CleanSecret('passwd',
r'password(\s*)=(\s*)(?#UpToHash)',
r'password\1=\2(?#Key)',
'Mutt passwords',
True))
def test_state(self):
state = self.secrets[0].__getstate__()
other_secret = CleanSecret('', '', '')
other_secret.__setstate__(state)
self.assertEqual(self.secrets[0].__dict__, other_secret.__dict__)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add first test case for clean<commit_after>
|
import unittest
from clean import CleanSecret
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = []
self.secrets.append(CleanSecret('passwd',
r'password(\s*)=(\s*)(?#UpToHash)',
r'password\1=\2(?#Key)',
'Mutt passwords',
True))
def test_state(self):
state = self.secrets[0].__getstate__()
other_secret = CleanSecret('', '', '')
other_secret.__setstate__(state)
self.assertEqual(self.secrets[0].__dict__, other_secret.__dict__)
if __name__ == '__main__':
unittest.main()
|
Add first test case for cleanimport unittest
from clean import CleanSecret
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = []
self.secrets.append(CleanSecret('passwd',
r'password(\s*)=(\s*)(?#UpToHash)',
r'password\1=\2(?#Key)',
'Mutt passwords',
True))
def test_state(self):
state = self.secrets[0].__getstate__()
other_secret = CleanSecret('', '', '')
other_secret.__setstate__(state)
self.assertEqual(self.secrets[0].__dict__, other_secret.__dict__)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add first test case for clean<commit_after>import unittest
from clean import CleanSecret
class TestCleanSecret(unittest.TestCase):
def setUp(self):
self.secrets = []
self.secrets.append(CleanSecret('passwd',
r'password(\s*)=(\s*)(?#UpToHash)',
r'password\1=\2(?#Key)',
'Mutt passwords',
True))
def test_state(self):
state = self.secrets[0].__getstate__()
other_secret = CleanSecret('', '', '')
other_secret.__setstate__(state)
self.assertEqual(self.secrets[0].__dict__, other_secret.__dict__)
if __name__ == '__main__':
unittest.main()
|
|
f2328d35a1a964f5e0c22420f054d0451c9a62aa
|
pindahdatalessonplantarikh.py
|
pindahdatalessonplantarikh.py
|
#!/usr/bin/python
#Created : Mon 08 Sep 2008 01:40:45 PM GMT
#Last Modified : Tue 28 Jul 2015 10:34:53 AM UTC
#qpy:2
#qpy:console
import site
import os
import sys
from time import strftime
import sqlite3
con01 = sqlite3.connect("/usb/phpmysql/lessonplan2010.db")
cur01 = con01.cursor()
con02 = sqlite3.connect("/usb/phpmysql/lessonplanbank.db")
cur02 = con02.cursor()
tahunini = strftime("%Y")
selectid = raw_input("\nPlease enter LESSON PLAN BANK ID :\n")
intended_date = raw_input("Please enter LESSON PLAN DATE : (MMDD)\n")
tarikh_sebenar = tahunini+intended_date
cur01.execute("""select
tingkatan,theme,topic,lo1,lo2,
content,activity1,activity2,assimilation,impact
from lessonplanbank where bank_id=?""",(selectid,) )
hasilbankid = cur01.fetchall()
for form,theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact in hasilbankid:
cur01.execute("""
update lessonplan2013 set theme=?,topic=?, lo1 = ?, lo2 = ?,
content = ?,activity1 = ?,activity2 = ?,assimilation =
?,impact = ? where date
=?""",(theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact,tarikh_sebenar))
con01.commit()
print "-" * 60
cur01.execute("""
select tingkatan,date,timestart,timeend,theme,topic,lo1,lo2,lo3,
content,activity1,activity2,assimilation,
impact,note from lessonplan2013
where date = ?
""", (tarikh_sebenar,))
hasilsepatutnya = cur01.fetchall()
for form,date,timestart,timeend,theme,topic,lo1,lo2,lo3,content,activity1,activity2,assimilation,impact,note in hasilsepatutnya:
print
print "-----[%s -lessonplan id: ]-----" % (form)
print "DATE : %s TIME : %s - %s" % (date,timestart,timeend)
print "THEME /TOPIC : %s / %s" % (theme,topic)
print "LO1 : %s LO2 : %s LO3 : %s" % (lo1,lo2,lo3)
print "CONTENT : %s ACTIVITIES : %s , %s" % (content,activity1,activity2)
print "IMPACT : %s NOTE : %s" % (impact,note)
print
print "-" * 60
|
Add pindah lesson plan tarikh
|
Add pindah lesson plan tarikh
|
Python
|
cc0-1.0
|
mwbetrg/englishdb
|
Add pindah lesson plan tarikh
|
#!/usr/bin/python
#Created : Mon 08 Sep 2008 01:40:45 PM GMT
#Last Modified : Tue 28 Jul 2015 10:34:53 AM UTC
#qpy:2
#qpy:console
import site
import os
import sys
from time import strftime
import sqlite3
con01 = sqlite3.connect("/usb/phpmysql/lessonplan2010.db")
cur01 = con01.cursor()
con02 = sqlite3.connect("/usb/phpmysql/lessonplanbank.db")
cur02 = con02.cursor()
tahunini = strftime("%Y")
selectid = raw_input("\nPlease enter LESSON PLAN BANK ID :\n")
intended_date = raw_input("Please enter LESSON PLAN DATE : (MMDD)\n")
tarikh_sebenar = tahunini+intended_date
cur01.execute("""select
tingkatan,theme,topic,lo1,lo2,
content,activity1,activity2,assimilation,impact
from lessonplanbank where bank_id=?""",(selectid,) )
hasilbankid = cur01.fetchall()
for form,theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact in hasilbankid:
cur01.execute("""
update lessonplan2013 set theme=?,topic=?, lo1 = ?, lo2 = ?,
content = ?,activity1 = ?,activity2 = ?,assimilation =
?,impact = ? where date
=?""",(theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact,tarikh_sebenar))
con01.commit()
print "-" * 60
cur01.execute("""
select tingkatan,date,timestart,timeend,theme,topic,lo1,lo2,lo3,
content,activity1,activity2,assimilation,
impact,note from lessonplan2013
where date = ?
""", (tarikh_sebenar,))
hasilsepatutnya = cur01.fetchall()
for form,date,timestart,timeend,theme,topic,lo1,lo2,lo3,content,activity1,activity2,assimilation,impact,note in hasilsepatutnya:
print
print "-----[%s -lessonplan id: ]-----" % (form)
print "DATE : %s TIME : %s - %s" % (date,timestart,timeend)
print "THEME /TOPIC : %s / %s" % (theme,topic)
print "LO1 : %s LO2 : %s LO3 : %s" % (lo1,lo2,lo3)
print "CONTENT : %s ACTIVITIES : %s , %s" % (content,activity1,activity2)
print "IMPACT : %s NOTE : %s" % (impact,note)
print
print "-" * 60
|
<commit_before><commit_msg>Add pindah lesson plan tarikh<commit_after>
|
#!/usr/bin/python
#Created : Mon 08 Sep 2008 01:40:45 PM GMT
#Last Modified : Tue 28 Jul 2015 10:34:53 AM UTC
#qpy:2
#qpy:console
import site
import os
import sys
from time import strftime
import sqlite3
con01 = sqlite3.connect("/usb/phpmysql/lessonplan2010.db")
cur01 = con01.cursor()
con02 = sqlite3.connect("/usb/phpmysql/lessonplanbank.db")
cur02 = con02.cursor()
tahunini = strftime("%Y")
selectid = raw_input("\nPlease enter LESSON PLAN BANK ID :\n")
intended_date = raw_input("Please enter LESSON PLAN DATE : (MMDD)\n")
tarikh_sebenar = tahunini+intended_date
cur01.execute("""select
tingkatan,theme,topic,lo1,lo2,
content,activity1,activity2,assimilation,impact
from lessonplanbank where bank_id=?""",(selectid,) )
hasilbankid = cur01.fetchall()
for form,theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact in hasilbankid:
cur01.execute("""
update lessonplan2013 set theme=?,topic=?, lo1 = ?, lo2 = ?,
content = ?,activity1 = ?,activity2 = ?,assimilation =
?,impact = ? where date
=?""",(theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact,tarikh_sebenar))
con01.commit()
print "-" * 60
cur01.execute("""
select tingkatan,date,timestart,timeend,theme,topic,lo1,lo2,lo3,
content,activity1,activity2,assimilation,
impact,note from lessonplan2013
where date = ?
""", (tarikh_sebenar,))
hasilsepatutnya = cur01.fetchall()
for form,date,timestart,timeend,theme,topic,lo1,lo2,lo3,content,activity1,activity2,assimilation,impact,note in hasilsepatutnya:
print
print "-----[%s -lessonplan id: ]-----" % (form)
print "DATE : %s TIME : %s - %s" % (date,timestart,timeend)
print "THEME /TOPIC : %s / %s" % (theme,topic)
print "LO1 : %s LO2 : %s LO3 : %s" % (lo1,lo2,lo3)
print "CONTENT : %s ACTIVITIES : %s , %s" % (content,activity1,activity2)
print "IMPACT : %s NOTE : %s" % (impact,note)
print
print "-" * 60
|
Add pindah lesson plan tarikh#!/usr/bin/python
#Created : Mon 08 Sep 2008 01:40:45 PM GMT
#Last Modified : Tue 28 Jul 2015 10:34:53 AM UTC
#qpy:2
#qpy:console
import site
import os
import sys
from time import strftime
import sqlite3
con01 = sqlite3.connect("/usb/phpmysql/lessonplan2010.db")
cur01 = con01.cursor()
con02 = sqlite3.connect("/usb/phpmysql/lessonplanbank.db")
cur02 = con02.cursor()
tahunini = strftime("%Y")
selectid = raw_input("\nPlease enter LESSON PLAN BANK ID :\n")
intended_date = raw_input("Please enter LESSON PLAN DATE : (MMDD)\n")
tarikh_sebenar = tahunini+intended_date
cur01.execute("""select
tingkatan,theme,topic,lo1,lo2,
content,activity1,activity2,assimilation,impact
from lessonplanbank where bank_id=?""",(selectid,) )
hasilbankid = cur01.fetchall()
for form,theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact in hasilbankid:
cur01.execute("""
update lessonplan2013 set theme=?,topic=?, lo1 = ?, lo2 = ?,
content = ?,activity1 = ?,activity2 = ?,assimilation =
?,impact = ? where date
=?""",(theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact,tarikh_sebenar))
con01.commit()
print "-" * 60
cur01.execute("""
select tingkatan,date,timestart,timeend,theme,topic,lo1,lo2,lo3,
content,activity1,activity2,assimilation,
impact,note from lessonplan2013
where date = ?
""", (tarikh_sebenar,))
hasilsepatutnya = cur01.fetchall()
for form,date,timestart,timeend,theme,topic,lo1,lo2,lo3,content,activity1,activity2,assimilation,impact,note in hasilsepatutnya:
print
print "-----[%s -lessonplan id: ]-----" % (form)
print "DATE : %s TIME : %s - %s" % (date,timestart,timeend)
print "THEME /TOPIC : %s / %s" % (theme,topic)
print "LO1 : %s LO2 : %s LO3 : %s" % (lo1,lo2,lo3)
print "CONTENT : %s ACTIVITIES : %s , %s" % (content,activity1,activity2)
print "IMPACT : %s NOTE : %s" % (impact,note)
print
print "-" * 60
|
<commit_before><commit_msg>Add pindah lesson plan tarikh<commit_after>#!/usr/bin/python
#Created : Mon 08 Sep 2008 01:40:45 PM GMT
#Last Modified : Tue 28 Jul 2015 10:34:53 AM UTC
#qpy:2
#qpy:console
import site
import os
import sys
from time import strftime
import sqlite3
con01 = sqlite3.connect("/usb/phpmysql/lessonplan2010.db")
cur01 = con01.cursor()
con02 = sqlite3.connect("/usb/phpmysql/lessonplanbank.db")
cur02 = con02.cursor()
tahunini = strftime("%Y")
selectid = raw_input("\nPlease enter LESSON PLAN BANK ID :\n")
intended_date = raw_input("Please enter LESSON PLAN DATE : (MMDD)\n")
tarikh_sebenar = tahunini+intended_date
cur01.execute("""select
tingkatan,theme,topic,lo1,lo2,
content,activity1,activity2,assimilation,impact
from lessonplanbank where bank_id=?""",(selectid,) )
hasilbankid = cur01.fetchall()
for form,theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact in hasilbankid:
cur01.execute("""
update lessonplan2013 set theme=?,topic=?, lo1 = ?, lo2 = ?,
content = ?,activity1 = ?,activity2 = ?,assimilation =
?,impact = ? where date
=?""",(theme,topic,lo1,lo2,content,activity1,activity2,assimilation,impact,tarikh_sebenar))
con01.commit()
print "-" * 60
cur01.execute("""
select tingkatan,date,timestart,timeend,theme,topic,lo1,lo2,lo3,
content,activity1,activity2,assimilation,
impact,note from lessonplan2013
where date = ?
""", (tarikh_sebenar,))
hasilsepatutnya = cur01.fetchall()
for form,date,timestart,timeend,theme,topic,lo1,lo2,lo3,content,activity1,activity2,assimilation,impact,note in hasilsepatutnya:
print
print "-----[%s -lessonplan id: ]-----" % (form)
print "DATE : %s TIME : %s - %s" % (date,timestart,timeend)
print "THEME /TOPIC : %s / %s" % (theme,topic)
print "LO1 : %s LO2 : %s LO3 : %s" % (lo1,lo2,lo3)
print "CONTENT : %s ACTIVITIES : %s , %s" % (content,activity1,activity2)
print "IMPACT : %s NOTE : %s" % (impact,note)
print
print "-" * 60
|
|
92ff17745b125080f1d4cc7e46032806267936bd
|
comics/comics/abstrusegoose.py
|
comics/comics/abstrusegoose.py
|
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = 'Abstruse Goose'
language = 'en'
url = 'http://www.abstrusegoose.com/'
start_date = '2008-02-01'
rights = 'lcfr, CC BY-NC 3.0 US'
class Crawler(CrawlerBase):
history_capable_days = 10
schedule = 'Mo,We,Fr'
def crawl(self, pub_date):
feed = self.parse_feed('http://abstrusegoose.com/feed/atom')
for entry in feed.for_date(pub_date):
url = entry.content0.src('img[src*="/strips/"]')
title = entry.title
text = entry.content0.title('img[src*="/strips/"]')
return CrawlerResult(url, title, text)
|
Add crawler for Abstruse Goose
|
Add crawler for Abstruse Goose
|
Python
|
agpl-3.0
|
datagutten/comics,klette/comics,jodal/comics,jodal/comics,datagutten/comics,datagutten/comics,jodal/comics,jodal/comics,klette/comics,klette/comics,datagutten/comics
|
Add crawler for Abstruse Goose
|
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = 'Abstruse Goose'
language = 'en'
url = 'http://www.abstrusegoose.com/'
start_date = '2008-02-01'
rights = 'lcfr, CC BY-NC 3.0 US'
class Crawler(CrawlerBase):
history_capable_days = 10
schedule = 'Mo,We,Fr'
def crawl(self, pub_date):
feed = self.parse_feed('http://abstrusegoose.com/feed/atom')
for entry in feed.for_date(pub_date):
url = entry.content0.src('img[src*="/strips/"]')
title = entry.title
text = entry.content0.title('img[src*="/strips/"]')
return CrawlerResult(url, title, text)
|
<commit_before><commit_msg>Add crawler for Abstruse Goose<commit_after>
|
from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = 'Abstruse Goose'
language = 'en'
url = 'http://www.abstrusegoose.com/'
start_date = '2008-02-01'
rights = 'lcfr, CC BY-NC 3.0 US'
class Crawler(CrawlerBase):
history_capable_days = 10
schedule = 'Mo,We,Fr'
def crawl(self, pub_date):
feed = self.parse_feed('http://abstrusegoose.com/feed/atom')
for entry in feed.for_date(pub_date):
url = entry.content0.src('img[src*="/strips/"]')
title = entry.title
text = entry.content0.title('img[src*="/strips/"]')
return CrawlerResult(url, title, text)
|
Add crawler for Abstruse Goosefrom comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = 'Abstruse Goose'
language = 'en'
url = 'http://www.abstrusegoose.com/'
start_date = '2008-02-01'
rights = 'lcfr, CC BY-NC 3.0 US'
class Crawler(CrawlerBase):
history_capable_days = 10
schedule = 'Mo,We,Fr'
def crawl(self, pub_date):
feed = self.parse_feed('http://abstrusegoose.com/feed/atom')
for entry in feed.for_date(pub_date):
url = entry.content0.src('img[src*="/strips/"]')
title = entry.title
text = entry.content0.title('img[src*="/strips/"]')
return CrawlerResult(url, title, text)
|
<commit_before><commit_msg>Add crawler for Abstruse Goose<commit_after>from comics.aggregator.crawler import CrawlerBase, CrawlerResult
from comics.meta.base import MetaBase
class Meta(MetaBase):
name = 'Abstruse Goose'
language = 'en'
url = 'http://www.abstrusegoose.com/'
start_date = '2008-02-01'
rights = 'lcfr, CC BY-NC 3.0 US'
class Crawler(CrawlerBase):
history_capable_days = 10
schedule = 'Mo,We,Fr'
def crawl(self, pub_date):
feed = self.parse_feed('http://abstrusegoose.com/feed/atom')
for entry in feed.for_date(pub_date):
url = entry.content0.src('img[src*="/strips/"]')
title = entry.title
text = entry.content0.title('img[src*="/strips/"]')
return CrawlerResult(url, title, text)
|
|
23e24ccb8a2a5fb9ead50b1697344c79c03ef14a
|
traits/tests/test_dynamic_trait_definition.py
|
traits/tests/test_dynamic_trait_definition.py
|
from traits.testing.unittest_tools import unittest
from traits.api import Float, HasTraits, Int, List
class Foo(HasTraits):
x = Float
x_changes = List
y_changes = List
def _x_changed(self, new):
self.x_changes.append(new)
def _y_changed(self, new):
self.y_changes.append(new)
class TestDynamicTraitDefinition(unittest.TestCase):
""" Test demonstrating special change events using the 'event' metadata.
"""
def test_add_trait(self):
foo = Foo(x=3)
foo.add_trait('y', Int)
self.assertTrue(hasattr(foo, 'y'))
self.assertEqual(type(foo.y), int)
foo.y = 4
self.assertEqual(foo.y_changes, [4])
def test_remove_trait(self):
foo = Foo(x=3)
# We can't remove a "statically" added trait (i.e., a trait defined
# in the Foo class).
result = foo.remove_trait('x')
self.assertFalse(result)
# We can remove dynamically added traits.
foo.add_trait('y', Int)
foo.y = 70
result = foo.remove_trait('y')
self.assertTrue(result)
self.assertFalse(hasattr(foo, 'y'))
foo.y = 10
self.assertEqual(foo.y_changes, [70])
|
Add tests for `add_trait` and `remove_trait`.
|
Add tests for `add_trait` and `remove_trait`.
|
Python
|
bsd-3-clause
|
burnpanck/traits,burnpanck/traits
|
Add tests for `add_trait` and `remove_trait`.
|
from traits.testing.unittest_tools import unittest
from traits.api import Float, HasTraits, Int, List
class Foo(HasTraits):
x = Float
x_changes = List
y_changes = List
def _x_changed(self, new):
self.x_changes.append(new)
def _y_changed(self, new):
self.y_changes.append(new)
class TestDynamicTraitDefinition(unittest.TestCase):
""" Test demonstrating special change events using the 'event' metadata.
"""
def test_add_trait(self):
foo = Foo(x=3)
foo.add_trait('y', Int)
self.assertTrue(hasattr(foo, 'y'))
self.assertEqual(type(foo.y), int)
foo.y = 4
self.assertEqual(foo.y_changes, [4])
def test_remove_trait(self):
foo = Foo(x=3)
# We can't remove a "statically" added trait (i.e., a trait defined
# in the Foo class).
result = foo.remove_trait('x')
self.assertFalse(result)
# We can remove dynamically added traits.
foo.add_trait('y', Int)
foo.y = 70
result = foo.remove_trait('y')
self.assertTrue(result)
self.assertFalse(hasattr(foo, 'y'))
foo.y = 10
self.assertEqual(foo.y_changes, [70])
|
<commit_before><commit_msg>Add tests for `add_trait` and `remove_trait`.<commit_after>
|
from traits.testing.unittest_tools import unittest
from traits.api import Float, HasTraits, Int, List
class Foo(HasTraits):
x = Float
x_changes = List
y_changes = List
def _x_changed(self, new):
self.x_changes.append(new)
def _y_changed(self, new):
self.y_changes.append(new)
class TestDynamicTraitDefinition(unittest.TestCase):
""" Test demonstrating special change events using the 'event' metadata.
"""
def test_add_trait(self):
foo = Foo(x=3)
foo.add_trait('y', Int)
self.assertTrue(hasattr(foo, 'y'))
self.assertEqual(type(foo.y), int)
foo.y = 4
self.assertEqual(foo.y_changes, [4])
def test_remove_trait(self):
foo = Foo(x=3)
# We can't remove a "statically" added trait (i.e., a trait defined
# in the Foo class).
result = foo.remove_trait('x')
self.assertFalse(result)
# We can remove dynamically added traits.
foo.add_trait('y', Int)
foo.y = 70
result = foo.remove_trait('y')
self.assertTrue(result)
self.assertFalse(hasattr(foo, 'y'))
foo.y = 10
self.assertEqual(foo.y_changes, [70])
|
Add tests for `add_trait` and `remove_trait`.from traits.testing.unittest_tools import unittest
from traits.api import Float, HasTraits, Int, List
class Foo(HasTraits):
x = Float
x_changes = List
y_changes = List
def _x_changed(self, new):
self.x_changes.append(new)
def _y_changed(self, new):
self.y_changes.append(new)
class TestDynamicTraitDefinition(unittest.TestCase):
""" Test demonstrating special change events using the 'event' metadata.
"""
def test_add_trait(self):
foo = Foo(x=3)
foo.add_trait('y', Int)
self.assertTrue(hasattr(foo, 'y'))
self.assertEqual(type(foo.y), int)
foo.y = 4
self.assertEqual(foo.y_changes, [4])
def test_remove_trait(self):
foo = Foo(x=3)
# We can't remove a "statically" added trait (i.e., a trait defined
# in the Foo class).
result = foo.remove_trait('x')
self.assertFalse(result)
# We can remove dynamically added traits.
foo.add_trait('y', Int)
foo.y = 70
result = foo.remove_trait('y')
self.assertTrue(result)
self.assertFalse(hasattr(foo, 'y'))
foo.y = 10
self.assertEqual(foo.y_changes, [70])
|
<commit_before><commit_msg>Add tests for `add_trait` and `remove_trait`.<commit_after>from traits.testing.unittest_tools import unittest
from traits.api import Float, HasTraits, Int, List
class Foo(HasTraits):
x = Float
x_changes = List
y_changes = List
def _x_changed(self, new):
self.x_changes.append(new)
def _y_changed(self, new):
self.y_changes.append(new)
class TestDynamicTraitDefinition(unittest.TestCase):
""" Test demonstrating special change events using the 'event' metadata.
"""
def test_add_trait(self):
foo = Foo(x=3)
foo.add_trait('y', Int)
self.assertTrue(hasattr(foo, 'y'))
self.assertEqual(type(foo.y), int)
foo.y = 4
self.assertEqual(foo.y_changes, [4])
def test_remove_trait(self):
foo = Foo(x=3)
# We can't remove a "statically" added trait (i.e., a trait defined
# in the Foo class).
result = foo.remove_trait('x')
self.assertFalse(result)
# We can remove dynamically added traits.
foo.add_trait('y', Int)
foo.y = 70
result = foo.remove_trait('y')
self.assertTrue(result)
self.assertFalse(hasattr(foo, 'y'))
foo.y = 10
self.assertEqual(foo.y_changes, [70])
|
|
5116cf742be887e7ed40b17714ac6a2a117ad5b8
|
schema_test.py
|
schema_test.py
|
from dtest import Tester
from assertions import *
from tools import *
import time
class TestSchema(Tester):
def remove_columndef_test(self):
""" Remove a column definition while a node is dead """
cluster = self.cluster
cluster.populate(2).start()
nodes = cluster.nodelist()
time.sleep(.3)
cursor = self.cql_connection(nodes[0]).cursor()
self.create_ks(cursor, 'ks', 3)
cursor.execute("""
CREATE TABLE cf (
key int PRIMARY KEY,
c1 int,
c2 int
)
""")
# Shutdown a node and do an update
nodes[1].stop()
cursor.execute("ALTER TABLE cf DROP c2")
nodes[0].compact() # compact the schema CF
time.sleep(.5)
# Restart the dead node and do a new (trivial) update to make sure the
# schema are in agreement
nodes[1].start()
time.sleep(.5)
cursor.execute("ALTER TABLE cf ADD c3 int")
# Check the schema don't reference c2 anymore
# (use the cli to be compatible with 1.0)
cli = nodes[0].cli()
cli.do("use ks")
cli.do("describe ks")
assert "Column Name: c2" not in cli.last_output(), cli.last_output()
|
Add test schema deletion bug
|
Add test schema deletion bug
|
Python
|
apache-2.0
|
krummas/cassandra-dtest,krummas/cassandra-dtest,mambocab/cassandra-dtest,stef1927/cassandra-dtest,thobbs/cassandra-dtest,iamaleksey/cassandra-dtest,tjake/cassandra-dtest,iamaleksey/cassandra-dtest,snazy/cassandra-dtest,blerer/cassandra-dtest,riptano/cassandra-dtest,pauloricardomg/cassandra-dtest,beobal/cassandra-dtest,bdeggleston/cassandra-dtest,carlyeks/cassandra-dtest,spodkowinski/cassandra-dtest,stef1927/cassandra-dtest,pcmanus/cassandra-dtest,mambocab/cassandra-dtest,aweisberg/cassandra-dtest,pauloricardomg/cassandra-dtest,thobbs/cassandra-dtest,beobal/cassandra-dtest,spodkowinski/cassandra-dtest,blerer/cassandra-dtest,yukim/cassandra-dtest,carlyeks/cassandra-dtest,snazy/cassandra-dtest,aweisberg/cassandra-dtest,bdeggleston/cassandra-dtest,riptano/cassandra-dtest,josh-mckenzie/cassandra-dtest
|
Add test schema deletion bug
|
from dtest import Tester
from assertions import *
from tools import *
import time
class TestSchema(Tester):
def remove_columndef_test(self):
""" Remove a column definition while a node is dead """
cluster = self.cluster
cluster.populate(2).start()
nodes = cluster.nodelist()
time.sleep(.3)
cursor = self.cql_connection(nodes[0]).cursor()
self.create_ks(cursor, 'ks', 3)
cursor.execute("""
CREATE TABLE cf (
key int PRIMARY KEY,
c1 int,
c2 int
)
""")
# Shutdown a node and do an update
nodes[1].stop()
cursor.execute("ALTER TABLE cf DROP c2")
nodes[0].compact() # compact the schema CF
time.sleep(.5)
# Restart the dead node and do a new (trivial) update to make sure the
# schema are in agreement
nodes[1].start()
time.sleep(.5)
cursor.execute("ALTER TABLE cf ADD c3 int")
# Check the schema don't reference c2 anymore
# (use the cli to be compatible with 1.0)
cli = nodes[0].cli()
cli.do("use ks")
cli.do("describe ks")
assert "Column Name: c2" not in cli.last_output(), cli.last_output()
|
<commit_before><commit_msg>Add test schema deletion bug<commit_after>
|
from dtest import Tester
from assertions import *
from tools import *
import time
class TestSchema(Tester):
def remove_columndef_test(self):
""" Remove a column definition while a node is dead """
cluster = self.cluster
cluster.populate(2).start()
nodes = cluster.nodelist()
time.sleep(.3)
cursor = self.cql_connection(nodes[0]).cursor()
self.create_ks(cursor, 'ks', 3)
cursor.execute("""
CREATE TABLE cf (
key int PRIMARY KEY,
c1 int,
c2 int
)
""")
# Shutdown a node and do an update
nodes[1].stop()
cursor.execute("ALTER TABLE cf DROP c2")
nodes[0].compact() # compact the schema CF
time.sleep(.5)
# Restart the dead node and do a new (trivial) update to make sure the
# schema are in agreement
nodes[1].start()
time.sleep(.5)
cursor.execute("ALTER TABLE cf ADD c3 int")
# Check the schema don't reference c2 anymore
# (use the cli to be compatible with 1.0)
cli = nodes[0].cli()
cli.do("use ks")
cli.do("describe ks")
assert "Column Name: c2" not in cli.last_output(), cli.last_output()
|
Add test schema deletion bugfrom dtest import Tester
from assertions import *
from tools import *
import time
class TestSchema(Tester):
def remove_columndef_test(self):
""" Remove a column definition while a node is dead """
cluster = self.cluster
cluster.populate(2).start()
nodes = cluster.nodelist()
time.sleep(.3)
cursor = self.cql_connection(nodes[0]).cursor()
self.create_ks(cursor, 'ks', 3)
cursor.execute("""
CREATE TABLE cf (
key int PRIMARY KEY,
c1 int,
c2 int
)
""")
# Shutdown a node and do an update
nodes[1].stop()
cursor.execute("ALTER TABLE cf DROP c2")
nodes[0].compact() # compact the schema CF
time.sleep(.5)
# Restart the dead node and do a new (trivial) update to make sure the
# schema are in agreement
nodes[1].start()
time.sleep(.5)
cursor.execute("ALTER TABLE cf ADD c3 int")
# Check the schema don't reference c2 anymore
# (use the cli to be compatible with 1.0)
cli = nodes[0].cli()
cli.do("use ks")
cli.do("describe ks")
assert "Column Name: c2" not in cli.last_output(), cli.last_output()
|
<commit_before><commit_msg>Add test schema deletion bug<commit_after>from dtest import Tester
from assertions import *
from tools import *
import time
class TestSchema(Tester):
def remove_columndef_test(self):
""" Remove a column definition while a node is dead """
cluster = self.cluster
cluster.populate(2).start()
nodes = cluster.nodelist()
time.sleep(.3)
cursor = self.cql_connection(nodes[0]).cursor()
self.create_ks(cursor, 'ks', 3)
cursor.execute("""
CREATE TABLE cf (
key int PRIMARY KEY,
c1 int,
c2 int
)
""")
# Shutdown a node and do an update
nodes[1].stop()
cursor.execute("ALTER TABLE cf DROP c2")
nodes[0].compact() # compact the schema CF
time.sleep(.5)
# Restart the dead node and do a new (trivial) update to make sure the
# schema are in agreement
nodes[1].start()
time.sleep(.5)
cursor.execute("ALTER TABLE cf ADD c3 int")
# Check the schema don't reference c2 anymore
# (use the cli to be compatible with 1.0)
cli = nodes[0].cli()
cli.do("use ks")
cli.do("describe ks")
assert "Column Name: c2" not in cli.last_output(), cli.last_output()
|
|
d95804836fd5f693bd70ee4d8480e0e63d98cdb7
|
tests/test_utils.py
|
tests/test_utils.py
|
from fudge.patcher import with_patched_object
from functools import wraps
from nose.tools import eq_
from nose.tools import raises
from fabric.state import output
from fabric.utils import warn, indent, abort
import sys
from StringIO import StringIO
#
# Setup/teardown helpers and decorators
#
def mock_streams(*which):
"""
Replaces ``sys.stderr`` with a ``StringIO`` during the test, then restores
after.
Must specify which stream via string args, e.g.::
@mock_streams('stdout')
def func():
pass
@mock_streams('stderr')
def func():
pass
@mock_streams('stdout', 'stderr')
def func()
pass
"""
def mocked_streams_decorator(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if 'stdout' in which:
my_stdout, sys.stdout = sys.stdout, StringIO()
if 'stderr' in which:
my_stderr, sys.stderr = sys.stderr, StringIO()
result = func(*args, **kwargs)
if 'stderr' in which:
sys.stderr = my_stderr
if 'stdout' in which:
sys.stdout = my_stdout
return result
return inner_wrapper
return mocked_streams_decorator
@mock_streams('stderr')
@with_patched_object(output, 'warnings', True)
def test_warn():
warn("Test")
result = sys.stderr.getvalue()
assert "\nWarning: Test\n\n" == result
def test_indent():
for description, input, output in (
("Sanity check: 1 line string",
'Test', ' Test'),
("List of strings turns in to strings joined by \n",
["Test", "Test"], ' Test\n Test'),
):
eq_.description = description
yield eq_, indent(input), output
del eq_.description
def test_indent_with_strip():
for description, input, output in (
("Sanity check: 1 line string",
indent('Test', strip=True), ' Test'),
("Check list of strings",
indent(["Test", "Test"], strip=True), ' Test\n Test'),
("Check list of strings",
indent([" Test", " Test"], strip=True), ' Test\n Test'),
):
eq_.description = description
yield eq_, input, output
del eq_.description
@mock_streams('stderr')
@raises(SystemExit)
def test_abort():
"""
Abort should raise SystemExit
"""
abort("Test")
@mock_streams('stderr')
@with_patched_object(output, 'aborts', True)
def test_abort_message():
try:
abort("Test")
except SystemExit:
pass
result = sys.stderr.getvalue()
eq_("\nFatal error: Test\n\nAborting.\n", result)
|
Add basic tests for utils
|
Add basic tests for utils
Signed-off-by: Jeff Forcier <f3e731dfa293c7a83119d8aacfa41b5d2d780be9@bitprophet.org>
|
Python
|
bsd-2-clause
|
amaniak/fabric,SamuelMarks/fabric,simon-engledew/fabric,likesxuqiang/fabric,kmonsoor/fabric,rane-hs/fabric-py3,ploxiln/fabric,StackStorm/fabric,bitprophet/fabric,bitmonk/fabric,itoed/fabric,haridsv/fabric,xLegoz/fabric,pashinin/fabric,cgvarela/fabric,cmattoon/fabric,tolbkni/fabric,raimon49/fabric,kxxoling/fabric,rbramwell/fabric,felix-d/fabric,fernandezcuesta/fabric,mathiasertl/fabric,askulkarni2/fabric,qinrong/fabric,bspink/fabric,TarasRudnyk/fabric,akaariai/fabric,opavader/fabric,elijah513/fabric,jaraco/fabric,ericholscher/fabric,hrubi/fabric,sdelements/fabric,rodrigc/fabric,tekapo/fabric,MjAbuz/fabric,getsentry/fabric,pgroudas/fabric
|
Add basic tests for utils
Signed-off-by: Jeff Forcier <f3e731dfa293c7a83119d8aacfa41b5d2d780be9@bitprophet.org>
|
from fudge.patcher import with_patched_object
from functools import wraps
from nose.tools import eq_
from nose.tools import raises
from fabric.state import output
from fabric.utils import warn, indent, abort
import sys
from StringIO import StringIO
#
# Setup/teardown helpers and decorators
#
def mock_streams(*which):
"""
Replaces ``sys.stderr`` with a ``StringIO`` during the test, then restores
after.
Must specify which stream via string args, e.g.::
@mock_streams('stdout')
def func():
pass
@mock_streams('stderr')
def func():
pass
@mock_streams('stdout', 'stderr')
def func()
pass
"""
def mocked_streams_decorator(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if 'stdout' in which:
my_stdout, sys.stdout = sys.stdout, StringIO()
if 'stderr' in which:
my_stderr, sys.stderr = sys.stderr, StringIO()
result = func(*args, **kwargs)
if 'stderr' in which:
sys.stderr = my_stderr
if 'stdout' in which:
sys.stdout = my_stdout
return result
return inner_wrapper
return mocked_streams_decorator
@mock_streams('stderr')
@with_patched_object(output, 'warnings', True)
def test_warn():
warn("Test")
result = sys.stderr.getvalue()
assert "\nWarning: Test\n\n" == result
def test_indent():
for description, input, output in (
("Sanity check: 1 line string",
'Test', ' Test'),
("List of strings turns in to strings joined by \n",
["Test", "Test"], ' Test\n Test'),
):
eq_.description = description
yield eq_, indent(input), output
del eq_.description
def test_indent_with_strip():
for description, input, output in (
("Sanity check: 1 line string",
indent('Test', strip=True), ' Test'),
("Check list of strings",
indent(["Test", "Test"], strip=True), ' Test\n Test'),
("Check list of strings",
indent([" Test", " Test"], strip=True), ' Test\n Test'),
):
eq_.description = description
yield eq_, input, output
del eq_.description
@mock_streams('stderr')
@raises(SystemExit)
def test_abort():
"""
Abort should raise SystemExit
"""
abort("Test")
@mock_streams('stderr')
@with_patched_object(output, 'aborts', True)
def test_abort_message():
try:
abort("Test")
except SystemExit:
pass
result = sys.stderr.getvalue()
eq_("\nFatal error: Test\n\nAborting.\n", result)
|
<commit_before><commit_msg>Add basic tests for utils
Signed-off-by: Jeff Forcier <f3e731dfa293c7a83119d8aacfa41b5d2d780be9@bitprophet.org><commit_after>
|
from fudge.patcher import with_patched_object
from functools import wraps
from nose.tools import eq_
from nose.tools import raises
from fabric.state import output
from fabric.utils import warn, indent, abort
import sys
from StringIO import StringIO
#
# Setup/teardown helpers and decorators
#
def mock_streams(*which):
"""
Replaces ``sys.stderr`` with a ``StringIO`` during the test, then restores
after.
Must specify which stream via string args, e.g.::
@mock_streams('stdout')
def func():
pass
@mock_streams('stderr')
def func():
pass
@mock_streams('stdout', 'stderr')
def func()
pass
"""
def mocked_streams_decorator(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if 'stdout' in which:
my_stdout, sys.stdout = sys.stdout, StringIO()
if 'stderr' in which:
my_stderr, sys.stderr = sys.stderr, StringIO()
result = func(*args, **kwargs)
if 'stderr' in which:
sys.stderr = my_stderr
if 'stdout' in which:
sys.stdout = my_stdout
return result
return inner_wrapper
return mocked_streams_decorator
@mock_streams('stderr')
@with_patched_object(output, 'warnings', True)
def test_warn():
warn("Test")
result = sys.stderr.getvalue()
assert "\nWarning: Test\n\n" == result
def test_indent():
for description, input, output in (
("Sanity check: 1 line string",
'Test', ' Test'),
("List of strings turns in to strings joined by \n",
["Test", "Test"], ' Test\n Test'),
):
eq_.description = description
yield eq_, indent(input), output
del eq_.description
def test_indent_with_strip():
for description, input, output in (
("Sanity check: 1 line string",
indent('Test', strip=True), ' Test'),
("Check list of strings",
indent(["Test", "Test"], strip=True), ' Test\n Test'),
("Check list of strings",
indent([" Test", " Test"], strip=True), ' Test\n Test'),
):
eq_.description = description
yield eq_, input, output
del eq_.description
@mock_streams('stderr')
@raises(SystemExit)
def test_abort():
"""
Abort should raise SystemExit
"""
abort("Test")
@mock_streams('stderr')
@with_patched_object(output, 'aborts', True)
def test_abort_message():
try:
abort("Test")
except SystemExit:
pass
result = sys.stderr.getvalue()
eq_("\nFatal error: Test\n\nAborting.\n", result)
|
Add basic tests for utils
Signed-off-by: Jeff Forcier <f3e731dfa293c7a83119d8aacfa41b5d2d780be9@bitprophet.org>from fudge.patcher import with_patched_object
from functools import wraps
from nose.tools import eq_
from nose.tools import raises
from fabric.state import output
from fabric.utils import warn, indent, abort
import sys
from StringIO import StringIO
#
# Setup/teardown helpers and decorators
#
def mock_streams(*which):
"""
Replaces ``sys.stderr`` with a ``StringIO`` during the test, then restores
after.
Must specify which stream via string args, e.g.::
@mock_streams('stdout')
def func():
pass
@mock_streams('stderr')
def func():
pass
@mock_streams('stdout', 'stderr')
def func()
pass
"""
def mocked_streams_decorator(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if 'stdout' in which:
my_stdout, sys.stdout = sys.stdout, StringIO()
if 'stderr' in which:
my_stderr, sys.stderr = sys.stderr, StringIO()
result = func(*args, **kwargs)
if 'stderr' in which:
sys.stderr = my_stderr
if 'stdout' in which:
sys.stdout = my_stdout
return result
return inner_wrapper
return mocked_streams_decorator
@mock_streams('stderr')
@with_patched_object(output, 'warnings', True)
def test_warn():
warn("Test")
result = sys.stderr.getvalue()
assert "\nWarning: Test\n\n" == result
def test_indent():
for description, input, output in (
("Sanity check: 1 line string",
'Test', ' Test'),
("List of strings turns in to strings joined by \n",
["Test", "Test"], ' Test\n Test'),
):
eq_.description = description
yield eq_, indent(input), output
del eq_.description
def test_indent_with_strip():
for description, input, output in (
("Sanity check: 1 line string",
indent('Test', strip=True), ' Test'),
("Check list of strings",
indent(["Test", "Test"], strip=True), ' Test\n Test'),
("Check list of strings",
indent([" Test", " Test"], strip=True), ' Test\n Test'),
):
eq_.description = description
yield eq_, input, output
del eq_.description
@mock_streams('stderr')
@raises(SystemExit)
def test_abort():
"""
Abort should raise SystemExit
"""
abort("Test")
@mock_streams('stderr')
@with_patched_object(output, 'aborts', True)
def test_abort_message():
try:
abort("Test")
except SystemExit:
pass
result = sys.stderr.getvalue()
eq_("\nFatal error: Test\n\nAborting.\n", result)
|
<commit_before><commit_msg>Add basic tests for utils
Signed-off-by: Jeff Forcier <f3e731dfa293c7a83119d8aacfa41b5d2d780be9@bitprophet.org><commit_after>from fudge.patcher import with_patched_object
from functools import wraps
from nose.tools import eq_
from nose.tools import raises
from fabric.state import output
from fabric.utils import warn, indent, abort
import sys
from StringIO import StringIO
#
# Setup/teardown helpers and decorators
#
def mock_streams(*which):
"""
Replaces ``sys.stderr`` with a ``StringIO`` during the test, then restores
after.
Must specify which stream via string args, e.g.::
@mock_streams('stdout')
def func():
pass
@mock_streams('stderr')
def func():
pass
@mock_streams('stdout', 'stderr')
def func()
pass
"""
def mocked_streams_decorator(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if 'stdout' in which:
my_stdout, sys.stdout = sys.stdout, StringIO()
if 'stderr' in which:
my_stderr, sys.stderr = sys.stderr, StringIO()
result = func(*args, **kwargs)
if 'stderr' in which:
sys.stderr = my_stderr
if 'stdout' in which:
sys.stdout = my_stdout
return result
return inner_wrapper
return mocked_streams_decorator
@mock_streams('stderr')
@with_patched_object(output, 'warnings', True)
def test_warn():
warn("Test")
result = sys.stderr.getvalue()
assert "\nWarning: Test\n\n" == result
def test_indent():
for description, input, output in (
("Sanity check: 1 line string",
'Test', ' Test'),
("List of strings turns in to strings joined by \n",
["Test", "Test"], ' Test\n Test'),
):
eq_.description = description
yield eq_, indent(input), output
del eq_.description
def test_indent_with_strip():
for description, input, output in (
("Sanity check: 1 line string",
indent('Test', strip=True), ' Test'),
("Check list of strings",
indent(["Test", "Test"], strip=True), ' Test\n Test'),
("Check list of strings",
indent([" Test", " Test"], strip=True), ' Test\n Test'),
):
eq_.description = description
yield eq_, input, output
del eq_.description
@mock_streams('stderr')
@raises(SystemExit)
def test_abort():
"""
Abort should raise SystemExit
"""
abort("Test")
@mock_streams('stderr')
@with_patched_object(output, 'aborts', True)
def test_abort_message():
try:
abort("Test")
except SystemExit:
pass
result = sys.stderr.getvalue()
eq_("\nFatal error: Test\n\nAborting.\n", result)
|
|
e2b5df2501571b51e4a37ee5b7c7f16ededd5995
|
astm/constants.py
|
astm/constants.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: :mod:`astm.protocol` base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: ASTM specification base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
|
Fix description about global ENCODING.
|
Fix description about global ENCODING.
|
Python
|
bsd-3-clause
|
tectronics/python-astm,Iskander1b/python-astm,eddiep1101/python-astm,andrexmd/python-astm,pombreda/python-astm,kxepal/python-astm,LogicalKnight/python-astm,123412345/python-astm,tinoshot/python-astm,MarcosHaenisch/python-astm,Alwnikrotikz/python-astm,asingla87/python-astm,mhaulo/python-astm,AlanZatarain/python-astm,briankip/python-astm,kxepal/python-astm
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: :mod:`astm.protocol` base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
Fix description about global ENCODING.
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: ASTM specification base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
|
<commit_before># -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: :mod:`astm.protocol` base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
<commit_msg>Fix description about global ENCODING.<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: ASTM specification base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: :mod:`astm.protocol` base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
Fix description about global ENCODING.# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: ASTM specification base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
|
<commit_before># -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: :mod:`astm.protocol` base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
<commit_msg>Fix description about global ENCODING.<commit_after># -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
#: ASTM specification base encoding.
ENCODING = 'latin-1'
#: Message start token.
STX = b'\x02'
#: Message end token.
ETX = b'\x03'
#: ASTM session termination token.
EOT = b'\x04'
#: ASTM session initialization token.
ENQ = b'\x05'
#: Command accepted token.
ACK = b'\x06'
#: Command rejected token.
NAK = b'\x15'
#: Message chunk end token.
ETB = b'\x17'
LF = b'\x0A'
CR = b'\x0D'
#: CR + LF shortcut.
CRLF = CR + LF
#: Message records delimeter.
RECORD_SEP = b'\x0D' # \r #
#: Record fields delimeter.
FIELD_SEP = b'\x7C' # | #
#: Delimeter for repeated fields.
REPEAT_SEP = b'\x5C' # \ #
#: Field components delimeter.
COMPONENT_SEP = b'\x5E' # ^ #
#: Date escape token.
ESCAPE_SEP = b'\x26' # & #
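A minimal illustrative sketch (not part of the python-astm commit) of how the separator constants above can be applied; split_record is a hypothetical helper name and the sample record is made up:
def split_record(record):
    # Split one decoded ASTM record (bytes, without STX/ETX framing) into fields,
    # and each field into its components.
    fields = record.split(FIELD_SEP)
    return [field.split(COMPONENT_SEP) for field in fields]
# Hypothetical usage:
# split_record(b'P|1|PID123^Smith')
# -> [[b'P'], [b'1'], [b'PID123', b'Smith']]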
|
8c0689decb6953f04b059f9b7838c69f6d41c8b0
|
drudge/drs.py
|
drudge/drs.py
|
"""Support for drudge scripts."""
import collections
from sympy import Symbol, Indexed, IndexedBase
#
# Special classes for SymPy objects
# ---------------------------------
#
class DrsSymbol(Symbol):
"""Symbols used in drudge scripts.
The drudge symbol needs to behave as similar to the actual symbol as
possible, because it is possible that they are used for keys in
dictionaries.
"""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, name):
"""Create a symbol object."""
symb = super().__new__(cls, name)
return symb
def __init__(self, drudge, name):
"""Initialize the symbol object."""
self._drudge = drudge
self._orig = Symbol(name)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Symbol.class_key()
def __getitem__(self, indices):
"""Index the given symbol.
In drudge scripts, all symbols are by itself indexed bases.
"""
base = IndexedBase(self._orig)
if isinstance(indices, collections.Sequence):
return DrsIndexed(self._drudge, base, *indices)
else:
return DrsIndexed(self._drudge, base, indices)
def __iter__(self):
"""Disable iterability of the symbol.
Or a default implementation from ``__getitem__`` will be used,
which makes the symbols unable to be used as subscripts for indexed
objects.
"""
raise TypeError('Drudge script symbol cannot be iterated over.')
class DrsIndexed(Indexed):
"""Indexed objects for drudge scripts."""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, base, *args, **kwargs):
"""Create an indexed object for drudge scripts."""
indexed = super().__new__(cls, base, *args, **kwargs)
return indexed
def __init__(self, drudge, base, *args, **kwargs):
"""Initialize the indexed object."""
self._drudge = drudge
self._orig = Indexed(base, *args, **kwargs)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Indexed.class_key()
|
Add utility SymPy subclasses for drudge scripts
|
Add utility SymPy subclasses for drudge scripts
These two subclasses are going to be used for SymPy objects in drudge
scripts, especially used as the objects for unresolved names.
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add utility SymPy subclasses for drudge scripts
These two subclasses are going to be used for SymPy objects in drudge
scripts, especially used as the objects for unresolved names.
|
"""Support for drudge scripts."""
import collections
from sympy import Symbol, Indexed, IndexedBase
#
# Special classes for SymPy objects
# ---------------------------------
#
class DrsSymbol(Symbol):
"""Symbols used in drudge scripts.
The drudge symbol needs to behave as similar to the actual symbol as
possible, because it is possible that they are used for keys in
dictionaries.
"""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, name):
"""Create a symbol object."""
symb = super().__new__(cls, name)
return symb
def __init__(self, drudge, name):
"""Initialize the symbol object."""
self._drudge = drudge
self._orig = Symbol(name)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Symbol.class_key()
def __getitem__(self, indices):
"""Index the given symbol.
In drudge scripts, all symbols are by itself indexed bases.
"""
base = IndexedBase(self._orig)
if isinstance(indices, collections.Sequence):
return DrsIndexed(self._drudge, base, *indices)
else:
return DrsIndexed(self._drudge, base, indices)
def __iter__(self):
"""Disable iterability of the symbol.
Or a default implementation from ``__getitem__`` will be used,
which makes the symbols unable to be used as subscripts for indexed
objects.
"""
raise TypeError('Drudge script symbol cannot be iterated over.')
class DrsIndexed(Indexed):
"""Indexed objects for drudge scripts."""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, base, *args, **kwargs):
"""Create an indexed object for drudge scripts."""
indexed = super().__new__(cls, base, *args, **kwargs)
return indexed
def __init__(self, drudge, base, *args, **kwargs):
"""Initialize the indexed object."""
self._drudge = drudge
self._orig = Indexed(base, *args, **kwargs)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Indexed.class_key()
|
<commit_before><commit_msg>Add utility SymPy subclasses for drudge scripts
These two subclasses are going to be used for SymPy objects in drudge
scripts, especially used as the objects for unresolved names.<commit_after>
|
"""Support for drudge scripts."""
import collections
from sympy import Symbol, Indexed, IndexedBase
#
# Special classes for SymPy objects
# ---------------------------------
#
class DrsSymbol(Symbol):
"""Symbols used in drudge scripts.
The drudge symbol needs to behave as similar to the actual symbol as
possible, because it is possible that they are used for keys in
dictionaries.
"""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, name):
"""Create a symbol object."""
symb = super().__new__(cls, name)
return symb
def __init__(self, drudge, name):
"""Initialize the symbol object."""
self._drudge = drudge
self._orig = Symbol(name)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Symbol.class_key()
def __getitem__(self, indices):
"""Index the given symbol.
In drudge scripts, all symbols are by itself indexed bases.
"""
base = IndexedBase(self._orig)
if isinstance(indices, collections.Sequence):
return DrsIndexed(self._drudge, base, *indices)
else:
return DrsIndexed(self._drudge, base, indices)
def __iter__(self):
"""Disable iterability of the symbol.
Or a default implementation from ``__getitem__`` will be used,
which makes the symbols unable to be used as subscripts for indexed
objects.
"""
raise TypeError('Drudge script symbol cannot be iterated over.')
class DrsIndexed(Indexed):
"""Indexed objects for drudge scripts."""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, base, *args, **kwargs):
"""Create an indexed object for drudge scripts."""
indexed = super().__new__(cls, base, *args, **kwargs)
return indexed
def __init__(self, drudge, base, *args, **kwargs):
"""Initialize the indexed object."""
self._drudge = drudge
self._orig = Indexed(base, *args, **kwargs)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Indexed.class_key()
|
Add utility SymPy subclasses for drudge scripts
These two subclasses are going to be used for SymPy objects in drudge
scripts, especially used as the objects for unresolved names."""Support for drudge scripts."""
import collections
from sympy import Symbol, Indexed, IndexedBase
#
# Special classes for SymPy objects
# ---------------------------------
#
class DrsSymbol(Symbol):
"""Symbols used in drudge scripts.
The drudge symbol needs to behave as similar to the actual symbol as
possible, because it is possible that they are used for keys in
dictionaries.
"""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, name):
"""Create a symbol object."""
symb = super().__new__(cls, name)
return symb
def __init__(self, drudge, name):
"""Initialize the symbol object."""
self._drudge = drudge
self._orig = Symbol(name)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Symbol.class_key()
def __getitem__(self, indices):
"""Index the given symbol.
In drudge scripts, all symbols are by itself indexed bases.
"""
base = IndexedBase(self._orig)
if isinstance(indices, collections.Sequence):
return DrsIndexed(self._drudge, base, *indices)
else:
return DrsIndexed(self._drudge, base, indices)
def __iter__(self):
"""Disable iterability of the symbol.
Or a default implementation from ``__getitem__`` will be used,
which makes the symbols unable to be used as subscripts for indexed
objects.
"""
raise TypeError('Drudge script symbol cannot be iterated over.')
class DrsIndexed(Indexed):
"""Indexed objects for drudge scripts."""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, base, *args, **kwargs):
"""Create an indexed object for drudge scripts."""
indexed = super().__new__(cls, base, *args, **kwargs)
return indexed
def __init__(self, drudge, base, *args, **kwargs):
"""Initialize the indexed object."""
self._drudge = drudge
self._orig = Indexed(base, *args, **kwargs)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Indexed.class_key()
|
<commit_before><commit_msg>Add utility SymPy subclasses for drudge scripts
These two subclasses are going to be used for SymPy objects in drudge
scripts, especially used as the objects for unresolved names.<commit_after>"""Support for drudge scripts."""
import collections
from sympy import Symbol, Indexed, IndexedBase
#
# Special classes for SymPy objects
# ---------------------------------
#
class DrsSymbol(Symbol):
"""Symbols used in drudge scripts.
The drudge symbol needs to behave as similar to the actual symbol as
possible, because it is possible that they are used for keys in
dictionaries.
"""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, name):
"""Create a symbol object."""
symb = super().__new__(cls, name)
return symb
def __init__(self, drudge, name):
"""Initialize the symbol object."""
self._drudge = drudge
self._orig = Symbol(name)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Symbol.class_key()
def __getitem__(self, indices):
"""Index the given symbol.
In drudge scripts, all symbols are by itself indexed bases.
"""
base = IndexedBase(self._orig)
if isinstance(indices, collections.Sequence):
return DrsIndexed(self._drudge, base, *indices)
else:
return DrsIndexed(self._drudge, base, indices)
def __iter__(self):
"""Disable iterability of the symbol.
Or a default implementation from ``__getitem__`` will be used,
which makes the symbols unable to be used as subscripts for indexed
objects.
"""
raise TypeError('Drudge script symbol cannot be iterated over.')
class DrsIndexed(Indexed):
"""Indexed objects for drudge scripts."""
__slots__ = [
'_drudge',
'_orig'
]
def __new__(cls, drudge, base, *args, **kwargs):
"""Create an indexed object for drudge scripts."""
indexed = super().__new__(cls, base, *args, **kwargs)
return indexed
def __init__(self, drudge, base, *args, **kwargs):
"""Initialize the indexed object."""
self._drudge = drudge
self._orig = Indexed(base, *args, **kwargs)
def __eq__(self, other):
"""Make equality comparison."""
return self._orig == other
def __hash__(self):
"""Compute the hash."""
return hash(self._orig)
def _hashable_content(self):
"""Hashable content for SymPy usages."""
return self._orig._hashable_content()
@classmethod
def class_key(cls):
return Indexed.class_key()
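A minimal sketch of how the two subclasses above might behave, purely illustrative; passing None where a real Drudge instance belongs is an assumption made only for this example:
from sympy import Indexed, IndexedBase, Symbol
s = DrsSymbol(None, 'x')                      # None stands in for an actual Drudge object
assert s == Symbol('x')                       # hashes and compares like the plain symbol
elem = s[1, 2]                                # indexing yields a DrsIndexed over IndexedBase('x')
assert elem == Indexed(IndexedBase('x'), 1, 2)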
|
|
4ca362dcdc2da9ad56d837db6be9104af19dd163
|
scripts/create_extra_users.py
|
scripts/create_extra_users.py
|
import requests
from requests.auth import HTTPBasicAuth
users = [
{
"firstName": "Alicja",
"lastName": "Smith",
"login": "ala123",
"email": "ala@localhost.com",
"admin": False,
"password": "password",
},
{
"firstName": "Barbara",
"lastName": "Smith",
"login": "basia",
"email": "basia@localhost.com",
"admin": False,
"password": "password",
},
]
headers = {"Content-Type": "application/json", "Accept": "application/json"}
api_url = "https://girder.local.wholetale.org/api/v1"
for user in users:
try:
r = requests.post(api_url + "/user", params=user, headers=headers)
r.raise_for_status()
except requests.HTTPError:
if r.status_code == 400:
r = requests.get(
api_url + "/user/authentication",
auth=HTTPBasicAuth(user["login"], user["password"]),
)
r.raise_for_status()
else:
raise
except Exception:
print("Girder is no longer running")
raise
token = r.json()["authToken"]["token"]
print(f"https://dashboard.local.wholetale.org/login?token={token} # {user['login']}")
|
Add missing script for extra users
|
Add missing script for extra users
|
Python
|
mit
|
whole-tale/girder_deploy,whole-tale/girder_deploy
|
Add missing script for extra users
|
import requests
from requests.auth import HTTPBasicAuth
users = [
{
"firstName": "Alicja",
"lastName": "Smith",
"login": "ala123",
"email": "ala@localhost.com",
"admin": False,
"password": "password",
},
{
"firstName": "Barbara",
"lastName": "Smith",
"login": "basia",
"email": "basia@localhost.com",
"admin": False,
"password": "password",
},
]
headers = {"Content-Type": "application/json", "Accept": "application/json"}
api_url = "https://girder.local.wholetale.org/api/v1"
for user in users:
try:
r = requests.post(api_url + "/user", params=user, headers=headers)
r.raise_for_status()
except requests.HTTPError:
if r.status_code == 400:
r = requests.get(
api_url + "/user/authentication",
auth=HTTPBasicAuth(user["login"], user["password"]),
)
r.raise_for_status()
else:
raise
except Exception:
print("Girder is no longer running")
raise
token = r.json()["authToken"]["token"]
print(f"https://dashboard.local.wholetale.org/login?token={token} # {user['login']}")
|
<commit_before><commit_msg>Add missing script for extra users<commit_after>
|
import requests
from requests.auth import HTTPBasicAuth
users = [
{
"firstName": "Alicja",
"lastName": "Smith",
"login": "ala123",
"email": "ala@localhost.com",
"admin": False,
"password": "password",
},
{
"firstName": "Barbara",
"lastName": "Smith",
"login": "basia",
"email": "basia@localhost.com",
"admin": False,
"password": "password",
},
]
headers = {"Content-Type": "application/json", "Accept": "application/json"}
api_url = "https://girder.local.wholetale.org/api/v1"
for user in users:
try:
r = requests.post(api_url + "/user", params=user, headers=headers)
r.raise_for_status()
except requests.HTTPError:
if r.status_code == 400:
r = requests.get(
api_url + "/user/authentication",
auth=HTTPBasicAuth(user["login"], user["password"]),
)
r.raise_for_status()
else:
raise
except Exception:
print("Girder is no longer running")
raise
token = r.json()["authToken"]["token"]
print(f"https://dashboard.local.wholetale.org/login?token={token} # {user['login']}")
|
Add missing script for extra usersimport requests
from requests.auth import HTTPBasicAuth
users = [
{
"firstName": "Alicja",
"lastName": "Smith",
"login": "ala123",
"email": "ala@localhost.com",
"admin": False,
"password": "password",
},
{
"firstName": "Barbara",
"lastName": "Smith",
"login": "basia",
"email": "basia@localhost.com",
"admin": False,
"password": "password",
},
]
headers = {"Content-Type": "application/json", "Accept": "application/json"}
api_url = "https://girder.local.wholetale.org/api/v1"
for user in users:
try:
r = requests.post(api_url + "/user", params=user, headers=headers)
r.raise_for_status()
except requests.HTTPError:
if r.status_code == 400:
r = requests.get(
api_url + "/user/authentication",
auth=HTTPBasicAuth(user["login"], user["password"]),
)
r.raise_for_status()
else:
raise
except Exception:
print("Girder is no longer running")
raise
token = r.json()["authToken"]["token"]
print(f"https://dashboard.local.wholetale.org/login?token={token} # {user['login']}")
|
<commit_before><commit_msg>Add missing script for extra users<commit_after>import requests
from requests.auth import HTTPBasicAuth
users = [
{
"firstName": "Alicja",
"lastName": "Smith",
"login": "ala123",
"email": "ala@localhost.com",
"admin": False,
"password": "password",
},
{
"firstName": "Barbara",
"lastName": "Smith",
"login": "basia",
"email": "basia@localhost.com",
"admin": False,
"password": "password",
},
]
headers = {"Content-Type": "application/json", "Accept": "application/json"}
api_url = "https://girder.local.wholetale.org/api/v1"
for user in users:
try:
r = requests.post(api_url + "/user", params=user, headers=headers)
r.raise_for_status()
except requests.HTTPError:
if r.status_code == 400:
r = requests.get(
api_url + "/user/authentication",
auth=HTTPBasicAuth(user["login"], user["password"]),
)
r.raise_for_status()
else:
raise
except Exception:
print("Girder is no longer running")
raise
token = r.json()["authToken"]["token"]
print(f"https://dashboard.local.wholetale.org/login?token={token} # {user['login']}")
|
|
0be39cc9f97f69d8841d0cb8bc402881858ac5d8
|
migrations/versions/0144_template_service_letter.py
|
migrations/versions/0144_template_service_letter.py
|
"""
Revision ID: 0144_template_service_letter
Revises: 0143_remove_reply_to
Create Date: 2017-11-17 15:42:16.401229
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0144_template_service_letter'
down_revision = '0143_remove_reply_to'
def upgrade():
op.add_column('templates',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_service_letter_contact_id_fkey', 'templates',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
op.add_column('templates_history',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_history_service_letter_contact_id_fkey', 'templates_history',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
def downgrade():
op.drop_constraint('templates_service_letter_contact_id_fkey', 'templates', type_='foreignkey')
op.drop_column('templates', 'service_letter_contact_id')
op.drop_constraint('templates_history_service_letter_contact_id_fkey', 'templates_history', type_='foreignkey')
op.drop_column('templates_history', 'service_letter_contact_id')
|
Add a migration to add template service_letter_contact_id columns
|
Add a migration to add template service_letter_contact_id columns
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add a migration to add template service_letter_contact_id columns
|
"""
Revision ID: 0144_template_service_letter
Revises: 0143_remove_reply_to
Create Date: 2017-11-17 15:42:16.401229
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0144_template_service_letter'
down_revision = '0143_remove_reply_to'
def upgrade():
op.add_column('templates',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_service_letter_contact_id_fkey', 'templates',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
op.add_column('templates_history',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_history_service_letter_contact_id_fkey', 'templates_history',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
def downgrade():
op.drop_constraint('templates_service_letter_contact_id_fkey', 'templates', type_='foreignkey')
op.drop_column('templates', 'service_letter_contact_id')
op.drop_constraint('templates_history_service_letter_contact_id_fkey', 'templates_history', type_='foreignkey')
op.drop_column('templates_history', 'service_letter_contact_id')
|
<commit_before><commit_msg>Add a migration to add template service_letter_contact_id columns<commit_after>
|
"""
Revision ID: 0144_template_service_letter
Revises: 0143_remove_reply_to
Create Date: 2017-11-17 15:42:16.401229
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0144_template_service_letter'
down_revision = '0143_remove_reply_to'
def upgrade():
op.add_column('templates',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_service_letter_contact_id_fkey', 'templates',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
op.add_column('templates_history',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_history_service_letter_contact_id_fkey', 'templates_history',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
def downgrade():
op.drop_constraint('templates_service_letter_contact_id_fkey', 'templates', type_='foreignkey')
op.drop_column('templates', 'service_letter_contact_id')
op.drop_constraint('templates_history_service_letter_contact_id_fkey', 'templates_history', type_='foreignkey')
op.drop_column('templates_history', 'service_letter_contact_id')
|
Add a migration to add template service_letter_contact_id columns"""
Revision ID: 0144_template_service_letter
Revises: 0143_remove_reply_to
Create Date: 2017-11-17 15:42:16.401229
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0144_template_service_letter'
down_revision = '0143_remove_reply_to'
def upgrade():
op.add_column('templates',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_service_letter_contact_id_fkey', 'templates',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
op.add_column('templates_history',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_history_service_letter_contact_id_fkey', 'templates_history',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
def downgrade():
op.drop_constraint('templates_service_letter_contact_id_fkey', 'templates', type_='foreignkey')
op.drop_column('templates', 'service_letter_contact_id')
op.drop_constraint('templates_history_service_letter_contact_id_fkey', 'templates_history', type_='foreignkey')
op.drop_column('templates_history', 'service_letter_contact_id')
|
<commit_before><commit_msg>Add a migration to add template service_letter_contact_id columns<commit_after>"""
Revision ID: 0144_template_service_letter
Revises: 0143_remove_reply_to
Create Date: 2017-11-17 15:42:16.401229
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0144_template_service_letter'
down_revision = '0143_remove_reply_to'
def upgrade():
op.add_column('templates',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_service_letter_contact_id_fkey', 'templates',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
op.add_column('templates_history',
sa.Column('service_letter_contact_id', postgresql.UUID(as_uuid=True), nullable=True))
op.create_foreign_key('templates_history_service_letter_contact_id_fkey', 'templates_history',
'service_letter_contacts', ['service_letter_contact_id'], ['id'])
def downgrade():
op.drop_constraint('templates_service_letter_contact_id_fkey', 'templates', type_='foreignkey')
op.drop_column('templates', 'service_letter_contact_id')
op.drop_constraint('templates_history_service_letter_contact_id_fkey', 'templates_history', type_='foreignkey')
op.drop_column('templates_history', 'service_letter_contact_id')
|
|
ebb9c3310fa6a1362116a040409b93c2eb756f0d
|
tests/test_memoty_leak.py
|
tests/test_memoty_leak.py
|
import weakref
from tests import BaseTestCase
class TestCase(BaseTestCase):
async def test_on_successful_cleanup_message(self):
rabbitmq_connection = await self.create_connection()
weakset = weakref.WeakSet()
async def f(rabbitmq_connection, weakset):
async with rabbitmq_connection.channel() as channel:
weakset.add(channel)
async with rabbitmq_connection:
for i in range(5):
await f(rabbitmq_connection, weakset)
self.assertEqual(len(weakset), 0)
|
Add test on memory leak
|
Add test on memory leak
|
Python
|
apache-2.0
|
mosquito/aio-pika
|
Add test on memory leak
|
import weakref
from tests import BaseTestCase
class TestCase(BaseTestCase):
async def test_on_successful_cleanup_message(self):
rabbitmq_connection = await self.create_connection()
weakset = weakref.WeakSet()
async def f(rabbitmq_connection, weakset):
async with rabbitmq_connection.channel() as channel:
weakset.add(channel)
async with rabbitmq_connection:
for i in range(5):
await f(rabbitmq_connection, weakset)
self.assertEqual(len(weakset), 0)
|
<commit_before><commit_msg>Add test on memory leak<commit_after>
|
import weakref
from tests import BaseTestCase
class TestCase(BaseTestCase):
async def test_on_successful_cleanup_message(self):
rabbitmq_connection = await self.create_connection()
weakset = weakref.WeakSet()
async def f(rabbitmq_connection, weakset):
async with rabbitmq_connection.channel() as channel:
weakset.add(channel)
async with rabbitmq_connection:
for i in range(5):
await f(rabbitmq_connection, weakset)
self.assertEqual(len(weakset), 0)
|
Add test on memory leakimport weakref
from tests import BaseTestCase
class TestCase(BaseTestCase):
async def test_on_successful_cleanup_message(self):
rabbitmq_connection = await self.create_connection()
weakset = weakref.WeakSet()
async def f(rabbitmq_connection, weakset):
async with rabbitmq_connection.channel() as channel:
weakset.add(channel)
async with rabbitmq_connection:
for i in range(5):
await f(rabbitmq_connection, weakset)
self.assertEqual(len(weakset), 0)
|
<commit_before><commit_msg>Add test on memory leak<commit_after>import weakref
from tests import BaseTestCase
class TestCase(BaseTestCase):
async def test_on_successful_cleanup_message(self):
rabbitmq_connection = await self.create_connection()
weakset = weakref.WeakSet()
async def f(rabbitmq_connection, weakset):
async with rabbitmq_connection.channel() as channel:
weakset.add(channel)
async with rabbitmq_connection:
for i in range(5):
await f(rabbitmq_connection, weakset)
self.assertEqual(len(weakset), 0)
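One hedged caveat the assertion above relies on: CPython drops the channels promptly via reference counting, but on other interpreters an explicit collection may be needed before checking the WeakSet. A minimal sketch of that variant (an assumption, not part of the committed test):
# Variant lines that would sit inside the test method just before the assertion:
import gc
gc.collect()                     # force a collection before inspecting weak references
self.assertEqual(len(weakset), 0)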
|
|
8171734723ddb7203f916191253c30844757176b
|
setup_cuda_deps.py
|
setup_cuda_deps.py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='chainer-cuda-deps',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=[],
install_requires=[
'pycuda>=2014.1',
'scikits.cuda>=0.5.0b1,!=0.042',
'Mako',
'six>=1.9.0',
],
)
|
Add setup.py for cuda dependency
|
Add setup.py for cuda dependency
|
Python
|
mit
|
jnishi/chainer,truongdq/chainer,keisuke-umezawa/chainer,bayerj/chainer,sinhrks/chainer,okuta/chainer,minhpqn/chainer,muupan/chainer,wkentaro/chainer,keisuke-umezawa/chainer,hvy/chainer,wkentaro/chainer,kiyukuta/chainer,ikasumi/chainer,pfnet/chainer,chainer/chainer,ktnyt/chainer,cupy/cupy,okuta/chainer,woodshop/complex-chainer,t-abe/chainer,kuwa32/chainer,hvy/chainer,yanweifu/chainer,niboshi/chainer,tereka114/chainer,tkerola/chainer,t-abe/chainer,niboshi/chainer,chainer/chainer,Kaisuke5/chainer,chainer/chainer,jnishi/chainer,cemoody/chainer,elviswf/chainer,ysekky/chainer,okuta/chainer,AlpacaDB/chainer,rezoo/chainer,sinhrks/chainer,aonotas/chainer,keisuke-umezawa/chainer,AlpacaDB/chainer,jfsantos/chainer,ktnyt/chainer,wkentaro/chainer,laysakura/chainer,benob/chainer,masia02/chainer,kikusu/chainer,hvy/chainer,muupan/chainer,tscohen/chainer,ytoyama/yans_chainer_hackathon,anaruse/chainer,hvy/chainer,tigerneil/chainer,kikusu/chainer,keisuke-umezawa/chainer,niboshi/chainer,cupy/cupy,jnishi/chainer,truongdq/chainer,ronekko/chainer,ktnyt/chainer,benob/chainer,wkentaro/chainer,niboshi/chainer,cupy/cupy,woodshop/chainer,ktnyt/chainer,chainer/chainer,wavelets/chainer,hidenori-t/chainer,1986ks/chainer,sou81821/chainer,delta2323/chainer,cupy/cupy,okuta/chainer,jnishi/chainer,kashif/chainer,umitanuki/chainer
|
Add setup.py for cuda dependency
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='chainer-cuda-deps',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=[],
install_requires=[
'pycuda>=2014.1',
'scikits.cuda>=0.5.0b1,!=0.042',
'Mako',
'six>=1.9.0',
],
)
|
<commit_before><commit_msg>Add setup.py for cuda dependency<commit_after>
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='chainer-cuda-deps',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=[],
install_requires=[
'pycuda>=2014.1',
'scikits.cuda>=0.5.0b1,!=0.042',
'Mako',
'six>=1.9.0',
],
)
|
Add setup.py for cuda dependency#!/usr/bin/env python
from setuptools import setup
setup(
name='chainer-cuda-deps',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=[],
install_requires=[
'pycuda>=2014.1',
'scikits.cuda>=0.5.0b1,!=0.042',
'Mako',
'six>=1.9.0',
],
)
|
<commit_before><commit_msg>Add setup.py for cuda dependency<commit_after>#!/usr/bin/env python
from setuptools import setup
setup(
name='chainer-cuda-deps',
version='1.0.0',
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='tokui@preferred.jp',
url='http://chainer.org/',
packages=[],
install_requires=[
'pycuda>=2014.1',
'scikits.cuda>=0.5.0b1,!=0.042',
'Mako',
'six>=1.9.0',
],
)
|
|
4a2456f518294ecc14088a0a5ef225002fa79058
|
test/test_accuracyIndicators.py
|
test/test_accuracyIndicators.py
|
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_accuracyIndicators():
hoursPerPeriod = 24
noTypicalPeriods = 8
raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
starttime = time.time()
aggregation1 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical')
print('Clustering took ' + str(time.time() - starttime))
starttime = time.time()
aggregation2 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical', sortValues=True)
print('Clustering took ' + str(time.time() - starttime))
# make sure, that the sum of the attribute specific RMSEs is smaller for the normal time series clustering than for
# the duration curve clustering
np.testing.assert_array_less(aggregation1.accuracyIndicators().loc[:, 'RMSE'].sum(),
aggregation2.accuracyIndicators().loc[:, 'RMSE'].sum())
# make sure, that the sum of the attribute specific duration curve RMSEs is smaller for the duration curve
# clustering than for the normal time series clustering
np.testing.assert_array_less(aggregation2.accuracyIndicators().loc[:, 'RMSE_duration'].sum(),
aggregation1.accuracyIndicators().loc[:, 'RMSE_duration'].sum())
if __name__ == "__main__":
test_accuracyIndicators()
|
Test for accuracy indicators implemented
|
Test for accuracy indicators implemented
|
Python
|
mit
|
FZJ-IEK3-VSA/tsam
|
Test for accuracy indicators implemented
|
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_accuracyIndicators():
hoursPerPeriod = 24
noTypicalPeriods = 8
raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
starttime = time.time()
aggregation1 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical')
print('Clustering took ' + str(time.time() - starttime))
starttime = time.time()
aggregation2 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical', sortValues=True)
print('Clustering took ' + str(time.time() - starttime))
# make sure, that the sum of the attribute specific RMSEs is smaller for the normal time series clustering than for
# the duration curve clustering
np.testing.assert_array_less(aggregation1.accuracyIndicators().loc[:, 'RMSE'].sum(),
aggregation2.accuracyIndicators().loc[:, 'RMSE'].sum())
# make sure, that the sum of the attribute specific duration curve RMSEs is smaller for the duration curve
# clustering than for the normal time series clustering
np.testing.assert_array_less(aggregation2.accuracyIndicators().loc[:, 'RMSE_duration'].sum(),
aggregation1.accuracyIndicators().loc[:, 'RMSE_duration'].sum())
if __name__ == "__main__":
test_accuracyIndicators()
|
<commit_before><commit_msg>Test for accuracy indicators implemented<commit_after>
|
import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_accuracyIndicators():
hoursPerPeriod = 24
noTypicalPeriods = 8
raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
starttime = time.time()
aggregation1 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical')
print('Clustering took ' + str(time.time() - starttime))
starttime = time.time()
aggregation2 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical', sortValues=True)
print('Clustering took ' + str(time.time() - starttime))
# make sure, that the sum of the attribute specific RMSEs is smaller for the normal time series clustering than for
# the duration curve clustering
np.testing.assert_array_less(aggregation1.accuracyIndicators().loc[:, 'RMSE'].sum(),
aggregation2.accuracyIndicators().loc[:, 'RMSE'].sum())
# make sure, that the sum of the attribute specific duration curve RMSEs is smaller for the duration curve
# clustering than for the normal time series clustering
np.testing.assert_array_less(aggregation2.accuracyIndicators().loc[:, 'RMSE_duration'].sum(),
aggregation1.accuracyIndicators().loc[:, 'RMSE_duration'].sum())
if __name__ == "__main__":
test_accuracyIndicators()
|
Test for accuracy indicators implementedimport os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_accuracyIndicators():
hoursPerPeriod = 24
noTypicalPeriods = 8
raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
starttime = time.time()
aggregation1 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical')
print('Clustering took ' + str(time.time() - starttime))
starttime = time.time()
aggregation2 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical', sortValues=True)
print('Clustering took ' + str(time.time() - starttime))
# make sure, that the sum of the attribute specific RMSEs is smaller for the normal time series clustering than for
# the duration curve clustering
np.testing.assert_array_less(aggregation1.accuracyIndicators().loc[:, 'RMSE'].sum(),
aggregation2.accuracyIndicators().loc[:, 'RMSE'].sum())
# make sure, that the sum of the attribute specific duration curve RMSEs is smaller for the duration curve
# clustering than for the normal time series clustering
np.testing.assert_array_less(aggregation2.accuracyIndicators().loc[:, 'RMSE_duration'].sum(),
aggregation1.accuracyIndicators().loc[:, 'RMSE_duration'].sum())
if __name__ == "__main__":
test_accuracyIndicators()
|
<commit_before><commit_msg>Test for accuracy indicators implemented<commit_after>import os
import time
import pandas as pd
import numpy as np
import tsam.timeseriesaggregation as tsam
def test_accuracyIndicators():
hoursPerPeriod = 24
noTypicalPeriods = 8
raw = pd.read_csv(os.path.join(os.path.dirname(__file__),'..','examples','testdata.csv'), index_col = 0)
starttime = time.time()
aggregation1 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical')
print('Clustering took ' + str(time.time() - starttime))
starttime = time.time()
aggregation2 = tsam.TimeSeriesAggregation(raw, noTypicalPeriods=noTypicalPeriods, hoursPerPeriod=hoursPerPeriod,
clusterMethod='hierarchical', sortValues=True)
print('Clustering took ' + str(time.time() - starttime))
# make sure, that the sum of the attribute specific RMSEs is smaller for the normal time series clustering than for
# the duration curve clustering
np.testing.assert_array_less(aggregation1.accuracyIndicators().loc[:, 'RMSE'].sum(),
aggregation2.accuracyIndicators().loc[:, 'RMSE'].sum())
# make sure, that the sum of the attribute specific duration curve RMSEs is smaller for the duration curve
# clustering than for the normal time series clustering
np.testing.assert_array_less(aggregation2.accuracyIndicators().loc[:, 'RMSE_duration'].sum(),
aggregation1.accuracyIndicators().loc[:, 'RMSE_duration'].sum())
if __name__ == "__main__":
test_accuracyIndicators()
|
|
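The test above leans on numpy's testing helper instead of a bare assert, so a failure reports the offending values. A minimal, stand-alone illustration of how assert_array_less behaves (not part of the test file):
import numpy as np
# Passes silently: the first argument is strictly less than the second.
np.testing.assert_array_less(1.0, 2.0)
# Fails: assert_array_less raises AssertionError with a diagnostic message.
try:
    np.testing.assert_array_less(2.0, 1.0)
except AssertionError as exc:
    print(exc)
Applied to the aggregation results, this means the test fails with a readable message whenever the expected ordering of the summed RMSEs between the two clustering variants is violated.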
475b1755164f9ccd9b855447fe9f26a59bbaed99
|
examples/client_json.py
|
examples/client_json.py
|
import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/get')
resp = yield from session.get(
'http://httpbin.org/get')
print(resp.status)
try:
data = yield from resp.json()
print(data)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
Add example for retrieving json data
|
Add example for retrieving json data
|
Python
|
apache-2.0
|
KeepSafe/aiohttp,singulared/aiohttp,mind1master/aiohttp,esaezgil/aiohttp,jettify/aiohttp,elastic-coders/aiohttp,esaezgil/aiohttp,decentfox/aiohttp,Srogozins/aiohttp,jashandeep-sohi/aiohttp,singulared/aiohttp,alex-eri/aiohttp-1,z2v/aiohttp,morgan-del/aiohttp,mind1master/aiohttp,playpauseandstop/aiohttp,vaskalas/aiohttp,panda73111/aiohttp,decentfox/aiohttp,vedun/aiohttp,noodle-learns-programming/aiohttp,rutsky/aiohttp,arthurdarcet/aiohttp,esaezgil/aiohttp,pfreixes/aiohttp,jettify/aiohttp,Eyepea/aiohttp,jashandeep-sohi/aiohttp,moden-py/aiohttp,panda73111/aiohttp,vaskalas/aiohttp,jashandeep-sohi/aiohttp,rutsky/aiohttp,arthurdarcet/aiohttp,elastic-coders/aiohttp,alex-eri/aiohttp-1,juliatem/aiohttp,vasylbo/aiohttp,KeepSafe/aiohttp,alex-eri/aiohttp-1,avanov/aiohttp,panda73111/aiohttp,vaskalas/aiohttp,moden-py/aiohttp,alunduil/aiohttp,KeepSafe/aiohttp,z2v/aiohttp,flying-sheep/aiohttp,z2v/aiohttp,mind1master/aiohttp,hellysmile/aiohttp,hellysmile/aiohttp,Insoleet/aiohttp,singulared/aiohttp,pfreixes/aiohttp,decentfox/aiohttp,moden-py/aiohttp,juliatem/aiohttp,elastic-coders/aiohttp,AraHaanOrg/aiohttp,pathcl/aiohttp,rutsky/aiohttp,jettify/aiohttp,AraHaanOrg/aiohttp,andyaguiar/aiohttp,sterwill/aiohttp,arthurdarcet/aiohttp,danielnelson/aiohttp
|
Add example for retrieving json data
|
import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/get')
resp = yield from session.get(
'http://httpbin.org/get')
print(resp.status)
try:
data = yield from resp.json()
print(data)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
<commit_before><commit_msg>Add example for retrieving json data<commit_after>
|
import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/get')
resp = yield from session.get(
'http://httpbin.org/get')
print(resp.status)
try:
data = yield from resp.json()
print(data)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
Add example for retrieving json dataimport aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/get')
resp = yield from session.get(
'http://httpbin.org/get')
print(resp.status)
try:
data = yield from resp.json()
print(data)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
<commit_before><commit_msg>Add example for retrieving json data<commit_after>import aiohttp
import asyncio
@asyncio.coroutine
def go(session):
print('Query http://httpbin.org/get')
resp = yield from session.get(
'http://httpbin.org/get')
print(resp.status)
try:
data = yield from resp.json()
print(data)
finally:
yield from resp.release()
loop = asyncio.get_event_loop()
session = aiohttp.ClientSession(loop=loop)
loop.run_until_complete(go(session))
session.close()
# run loop iteration for actual session closing
loop.stop()
loop.run_forever()
loop.close()
|
|
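The client example above uses the generator-based coroutine style (@asyncio.coroutine with yield from) and manual resp.release() plus explicit loop shutdown, which matches aiohttp at the time of the commit. On recent aiohttp and Python versions the same request is normally written with async/await and context managers; the following is a rough modern equivalent offered only for orientation, not part of the example:
import asyncio
import aiohttp
async def go():
    async with aiohttp.ClientSession() as session:
        async with session.get('http://httpbin.org/get') as resp:
            print(resp.status)
            data = await resp.json()
            print(data)
asyncio.run(go())
The context managers take care of releasing the response and closing the session, which is why the explicit cleanup steps from the original example disappear.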
b066ad2e8ceb26373b3baff67a455f10d23275ec
|
datastructures.py
|
datastructures.py
|
#!/usr/bin/env python3
class Triangle:
"""A class structure for storing and minipulating a triangle.
The trianlge is represented as a 3-tuple of points. Each point is
represented as a 2-tuple of floats, the first element being the
x-coordinate and the second element being the y-coordinate.
Several useful operations can be applied to a triangle such as, rotate,
translate, split across altitude, and rectanglify.
The Triangle (and underlying tuple) should be treated as an immutable
data structure. All methods return a new triangle and do not modify the
existing one."""
def __init__(self, tpl):
"""tpl is a 3-tuple of coordinates"""
self.points = tpl
class Shape:
"""A class structure for representing and minipulating arbitary shapes.
A shape is defines as a list of triangles (see Triangle). Several
operations can be applied to a shape such as rotation, translation and
splitting the shape into two.
This object should be treated as an immutable data structure. All methods
return new shapes and do not modify the existing one."""
def __init__(self, triangle_list):
"""triangle_list is a list of triangles"""
self.triangles = triangle_list
|
Add Triangle and Shape data structures
|
Add Triangle and Shape data structures
Added classes, Triangle and Shape, to represent triangles and shapes
respectively.
|
Python
|
mit
|
moyamo/polygon2square
|
Add Triangle and Shape data structures
Added classes, Triangle and Shape, to represent triangles and shapes
respectively.
|
#!/usr/bin/env python3
class Triangle:
"""A class structure for storing and minipulating a triangle.
The trianlge is represented as a 3-tuple of points. Each point is
represented as a 2-tuple of floats, the first element being the
x-coordinate and the second element being the y-coordinate.
Several useful operations can be applied to a triangle such as, rotate,
translate, split across altitude, and rectanglify.
The Triangle (and underlying tuple) should be treated as an immutable
data structure. All methods return a new triangle and do not modify the
existing one."""
def __init__(self, tpl):
"""tpl is a 3-tuple of coordinates"""
self.points = tpl
class Shape:
"""A class structure for representing and minipulating arbitary shapes.
A shape is defines as a list of triangles (see Triangle). Several
operations can be applied to a shape such as rotation, translation and
splitting the shape into two.
This object should be treated as an immutable data structure. All methods
return new shapes and do not modify the existing one."""
def __init__(self, triangle_list):
"""triangle_list is a list of triangles"""
self.triangles = triangle_list
|
<commit_before><commit_msg>Add Triangle and Shape data structures
Added classes, Triangle and Shape, to represent triangles and shapes
respectively.<commit_after>
|
#!/usr/bin/env python3
class Triangle:
"""A class structure for storing and minipulating a triangle.
The trianlge is represented as a 3-tuple of points. Each point is
represented as a 2-tuple of floats, the first element being the
x-coordinate and the second element being the y-coordinate.
Several useful operations can be applied to a triangle such as, rotate,
translate, split across altitude, and rectanglify.
The Triangle (and underlying tuple) should be treated as an immutable
data structure. All methods return a new triangle and do not modify the
existing one."""
def __init__(self, tpl):
"""tpl is a 3-tuple of coordinates"""
self.points = tpl
class Shape:
"""A class structure for representing and minipulating arbitary shapes.
A shape is defines as a list of triangles (see Triangle). Several
operations can be applied to a shape such as rotation, translation and
splitting the shape into two.
This object should be treated as an immutable data structure. All methods
return new shapes and do not modify the existing one."""
def __init__(self, triangle_list):
"""triangle_list is a list of triangles"""
self.triangles = triangle_list
|
Add Triangle and Shape data structures
Added classes, Triangle and Shape, to represent triangles and shapes
respectively.#!/usr/bin/env python3
class Triangle:
"""A class structure for storing and minipulating a triangle.
The trianlge is represented as a 3-tuple of points. Each point is
represented as a 2-tuple of floats, the first element being the
x-coordinate and the second element being the y-coordinate.
Several useful operations can be applied to a triangle such as, rotate,
translate, split across altitude, and rectanglify.
The Triangle (and underlying tuple) should be treated as an immutable
data structure. All methods return a new triangle and do not modify the
existing one."""
def __init__(self, tpl):
"""tpl is a 3-tuple of coordinates"""
self.points = tpl
class Shape:
"""A class structure for representing and minipulating arbitary shapes.
A shape is defines as a list of triangles (see Triangle). Several
operations can be applied to a shape such as rotation, translation and
splitting the shape into two.
This object should be treated as an immutable data structure. All methods
return new shapes and do not modify the existing one."""
def __init__(self, triangle_list):
"""triangle_list is a list of triangles"""
self.triangles = triangle_list
|
<commit_before><commit_msg>Add Triangle and Shape data structures
Added classes, Triangle and Shape, to represent triangles and shapes
respectively.<commit_after>#!/usr/bin/env python3
class Triangle:
"""A class structure for storing and minipulating a triangle.
The trianlge is represented as a 3-tuple of points. Each point is
represented as a 2-tuple of floats, the first element being the
x-coordinate and the second element being the y-coordinate.
Several useful operations can be applied to a triangle such as, rotate,
translate, split across altitude, and rectanglify.
The Triangle (and underlying tuple) should be treated as an immutable
data structure. All methods return a new triangle and do not modify the
existing one."""
def __init__(self, tpl):
"""tpl is a 3-tuple of coordinates"""
self.points = tpl
class Shape:
"""A class structure for representing and minipulating arbitary shapes.
A shape is defines as a list of triangles (see Triangle). Several
operations can be applied to a shape such as rotation, translation and
splitting the shape into two.
This object should be treated as an immutable data structure. All methods
return new shapes and do not modify the existing one."""
def __init__(self, triangle_list):
"""triangle_list is a list of triangles"""
self.triangles = triangle_list
|
|
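Because both constructors simply store what they are given, a short usage sketch is enough to show the intended data layout. This snippet is not part of the commit and assumes datastructures.py is importable from the current directory:
from datastructures import Triangle, Shape
# A right triangle with vertices at the origin, (1, 0) and (0, 1).
t = Triangle(((0.0, 0.0), (1.0, 0.0), (0.0, 1.0)))
# A shape is just a list of such triangles.
s = Shape([t])
print(t.points)          # ((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))
print(len(s.triangles))  # 1
The immutability promise in the docstrings is a convention rather than an enforced property: operations are expected to return new Triangle and Shape instances instead of mutating these attributes.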
8f8caf50f51225964e09f25224bb2782bce479a1
|
src/sentry/receivers/users.py
|
src/sentry/receivers/users.py
|
from __future__ import absolute_import, print_function
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
|
from __future__ import absolute_import, print_function
from django.db import router
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not router.allow_syncdb(db, User):
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
|
Support multi-db for user creation signal
|
Support multi-db for user creation signal
|
Python
|
bsd-3-clause
|
BuildingLink/sentry,jean/sentry,beeftornado/sentry,BuildingLink/sentry,jean/sentry,nicholasserra/sentry,ifduyue/sentry,jean/sentry,JamesMura/sentry,ifduyue/sentry,looker/sentry,fotinakis/sentry,looker/sentry,fotinakis/sentry,JamesMura/sentry,mitsuhiko/sentry,gencer/sentry,beeftornado/sentry,nicholasserra/sentry,looker/sentry,looker/sentry,beeftornado/sentry,JackDanger/sentry,mitsuhiko/sentry,mvaled/sentry,fotinakis/sentry,daevaorn/sentry,nicholasserra/sentry,alexm92/sentry,gencer/sentry,mvaled/sentry,ifduyue/sentry,alexm92/sentry,mvaled/sentry,mvaled/sentry,alexm92/sentry,zenefits/sentry,ifduyue/sentry,gencer/sentry,gencer/sentry,mvaled/sentry,mvaled/sentry,JamesMura/sentry,BuildingLink/sentry,ifduyue/sentry,zenefits/sentry,BuildingLink/sentry,zenefits/sentry,daevaorn/sentry,zenefits/sentry,gencer/sentry,BuildingLink/sentry,jean/sentry,zenefits/sentry,fotinakis/sentry,JackDanger/sentry,JamesMura/sentry,daevaorn/sentry,daevaorn/sentry,JackDanger/sentry,JamesMura/sentry,jean/sentry,looker/sentry
|
from __future__ import absolute_import, print_function
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
Support multi-db for user creation signal
|
from __future__ import absolute_import, print_function
from django.db import router
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not router.allow_syncdb(db, User):
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
|
<commit_before>from __future__ import absolute_import, print_function
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
<commit_msg>Support multi-db for user creation signal<commit_after>
|
from __future__ import absolute_import, print_function
from django.db import router
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not router.allow_syncdb(db, User):
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
|
from __future__ import absolute_import, print_function
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
Support multi-db for user creation signalfrom __future__ import absolute_import, print_function
from django.db import router
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not router.allow_syncdb(db, User):
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
|
<commit_before>from __future__ import absolute_import, print_function
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
<commit_msg>Support multi-db for user creation signal<commit_after>from __future__ import absolute_import, print_function
from django.db import router
from django.db.models.signals import post_syncdb
from sentry.models import User
def create_first_user(app, created_models, verbosity, db, **kwargs):
if User not in created_models:
return
if not router.allow_syncdb(db, User):
return
if not kwargs.get('interactive', True):
return
import click
if not click.confirm('\nWould you like to create a user account now?', default=True):
# Not using `abort=1` because we don't want to exit out from further execution
click.echo('\nRun `sentry createuser` to do this later.\n')
return
from sentry.runner import call_command
call_command('sentry.runner.commands.createuser.createuser')
post_syncdb.connect(
create_first_user,
dispatch_uid="create_first_user",
weak=False,
)
|
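The new router.allow_syncdb(db, User) guard defers to Django's multi-database router API: any configured router may veto table creation for a model on a given connection, and the signal handler now respects that decision instead of always prompting. As a purely illustrative sketch (not taken from the Sentry codebase), a project-level router that keeps these tables on the default connection could look like the following; allow_syncdb was the pre-Django-1.7 name of the hook that later became allow_migrate:
# Hypothetical router; it would be registered via DATABASE_ROUTERS in settings.
class DefaultOnlyRouter(object):
    def allow_syncdb(self, db, model):
        # Create tables for this app only on the 'default' connection;
        # returning None leaves the decision to other routers.
        if model._meta.app_label == 'sentry':
            return db == 'default'
        return None
With such a router in place, create_first_user only prompts when syncdb runs against a database that is actually allowed to hold the User table.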
3144a6fd412a43b41a1112b1c8e91fbb8bea3f8d
|
google-url-shortener.py
|
google-url-shortener.py
|
import json
import requests
import argparse
import sys
class GUrlShorten():
def __init__(self, key):
self.API_KEY = key
def google_url_shorten(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=' + self.API_KEY
payload = {'longUrl': url}
headers = {'content-type': 'application/json'}
r = requests.post(req_url, json=payload, headers=headers)
resp = json.loads(r.text)
return resp['id']
def google_url_expand(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url'
payload = {'key': self.API_KEY, 'shortUrl': url}
r = requests.get(req_url, params=payload)
resp = json.loads(r.text)
return resp['longUrl']
def main():
desc = "A python script to shorten or expand urls using Google Url Shortener API"
parser = argparse.ArgumentParser(description=desc, prog='GoogleUrlShortener.py')
parser.add_argument('-e', '--expand', action='store_true', help='Short Url will be expanded.')
parser.add_argument('-s', '--shorten', action='store_true', help='Long Url will be shortened.')
req_key = parser.add_argument_group("Required named arguments")
req_key.add_argument('-k', '--key', type=str, help='Browser API key. Get it from Google Developer Console', required=True)
req_key.add_argument('-u', '--url', type=str, help='URL which you want to shorten or expand.', required=True)
args = parser.parse_args()
if args.expand and args.shorten:
print('\n--expand/--shorten are mutually exclusive\n')
parser.parse_args(['--help'])
if not (args.shorten or args.expand):
print('\n--expand/--shorten is required\n')
parser.parse_args(['--help'])
else:
obj = GUrlShorten(args.key)
if args.expand:
print(obj.google_url_expand(args.url))
elif args.shorten:
print(obj.google_url_shorten(args.url))
else:
raise Exception('Magical Exception')
if __name__ == '__main__':
sys.exit(main())
|
Add google url shortener example
|
Add google url shortener example
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add google url shortener example
|
import json
import requests
import argparse
import sys
class GUrlShorten():
def __init__(self, key):
self.API_KEY = key
def google_url_shorten(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=' + self.API_KEY
payload = {'longUrl': url}
headers = {'content-type': 'application/json'}
r = requests.post(req_url, json=payload, headers=headers)
resp = json.loads(r.text)
return resp['id']
def google_url_expand(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url'
payload = {'key': self.API_KEY, 'shortUrl': url}
r = requests.get(req_url, params=payload)
resp = json.loads(r.text)
return resp['longUrl']
def main():
desc = "A python script to shorten or expand urls using Google Url Shortener API"
parser = argparse.ArgumentParser(description=desc, prog='GoogleUrlShortener.py')
parser.add_argument('-e', '--expand', action='store_true', help='Short Url will be expanded.')
parser.add_argument('-s', '--shorten', action='store_true', help='Long Url will be shortened.')
req_key = parser.add_argument_group("Required named arguments")
req_key.add_argument('-k', '--key', type=str, help='Browser API key. Get it from Google Developer Console', required=True)
req_key.add_argument('-u', '--url', type=str, help='URL which you want to shorten or expand.', required=True)
args = parser.parse_args()
if args.expand and args.shorten:
print('\n--expand/--shorten are mutually exclusive\n')
parser.parse_args(['--help'])
if not (args.shorten or args.expand):
print('\n--expand/--shorten is required\n')
parser.parse_args(['--help'])
else:
obj = GUrlShorten(args.key)
if args.expand:
print(obj.google_url_expand(args.url))
elif args.shorten:
print(obj.google_url_shorten(args.url))
else:
raise Exception('Magical Exception')
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add google url shortener example<commit_after>
|
import json
import requests
import argparse
import sys
class GUrlShorten():
def __init__(self, key):
self.API_KEY = key
def google_url_shorten(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=' + self.API_KEY
payload = {'longUrl': url}
headers = {'content-type': 'application/json'}
r = requests.post(req_url, json=payload, headers=headers)
resp = json.loads(r.text)
return resp['id']
def google_url_expand(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url'
payload = {'key': self.API_KEY, 'shortUrl': url}
r = requests.get(req_url, params=payload)
resp = json.loads(r.text)
return resp['longUrl']
def main():
desc = "A python script to shorten or expand urls using Google Url Shortener API"
parser = argparse.ArgumentParser(description=desc, prog='GoogleUrlShortener.py')
parser.add_argument('-e', '--expand', action='store_true', help='Short Url will be expanded.')
parser.add_argument('-s', '--shorten', action='store_true', help='Long Url will be shortened.')
req_key = parser.add_argument_group("Required named arguments")
req_key.add_argument('-k', '--key', type=str, help='Browser API key. Get it from Google Developer Console', required=True)
req_key.add_argument('-u', '--url', type=str, help='URL which you want to shorten or expand.', required=True)
args = parser.parse_args()
if args.expand and args.shorten:
print('\n--expand/--shorten are mutually exclusive\n')
parser.parse_args(['--help'])
if not (args.shorten or args.expand):
print('\n--expand/--shorten is required\n')
parser.parse_args(['--help'])
else:
obj = GUrlShorten(args.key)
if args.expand:
print(obj.google_url_expand(args.url))
elif args.shorten:
print(obj.google_url_shorten(args.url))
else:
raise Exception('Magical Exception')
if __name__ == '__main__':
sys.exit(main())
|
Add google url shortener exampleimport json
import requests
import argparse
import sys
class GUrlShorten():
def __init__(self, key):
self.API_KEY = key
def google_url_shorten(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=' + self.API_KEY
payload = {'longUrl': url}
headers = {'content-type': 'application/json'}
r = requests.post(req_url, json=payload, headers=headers)
resp = json.loads(r.text)
return resp['id']
def google_url_expand(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url'
payload = {'key': self.API_KEY, 'shortUrl': url}
r = requests.get(req_url, params=payload)
resp = json.loads(r.text)
return resp['longUrl']
def main():
desc = "A python script to shorten or expand urls using Google Url Shortener API"
parser = argparse.ArgumentParser(description=desc, prog='GoogleUrlShortener.py')
parser.add_argument('-e', '--expand', action='store_true', help='Short Url will be expanded.')
parser.add_argument('-s', '--shorten', action='store_true', help='Long Url will be shortened.')
req_key = parser.add_argument_group("Required named arguments")
req_key.add_argument('-k', '--key', type=str, help='Browser API key. Get it from Google Developer Console', required=True)
req_key.add_argument('-u', '--url', type=str, help='URL which you want to shorten or expand.', required=True)
args = parser.parse_args()
if args.expand and args.shorten:
print('\n--expand/--shorten are mutually exclusive\n')
parser.parse_args(['--help'])
if not (args.shorten or args.expand):
print('\n--expand/--shorten is required\n')
parser.parse_args(['--help'])
else:
obj = GUrlShorten(args.key)
if args.expand:
print(obj.google_url_expand(args.url))
elif args.shorten:
print(obj.google_url_shorten(args.url))
else:
raise Exception('Magical Exception')
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add google url shortener example<commit_after>import json
import requests
import argparse
import sys
class GUrlShorten():
def __init__(self, key):
self.API_KEY = key
def google_url_shorten(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url?key=' + self.API_KEY
payload = {'longUrl': url}
headers = {'content-type': 'application/json'}
r = requests.post(req_url, json=payload, headers=headers)
resp = json.loads(r.text)
return resp['id']
def google_url_expand(self, url):
req_url = 'https://www.googleapis.com/urlshortener/v1/url'
payload = {'key': self.API_KEY, 'shortUrl': url}
r = requests.get(req_url, params=payload)
resp = json.loads(r.text)
return resp['longUrl']
def main():
desc = "A python script to shorten or expand urls using Google Url Shortener API"
parser = argparse.ArgumentParser(description=desc, prog='GoogleUrlShortener.py')
parser.add_argument('-e', '--expand', action='store_true', help='Short Url will be expanded.')
parser.add_argument('-s', '--shorten', action='store_true', help='Long Url will be shortened.')
req_key = parser.add_argument_group("Required named arguments")
req_key.add_argument('-k', '--key', type=str, help='Browser API key. Get it from Google Developer Console', required=True)
req_key.add_argument('-u', '--url', type=str, help='URL which you want to shorten or expand.', required=True)
args = parser.parse_args()
if args.expand and args.shorten:
print('\n--expand/--shorten are mutually exclusive\n')
parser.parse_args(['--help'])
if not (args.shorten or args.expand):
print('\n--expand/--shorten is required\n')
parser.parse_args(['--help'])
else:
obj = GUrlShorten(args.key)
if args.expand:
print(obj.google_url_expand(args.url))
elif args.shorten:
print(obj.google_url_shorten(args.url))
else:
raise Exception('Magical Exception')
if __name__ == '__main__':
sys.exit(main())
|
|
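Besides the argparse front end, the GUrlShorten class can be used programmatically. A small sketch, assuming the class from the script above is in scope; 'YOUR_API_KEY' is a placeholder for a browser key from the Google Developer Console and the printed values depend on what the API returns:
# Hypothetical usage of the GUrlShorten class defined in the script above.
shortener = GUrlShorten('YOUR_API_KEY')
short = shortener.google_url_shorten('https://www.example.com/some/long/path')
print(short)                               # the shortened id returned by the API
print(shortener.google_url_expand(short))  # round-trips back to the long URL
Note that the script treats the two flags as mutually exclusive but enforces that manually; argparse's add_mutually_exclusive_group could express the same constraint declaratively.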
d892f3ac66f45816de3a9ba1f44d4a4df1bacf07
|
pytube/cli.py
|
pytube/cli.py
|
import argparse
from .api import YouTube
from .utils import print_status
def _main():
parser = argparse.ArgumentParser(description='YouTube video downloader')
parser.add_argument("url", help="The URL of the Video to be downloaded")
parser.add_argument("--extension", "-e",
help="The requested format of the video", dest="ext")
parser.add_argument("--resolution", "-r",
help="The requested resolution", dest="res")
parser.add_argument("--path", "-p",
help="The path to save the video to.", dest="path")
parser.add_argument("--filename", "-f",
dest="filename",
help=("The filename, without extension, "
"to save the video in."))
args = parser.parse_args()
yt = YouTube()
yt.url = args.url
if args.filename:
yt.filename = args.filename
if args.ext and args.res:
# There's only one video that matches both so get it
vid = yt.get(args.ext, args.res)
elif args.ext:
# There are several videos with the same extension
videos = yt.filter(extension=args.ext)
# Select the highest resolution one
vid = max(videos)
elif args.res:
# There are several videos with the same resolution
videos = yt.filter(resolution=args.res)
# Select the highest resolution one
vid = max(videos)
else:
# If nothing is specified get the highest resolution one
vid = max(yt.videos)
vid.download(path=args.path, on_progress=print_status)
|
Add a command line tool.
|
Add a command line tool.
|
Python
|
mit
|
mserjx/pytube,DomainGroupOSS/pytube,harsham05/pytube,harsham05/pytube,garg10may/pytube,garg10may/pytube,pytube/pytube,mserjx/pytube,DomainGroupOSS/pytube
|
Add a command line tool.
|
import argparse
from .api import YouTube
from .utils import print_status
def _main():
parser = argparse.ArgumentParser(description='YouTube video downloader')
parser.add_argument("url", help="The URL of the Video to be downloaded")
parser.add_argument("--extension", "-e",
help="The requested format of the video", dest="ext")
parser.add_argument("--resolution", "-r",
help="The requested resolution", dest="res")
parser.add_argument("--path", "-p",
help="The path to save the video to.", dest="path")
parser.add_argument("--filename", "-f",
dest="filename",
help=("The filename, without extension, "
"to save the video in."))
args = parser.parse_args()
yt = YouTube()
yt.url = args.url
if args.filename:
yt.filename = args.filename
if args.ext and args.res:
# There's only one video that matches both so get it
vid = yt.get(args.ext, args.res)
elif args.ext:
# There are several videos with the same extension
videos = yt.filter(extension=args.ext)
# Select the highest resolution one
vid = max(videos)
elif args.res:
# There are several videos with the same resolution
videos = yt.filter(resolution=args.res)
# Select the highest resolution one
vid = max(videos)
else:
# If nothing is specified get the highest resolution one
vid = max(yt.videos)
vid.download(path=args.path, on_progress=print_status)
|
<commit_before><commit_msg>Add a command line tool.<commit_after>
|
import argparse
from .api import YouTube
from .utils import print_status
def _main():
parser = argparse.ArgumentParser(description='YouTube video downloader')
parser.add_argument("url", help="The URL of the Video to be downloaded")
parser.add_argument("--extension", "-e",
help="The requested format of the video", dest="ext")
parser.add_argument("--resolution", "-r",
help="The requested resolution", dest="res")
parser.add_argument("--path", "-p",
help="The path to save the video to.", dest="path")
parser.add_argument("--filename", "-f",
dest="filename",
help=("The filename, without extension, "
"to save the video in."))
args = parser.parse_args()
yt = YouTube()
yt.url = args.url
if args.filename:
yt.filename = args.filename
if args.ext and args.res:
# There's only one video that matches both so get it
vid = yt.get(args.ext, args.res)
elif args.ext:
# There are several videos with the same extension
videos = yt.filter(extension=args.ext)
# Select the highest resolution one
vid = max(videos)
elif args.res:
# There are several videos with the same resolution
videos = yt.filter(resolution=args.res)
# Select the highest resolution one
vid = max(videos)
else:
# If nothing is specified get the highest resolution one
vid = max(yt.videos)
vid.download(path=args.path, on_progress=print_status)
|
Add a command line tool.import argparse
from .api import YouTube
from .utils import print_status
def _main():
parser = argparse.ArgumentParser(description='YouTube video downloader')
parser.add_argument("url", help="The URL of the Video to be downloaded")
parser.add_argument("--extension", "-e",
help="The requested format of the video", dest="ext")
parser.add_argument("--resolution", "-r",
help="The requested resolution", dest="res")
parser.add_argument("--path", "-p",
help="The path to save the video to.", dest="path")
parser.add_argument("--filename", "-f",
dest="filename",
help=("The filename, without extension, "
"to save the video in."))
args = parser.parse_args()
yt = YouTube()
yt.url = args.url
if args.filename:
yt.filename = args.filename
if args.ext and args.res:
# There's only ope video that matches both so get it
vid = yt.get(args.ext, args.res)
elif args.ext:
# There are several videos with the same extension
videos = yt.filter(extension=args.ext)
# Select the highest resolution one
vid = max(videos)
elif args.res:
# There are several videos with the same extension
videos = yt.filter(resolution=args.res)
# Select the highest resolution one
vid = max(videos)
else:
# If nothing is specified get the highest resolution one
vid = max(yt.videos)
vid.download(path=args.path, on_progress=print_status)
|
<commit_before><commit_msg>Add a command line tool.<commit_after>import argparse
from .api import YouTube
from .utils import print_status
def _main():
parser = argparse.ArgumentParser(description='YouTube video downloader')
parser.add_argument("url", help="The URL of the Video to be downloaded")
parser.add_argument("--extension", "-e",
help="The requested format of the video", dest="ext")
parser.add_argument("--resolution", "-r",
help="The requested resolution", dest="res")
parser.add_argument("--path", "-p",
help="The path to save the video to.", dest="path")
parser.add_argument("--filename", "-f",
dest="filename",
help=("The filename, without extension, "
"to save the video in."))
args = parser.parse_args()
yt = YouTube()
yt.url = args.url
if args.filename:
yt.filename = args.filename
if args.ext and args.res:
# There's only one video that matches both so get it
vid = yt.get(args.ext, args.res)
elif args.ext:
# There are several videos with the same extension
videos = yt.filter(extension=args.ext)
# Select the highest resolution one
vid = max(videos)
elif args.res:
# There are several videos with the same resolution
videos = yt.filter(resolution=args.res)
# Select the highest resolution one
vid = max(videos)
else:
# If nothing is specified get the highest resolution one
vid = max(yt.videos)
vid.download(path=args.path, on_progress=print_status)
|
|
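The CLI above is a thin wrapper over the legacy pytube API it imports (YouTube with a url attribute, filter(), get(), and max() over comparable video objects). The same selection logic written directly against that old API, given only as a rough sketch with a placeholder URL and not as part of the commit, looks like:
from pytube.api import YouTube
from pytube.utils import print_status
yt = YouTube()
yt.url = 'https://www.youtube.com/watch?v=VIDEO_ID'  # placeholder video URL
# Highest-resolution mp4, mirroring `--extension mp4` on the command line.
vid = max(yt.filter(extension='mp4'))
vid.download(path='/tmp', on_progress=print_status)
Current pytube releases expose a different interface, so this mirrors the snapshot of the library shown here rather than today's API.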
6ccaac73969815e4e195cbeb3ead850153c4da12
|
mica/starcheck/tests/make_database.py
|
mica/starcheck/tests/make_database.py
|
import os
import tempfile
from Chandra.Time import DateTime
from Ska.Shell import bash
import mica.common
# Override MICA_ARCHIVE with a temporary directory
TESTDIR = tempfile.mkdtemp()
mica.common.MICA_ARCHIVE = TESTDIR
# import mica.starcheck.starcheck after setting MICA_ARCHIVE
import mica.starcheck.starcheck
# Just ingest files from the last couple of weeks or so
# This still uses the silly find files newer than this other file method, so
# set the time stamp on that reference file
if not os.path.exists(os.path.join(TESTDIR, 'starcheck')):
os.makedirs(os.path.join(TESTDIR, 'starcheck'))
bash("touch -d {} {}".format(DateTime(-15).iso, mica.starcheck.starcheck.FILES['touch_file']))
# And just check that the update script didn't raise any exceptions
mica.starcheck.starcheck.update()
# Cleanup manually
bash("rm -r {}".format(TESTDIR))
|
Add a script to make a test starcheck database from recent data
|
Add a script to make a test starcheck database from recent data
Even on just a few weeks, this seems to take too long to be a
standard test, so I'm just putting the logic here for safekeeping.
It could also be a "long" test for ska_testr, but that wouldn't
conveniently let you check test code compatibility without a dev ska.
|
Python
|
bsd-3-clause
|
sot/mica,sot/mica
|
Add a script to make a test starcheck database from recent data
Even on just a few weeks, this seems to take too long to be a
standard test, so I'm just putting the logic here for safekeeping.
It could also be a "long" test for ska_testr, but that wouldn't
conveniently let you check test code compatibility without a dev ska.
|
import os
import tempfile
from Chandra.Time import DateTime
from Ska.Shell import bash
import mica.common
# Override MICA_ARCHIVE with a temporary directory
TESTDIR = tempfile.mkdtemp()
mica.common.MICA_ARCHIVE = TESTDIR
# import mica.starcheck.starcheck after setting MICA_ARCHIVE
import mica.starcheck.starcheck
# Just ingest files from the last couple of weeks or so
# This still uses the silly find files newer than this other file method, so
# set the time stamp on that reference file
if not os.path.exists(os.path.join(TESTDIR, 'starcheck')):
os.makedirs(os.path.join(TESTDIR, 'starcheck'))
bash("touch -d {} {}".format(DateTime(-15).iso, mica.starcheck.starcheck.FILES['touch_file']))
# And just check that the update script didn't raise any exceptions
mica.starcheck.starcheck.update()
# Cleanup manually
bash("rm -r {}".format(TESTDIR))
|
<commit_before><commit_msg>Add a script to make a test starcheck database from recent data
Even on just a few weeks, this seems to take too long to be a
standard test, so I'm just putting the logic here for safekeeping.
It could also be a "long" test for ska_testr, but that wouldn't
conveniently let you check test code compatibility without a dev ska.<commit_after>
|
import os
import tempfile
from Chandra.Time import DateTime
from Ska.Shell import bash
import mica.common
# Override MICA_ARCHIVE with a temporary directory
TESTDIR = tempfile.mkdtemp()
mica.common.MICA_ARCHIVE = TESTDIR
# import mica.starcheck.starcheck after setting MICA_ARCHIVE
import mica.starcheck.starcheck
# Just ingest files from the last couple of weeks or so
# This still uses the silly find files newer than this other file method, so
# set the time stamp on that reference file
if not os.path.exists(os.path.join(TESTDIR, 'starcheck')):
os.makedirs(os.path.join(TESTDIR, 'starcheck'))
bash("touch -d {} {}".format(DateTime(-15).iso, mica.starcheck.starcheck.FILES['touch_file']))
# And just check that the update script didn't raise any exceptions
mica.starcheck.starcheck.update()
# Cleanup manually
bash("rm -r {}".format(TESTDIR))
|
Add a script to make a test starcheck database from recent data
Even on just a few weeks, this seems to take too long to be a
standard test, so I'm just putting the logic here for safekeeping.
It could also be a "long" test for ska_testr, but that wouldn't
conveniently let you check test code compatibility without a dev ska.import os
import tempfile
from Chandra.Time import DateTime
from Ska.Shell import bash
import mica.common
# Override MICA_ARCHIVE with a temporary directory
TESTDIR = tempfile.mkdtemp()
mica.common.MICA_ARCHIVE = TESTDIR
# import mica.starcheck.starcheck after setting MICA_ARCHIVE
import mica.starcheck.starcheck
# Just ingest files from the last couple of weeks or so
# This still uses the silly find files newer than this other file method, so
# set the time stamp on that reference file
if not os.path.exists(os.path.join(TESTDIR, 'starcheck')):
os.makedirs(os.path.join(TESTDIR, 'starcheck'))
bash("touch -d {} {}".format(DateTime(-15).iso, mica.starcheck.starcheck.FILES['touch_file']))
# And just check that the update script didn't raise any exceptions
mica.starcheck.starcheck.update()
# Cleanup manually
bash("rm -r {}".format(TESTDIR))
|
<commit_before><commit_msg>Add a script to make a test starcheck database from recent data
Even on just a few weeks, this seems to take too long to be a
standard test, so I'm just putting the logic here for safekeeping.
It could also be a "long" test for ska_testr, but that wouldn't
conveniently let you check test code compatibility without a dev ska.<commit_after>import os
import tempfile
from Chandra.Time import DateTime
from Ska.Shell import bash
import mica.common
# Override MICA_ARCHIVE with a temporary directory
TESTDIR = tempfile.mkdtemp()
mica.common.MICA_ARCHIVE = TESTDIR
# import mica.starcheck.starcheck after setting MICA_ARCHIVE
import mica.starcheck.starcheck
# Just ingest files from the last couple of weeks or so
# This still uses the silly find files newer than this other file method, so
# set the time stamp on that reference file
if not os.path.exists(os.path.join(TESTDIR, 'starcheck')):
os.makedirs(os.path.join(TESTDIR, 'starcheck'))
bash("touch -d {} {}".format(DateTime(-15).iso, mica.starcheck.starcheck.FILES['touch_file']))
# And just check that the update script didn't raise any exceptions
mica.starcheck.starcheck.update()
# Cleanup manually
bash("rm -r {}".format(TESTDIR))
|
|
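Two details of the script above are worth noting if it were promoted into a regular test: the shell calls (`touch -d`, `rm -r`) can be replaced with portable standard-library equivalents, and the temporary directory is easier to clean up with shutil. A hedged sketch of that variant, reusing TESTDIR and the mica.starcheck.starcheck import from the script and assuming the same ~15-day window:
import os
import time
import shutil
touch_file = mica.starcheck.starcheck.FILES['touch_file']
open(touch_file, 'a').close()                 # make sure the reference file exists
backdated = time.time() - 15 * 86400          # roughly DateTime(-15) in unix seconds
os.utime(touch_file, (backdated, backdated))  # backdate atime/mtime without `touch -d`
mica.starcheck.starcheck.update()
shutil.rmtree(TESTDIR)                        # portable cleanup instead of `rm -r`
Whether the touch-file timestamp or an explicit date range is the better ingestion trigger is a separate question; the sketch only removes the dependency on a POSIX shell.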
35d5b923b70acf4fe6cb0dda1466463a00296214
|
tests/core/test_projectors.py
|
tests/core/test_projectors.py
|
from openfisca_core.simulations.simulation_builder import SimulationBuilder
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from openfisca_core.entities import build_entity
def test_shortcut_to_containing_entity_provided():
"""
Tests that, when an entity provides a containing entity,
the shortcut to that containing entity is provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=["household"],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
assert simulation.populations["family"].household.entity.key == "household"
def test_shortcut_to_containing_entity_not_provided():
"""
Tests that, when an entity doesn't provide a containing
entity, the shortcut to that containing entity is not provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=[],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
try:
simulation.populations["family"].household
assert False
except AttributeError:
pass
|
Add unit tests for projector shortcut
|
Add unit tests for projector shortcut
|
Python
|
agpl-3.0
|
openfisca/openfisca-core,openfisca/openfisca-core
|
Add unit tests for projector shortcut
|
from openfisca_core.simulations.simulation_builder import SimulationBuilder
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from openfisca_core.entities import build_entity
def test_shortcut_to_containing_entity_provided():
"""
Tests that, when an entity provides a containing entity,
the shortcut to that containing entity is provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=["household"],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
assert simulation.populations["family"].household.entity.key == "household"
def test_shortcut_to_containing_entity_not_provided():
"""
Tests that, when an entity doesn't provide a containing
entity, the shortcut to that containing entity is not provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=[],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
try:
simulation.populations["family"].household
assert False
except AttributeError:
pass
|
<commit_before><commit_msg>Add unit tests for projector shortcut<commit_after>
|
from openfisca_core.simulations.simulation_builder import SimulationBuilder
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from openfisca_core.entities import build_entity
def test_shortcut_to_containing_entity_provided():
"""
Tests that, when an entity provides a containing entity,
the shortcut to that containing entity is provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=["household"],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
assert simulation.populations["family"].household.entity.key == "household"
def test_shortcut_to_containing_entity_not_provided():
"""
Tests that, when an entity doesn't provide a containing
entity, the shortcut to that containing entity is not provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=[],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
try:
simulation.populations["family"].household
assert False
except AttributeError:
pass
|
Add unit tests for projector shortcutfrom openfisca_core.simulations.simulation_builder import SimulationBuilder
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from openfisca_core.entities import build_entity
def test_shortcut_to_containing_entity_provided():
"""
Tests that, when an entity provides a containing entity,
the shortcut to that containing entity is provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=["household"],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
assert simulation.populations["family"].household.entity.key == "household"
def test_shortcut_to_containing_entity_not_provided():
"""
Tests that, when an entity doesn't provide a containing
entity, the shortcut to that containing entity is not provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=[],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
try:
simulation.populations["family"].household
assert False
except AttributeError:
pass
|
<commit_before><commit_msg>Add unit tests for projector shortcut<commit_after>from openfisca_core.simulations.simulation_builder import SimulationBuilder
from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from openfisca_core.entities import build_entity
def test_shortcut_to_containing_entity_provided():
"""
Tests that, when an entity provides a containing entity,
the shortcut to that containing entity is provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=["household"],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
assert simulation.populations["family"].household.entity.key == "household"
def test_shortcut_to_containing_entity_not_provided():
"""
Tests that, when an entity doesn't provide a containing
entity, the shortcut to that containing entity is not provided.
"""
person_entity = build_entity(
key="person",
plural="people",
label="A person",
is_person=True,
)
family_entity = build_entity(
key="family",
plural="families",
label="A family (all members in the same household)",
containing_entities=[],
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
household_entity = build_entity(
key="household",
plural="households",
label="A household, containing one or more families",
roles=[{
"key": "member",
"plural": "members",
"label": "Member",
}]
)
entities = [person_entity, family_entity, household_entity]
system = TaxBenefitSystem(entities)
simulation = SimulationBuilder().build_from_dict(system, {})
try:
simulation.populations["family"].household
assert False
except AttributeError:
pass
|
|
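In the second test above, the explicit try/except that expects an AttributeError could equivalently be written with pytest.raises; a small sketch, assuming pytest is the test runner and reusing the simulation built in that test:
import pytest

with pytest.raises(AttributeError):
    simulation.populations["family"].household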
cc8ae7df7918375454188f3f5c5d2df0290c0886
|
pombola/interests_register/management/commands/interests_register_delete_existing.py
|
pombola/interests_register/management/commands/interests_register_delete_existing.py
|
import sys
from optparse import make_option
from django.core.management.base import NoArgsCommand
from ...models import Release, Category, Entry, EntryLineItem
class Command(NoArgsCommand):
help = 'Delete existing declarations of members interests - allows for subsequent re-importing of data.'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
count_releases = Release.objects.count()
count_categories = Category.objects.count()
count_entries = Entry.objects.count()
count_entrylineitems = EntryLineItem.objects.count()
print " Deleting", count_releases, "releases"
print " Deleting", count_categories, "categories"
print " Deleting", count_entries, "entries"
print " Deleting", count_entrylineitems, "entrylineitems\n"
if options['commit']:
print " Executing the delete"
Release.objects.all().delete()
Category.objects.all().delete()
else:
print " Not executing the delete (--commit not specified)"
|
Add members interests delete existing command
|
Add members interests delete existing command
|
Python
|
agpl-3.0
|
ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,ken-muturi/pombola,hzj123/56th,hzj123/56th,patricmutwiri/pombola,ken-muturi/pombola,mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,ken-muturi/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,hzj123/56th,mysociety/pombola
|
Add members interests delete existing command
|
import sys
from optparse import make_option
from django.core.management.base import NoArgsCommand
from ...models import Release, Category, Entry, EntryLineItem
class Command(NoArgsCommand):
help = 'Delete existing declarations of members interests - allows for subsequent re-importing of data.'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
count_releases = Release.objects.count()
count_categories = Category.objects.count()
count_entries = Entry.objects.count()
count_entrylineitems = EntryLineItem.objects.count()
print " Deleting", count_releases, "releases"
print " Deleting", count_categories, "categories"
print " Deleting", count_entries, "entries"
print " Deleting", count_entrylineitems, "entrylineitems\n"
if options['commit']:
print " Executing the delete"
Release.objects.all().delete()
Category.objects.all().delete()
else:
print " Not executing the delete (--commit not specified)"
|
<commit_before><commit_msg>Add members interests delete existing command<commit_after>
|
import sys
from optparse import make_option
from django.core.management.base import NoArgsCommand
from ...models import Release, Category, Entry, EntryLineItem
class Command(NoArgsCommand):
help = 'Delete existing declarations of members interests - allows for subsequent re-importing of data.'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
count_releases = Release.objects.count()
count_categories = Category.objects.count()
count_entries = Entry.objects.count()
count_entrylineitems = EntryLineItem.objects.count()
print " Deleting", count_releases, "releases"
print " Deleting", count_categories, "categories"
print " Deleting", count_entries, "entries"
print " Deleting", count_entrylineitems, "entrylineitems\n"
if options['commit']:
print " Executing the delete"
Release.objects.all().delete()
Category.objects.all().delete()
else:
print " Not executing the delete (--commit not specified)"
|
Add members interests delete existing commandimport sys
from optparse import make_option
from django.core.management.base import NoArgsCommand
from ...models import Release, Category, Entry, EntryLineItem
class Command(NoArgsCommand):
help = 'Delete existing declarations of members interests - allows for subsequent re-importing of data.'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
count_releases = Release.objects.count()
count_categories = Category.objects.count()
count_entries = Entry.objects.count()
count_entrylineitems = EntryLineItem.objects.count()
print " Deleting", count_releases, "releases"
print " Deleting", count_categories, "categories"
print " Deleting", count_entries, "entries"
print " Deleting", count_entrylineitems, "entrylineitems\n"
if options['commit']:
print " Executing the delete"
Release.objects.all().delete()
Category.objects.all().delete()
else:
print " Not executing the delete (--commit not specified)"
|
<commit_before><commit_msg>Add members interests delete existing command<commit_after>import sys
from optparse import make_option
from django.core.management.base import NoArgsCommand
from ...models import Release, Category, Entry, EntryLineItem
class Command(NoArgsCommand):
help = 'Delete existing declarations of members interests - allows for subsequent re-importing of data.'
option_list = NoArgsCommand.option_list + (
make_option(
'--commit',
action='store_true',
dest='commit',
help='Actually update the database'),)
def handle_noargs(self, **options):
count_releases = Release.objects.count()
count_categories = Category.objects.count()
count_entries = Entry.objects.count()
count_entrylineitems = EntryLineItem.objects.count()
print " Deleting", count_releases, "releases"
print " Deleting", count_categories, "categories"
print " Deleting", count_entries, "entries"
print " Deleting", count_entrylineitems, "entrylineitems\n"
if options['commit']:
print " Executing the delete"
Release.objects.all().delete()
Category.objects.all().delete()
else:
print " Not executing the delete (--commit not specified)"
|
|
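A minimal usage sketch for the command above, assuming the pombola Django settings are already configured: it can be driven through Django's call_command, where omitting the commit option only reports the row counts and commit=True performs the delete.
from django.core.management import call_command

# Dry run: prints the row counts but leaves the tables untouched.
call_command('interests_register_delete_existing')

# Destructive run: actually deletes the releases and categories.
call_command('interests_register_delete_existing', commit=True)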
4a9fdb07b18e4b82d575633d52b504c1118e9843
|
scripts/rf-send.py
|
scripts/rf-send.py
|
import os
import subprocess
# Enter codes for each outlet
codes = {'1': {'on': '21811', 'off': '21820'},
'2': {'on': '21955', 'off': '21964'},
'3': {'on': '22275', 'off': '22284'},
'4': {'on': '23811', 'off': '23820'},
'5': {'on': '29955', 'off': '29964'}}
num = input('Enter outlet number: ')
state = input('Enter on/off: ')
code = codes[num][state] # Read code from signal
codesend = './codesend' # Set codesend script path (should be in rfoutlet)
pin = '0' # Set pin number (GPIO: 17)
length = '189' # Set pulse length
subprocess.call([codesend, code, '-p', pin, '-l', length])
|
Add script for sending rf signals.
|
Add script for sending rf signals.
|
Python
|
bsd-3-clause
|
kbsezginel/raspberry-pi,kbsezginel/raspberry-pi,kbsezginel/raspberry-pi,kbsezginel/raspberry-pi
|
Add script for sending rf signals.
|
import os
import subprocess
# Enter codes for each outlet
codes = {'1': {'on': '21811', 'off': '21820'},
'2': {'on': '21955', 'off': '21964'},
'3': {'on': '22275', 'off': '22284'},
'4': {'on': '23811', 'off': '23820'},
'5': {'on': '29955', 'off': '29964'}}
num = input('Enter outlet number: ')
state = input('Enter on/off: ')
code = codes[num][state] # Read code from signal
codesend = './codesend' # Set codesend script path (should be in rfoutlet)
pin = '0' # Set pin number (GPIO: 17)
length = '189' # Set pulse length
subprocess.call([codesend, code, '-p', pin, '-l', length])
|
<commit_before><commit_msg>Add script for sending rf signals.<commit_after>
|
import os
import subprocess
# Enter codes for each outlet
codes = {'1': {'on': '21811', 'off': '21820'},
'2': {'on': '21955', 'off': '21964'},
'3': {'on': '22275', 'off': '22284'},
'4': {'on': '23811', 'off': '23820'},
'5': {'on': '29955', 'off': '29964'}}
num = input('Enter outlet number: ')
state = input('Enter on/off: ')
code = codes[num][state] # Read code from signal
codesend = './codesend' # Set codesend script path (should be in rfoutlet)
pin = '0' # Set pin number (GPIO: 17)
length = '189' # Set pulse length
subprocess.call([codesend, code, '-p', pin, '-l', length])
|
Add script for sending rf signals.import os
import subprocess
# Enter codes for each outlet
codes = {'1': {'on': '21811', 'off': '21820'},
'2': {'on': '21955', 'off': '21964'},
'3': {'on': '22275', 'off': '22284'},
'4': {'on': '23811', 'off': '23820'},
'5': {'on': '29955', 'off': '29964'}}
num = input('Enter outlet number: ')
state = input('Enter on/off: ')
code = codes[num][state] # Read code from signal
codesend = './codesend' # Set codesend script path (should be in rfoutlet)
pin = '0' # Set pin number (GPIO: 17)
length = '189' # Set pulse length
subprocess.call([codesend, code, '-p', pin, '-l', length])
|
<commit_before><commit_msg>Add script for sending rf signals.<commit_after>import os
import subprocess
# Enter codes for each outlet
codes = {'1': {'on': '21811', 'off': '21820'},
'2': {'on': '21955', 'off': '21964'},
'3': {'on': '22275', 'off': '22284'},
'4': {'on': '23811', 'off': '23820'},
'5': {'on': '29955', 'off': '29964'}}
num = input('Enter outlet number: ')
state = input('Enter on/off: ')
code = codes[num][state] # Read code from signal
codesend = './codesend' # Set codesend script path (should be in rfoutlet)
pin = '0' # Set pin number (GPIO: 17)
length = '189' # Set pulse length
subprocess.call([codesend, code, '-p', pin, '-l', length])
|
|
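A non-interactive sketch of the same idea, assuming the same codesend binary, GPIO pin and pulse length as above: read the outlet number and state from the command line (e.g. python rf-send.py 3 on) instead of prompting.
import subprocess
import sys

codes = {'1': {'on': '21811', 'off': '21820'},
         '2': {'on': '21955', 'off': '21964'},
         '3': {'on': '22275', 'off': '22284'}}  # same structure as above, trimmed

num, state = sys.argv[1], sys.argv[2]  # e.g. "3" and "on"
subprocess.call(['./codesend', codes[num][state], '-p', '0', '-l', '189'])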
f211ebde80454beffd7d8df9633b8b67e7ff73c1
|
DataWrangling/process_json.py
|
DataWrangling/process_json.py
|
# To experiment with this code freely you will have to run this code locally.
# Take a look at the main() function for an example of how to use the code.
# We have provided example json output in the other code editor tabs for you to
# look at, but you will not be able to run any queries through our UI.
import json
import requests
BASE_URL = "http://musicbrainz.org/ws/2/"
ARTIST_URL = BASE_URL + "artist/"
# query parameters are given to the requests.get function as a dictionary; this
# variable contains some starter parameters.
query_type = { "simple": {},
"atr": {"inc": "aliases+tags+ratings"},
"aliases": {"inc": "aliases"},
"releases": {"inc": "releases"}}
def query_site(url, params, uid="", fmt="json"):
# This is the main function for making queries to the musicbrainz API.
# A json document should be returned by the query.
params["fmt"] = fmt
r = requests.get(url + uid, params=params)
print "requesting", r.url
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def query_by_name(url, params, name):
# This adds an artist name to the query parameters before making
# an API call to the function above.
params["query"] = "artist:" + name
return query_site(url, params)
def pretty_print(data, indent=4):
# After we get our output, we can format it to be more readable
# by using this function.
if type(data) == dict:
print json.dumps(data, indent=indent, sort_keys=True)
else:
print data
def main():
'''
Modify the function calls and indexing below to answer the questions on
the next quiz. HINT: Note how the output we get from the site is a
multi-level JSON document, so try making print statements to step through
the structure one level at a time or copy the output to a separate output
file.
'''
results = query_by_name(ARTIST_URL, query_type["simple"], "Nirvana")
pretty_print(results)
artist_id = results["artists"][1]["id"]
print "\nARTIST:"
pretty_print(results["artists"][1])
artist_data = query_site(ARTIST_URL, query_type["releases"], artist_id)
releases = artist_data["releases"]
print "\nONE RELEASE:"
pretty_print(releases[0], indent=2)
release_titles = [r["title"] for r in releases]
print "\nALL TITLES:"
for t in release_titles:
print t
if __name__ == '__main__':
main()
|
Add scripts to process json files
|
feat: Add scripts to process json files
Reads a json files, process it, and apply some functions to get
data
|
Python
|
mit
|
aguijarro/DataSciencePython
|
feat: Add scripts to process json files
Reads a json files, process it, and apply some functions to get
data
|
# To experiment with this code freely you will have to run this code locally.
# Take a look at the main() function for an example of how to use the code.
# We have provided example json output in the other code editor tabs for you to
# look at, but you will not be able to run any queries through our UI.
import json
import requests
BASE_URL = "http://musicbrainz.org/ws/2/"
ARTIST_URL = BASE_URL + "artist/"
# query parameters are given to the requests.get function as a dictionary; this
# variable contains some starter parameters.
query_type = { "simple": {},
"atr": {"inc": "aliases+tags+ratings"},
"aliases": {"inc": "aliases"},
"releases": {"inc": "releases"}}
def query_site(url, params, uid="", fmt="json"):
# This is the main function for making queries to the musicbrainz API.
# A json document should be returned by the query.
params["fmt"] = fmt
r = requests.get(url + uid, params=params)
print "requesting", r.url
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def query_by_name(url, params, name):
# This adds an artist name to the query parameters before making
# an API call to the function above.
params["query"] = "artist:" + name
return query_site(url, params)
def pretty_print(data, indent=4):
# After we get our output, we can format it to be more readable
# by using this function.
if type(data) == dict:
print json.dumps(data, indent=indent, sort_keys=True)
else:
print data
def main():
'''
Modify the function calls and indexing below to answer the questions on
the next quiz. HINT: Note how the output we get from the site is a
multi-level JSON document, so try making print statements to step through
the structure one level at a time or copy the output to a separate output
file.
'''
results = query_by_name(ARTIST_URL, query_type["simple"], "Nirvana")
pretty_print(results)
artist_id = results["artists"][1]["id"]
print "\nARTIST:"
pretty_print(results["artists"][1])
artist_data = query_site(ARTIST_URL, query_type["releases"], artist_id)
releases = artist_data["releases"]
print "\nONE RELEASE:"
pretty_print(releases[0], indent=2)
release_titles = [r["title"] for r in releases]
print "\nALL TITLES:"
for t in release_titles:
print t
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts to process json files
Reads a json files, process it, and apply some functions to get
data<commit_after>
|
# To experiment with this code freely you will have to run this code locally.
# Take a look at the main() function for an example of how to use the code.
# We have provided example json output in the other code editor tabs for you to
# look at, but you will not be able to run any queries through our UI.
import json
import requests
BASE_URL = "http://musicbrainz.org/ws/2/"
ARTIST_URL = BASE_URL + "artist/"
# query parameters are given to the requests.get function as a dictionary; this
# variable contains some starter parameters.
query_type = { "simple": {},
"atr": {"inc": "aliases+tags+ratings"},
"aliases": {"inc": "aliases"},
"releases": {"inc": "releases"}}
def query_site(url, params, uid="", fmt="json"):
# This is the main function for making queries to the musicbrainz API.
# A json document should be returned by the query.
params["fmt"] = fmt
r = requests.get(url + uid, params=params)
print "requesting", r.url
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def query_by_name(url, params, name):
# This adds an artist name to the query parameters before making
# an API call to the function above.
params["query"] = "artist:" + name
return query_site(url, params)
def pretty_print(data, indent=4):
# After we get our output, we can format it to be more readable
# by using this function.
if type(data) == dict:
print json.dumps(data, indent=indent, sort_keys=True)
else:
print data
def main():
'''
Modify the function calls and indexing below to answer the questions on
the next quiz. HINT: Note how the output we get from the site is a
multi-level JSON document, so try making print statements to step through
the structure one level at a time or copy the output to a separate output
file.
'''
results = query_by_name(ARTIST_URL, query_type["simple"], "Nirvana")
pretty_print(results)
artist_id = results["artists"][1]["id"]
print "\nARTIST:"
pretty_print(results["artists"][1])
artist_data = query_site(ARTIST_URL, query_type["releases"], artist_id)
releases = artist_data["releases"]
print "\nONE RELEASE:"
pretty_print(releases[0], indent=2)
release_titles = [r["title"] for r in releases]
print "\nALL TITLES:"
for t in release_titles:
print t
if __name__ == '__main__':
main()
|
feat: Add scripts to process json files
Reads a json files, process it, and apply some functions to get
data# To experiment with this code freely you will have to run this code locally.
# Take a look at the main() function for an example of how to use the code.
# We have provided example json output in the other code editor tabs for you to
# look at, but you will not be able to run any queries through our UI.
import json
import requests
BASE_URL = "http://musicbrainz.org/ws/2/"
ARTIST_URL = BASE_URL + "artist/"
# query parameters are given to the requests.get function as a dictionary; this
# variable contains some starter parameters.
query_type = { "simple": {},
"atr": {"inc": "aliases+tags+ratings"},
"aliases": {"inc": "aliases"},
"releases": {"inc": "releases"}}
def query_site(url, params, uid="", fmt="json"):
# This is the main function for making queries to the musicbrainz API.
# A json document should be returned by the query.
params["fmt"] = fmt
r = requests.get(url + uid, params=params)
print "requesting", r.url
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def query_by_name(url, params, name):
# This adds an artist name to the query parameters before making
# an API call to the function above.
params["query"] = "artist:" + name
return query_site(url, params)
def pretty_print(data, indent=4):
# After we get our output, we can format it to be more readable
# by using this function.
if type(data) == dict:
print json.dumps(data, indent=indent, sort_keys=True)
else:
print data
def main():
'''
Modify the function calls and indexing below to answer the questions on
the next quiz. HINT: Note how the output we get from the site is a
multi-level JSON document, so try making print statements to step through
the structure one level at a time or copy the output to a separate output
file.
'''
results = query_by_name(ARTIST_URL, query_type["simple"], "Nirvana")
pretty_print(results)
artist_id = results["artists"][1]["id"]
print "\nARTIST:"
pretty_print(results["artists"][1])
artist_data = query_site(ARTIST_URL, query_type["releases"], artist_id)
releases = artist_data["releases"]
print "\nONE RELEASE:"
pretty_print(releases[0], indent=2)
release_titles = [r["title"] for r in releases]
print "\nALL TITLES:"
for t in release_titles:
print t
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>feat: Add scripts to process json files
Reads a json files, process it, and apply some functions to get
data<commit_after># To experiment with this code freely you will have to run this code locally.
# Take a look at the main() function for an example of how to use the code.
# We have provided example json output in the other code editor tabs for you to
# look at, but you will not be able to run any queries through our UI.
import json
import requests
BASE_URL = "http://musicbrainz.org/ws/2/"
ARTIST_URL = BASE_URL + "artist/"
# query parameters are given to the requests.get function as a dictionary; this
# variable contains some starter parameters.
query_type = { "simple": {},
"atr": {"inc": "aliases+tags+ratings"},
"aliases": {"inc": "aliases"},
"releases": {"inc": "releases"}}
def query_site(url, params, uid="", fmt="json"):
# This is the main function for making queries to the musicbrainz API.
# A json document should be returned by the query.
params["fmt"] = fmt
r = requests.get(url + uid, params=params)
print "requesting", r.url
if r.status_code == requests.codes.ok:
return r.json()
else:
r.raise_for_status()
def query_by_name(url, params, name):
# This adds an artist name to the query parameters before making
# an API call to the function above.
params["query"] = "artist:" + name
return query_site(url, params)
def pretty_print(data, indent=4):
# After we get our output, we can format it to be more readable
# by using this function.
if type(data) == dict:
print json.dumps(data, indent=indent, sort_keys=True)
else:
print data
def main():
'''
Modify the function calls and indexing below to answer the questions on
the next quiz. HINT: Note how the output we get from the site is a
multi-level JSON document, so try making print statements to step through
the structure one level at a time or copy the output to a separate output
file.
'''
results = query_by_name(ARTIST_URL, query_type["simple"], "Nirvana")
pretty_print(results)
artist_id = results["artists"][1]["id"]
print "\nARTIST:"
pretty_print(results["artists"][1])
artist_data = query_site(ARTIST_URL, query_type["releases"], artist_id)
releases = artist_data["releases"]
print "\nONE RELEASE:"
pretty_print(releases[0], indent=2)
release_titles = [r["title"] for r in releases]
print "\nALL TITLES:"
for t in release_titles:
print t
if __name__ == '__main__':
main()
|
|
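A small follow-on sketch reusing the helpers defined above (query_by_name, query_site, pretty_print): fetch the aliases of the first artist returned by a search. Only fields the script already relies on ("artists" and each artist's "id") are assumed; the artist name is arbitrary.
results = query_by_name(ARTIST_URL, query_type["simple"], "Queen")
artist_id = results["artists"][0]["id"]
aliases = query_site(ARTIST_URL, query_type["aliases"], artist_id)
pretty_print(aliases, indent=2)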
5a46337647436b1f39562d4f5664a7833b87c269
|
__init__.py
|
__init__.py
|
import requests
# Yummly API: https://developer.yummly.com
# API URLs
URL_BASE = 'http://api.yummly.com/v1'
URL_GET = URL_BASE + '/api/recipe/'
URL_SEARCH = URL_BASE + '/api/recipes'
# API auth properties which should be set externally
api_id = None
api_key = None
# basic request config options
# @note: have found that Yummly's API "hangs" so it might be a good idea to have some reasonable timeout and handle appropriately
timeout = 5.0
class YummlyError( Exception ):
'''Exception class for Yummly errors'''
pass
### Methods for API get recipe and search recipes
def recipe( recipe_id ):
url = URL_GET + recipe_id
response = _request( url )
result = _extract_response( response )
return result
def search( q, limit=40, offset=0 ):
'''
Prepares yummly search API request
:param q: search string
:param limit: max results
:param offset: pagination offset in # of records (e.g. offset=5 means skip first 5 results)
'''
url = URL_SEARCH
params = {
'q': q,
'maxResult': limit,
'start': offset
}
response = _request( url, params=params )
results = _extract_response( response )
return results
### Helper functions
def handle_errors( fn ):
'''Decorator for handling Yummly errors'''
def handle( *args, **kargs ):
response = fn( *args, **kargs )
status = response.status_code
if status != 200:
if status == 409:
raise YummlyError( 'API id and/or key are invalid or API rate limit exceeded' )
else:
                # error occurred which may not be documented
response.raise_for_status()
return response
return handle
@handle_errors
def _request( url, params=None ):
'''
Generic yummly request which attaches meta info (e.g. auth)
:param url: URL of endpoint
:param params: GET params of request
'''
# auth headers
headers = {
'X-Yummly-App-ID': api_id,
'X-Yummly-App-Key': api_key,
}
response = requests.get( url, params=params, headers=headers, timeout=timeout )
return response
def _extract_response( response ):
    '''Extract data from api response'''
return response.json()
|
Add yummly.recipe and yummly.search functions.
|
Add yummly.recipe and yummly.search functions.
|
Python
|
mit
|
dgilland/yummly.py
|
Add yummly.recipe and yummly.search functions.
|
import requests
# Yummly API: https://developer.yummly.com
# API URLs
URL_BASE = 'http://api.yummly.com/v1'
URL_GET = URL_BASE + '/api/recipe/'
URL_SEARCH = URL_BASE + '/api/recipes'
# API auth properties which should be set externally
api_id = None
api_key = None
# basic request config options
# @note: have found that Yummly's API "hangs" so it might be a good idea to have some reasonable timeout and handle appropriately
timeout = 5.0
class YummlyError( Exception ):
'''Exception class for Yummly errors'''
pass
### Methods for API get recipe and search recipes
def recipe( recipe_id ):
url = URL_GET + recipe_id
response = _request( url )
result = _extract_response( response )
return result
def search( q, limit=40, offset=0 ):
'''
Prepares yummly search API request
:param q: search string
:param limit: max results
:param offset: pagination offset in # of records (e.g. offset=5 means skip first 5 results)
'''
url = URL_SEARCH
params = {
'q': q,
'maxResult': limit,
'start': offset
}
response = _request( url, params=params )
results = _extract_response( response )
return results
### Helper functions
def handle_errors( fn ):
'''Decorator for handling Yummly errors'''
def handle( *args, **kargs ):
response = fn( *args, **kargs )
status = response.status_code
if status != 200:
if status == 409:
raise YummlyError( 'API id and/or key are invalid or API rate limit exceeded' )
else:
                # error occurred which may not be documented
response.raise_for_status()
return response
return handle
@handle_errors
def _request( url, params=None ):
'''
Generic yummly request which attaches meta info (e.g. auth)
:param url: URL of endpoint
:param params: GET params of request
'''
# auth headers
headers = {
'X-Yummly-App-ID': api_id,
'X-Yummly-App-Key': api_key,
}
response = requests.get( url, params=params, headers=headers, timeout=timeout )
return response
def _extract_response( response ):
    '''Extract data from api response'''
return response.json()
|
<commit_before><commit_msg>Add yummly.recipe and yummly.search functions.<commit_after>
|
import requests
# Yummly API: https://developer.yummly.com
# API URLs
URL_BASE = 'http://api.yummly.com/v1'
URL_GET = URL_BASE + '/api/recipe/'
URL_SEARCH = URL_BASE + '/api/recipes'
# API auth properties which should be set externally
api_id = None
api_key = None
# basic request config options
# @note: have found that Yummly's API "hangs" so it might be a good idea to have some reasonable timeout and handle appropriately
timeout = 5.0
class YummlyError( Exception ):
'''Exception class for Yummly errors'''
pass
### Methods for API get recipe and search recipes
def recipe( recipe_id ):
url = URL_GET + recipe_id
response = _request( url )
result = _extract_response( response )
return result
def search( q, limit=40, offset=0 ):
'''
Prepares yummly search API request
:param q: search string
:param limit: max results
:param offset: pagination offset in # of records (e.g. offset=5 means skip first 5 results)
'''
url = URL_SEARCH
params = {
'q': q,
'maxResult': limit,
'start': offset
}
response = _request( url, params=params )
results = _extract_response( response )
return results
### Helper functions
def handle_errors( fn ):
'''Decorator for handling Yummly errors'''
def handle( *args, **kargs ):
response = fn( *args, **kargs )
status = response.status_code
if status != 200:
if status == 409:
raise YummlyError( 'API id and/or key are invalid or API rate limit exceeded' )
else:
                # error occurred which may not be documented
response.raise_for_status()
return response
return handle
@handle_errors
def _request( url, params=None ):
'''
Generic yummly request which attaches meta info (e.g. auth)
:param url: URL of endpoint
:param params: GET params of request
'''
# auth headers
headers = {
'X-Yummly-App-ID': api_id,
'X-Yummly-App-Key': api_key,
}
response = requests.get( url, params=params, headers=headers, timeout=timeout )
return response
def _extract_response( response ):
    '''Extract data from api response'''
return response.json()
|
Add yummly.recipe and yummly.search functions.
import requests
# Yummly API: https://developer.yummly.com
# API URLs
URL_BASE = 'http://api.yummly.com/v1'
URL_GET = URL_BASE + '/api/recipe/'
URL_SEARCH = URL_BASE + '/api/recipes'
# API auth properties which should be set externally
api_id = None
api_key = None
# basic request config options
# @note: have found that Yummly's API "hangs" so it might be a good idea to have some reasonable timeout and handle appropriately
timeout = 5.0
class YummlyError( Exception ):
'''Exception class for Yummly errors'''
pass
### Methods for API get recipe and search recipes
def recipe( recipe_id ):
url = URL_GET + recipe_id
response = _request( url )
result = _extract_response( response )
return result
def search( q, limit=40, offset=0 ):
'''
Prepares yummly search API request
:param q: search string
:param limit: max results
:param offset: pagination offset in # of records (e.g. offset=5 means skip first 5 results)
'''
url = URL_SEARCH
params = {
'q': q,
'maxResult': limit,
'start': offset
}
response = _request( url, params=params )
results = _extract_response( response )
return results
### Helper functions
def handle_errors( fn ):
'''Decorator for handling Yummly errors'''
def handle( *args, **kargs ):
response = fn( *args, **kargs )
status = response.status_code
if status != 200:
if status == 409:
raise YummlyError( 'API id and/or key are invalid or API rate limit exceeded' )
else:
                # error occurred which may not be documented
response.raise_for_status()
return response
return handle
@handle_errors
def _request( url, params=None ):
'''
Generic yummly request which attaches meta info (e.g. auth)
:param url: URL of endpoint
:param params: GET params of request
'''
# auth headers
headers = {
'X-Yummly-App-ID': api_id,
'X-Yummly-App-Key': api_key,
}
response = requests.get( url, params=params, headers=headers, timeout=timeout )
return response
def _extract_response( response ):
    '''Extract data from api response'''
return response.json()
|
<commit_before><commit_msg>Add yummly.recipe and yummly.search functions.<commit_after>
import requests
# Yummly API: https://developer.yummly.com
# API URLs
URL_BASE = 'http://api.yummly.com/v1'
URL_GET = URL_BASE + '/api/recipe/'
URL_SEARCH = URL_BASE + '/api/recipes'
# API auth properties which should be set externally
api_id = None
api_key = None
# basic request config options
# @note: have found that Yummly's API "hangs" so it might be a good idea to have some reasonable timeout and handle appropriately
timeout = 5.0
class YummlyError( Exception ):
'''Exception class for Yummly errors'''
pass
### Methods for API get recipe and search recipes
def recipe( recipe_id ):
url = URL_GET + recipe_id
response = _request( url )
result = _extract_response( response )
return result
def search( q, limit=40, offset=0 ):
'''
Prepares yummly search API request
:param q: search string
:param limit: max results
:param offset: pagination offset in # of records (e.g. offset=5 means skip first 5 results)
'''
url = URL_SEARCH
params = {
'q': q,
'maxResult': limit,
'start': offset
}
response = _request( url, params=params )
results = _extract_response( response )
return results
### Helper functions
def handle_errors( fn ):
'''Decorator for handling Yummly errors'''
def handle( *args, **kargs ):
response = fn( *args, **kargs )
status = response.status_code
if status != 200:
if status == 409:
raise YummlyError( 'API id and/or key are invalid or API rate limit exceeded' )
else:
                # error occurred which may not be documented
response.raise_for_status()
return response
return handle
@handle_errors
def _request( url, params=None ):
'''
Generic yummly request which attaches meta info (e.g. auth)
:param url: URL of endpoint
:param params: GET params of request
'''
# auth headers
headers = {
'X-Yummly-App-ID': api_id,
'X-Yummly-App-Key': api_key,
}
response = requests.get( url, params=params, headers=headers, timeout=timeout )
return response
def _extract_response( response ):
    '''Extract data from api response'''
return response.json()
|
|
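A minimal usage sketch for the module above, assuming it is importable as yummly and that real credentials replace the placeholders; the 'matches'/'id' fields used for the follow-up lookup are assumptions based on Yummly's v1 documentation.
import yummly

yummly.api_id = 'YOUR_APP_ID'    # placeholder, not a real id
yummly.api_key = 'YOUR_APP_KEY'  # placeholder, not a real key

results = yummly.search('bacon', limit=5)
print results  # raw search JSON as returned by the API

# Fetch full details of the first hit ('matches'/'id' assumed from the v1 docs).
first = yummly.recipe(results['matches'][0]['id'])
print first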
83dfc8b5f2e614a1b16b23c3d33f39935709d6fa
|
testing/ufl_fix.py
|
testing/ufl_fix.py
|
from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
V = FunctionSpace(mesh, "RT", 1)
f1 = Function(V)
f2 = Function(V)
# Apply a strong condition at the top of the square domain
bc1 = DirichletBC(V, Constant((0.0, 10.0)), 4)
bc1.apply(f1)
bc2 = DirichletBC(V, Expression(("0.0", "10.0")), 4)
bc2.apply(f2)
print(f1.dat.data)
print(f2.dat.data)
|
Add Constant vs Expression testing
|
Add Constant vs Expression testing
|
Python
|
mit
|
thomasgibson/firedrake-hybridization
|
Add Constant vs Expression testing
|
from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
V = FunctionSpace(mesh, "RT", 1)
f1 = Function(V)
f2 = Function(V)
# Apply a strong condition at the top of the square domain
bc1 = DirichletBC(V, Constant((0.0, 10.0)), 4)
bc1.apply(f1)
bc2 = DirichletBC(V, Expression(("0.0", "10.0")), 4)
bc2.apply(f2)
print(f1.dat.data)
print(f2.dat.data)
|
<commit_before><commit_msg>Add Constant vs Expression testing<commit_after>
|
from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
V = FunctionSpace(mesh, "RT", 1)
f1 = Function(V)
f2 = Function(V)
# Apply a strong condition at the top of the square domain
bc1 = DirichletBC(V, Constant((0.0, 10.0)), 4)
bc1.apply(f1)
bc2 = DirichletBC(V, Expression(("0.0", "10.0")), 4)
bc2.apply(f2)
print(f1.dat.data)
print(f2.dat.data)
|
Add Constant vs Expression testingfrom __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
V = FunctionSpace(mesh, "RT", 1)
f1 = Function(V)
f2 = Function(V)
# Apply a strong condition at the top of the square domain
bc1 = DirichletBC(V, Constant((0.0, 10.0)), 4)
bc1.apply(f1)
bc2 = DirichletBC(V, Expression(("0.0", "10.0")), 4)
bc2.apply(f2)
print(f1.dat.data)
print(f2.dat.data)
|
<commit_before><commit_msg>Add Constant vs Expression testing<commit_after>from __future__ import absolute_import, print_function, division
from firedrake import *
mesh = UnitSquareMesh(2, 2)
V = FunctionSpace(mesh, "RT", 1)
f1 = Function(V)
f2 = Function(V)
# Apply a strong condition at the top of the square domain
bc1 = DirichletBC(V, Constant((0.0, 10.0)), 4)
bc1.apply(f1)
bc2 = DirichletBC(V, Expression(("0.0", "10.0")), 4)
bc2.apply(f2)
print(f1.dat.data)
print(f2.dat.data)
|
|
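To compare the two results numerically rather than by inspecting the raw dof arrays, firedrake's errornorm helper can be used; a one-line sketch, assuming that helper is available in the firedrake version at hand:
print(errornorm(f1, f2))  # L2 distance between the Constant- and Expression-based results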
c0b97e072664440dcdd79a98a69536834a1e172b
|
rabbitmq_status.py
|
rabbitmq_status.py
|
#!/usr/bin/env python
import requests
import sys
OVERVIEW_URL = "http://localhost:55672/api/overview"
NODES_URL = "http://localhost:55672/api/nodes"
USERNAME = 'guest'
PASSWORD = 'guest'
OVERVIEW_METRICS = {"queue_totals": ("messages",
"messages_ready",
"messages_unacknowledged"),
"message_stats": ("get",
"ack",
"deliver_get",
"deliver",
"publish")}
NODES_METRICS = ("proc_used",
"proc_total",
"fd_used",
"fd_total",
"sockets_used",
"sockets_total",
"mem_used",
"mem_limit",
"mem_alarm")
def main():
metrics = {}
try:
r = requests.get(OVERVIEW_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for k in OVERVIEW_METRICS.keys():
if k in r.json():
for i in OVERVIEW_METRICS[k]:
if i in r.json()[k]:
metrics[i] = r.json()[k][i]
else:
error()
try:
r = requests.get(NODES_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for i in NODES_METRICS:
if i in r.json()[0]:
metrics[i] = r.json()[0][i]
else:
error()
print "status ok"
for k in metrics.keys():
print k, metrics[k]
def error():
print "status error"
sys.exit(1)
if __name__ == "__main__":
main()
|
Return details about RabbitMQ server
|
Return details about RabbitMQ server
|
Python
|
apache-2.0
|
xeregin/rpc-openstack,claco/rpc-openstack,hughsaunders/rpc-openstack,busterswt/rpc-openstack,xeregin/rpc-openstack,claco/rpc-openstack,git-harry/rpc-openstack,byronmccollum/rpc-openstack,darrenchan/rpc-openstack,mattt416/rpc-openstack,sigmavirus24/rpc-openstack,mattt416/rpc-openstack,byronmccollum/rpc-openstack,shannonmitchell/rpc-openstack,cfarquhar/rpc-maas,cfarquhar/rpc-openstack,andymcc/rpc-openstack,BjoernT/rpc-openstack,miguelgrinberg/rpc-openstack,cloudnull/rpc-openstack,jacobwagner/rpc-openstack,briancurtin/rpc-maas,robb-romans/rpc-openstack,mancdaz/rpc-openstack,darrenchan/rpc-openstack,major/rpc-openstack,jpmontez/rpc-openstack,briancurtin/rpc-maas,sigmavirus24/rpc-openstack,jpmontez/rpc-openstack,miguelgrinberg/rpc-openstack,BjoernT/rpc-openstack,jpmontez/rpc-openstack,prometheanfire/rpc-openstack,cfarquhar/rpc-maas,xeregin/rpc-openstack,rcbops/rpc-openstack,busterswt/rpc-openstack,mancdaz/rpc-openstack,cloudnull/rpc-maas,hughsaunders/rpc-openstack,galstrom21/rpc-openstack,nrb/rpc-openstack,jacobwagner/rpc-openstack,sigmavirus24/rpc-openstack,andymcc/rpc-openstack,prometheanfire/rpc-openstack,npawelek/rpc-maas,git-harry/rpc-openstack,miguelgrinberg/rpc-openstack,cloudnull/rpc-maas,briancurtin/rpc-maas,nrb/rpc-openstack,busterswt/rpc-openstack,cfarquhar/rpc-openstack,darrenchan/rpc-openstack,xeregin/rpc-openstack,stevelle/rpc-openstack,npawelek/rpc-maas,nrb/rpc-openstack,cfarquhar/rpc-maas,stevelle/rpc-openstack,robb-romans/rpc-openstack,mattt416/rpc-openstack,byronmccollum/rpc-openstack,darrenchan/rpc-openstack,stevelle/rpc-openstack,cloudnull/rpc-maas,sigmavirus24/rpc-openstack,major/rpc-openstack,claco/rpc-openstack,npawelek/rpc-maas,galstrom21/rpc-openstack,cloudnull/rpc-openstack,rcbops/rpc-openstack,andymcc/rpc-openstack,shannonmitchell/rpc-openstack
|
Return details about RabbitMQ server
|
#!/usr/bin/env python
import requests
import sys
OVERVIEW_URL = "http://localhost:55672/api/overview"
NODES_URL = "http://localhost:55672/api/nodes"
USERNAME = 'guest'
PASSWORD = 'guest'
OVERVIEW_METRICS = {"queue_totals": ("messages",
"messages_ready",
"messages_unacknowledged"),
"message_stats": ("get",
"ack",
"deliver_get",
"deliver",
"publish")}
NODES_METRICS = ("proc_used",
"proc_total",
"fd_used",
"fd_total",
"sockets_used",
"sockets_total",
"mem_used",
"mem_limit",
"mem_alarm")
def main():
metrics = {}
try:
r = requests.get(OVERVIEW_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for k in OVERVIEW_METRICS.keys():
if k in r.json():
for i in OVERVIEW_METRICS[k]:
if i in r.json()[k]:
metrics[i] = r.json()[k][i]
else:
error()
try:
r = requests.get(NODES_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for i in NODES_METRICS:
if i in r.json()[0]:
metrics[i] = r.json()[0][i]
else:
error()
print "status ok"
for k in metrics.keys():
print k, metrics[k]
def error():
print "status error"
sys.exit(1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Return details about RabbitMQ server<commit_after>
|
#!/usr/bin/env python
import requests
import sys
OVERVIEW_URL = "http://localhost:55672/api/overview"
NODES_URL = "http://localhost:55672/api/nodes"
USERNAME = 'guest'
PASSWORD = 'guest'
OVERVIEW_METRICS = {"queue_totals": ("messages",
"messages_ready",
"messages_unacknowledged"),
"message_stats": ("get",
"ack",
"deliver_get",
"deliver",
"publish")}
NODES_METRICS = ("proc_used",
"proc_total",
"fd_used",
"fd_total",
"sockets_used",
"sockets_total",
"mem_used",
"mem_limit",
"mem_alarm")
def main():
metrics = {}
try:
r = requests.get(OVERVIEW_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for k in OVERVIEW_METRICS.keys():
if k in r.json():
for i in OVERVIEW_METRICS[k]:
if i in r.json()[k]:
metrics[i] = r.json()[k][i]
else:
error()
try:
r = requests.get(NODES_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for i in NODES_METRICS:
if i in r.json()[0]:
metrics[i] = r.json()[0][i]
else:
error()
print "status ok"
for k in metrics.keys():
print k, metrics[k]
def error():
print "status error"
sys.exit(1)
if __name__ == "__main__":
main()
|
Return details about RabbitMQ server#!/usr/bin/env python
import requests
import sys
OVERVIEW_URL = "http://localhost:55672/api/overview"
NODES_URL = "http://localhost:55672/api/nodes"
USERNAME = 'guest'
PASSWORD = 'guest'
OVERVIEW_METRICS = {"queue_totals": ("messages",
"messages_ready",
"messages_unacknowledged"),
"message_stats": ("get",
"ack",
"deliver_get",
"deliver",
"publish")}
NODES_METRICS = ("proc_used",
"proc_total",
"fd_used",
"fd_total",
"sockets_used",
"sockets_total",
"mem_used",
"mem_limit",
"mem_alarm")
def main():
metrics = {}
try:
r = requests.get(OVERVIEW_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for k in OVERVIEW_METRICS.keys():
if k in r.json():
for i in OVERVIEW_METRICS[k]:
if i in r.json()[k]:
metrics[i] = r.json()[k][i]
else:
error()
try:
r = requests.get(NODES_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for i in NODES_METRICS:
if i in r.json()[0]:
metrics[i] = r.json()[0][i]
else:
error()
print "status ok"
for k in metrics.keys():
print k, metrics[k]
def error():
print "status error"
sys.exit(1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Return details about RabbitMQ server<commit_after>#!/usr/bin/env python
import requests
import sys
OVERVIEW_URL = "http://localhost:55672/api/overview"
NODES_URL = "http://localhost:55672/api/nodes"
USERNAME = 'guest'
PASSWORD = 'guest'
OVERVIEW_METRICS = {"queue_totals": ("messages",
"messages_ready",
"messages_unacknowledged"),
"message_stats": ("get",
"ack",
"deliver_get",
"deliver",
"publish")}
NODES_METRICS = ("proc_used",
"proc_total",
"fd_used",
"fd_total",
"sockets_used",
"sockets_total",
"mem_used",
"mem_limit",
"mem_alarm")
def main():
metrics = {}
try:
r = requests.get(OVERVIEW_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for k in OVERVIEW_METRICS.keys():
if k in r.json():
for i in OVERVIEW_METRICS[k]:
if i in r.json()[k]:
metrics[i] = r.json()[k][i]
else:
error()
try:
r = requests.get(NODES_URL, auth=(USERNAME, PASSWORD))
except requests.exceptions.ConnectionError:
error()
if r.status_code == 200:
for i in NODES_METRICS:
if i in r.json()[0]:
metrics[i] = r.json()[0][i]
else:
error()
print "status ok"
for k in metrics.keys():
print k, metrics[k]
def error():
print "status error"
sys.exit(1)
if __name__ == "__main__":
main()
|
|
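A stripped-down sketch of the same overview call for quick manual checks, using the credentials and port assumed by the script above; note that from RabbitMQ 3.0 onwards the management API listens on 15672 rather than 55672, so the URLs may need adjusting.
import requests

r = requests.get('http://localhost:55672/api/overview',
                 auth=('guest', 'guest'), timeout=5.0)
r.raise_for_status()
print r.json()['queue_totals']['messages']  # total messages across all queues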
a707998d54d584cec90b17b4d34c9401ba802844
|
mistral/db/sqlalchemy/migration/alembic_migrations/versions/011_add_workflow_id_for_execution.py
|
mistral/db/sqlalchemy/migration/alembic_migrations/versions/011_add_workflow_id_for_execution.py
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add workflow id for execution
Revision ID: 011
Revises: 010
Create Date: 2016-02-02 22:29:34.672735
"""
# revision identifiers, used by Alembic.
revision = '011'
down_revision = '010'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'executions_v2',
sa.Column('workflow_id', sa.String(length=80), nullable=True)
)
|
Add workflow id column to executions_v2 table
|
Add workflow id column to executions_v2 table
Partially implements: blueprint use-workflow-id-in-rest-api
Change-Id: Iad7f1024576b31307016c0f1b4057c794bf4e30b
|
Python
|
apache-2.0
|
openstack/mistral,StackStorm/mistral,StackStorm/mistral,openstack/mistral
|
Add workflow id column to executions_v2 table
Partially implements: blueprint use-workflow-id-in-rest-api
Change-Id: Iad7f1024576b31307016c0f1b4057c794bf4e30b
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add workflow id for execution
Revision ID: 011
Revises: 010
Create Date: 2016-02-02 22:29:34.672735
"""
# revision identifiers, used by Alembic.
revision = '011'
down_revision = '010'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'executions_v2',
sa.Column('workflow_id', sa.String(length=80), nullable=True)
)
|
<commit_before><commit_msg>Add workflow id column to executions_v2 table
Partially implements: blueprint use-workflow-id-in-rest-api
Change-Id: Iad7f1024576b31307016c0f1b4057c794bf4e30b<commit_after>
|
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add workflow id for execution
Revision ID: 011
Revises: 010
Create Date: 2016-02-02 22:29:34.672735
"""
# revision identifiers, used by Alembic.
revision = '011'
down_revision = '010'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'executions_v2',
sa.Column('workflow_id', sa.String(length=80), nullable=True)
)
|
Add workflow id column to executions_v2 table
Partially implements: blueprint use-workflow-id-in-rest-api
Change-Id: Iad7f1024576b31307016c0f1b4057c794bf4e30b# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add workflow id for execution
Revision ID: 011
Revises: 010
Create Date: 2016-02-02 22:29:34.672735
"""
# revision identifiers, used by Alembic.
revision = '011'
down_revision = '010'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'executions_v2',
sa.Column('workflow_id', sa.String(length=80), nullable=True)
)
|
<commit_before><commit_msg>Add workflow id column to executions_v2 table
Partially implements: blueprint use-workflow-id-in-rest-api
Change-Id: Iad7f1024576b31307016c0f1b4057c794bf4e30b<commit_after># Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add workflow id for execution
Revision ID: 011
Revises: 010
Create Date: 2016-02-02 22:29:34.672735
"""
# revision identifiers, used by Alembic.
revision = '011'
down_revision = '010'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'executions_v2',
sa.Column('workflow_id', sa.String(length=80), nullable=True)
)
|
|
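The migration above defines only upgrade(); a hypothetical downgrade counterpart, sketched here purely for illustration and reusing the op import from the same file, would follow the usual Alembic pattern:
def downgrade():
    # Drop the column added in upgrade(); not part of the actual commit.
    op.drop_column('executions_v2', 'workflow_id')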
1f0660579f35b1913bb4dc5f973532ec47d4de26
|
DilipadTopicModelling/experiment_number_of_topics.py
|
DilipadTopicModelling/experiment_number_of_topics.py
|
import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
                           alpha=(50.0/nTopics), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
#files = glob.glob('/home/jvdzwaan/data/tmp/dilipad/gov_opp/*')
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
corpus_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/corpus'
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
#out_dir = '/home/jvdzwaan/data/tmp/dilipad/test_perplexity/'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
Add script to run a series of experiments
|
Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.
|
import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
                           alpha=(50.0/nTopics), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
#files = glob.glob('/home/jvdzwaan/data/tmp/dilipad/gov_opp/*')
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
corpus_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/corpus'
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
#out_dir = '/home/jvdzwaan/data/tmp/dilipad/test_perplexity/'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
<commit_before><commit_msg>Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.<commit_after>
|
import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
                           alpha=(50.0/nTopics), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
#files = glob.glob('/home/jvdzwaan/data/tmp/dilipad/gov_opp/*')
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
corpus_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/corpus'
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
#out_dir = '/home/jvdzwaan/data/tmp/dilipad/test_perplexity/'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
                           alpha=(50.0/nTopics), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
#files = glob.glob('/home/jvdzwaan/data/tmp/dilipad/gov_opp/*')
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
corpus_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/corpus'
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
#out_dir = '/home/jvdzwaan/data/tmp/dilipad/test_perplexity/'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
<commit_before><commit_msg>Add script to run a series of experiments
The script does Gibbs sampling for different values of nTopics.<commit_after>import logging
import glob
from multiprocessing import Process
from CPTCorpus import CPTCorpus
from CPT_Gibbs import GibbsSampler
def run_sampler(corpus, nTopics, nIter, beta, out_dir):
sampler = GibbsSampler(corpus, nTopics=nTopics, nIter=nIter,
                           alpha=(50.0/nTopics), beta=beta, beta_o=beta,
out_dir=out_dir.format(nTopics))
sampler._initialize()
sampler.run()
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#logger.setLevel(logging.INFO)
#files = glob.glob('/home/jvdzwaan/data/tmp/dilipad/gov_opp/*')
files = glob.glob('/home/jvdzwaan/data/tmp/test/*')
corpus_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/corpus'
out_dir = '/home/jvdzwaan/data/tmp/generated/test_exp/{}'
#out_dir = '/home/jvdzwaan/data/tmp/dilipad/test_perplexity/'
corpus = CPTCorpus(files, testSplit=20)
corpus.filter_dictionaries(minFreq=5, removeTopTF=100, removeTopDF=100)
corpus.save_dictionaries(directory=out_dir.format(''))
corpus.save(out_dir.format('corpus.json'))
#corpus = CPTCorpus.CPTCorpus.load('{}corpus.json'.format(out_dir),
# topicDict='{}/topicDict.dict'.format(out_dir),
# opinionDict='{}/opinionDict.dict'.format(out_dir))
nIter = 200
beta = 0.02
nTopics = range(20, 201, 20)
logger.info('running Gibbs sampler for {} configurations'.format(len(nTopics)))
processes = [Process(target=run_sampler,
args=(corpus, n, nIter, beta, out_dir))
for n in nTopics]
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
|
|
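As an aside on the fan-out above, the following sketch (not part of the commit) runs the same sweep in small batches so a long nTopics range does not start every sampler at once; it reuses only the run_sampler signature and the multiprocessing.Process calls already present in the script.

# Sketch only, not part of the commit: the same sweep in batches of `batch`
# processes, reusing run_sampler and Process exactly as the script defines them.
def run_in_batches(corpus, nTopics_values, nIter, beta, out_dir, batch=3):
    for i in range(0, len(nTopics_values), batch):
        procs = [Process(target=run_sampler, args=(corpus, n, nIter, beta, out_dir))
                 for n in nTopics_values[i:i + batch]]
        for p in procs:
            p.start()
        for p in procs:
            p.join()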
0f3dbed232e73dfa63c219402fff5c74a0f107bc
|
django_graphene_utils/pager.py
|
django_graphene_utils/pager.py
|
import graphene
from graphene.utils.str_converters import to_camel_case
from django.utils import six
from django.utils.functional import cached_property
__all__ = ['Pager']
class BasePager(object):
def __init__(self, data, queryset, default_size=20):
# process data
self.qs = self._process_data(data or {}, queryset, default_size)
def _process_data(self, data, queryset, default_size):
# we may have to handle sorting fields
if self._sort_fields:
# check if we've some
sort_fields = data.get('sort', None)
if sort_fields:
# so order the queryset
queryset = queryset.order_by(*sort_fields)
        # we may have to get all items
if data.get('all', False):
# no need to paginate it
return queryset
# get the offset (0 by default)
offset = data.get('offset', 0)
# final queryset
return queryset[offset:offset + data.get('size', default_size)]
@classmethod
def to_input(cls, input_name, graphql_type=graphene.InputObjectType, enum_name=None):
# start with basic attributes
attrs = {
'offset': graphene.Int(),
'size': graphene.Int(),
'all': graphene.Boolean()
}
# we might have to support sorting
if cls._sort_fields:
# first ensure we have a name
if enum_name is None:
                # derive it from the input name
                enum_name = '%sSortField' % input_name
# then build the enum for this input
sort_enum = graphene.Enum(enum_name, list(cls._sort_fields.items()))
# and the field
attrs['sort'] = graphene.List(sort_enum)
# build the final type
return type(input_name, (graphql_type,), attrs)
class PagerMeta(type):
def __new__(mcs, name, bases, attrs):
# build the new class
new_method = super(PagerMeta, mcs).__new__
if bases == (BasePager,):
return new_method(mcs, name, bases, attrs)
# start with an empty list of fields
fields = {}
# loop on attributes
for key, field in list(attrs.items()):
# only attributes with an upper name and a string as value
# will be considered as sorting field
if key.isupper() and isinstance(field, str):
# remove it from attribute
attrs.pop(key)
# turn the key into camel case
key = to_camel_case(key.lower())
# push it into valid fields
fields[key] = field
fields['%s_Desc' % key] = '-%s' % field
# create the new class
new_cls = new_method(mcs, name, bases, attrs)
# then add fields
new_cls._sort_fields = fields
return new_cls
class Pager(six.with_metaclass(PagerMeta, BasePager)):
pass
|
Add a way to handle sorts & pagination
|
Add a way to handle sorts & pagination
|
Python
|
mit
|
amille44420/django-graphene-utils
|
Add a way to handle sorts & pagination
|
import graphene
from graphene.utils.str_converters import to_camel_case
from django.utils import six
from django.utils.functional import cached_property
__all__ = ['Pager']
class BasePager(object):
def __init__(self, data, queryset, default_size=20):
# process data
self.qs = self._process_data(data or {}, queryset, default_size)
def _process_data(self, data, queryset, default_size):
# we may have to handle sorting fields
if self._sort_fields:
# check if we've some
sort_fields = data.get('sort', None)
if sort_fields:
# so order the queryset
queryset = queryset.order_by(*sort_fields)
        # we may have to get all items
if data.get('all', False):
# no need to paginate it
return queryset
# get the offset (0 by default)
offset = data.get('offset', 0)
# final queryset
return queryset[offset:offset + data.get('size', default_size)]
@classmethod
def to_input(cls, input_name, graphql_type=graphene.InputObjectType, enum_name=None):
# start with basic attributes
attrs = {
'offset': graphene.Int(),
'size': graphene.Int(),
'all': graphene.Boolean()
}
# we might have to support sorting
if cls._sort_fields:
# first ensure we have a name
if enum_name is None:
                # derive it from the input name
                enum_name = '%sSortField' % input_name
# then build the enum for this input
sort_enum = graphene.Enum(enum_name, list(cls._sort_fields.items()))
# and the field
attrs['sort'] = graphene.List(sort_enum)
# build the final type
return type(input_name, (graphql_type,), attrs)
class PagerMeta(type):
def __new__(mcs, name, bases, attrs):
# build the new class
new_method = super(PagerMeta, mcs).__new__
if bases == (BasePager,):
return new_method(mcs, name, bases, attrs)
# start with an empty list of fields
fields = {}
# loop on attributes
for key, field in list(attrs.items()):
# only attributes with an upper name and a string as value
# will be considered as sorting field
if key.isupper() and isinstance(field, str):
# remove it from attribute
attrs.pop(key)
# turn the key into camel case
key = to_camel_case(key.lower())
# push it into valid fields
fields[key] = field
fields['%s_Desc' % key] = '-%s' % field
# create the new class
new_cls = new_method(mcs, name, bases, attrs)
# then add fields
new_cls._sort_fields = fields
return new_cls
class Pager(six.with_metaclass(PagerMeta, BasePager)):
pass
|
<commit_before><commit_msg>Add a way to handle sorts & pagination<commit_after>
|
import graphene
from graphene.utils.str_converters import to_camel_case
from django.utils import six
from django.utils.functional import cached_property
__all__ = ['Pager']
class BasePager(object):
def __init__(self, data, queryset, default_size=20):
# process data
self.qs = self._process_data(data or {}, queryset, default_size)
def _process_data(self, data, queryset, default_size):
# we may have to handle sorting fields
if self._sort_fields:
# check if we've some
sort_fields = data.get('sort', None)
if sort_fields:
# so order the queryset
queryset = queryset.order_by(*sort_fields)
        # we may have to get all items
if data.get('all', False):
# no need to paginate it
return queryset
# get the offset (0 by default)
offset = data.get('offset', 0)
# final queryset
return queryset[offset:offset + data.get('size', default_size)]
@classmethod
def to_input(cls, input_name, graphql_type=graphene.InputObjectType, enum_name=None):
# start with basic attributes
attrs = {
'offset': graphene.Int(),
'size': graphene.Int(),
'all': graphene.Boolean()
}
# we might have to support sorting
if cls._sort_fields:
# first ensure we have a name
if enum_name is None:
                # derive it from the input name
                enum_name = '%sSortField' % input_name
# then build the enum for this input
sort_enum = graphene.Enum(enum_name, list(cls._sort_fields.items()))
# and the field
attrs['sort'] = graphene.List(sort_enum)
# build the final type
return type(input_name, (graphql_type,), attrs)
class PagerMeta(type):
def __new__(mcs, name, bases, attrs):
# build the new class
new_method = super(PagerMeta, mcs).__new__
if bases == (BasePager,):
return new_method(mcs, name, bases, attrs)
# start with an empty list of fields
fields = {}
# loop on attributes
for key, field in list(attrs.items()):
# only attributes with an upper name and a string as value
# will be considered as sorting field
if key.isupper() and isinstance(field, str):
# remove it from attribute
attrs.pop(key)
# turn the key into camel case
key = to_camel_case(key.lower())
# push it into valid fields
fields[key] = field
fields['%s_Desc' % key] = '-%s' % field
# create the new class
new_cls = new_method(mcs, name, bases, attrs)
# then add fields
new_cls._sort_fields = fields
return new_cls
class Pager(six.with_metaclass(PagerMeta, BasePager)):
pass
|
Add a way to handle sorts & paginationimport graphene
from graphene.utils.str_converters import to_camel_case
from django.utils import six
from django.utils.functional import cached_property
__all__ = ['Pager']
class BasePager(object):
def __init__(self, data, queryset, default_size=20):
# process data
self.qs = self._process_data(data or {}, queryset, default_size)
def _process_data(self, data, queryset, default_size):
# we may have to handle sorting fields
if self._sort_fields:
# check if we've some
sort_fields = data.get('sort', None)
if sort_fields:
# so order the queryset
queryset = queryset.order_by(*sort_fields)
        # we may have to get all items
if data.get('all', False):
# no need to paginate it
return queryset
# get the offset (0 by default)
offset = data.get('offset', 0)
# final queryset
return queryset[offset:offset + data.get('size', default_size)]
@classmethod
def to_input(cls, input_name, graphql_type=graphene.InputObjectType, enum_name=None):
# start with basic attributes
attrs = {
'offset': graphene.Int(),
'size': graphene.Int(),
'all': graphene.Boolean()
}
# we might have to support sorting
if cls._sort_fields:
# first ensure we have a name
if enum_name is None:
                # derive it from the input name
                enum_name = '%sSortField' % input_name
# then build the enum for this input
sort_enum = graphene.Enum(enum_name, list(cls._sort_fields.items()))
# and the field
attrs['sort'] = graphene.List(sort_enum)
# build the final type
return type(input_name, (graphql_type,), attrs)
class PagerMeta(type):
def __new__(mcs, name, bases, attrs):
# build the new class
new_method = super(PagerMeta, mcs).__new__
if bases == (BasePager,):
return new_method(mcs, name, bases, attrs)
# start with an empty list of fields
fields = {}
# loop on attributes
for key, field in list(attrs.items()):
# only attributes with an upper name and a string as value
# will be considered as sorting field
if key.isupper() and isinstance(field, str):
# remove it from attribute
attrs.pop(key)
# turn the key into camel case
key = to_camel_case(key.lower())
# push it into valid fields
fields[key] = field
fields['%s_Desc' % key] = '-%s' % field
# create the new class
new_cls = new_method(mcs, name, bases, attrs)
# then add fields
new_cls._sort_fields = fields
return new_cls
class Pager(six.with_metaclass(PagerMeta, BasePager)):
pass
|
<commit_before><commit_msg>Add a way to handle sorts & pagination<commit_after>import graphene
from graphene.utils.str_converters import to_camel_case
from django.utils import six
from django.utils.functional import cached_property
__all__ = ['Pager']
class BasePager(object):
def __init__(self, data, queryset, default_size=20):
# process data
self.qs = self._process_data(data or {}, queryset, default_size)
def _process_data(self, data, queryset, default_size):
# we may have to handle sorting fields
if self._sort_fields:
# check if we've some
sort_fields = data.get('sort', None)
if sort_fields:
# so order the queryset
queryset = queryset.order_by(*sort_fields)
        # we may have to get all items
if data.get('all', False):
# no need to paginate it
return queryset
# get the offset (0 by default)
offset = data.get('offset', 0)
# final queryset
return queryset[offset:offset + data.get('size', default_size)]
@classmethod
def to_input(cls, input_name, graphql_type=graphene.InputObjectType, enum_name=None):
# start with basic attributes
attrs = {
'offset': graphene.Int(),
'size': graphene.Int(),
'all': graphene.Boolean()
}
# we might have to support sorting
if cls._sort_fields:
# first ensure we have a name
if enum_name is None:
                # derive it from the input name
                enum_name = '%sSortField' % input_name
# then build the enum for this input
sort_enum = graphene.Enum(enum_name, list(cls._sort_fields.items()))
# and the field
attrs['sort'] = graphene.List(sort_enum)
# build the final type
return type(input_name, (graphql_type,), attrs)
class PagerMeta(type):
def __new__(mcs, name, bases, attrs):
# build the new class
new_method = super(PagerMeta, mcs).__new__
if bases == (BasePager,):
return new_method(mcs, name, bases, attrs)
# start with an empty list of fields
fields = {}
# loop on attributes
for key, field in list(attrs.items()):
# only attributes with an upper name and a string as value
# will be considered as sorting field
if key.isupper() and isinstance(field, str):
# remove it from attribute
attrs.pop(key)
# turn the key into camel case
key = to_camel_case(key.lower())
# push it into valid fields
fields[key] = field
fields['%s_Desc' % key] = '-%s' % field
# create the new class
new_cls = new_method(mcs, name, bases, attrs)
# then add fields
new_cls._sort_fields = fields
return new_cls
class Pager(six.with_metaclass(PagerMeta, BasePager)):
pass
|
|
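To make the Pager API above concrete, here is a hypothetical usage sketch. The Article model, the resolver signature and the dict-style pager argument are assumptions for illustration; only Pager, its uppercase sort-field attributes and to_input come from the commit itself.

# Hypothetical usage sketch; "Article" and the resolver wiring are placeholders.
class ArticlePager(Pager):
    TITLE = 'title'            # exposed as sort choices "title" / "title_Desc"
    CREATED_AT = 'created_at'  # exposed as "createdAt" / "createdAt_Desc"

# Build the GraphQL input type once, e.g. to declare a query argument.
ArticlePagerInput = ArticlePager.to_input('ArticlePagerInput')

def resolve_articles(root, info, pager=None):
    # Order, offset and slice the queryset from the client-supplied input.
    return ArticlePager(pager, Article.objects.all(), default_size=10).qs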
484b72439d1a6005eeb9fbab8a77bfa55e9d3228
|
tests/test_tree.py
|
tests/test_tree.py
|
import milk.supervised.tree
import numpy as np
import tests.data.german.german
def test_tree():
data = tests.data.german.german.load()
features = data['data']
labels = data['label']
C = milk.supervised.tree.tree_classifier()
C.train(features,labels)
assert ((np.array([C.apply(f) for f in features]) > .5) == labels).mean() > .5
|
Test tree for >50% accuracy
|
Test tree for >50% accuracy
|
Python
|
mit
|
pombredanne/milk,pombredanne/milk,pombredanne/milk,luispedro/milk,luispedro/milk,luispedro/milk
|
Test tree for >50% accuracy
|
import milk.supervised.tree
import numpy as np
import tests.data.german.german
def test_tree():
data = tests.data.german.german.load()
features = data['data']
labels = data['label']
C = milk.supervised.tree.tree_classifier()
C.train(features,labels)
assert ((np.array([C.apply(f) for f in features]) > .5) == labels).mean() > .5
|
<commit_before><commit_msg>Test tree for >50% accuracy<commit_after>
|
import milk.supervised.tree
import numpy as np
import tests.data.german.german
def test_tree():
data = tests.data.german.german.load()
features = data['data']
labels = data['label']
C = milk.supervised.tree.tree_classifier()
C.train(features,labels)
assert ((np.array([C.apply(f) for f in features]) > .5) == labels).mean() > .5
|
Test tree for >50% accuracyimport milk.supervised.tree
import numpy as np
import tests.data.german.german
def test_tree():
data = tests.data.german.german.load()
features = data['data']
labels = data['label']
C = milk.supervised.tree.tree_classifier()
C.train(features,labels)
assert ((np.array([C.apply(f) for f in features]) > .5) == labels).mean() > .5
|
<commit_before><commit_msg>Test tree for >50% accuracy<commit_after>import milk.supervised.tree
import numpy as np
import tests.data.german.german
def test_tree():
data = tests.data.german.german.load()
features = data['data']
labels = data['label']
C = milk.supervised.tree.tree_classifier()
C.train(features,labels)
assert ((np.array([C.apply(f) for f in features]) > .5) == labels).mean() > .5
|
|
ff9de1ab494ee5d48dba7aa84dfa1ce114464f09
|
py/tests/global_alpha_test.py
|
py/tests/global_alpha_test.py
|
#!/usr/bin/python3
import pykms
import time
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)
mode = conn.get_default_mode()
modeb = mode.to_blob(card)
format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)
print("Got plane1 %d %d plane2 %d %d" %
(plane1.idx, plane1.id, plane2.idx, plane2.id))
fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);
fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);
alpha = 0
req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
"MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r
while alpha <= 0xFFFF:
print("alpha %d" % (alpha >> 8))
req = pykms.AtomicReq(card)
req.add(plane2, {"alpha": alpha })
r = req.commit_sync()
assert r == 0, "alpha change commit failed: %d" % r
alpha = alpha + 0xFF
time.sleep(0.1)
input("press enter exit\n")
|
Add global_alpha_test.py for DRM per plane "alpha" property testing
|
Add global_alpha_test.py for DRM per plane "alpha" property testing
|
Python
|
mpl-2.0
|
tomba/kmsxx,tomba/kmsxx,tomba/kmsxx,tomba/kmsxx
|
Add global_alpha_test.py for DRM per plane "alpha" property testing
|
#!/usr/bin/python3
import pykms
import time
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)
mode = conn.get_default_mode()
modeb = mode.to_blob(card)
format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)
print("Got plane1 %d %d plane2 %d %d" %
(plane1.idx, plane1.id, plane2.idx, plane2.id))
fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);
fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);
alpha = 0
req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
"MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r
while alpha <= 0xFFFF:
print("alpha %d" % (alpha >> 8))
req = pykms.AtomicReq(card)
req.add(plane2, {"alpha": alpha })
r = req.commit_sync()
assert r == 0, "alpha change commit failed: %d" % r
alpha = alpha + 0xFF
time.sleep(0.1)
input("press enter exit\n")
|
<commit_before><commit_msg>Add global_alpha_test.py for DRM per plane "alpha" property testing<commit_after>
|
#!/usr/bin/python3
import pykms
import time
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)
mode = conn.get_default_mode()
modeb = mode.to_blob(card)
format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)
print("Got plane1 %d %d plane2 %d %d" %
(plane1.idx, plane1.id, plane2.idx, plane2.id))
fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);
fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);
alpha = 0
req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
"MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r
while alpha <= 0xFFFF:
print("alpha %d" % (alpha >> 8))
req = pykms.AtomicReq(card)
req.add(plane2, {"alpha": alpha })
r = req.commit_sync()
assert r == 0, "alpha change commit failed: %d" % r
alpha = alpha + 0xFF
time.sleep(0.1)
input("press enter exit\n")
|
Add global_alpha_test.py for DRM per plane "alpha" property testing#!/usr/bin/python3
import pykms
import time
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)
mode = conn.get_default_mode()
modeb = mode.to_blob(card)
format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)
print("Got plane1 %d %d plane2 %d %d" %
(plane1.idx, plane1.id, plane2.idx, plane2.id))
fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);
fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);
alpha = 0
req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
"MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r
while alpha <= 0xFFFF:
print("alpha %d" % (alpha >> 8))
req = pykms.AtomicReq(card)
req.add(plane2, {"alpha": alpha })
r = req.commit_sync()
assert r == 0, "alpha change commit failed: %d" % r
alpha = alpha + 0xFF
time.sleep(0.1)
input("press enter exit\n")
|
<commit_before><commit_msg>Add global_alpha_test.py for DRM per plane "alpha" property testing<commit_after>#!/usr/bin/python3
import pykms
import time
card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)
mode = conn.get_default_mode()
modeb = mode.to_blob(card)
format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)
print("Got plane1 %d %d plane2 %d %d" %
(plane1.idx, plane1.id, plane2.idx, plane2.id))
fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);
fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);
alpha = 0
req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
"MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r
while alpha <= 0xFFFF:
print("alpha %d" % (alpha >> 8))
req = pykms.AtomicReq(card)
req.add(plane2, {"alpha": alpha })
r = req.commit_sync()
assert r == 0, "alpha change commit failed: %d" % r
alpha = alpha + 0xFF
time.sleep(0.1)
input("press enter exit\n")
|
|
aba7b56ecfad85b006b9159926b1458a6f39847e
|
pylxd/tests/test_operation.py
|
pylxd/tests/test_operation.py
|
from pylxd import operation
from pylxd.tests import testing
class TestOperation(testing.PyLXDTestCase):
"""Tests for pylxd.operation.Operation."""
def test_get(self):
"""Return an operation."""
name = 'operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual(name, an_operation.id)
def test_get_full_path(self):
"""Return an operation even if the full path is specified."""
name = '/1.0/operations/operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual('operation-abc', an_operation.id)
|
Add test coverage for Operation
|
Add test coverage for Operation
|
Python
|
apache-2.0
|
lxc/pylxd,lxc/pylxd
|
Add test coverage for Operation
|
from pylxd import operation
from pylxd.tests import testing
class TestOperation(testing.PyLXDTestCase):
"""Tests for pylxd.operation.Operation."""
def test_get(self):
"""Return an operation."""
name = 'operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual(name, an_operation.id)
def test_get_full_path(self):
"""Return an operation even if the full path is specified."""
name = '/1.0/operations/operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual('operation-abc', an_operation.id)
|
<commit_before><commit_msg>Add test coverage for Operation<commit_after>
|
from pylxd import operation
from pylxd.tests import testing
class TestOperation(testing.PyLXDTestCase):
"""Tests for pylxd.operation.Operation."""
def test_get(self):
"""Return an operation."""
name = 'operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual(name, an_operation.id)
def test_get_full_path(self):
"""Return an operation even if the full path is specified."""
name = '/1.0/operations/operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual('operation-abc', an_operation.id)
|
Add test coverage for Operationfrom pylxd import operation
from pylxd.tests import testing
class TestOperation(testing.PyLXDTestCase):
"""Tests for pylxd.operation.Operation."""
def test_get(self):
"""Return an operation."""
name = 'operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual(name, an_operation.id)
def test_get_full_path(self):
"""Return an operation even if the full path is specified."""
name = '/1.0/operations/operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual('operation-abc', an_operation.id)
|
<commit_before><commit_msg>Add test coverage for Operation<commit_after>from pylxd import operation
from pylxd.tests import testing
class TestOperation(testing.PyLXDTestCase):
"""Tests for pylxd.operation.Operation."""
def test_get(self):
"""Return an operation."""
name = 'operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual(name, an_operation.id)
def test_get_full_path(self):
"""Return an operation even if the full path is specified."""
name = '/1.0/operations/operation-abc'
an_operation = operation.Operation.get(self.client, name)
self.assertEqual('operation-abc', an_operation.id)
|
|
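The two tests above pin down that Operation.get accepts either a bare operation id or the full API path. A minimal sketch of that normalisation — not pylxd's actual implementation — is:

# Sketch of the behaviour under test (not pylxd's own code): keep only the
# trailing path segment, so both input forms yield the same operation id.
def extract_operation_id(name):
    return name.rstrip('/').split('/')[-1]

assert extract_operation_id('operation-abc') == 'operation-abc'
assert extract_operation_id('/1.0/operations/operation-abc') == 'operation-abc'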
23ab053ff9483cef63df0c3322d3d53086a2d273
|
tests/ODBPy/TestAttributes.py
|
tests/ODBPy/TestAttributes.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nose.tools import assert_equal, assert_true, assert_false, raises, assert_is_none
from ODBPy.Attributes import *
class TestAttributes(object):
def test_parse_attributes_from_line(self):
assert_equal({0: 0, 2: 0}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0;0=0,2=0"))
assert_equal({}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0"))
|
Fix missing unit test file
|
Fix missing unit test file
|
Python
|
apache-2.0
|
ulikoehler/ODBPy
|
Fix missing unit test file
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nose.tools import assert_equal, assert_true, assert_false, raises, assert_is_none
from ODBPy.Attributes import *
class TestAttributes(object):
def test_parse_attributes_from_line(self):
assert_equal({0: 0, 2: 0}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0;0=0,2=0"))
assert_equal({}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0"))
|
<commit_before><commit_msg>Fix missing unit test file<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nose.tools import assert_equal, assert_true, assert_false, raises, assert_is_none
from ODBPy.Attributes import *
class TestAttributes(object):
def test_parse_attributes_from_line(self):
assert_equal({0: 0, 2: 0}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0;0=0,2=0"))
assert_equal({}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0"))
|
Fix missing unit test file#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nose.tools import assert_equal, assert_true, assert_false, raises, assert_is_none
from ODBPy.Attributes import *
class TestAttributes(object):
def test_parse_attributes_from_line(self):
assert_equal({0: 0, 2: 0}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0;0=0,2=0"))
assert_equal({}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0"))
|
<commit_before><commit_msg>Fix missing unit test file<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nose.tools import assert_equal, assert_true, assert_false, raises, assert_is_none
from ODBPy.Attributes import *
class TestAttributes(object):
def test_parse_attributes_from_line(self):
assert_equal({0: 0, 2: 0}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0;0=0,2=0"))
assert_equal({}, parse_attributes_from_line(
"P -30.9595 3.8107 0 P 0 8 0"))
|
|
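For context on what the new test exercises: attributes ride at the end of a record line, after a ';', as comma-separated index=value pairs. The following is only a sketch of that reading — ODBPy's real parser may handle more value types — but it reproduces both expectations in the test.

# Sketch of the attribute syntax covered by the test (not ODBPy's parser):
# everything after ';' is a comma-separated list of index=value pairs.
def parse_attributes_sketch(line):
    if ';' not in line:
        return {}
    pairs = line.split(';', 1)[1]
    return {int(k): int(v) for k, v in (p.split('=', 1) for p in pairs.split(','))}

assert parse_attributes_sketch("P -30.9595 3.8107 0 P 0 8 0;0=0,2=0") == {0: 0, 2: 0}
assert parse_attributes_sketch("P -30.9595 3.8107 0 P 0 8 0") == {}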
a4dfdc82749873fefac1d0a5a411e178ed9f6da9
|
tests/test_core/test_utils.py
|
tests/test_core/test_utils.py
|
from __future__ import unicode_literals
import sure
from moto.core.utils import camelcase_to_underscores
def test_camelcase_to_underscores():
cases = {
"theNewAttribute": "the_new_attribute",
"attri bute With Space": "attribute_with_space",
"FirstLetterCapital": "first_letter_capital",
}
for arg, expected in cases.items():
camelcase_to_underscores(arg).should.equal(expected)
|
Add basic tests for moto.core.utils.camelcase_to_underscores()
|
Add basic tests for moto.core.utils.camelcase_to_underscores()
|
Python
|
apache-2.0
|
whummer/moto,spulec/moto,heddle317/moto,william-richard/moto,2rs2ts/moto,IlyaSukhanov/moto,gjtempleton/moto,spulec/moto,whummer/moto,braintreeps/moto,botify-labs/moto,william-richard/moto,okomestudio/moto,tootedom/moto,whummer/moto,Affirm/moto,dbfr3qs/moto,rocky4570/moto,ZuluPro/moto,botify-labs/moto,gjtempleton/moto,2rs2ts/moto,rocky4570/moto,riccardomc/moto,ZuluPro/moto,Brett55/moto,botify-labs/moto,okomestudio/moto,kefo/moto,ZuluPro/moto,Brett55/moto,spulec/moto,dbfr3qs/moto,Affirm/moto,ZuluPro/moto,2rs2ts/moto,heddle317/moto,Brett55/moto,botify-labs/moto,Brett55/moto,heddle317/moto,dbfr3qs/moto,EarthmanT/moto,okomestudio/moto,william-richard/moto,Affirm/moto,spulec/moto,Affirm/moto,whummer/moto,gjtempleton/moto,zonk1024/moto,whummer/moto,2rs2ts/moto,rocky4570/moto,heddle317/moto,okomestudio/moto,william-richard/moto,william-richard/moto,silveregg/moto,ZuluPro/moto,spulec/moto,botify-labs/moto,william-richard/moto,im-auld/moto,gjtempleton/moto,mrucci/moto,rocky4570/moto,kefo/moto,dbfr3qs/moto,behanceops/moto,Affirm/moto,kefo/moto,Brett55/moto,ZuluPro/moto,okomestudio/moto,rocky4570/moto,botify-labs/moto,kefo/moto,dbfr3qs/moto,2rs2ts/moto,spulec/moto,Brett55/moto,rocky4570/moto,Affirm/moto,dbfr3qs/moto,okomestudio/moto,heddle317/moto,kefo/moto,gjtempleton/moto,whummer/moto
|
Add basic tests for moto.core.utils.camelcase_to_underscores()
|
from __future__ import unicode_literals
import sure
from moto.core.utils import camelcase_to_underscores
def test_camelcase_to_underscores():
cases = {
"theNewAttribute": "the_new_attribute",
"attri bute With Space": "attribute_with_space",
"FirstLetterCapital": "first_letter_capital",
}
for arg, expected in cases.items():
camelcase_to_underscores(arg).should.equal(expected)
|
<commit_before><commit_msg>Add basic tests for moto.core.utils.camelcase_to_underscores()<commit_after>
|
from __future__ import unicode_literals
import sure
from moto.core.utils import camelcase_to_underscores
def test_camelcase_to_underscores():
cases = {
"theNewAttribute": "the_new_attribute",
"attri bute With Space": "attribute_with_space",
"FirstLetterCapital": "first_letter_capital",
}
for arg, expected in cases.items():
camelcase_to_underscores(arg).should.equal(expected)
|
Add basic tests for moto.core.utils.camelcase_to_underscores()from __future__ import unicode_literals
import sure
from moto.core.utils import camelcase_to_underscores
def test_camelcase_to_underscores():
cases = {
"theNewAttribute": "the_new_attribute",
"attri bute With Space": "attribute_with_space",
"FirstLetterCapital": "first_letter_capital",
}
for arg, expected in cases.items():
camelcase_to_underscores(arg).should.equal(expected)
|
<commit_before><commit_msg>Add basic tests for moto.core.utils.camelcase_to_underscores()<commit_after>from __future__ import unicode_literals
import sure
from moto.core.utils import camelcase_to_underscores
def test_camelcase_to_underscores():
cases = {
"theNewAttribute": "the_new_attribute",
"attri bute With Space": "attribute_with_space",
"FirstLetterCapital": "first_letter_capital",
}
for arg, expected in cases.items():
camelcase_to_underscores(arg).should.equal(expected)
|
|
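The three cases above fully pin down the conversion. A reference sketch that satisfies them — not moto's actual implementation — is:

# Reference sketch that passes the three cases above (not moto's own code):
# drop whitespace, underscore on lower/digit-to-upper boundaries, lower-case.
import re

def camelcase_to_underscores_sketch(argument):
    collapsed = re.sub(r'\s+', '', argument)
    return re.sub(r'(?<=[a-z0-9])([A-Z])', r'_\1', collapsed).lower()

assert camelcase_to_underscores_sketch("theNewAttribute") == "the_new_attribute"
assert camelcase_to_underscores_sketch("attri bute With Space") == "attribute_with_space"
assert camelcase_to_underscores_sketch("FirstLetterCapital") == "first_letter_capital"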
f3df949b52a90f4bb8a51634eb41b4f0fb196b4a
|
examples/comp/set_input_on_selected_loaders.py
|
examples/comp/set_input_on_selected_loaders.py
|
"""Enable the 'Loop' input for all selected loaders"""
import fusionless as fu
import fusionless.context as fuCtx
c = fu.Comp()
with fuCtx.lock_and_undo_chunk(c, "Set loaders to loop"):
for tool in c.get_selected_tools(node_type="Loader"):
loop = tool.input("Loop").set_value(True)
|
Add set input on selected loaders example
|
Add set input on selected loaders example
|
Python
|
bsd-3-clause
|
BigRoy/fusionscript,BigRoy/fusionless
|
Add set input on selected loaders example
|
"""Enable the 'Loop' input for all selected loaders"""
import fusionless as fu
import fusionless.context as fuCtx
c = fu.Comp()
with fuCtx.lock_and_undo_chunk(c, "Set loaders to loop"):
for tool in c.get_selected_tools(node_type="Loader"):
loop = tool.input("Loop").set_value(True)
|
<commit_before><commit_msg>Add set input on selected loaders example<commit_after>
|
"""Enable the 'Loop' input for all selected loaders"""
import fusionless as fu
import fusionless.context as fuCtx
c = fu.Comp()
with fuCtx.lock_and_undo_chunk(c, "Set loaders to loop"):
for tool in c.get_selected_tools(node_type="Loader"):
loop = tool.input("Loop").set_value(True)
|
Add set input on selected loaders example"""Enable the 'Loop' input for all selected loaders"""
import fusionless as fu
import fusionless.context as fuCtx
c = fu.Comp()
with fuCtx.lock_and_undo_chunk(c, "Set loaders to loop"):
for tool in c.get_selected_tools(node_type="Loader"):
loop = tool.input("Loop").set_value(True)
|
<commit_before><commit_msg>Add set input on selected loaders example<commit_after>"""Enable the 'Loop' input for all selected loaders"""
import fusionless as fu
import fusionless.context as fuCtx
c = fu.Comp()
with fuCtx.lock_and_undo_chunk(c, "Set loaders to loop"):
for tool in c.get_selected_tools(node_type="Loader"):
loop = tool.input("Loop").set_value(True)
|
|
667d6e6d0634a39daaa39145bc037f63465b8ee2
|
tools/visual/show_tracking.py
|
tools/visual/show_tracking.py
|
#!/usr/bin/env python
import argparse
import os
import sys
sys.path.insert(1, 'external')
from vdetlib.vdet.dataset import imagenet_vdet_classes
from vdetlib.utils.visual import unique_colors, add_bbox
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_dump, proto_load, top_detections, frame_path_at, track_box_at_frame
import cv2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('track_file')
parser.add_argument('--save_dir', default=None)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
track_proto = proto_load(args.track_file)
colors = unique_colors(len(track_proto['tracks']))
if not args.save_dir:
cv2.namedWindow('tracks')
for frame in vid_proto['frames']:
img = imread(frame_path_at(vid_proto, frame['frame']))
boxes = [track_box_at_frame(tracklet, frame['frame']) \
for tracklet in track_proto['tracks']]
tracked = add_bbox(img, boxes, None, 2)
if args.save_dir:
if not os.path.isdir(args.save_dir):
try:
os.makedirs(args.save_dir)
except:
pass
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame['frame'])),
tracked)
else:
cv2.imshow('tracks', tracked)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
if not args.save_dir:
cv2.destroyAllWindows()
|
Add a script to show tracking results.
|
Add a script to show tracking results.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add a script to show tracking results.
|
#!/usr/bin/env python
import argparse
import os
import sys
sys.path.insert(1, 'external')
from vdetlib.vdet.dataset import imagenet_vdet_classes
from vdetlib.utils.visual import unique_colors, add_bbox
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_dump, proto_load, top_detections, frame_path_at, track_box_at_frame
import cv2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('track_file')
parser.add_argument('--save_dir', default=None)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
track_proto = proto_load(args.track_file)
colors = unique_colors(len(track_proto['tracks']))
if not args.save_dir:
cv2.namedWindow('tracks')
for frame in vid_proto['frames']:
img = imread(frame_path_at(vid_proto, frame['frame']))
boxes = [track_box_at_frame(tracklet, frame['frame']) \
for tracklet in track_proto['tracks']]
tracked = add_bbox(img, boxes, None, 2)
if args.save_dir:
if not os.path.isdir(args.save_dir):
try:
os.makedirs(args.save_dir)
except:
pass
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame['frame'])),
tracked)
else:
cv2.imshow('tracks', tracked)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
if not args.save_dir:
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a script to show tracking results.<commit_after>
|
#!/usr/bin/env python
import argparse
import os
import sys
sys.path.insert(1, 'external')
from vdetlib.vdet.dataset import imagenet_vdet_classes
from vdetlib.utils.visual import unique_colors, add_bbox
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_dump, proto_load, top_detections, frame_path_at, track_box_at_frame
import cv2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('track_file')
parser.add_argument('--save_dir', default=None)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
track_proto = proto_load(args.track_file)
colors = unique_colors(len(track_proto['tracks']))
if not args.save_dir:
cv2.namedWindow('tracks')
for frame in vid_proto['frames']:
img = imread(frame_path_at(vid_proto, frame['frame']))
boxes = [track_box_at_frame(tracklet, frame['frame']) \
for tracklet in track_proto['tracks']]
tracked = add_bbox(img, boxes, None, 2)
if args.save_dir:
if not os.path.isdir(args.save_dir):
try:
os.makedirs(args.save_dir)
except:
pass
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame['frame'])),
tracked)
else:
cv2.imshow('tracks', tracked)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
if not args.save_dir:
cv2.destroyAllWindows()
|
Add a script to show tracking results.#!/usr/bin/env python
import argparse
import os
import sys
sys.path.insert(1, 'external')
from vdetlib.vdet.dataset import imagenet_vdet_classes
from vdetlib.utils.visual import unique_colors, add_bbox
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_dump, proto_load, top_detections, frame_path_at, track_box_at_frame
import cv2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('track_file')
parser.add_argument('--save_dir', default=None)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
track_proto = proto_load(args.track_file)
colors = unique_colors(len(track_proto['tracks']))
if not args.save_dir:
cv2.namedWindow('tracks')
for frame in vid_proto['frames']:
img = imread(frame_path_at(vid_proto, frame['frame']))
boxes = [track_box_at_frame(tracklet, frame['frame']) \
for tracklet in track_proto['tracks']]
tracked = add_bbox(img, boxes, None, 2)
if args.save_dir:
if not os.path.isdir(args.save_dir):
try:
os.makedirs(args.save_dir)
except:
pass
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame['frame'])),
tracked)
else:
cv2.imshow('tracks', tracked)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
if not args.save_dir:
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a script to show tracking results.<commit_after>#!/usr/bin/env python
import argparse
import os
import sys
sys.path.insert(1, 'external')
from vdetlib.vdet.dataset import imagenet_vdet_classes
from vdetlib.utils.visual import unique_colors, add_bbox
from vdetlib.utils.common import imread, imwrite
from vdetlib.utils.protocol import proto_dump, proto_load, top_detections, frame_path_at, track_box_at_frame
import cv2
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_file')
parser.add_argument('track_file')
parser.add_argument('--save_dir', default=None)
args = parser.parse_args()
vid_proto = proto_load(args.vid_file)
track_proto = proto_load(args.track_file)
colors = unique_colors(len(track_proto['tracks']))
if not args.save_dir:
cv2.namedWindow('tracks')
for frame in vid_proto['frames']:
img = imread(frame_path_at(vid_proto, frame['frame']))
boxes = [track_box_at_frame(tracklet, frame['frame']) \
for tracklet in track_proto['tracks']]
tracked = add_bbox(img, boxes, None, 2)
if args.save_dir:
if not os.path.isdir(args.save_dir):
try:
os.makedirs(args.save_dir)
except:
pass
imwrite(os.path.join(args.save_dir, "{:04d}.jpg".format(frame['frame'])),
tracked)
else:
cv2.imshow('tracks', tracked)
if cv2.waitKey(0) == ord('q'):
cv2.destroyAllWindows()
sys.exit(0)
if not args.save_dir:
cv2.destroyAllWindows()
|
|
c3dbea85b4017fe34e856bfc9dc94ecc2966b4b1
|
mica/archive/tests/test_aca_l0.py
|
mica/archive/tests/test_aca_l0.py
|
from __future__ import division
from astropy.table import Table
import numpy as np
from mica.archive import aca_l0, asp_l1
from Ska.Numpy import interpolate
def test_get_l0_images():
"""
Do a validation test of get_l0_images:
- Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)
- Do first moment centroids in row and col
- Compare to aspect pipeline FM centroids for same slot data
This is a deep test that all the signs are right. If not then everything
breaks badly because the star image doesn't move in sync with row0, col0.
"""
start = '2007:002:06:00:00'
stop = '2007:002:06:20:00'
imgs = aca_l0.get_l0_images(start, stop, slot=6)
files = asp_l1.get_files(8008, content=['ACACENT'])
acen = Table.read(files[0])
# Pick FM centroids for slot 6
ok = (acen['alg'] == 1) & (acen['slot'] == 6)
acen = acen[ok]
# Row and col centroids
rcs = []
ccs = []
times = [img.TIME for img in imgs]
# Easy way to do FM centroids with mgrid
rw, cw = np.mgrid[0:6, 0:6]
# rw = [[0, 0, 0, 0, 0, 0],
# [1, 1, 1, 1, 1, 1],
# [2, 2, 2, 2, 2, 2],
# [3, 3, 3, 3, 3, 3],
# [4, 4, 4, 4, 4, 4],
# [5, 5, 5, 5, 5, 5]]
for img in imgs:
norm = np.sum(img)
rcs.append(np.sum(img * rw) / norm + img.row0)
ccs.append(np.sum(img * cw) / norm + img.col0)
rcen = interpolate(acen['cent_i'], acen['time'], times)
ccen = interpolate(acen['cent_j'], acen['time'], times)
assert np.all(np.abs(rcen - rcs) < 0.05)
assert np.all(np.abs(ccen - ccs) < 0.05)
|
Add minimal tests for aca_l0
|
Add minimal tests for aca_l0
|
Python
|
bsd-3-clause
|
sot/mica,sot/mica
|
Add minimal tests for aca_l0
|
from __future__ import division
from astropy.table import Table
import numpy as np
from mica.archive import aca_l0, asp_l1
from Ska.Numpy import interpolate
def test_get_l0_images():
"""
Do a validation test of get_l0_images:
- Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)
- Do first moment centroids in row and col
- Compare to aspect pipeline FM centroids for same slot data
This is a deep test that all the signs are right. If not then everything
breaks badly because the star image doesn't move in sync with row0, col0.
"""
start = '2007:002:06:00:00'
stop = '2007:002:06:20:00'
imgs = aca_l0.get_l0_images(start, stop, slot=6)
files = asp_l1.get_files(8008, content=['ACACENT'])
acen = Table.read(files[0])
# Pick FM centroids for slot 6
ok = (acen['alg'] == 1) & (acen['slot'] == 6)
acen = acen[ok]
# Row and col centroids
rcs = []
ccs = []
times = [img.TIME for img in imgs]
# Easy way to do FM centroids with mgrid
rw, cw = np.mgrid[0:6, 0:6]
# rw = [[0, 0, 0, 0, 0, 0],
# [1, 1, 1, 1, 1, 1],
# [2, 2, 2, 2, 2, 2],
# [3, 3, 3, 3, 3, 3],
# [4, 4, 4, 4, 4, 4],
# [5, 5, 5, 5, 5, 5]]
for img in imgs:
norm = np.sum(img)
rcs.append(np.sum(img * rw) / norm + img.row0)
ccs.append(np.sum(img * cw) / norm + img.col0)
rcen = interpolate(acen['cent_i'], acen['time'], times)
ccen = interpolate(acen['cent_j'], acen['time'], times)
assert np.all(np.abs(rcen - rcs) < 0.05)
assert np.all(np.abs(ccen - ccs) < 0.05)
|
<commit_before><commit_msg>Add minimal tests for aca_l0<commit_after>
|
from __future__ import division
from astropy.table import Table
import numpy as np
from mica.archive import aca_l0, asp_l1
from Ska.Numpy import interpolate
def test_get_l0_images():
"""
Do a validation test of get_l0_images:
- Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)
- Do first moment centroids in row and col
- Compare to aspect pipeline FM centroids for same slot data
This is a deep test that all the signs are right. If not then everything
breaks badly because the star image doesn't move in sync with row0, col0.
"""
start = '2007:002:06:00:00'
stop = '2007:002:06:20:00'
imgs = aca_l0.get_l0_images(start, stop, slot=6)
files = asp_l1.get_files(8008, content=['ACACENT'])
acen = Table.read(files[0])
# Pick FM centroids for slot 6
ok = (acen['alg'] == 1) & (acen['slot'] == 6)
acen = acen[ok]
# Row and col centroids
rcs = []
ccs = []
times = [img.TIME for img in imgs]
# Easy way to do FM centroids with mgrid
rw, cw = np.mgrid[0:6, 0:6]
# rw = [[0, 0, 0, 0, 0, 0],
# [1, 1, 1, 1, 1, 1],
# [2, 2, 2, 2, 2, 2],
# [3, 3, 3, 3, 3, 3],
# [4, 4, 4, 4, 4, 4],
# [5, 5, 5, 5, 5, 5]]
for img in imgs:
norm = np.sum(img)
rcs.append(np.sum(img * rw) / norm + img.row0)
ccs.append(np.sum(img * cw) / norm + img.col0)
rcen = interpolate(acen['cent_i'], acen['time'], times)
ccen = interpolate(acen['cent_j'], acen['time'], times)
assert np.all(np.abs(rcen - rcs) < 0.05)
assert np.all(np.abs(ccen - ccs) < 0.05)
|
Add minimal tests for aca_l0from __future__ import division
from astropy.table import Table
import numpy as np
from mica.archive import aca_l0, asp_l1
from Ska.Numpy import interpolate
def test_get_l0_images():
"""
Do a validation test of get_l0_images:
- Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)
- Do first moment centroids in row and col
- Compare to aspect pipeline FM centroids for same slot data
This is a deep test that all the signs are right. If not then everything
breaks badly because the star image doesn't move in sync with row0, col0.
"""
start = '2007:002:06:00:00'
stop = '2007:002:06:20:00'
imgs = aca_l0.get_l0_images(start, stop, slot=6)
files = asp_l1.get_files(8008, content=['ACACENT'])
acen = Table.read(files[0])
# Pick FM centroids for slot 6
ok = (acen['alg'] == 1) & (acen['slot'] == 6)
acen = acen[ok]
# Row and col centroids
rcs = []
ccs = []
times = [img.TIME for img in imgs]
# Easy way to do FM centroids with mgrid
rw, cw = np.mgrid[0:6, 0:6]
# rw = [[0, 0, 0, 0, 0, 0],
# [1, 1, 1, 1, 1, 1],
# [2, 2, 2, 2, 2, 2],
# [3, 3, 3, 3, 3, 3],
# [4, 4, 4, 4, 4, 4],
# [5, 5, 5, 5, 5, 5]]
for img in imgs:
norm = np.sum(img)
rcs.append(np.sum(img * rw) / norm + img.row0)
ccs.append(np.sum(img * cw) / norm + img.col0)
rcen = interpolate(acen['cent_i'], acen['time'], times)
ccen = interpolate(acen['cent_j'], acen['time'], times)
assert np.all(np.abs(rcen - rcs) < 0.05)
assert np.all(np.abs(ccen - ccs) < 0.05)
|
<commit_before><commit_msg>Add minimal tests for aca_l0<commit_after>from __future__ import division
from astropy.table import Table
import numpy as np
from mica.archive import aca_l0, asp_l1
from Ska.Numpy import interpolate
def test_get_l0_images():
"""
Do a validation test of get_l0_images:
- Get 20 mins of image data for slot 6 of obsid 8008 (very nice clean stars)
- Do first moment centroids in row and col
- Compare to aspect pipeline FM centroids for same slot data
This is a deep test that all the signs are right. If not then everything
breaks badly because the star image doesn't move in sync with row0, col0.
"""
start = '2007:002:06:00:00'
stop = '2007:002:06:20:00'
imgs = aca_l0.get_l0_images(start, stop, slot=6)
files = asp_l1.get_files(8008, content=['ACACENT'])
acen = Table.read(files[0])
# Pick FM centroids for slot 6
ok = (acen['alg'] == 1) & (acen['slot'] == 6)
acen = acen[ok]
# Row and col centroids
rcs = []
ccs = []
times = [img.TIME for img in imgs]
# Easy way to do FM centroids with mgrid
rw, cw = np.mgrid[0:6, 0:6]
# rw = [[0, 0, 0, 0, 0, 0],
# [1, 1, 1, 1, 1, 1],
# [2, 2, 2, 2, 2, 2],
# [3, 3, 3, 3, 3, 3],
# [4, 4, 4, 4, 4, 4],
# [5, 5, 5, 5, 5, 5]]
for img in imgs:
norm = np.sum(img)
rcs.append(np.sum(img * rw) / norm + img.row0)
ccs.append(np.sum(img * cw) / norm + img.col0)
rcen = interpolate(acen['cent_i'], acen['time'], times)
ccen = interpolate(acen['cent_j'], acen['time'], times)
assert np.all(np.abs(rcen - rcs) < 0.05)
assert np.all(np.abs(ccen - ccs) < 0.05)
|
|
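As a small illustration of the first-moment centroid the test relies on (separate from the test itself): a single bright pixel at row 2, column 3 of a 6x6 readout centroids to exactly (2, 3); adding row0/col0, as the test does, then places that centroid in CCD coordinates.

# Toy illustration of the first-moment centroid used above (not test code).
import numpy as np

img = np.zeros((6, 6))
img[2, 3] = 1.0                      # one bright pixel at row 2, col 3
rw, cw = np.mgrid[0:6, 0:6]
assert np.sum(img * rw) / np.sum(img) == 2.0   # row centroid
assert np.sum(img * cw) / np.sum(img) == 3.0   # col centroid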
bb96824958117e586919499124a550668b05f5a2
|
utilities/transInputData.py
|
utilities/transInputData.py
|
#! /usr/bin/python
import sys, os
if len(sys.argv) < 3:
print('Translate input data in AAAI\'15 format to separate config, men, women files.')
print('Usage: {} <input file> <output directory>'.format(sys.argv[0]))
sys.exit(1)
infile = sys.argv[1]
outdir = sys.argv[2]
if os.path.exists(outdir):
raise OSError('Output directory {} already exists!'.format(outdir))
os.makedirs(outdir)
with open(infile, 'r') as fh:
ctx = [ line.strip() for line in fh ]
# Filter empty lines
ctx = [ line for line in ctx if line ]
# First line
config = ctx[0]
size = int(config.split()[0])
assert size * 2 + 1 == len(ctx)
# Men preference matrix, index starts from 1
menlist = ctx[1:1+size]
menlist = [ '{}: {}\n'.format(idx+1, menlist[idx]) for idx in xrange(size) ]
# Women preference matrix, index starts from 1
womenlist = ctx[1+size:1+2*size]
womenlist = [ '{}: {}\n'.format(idx+1, womenlist[idx]) for idx in xrange(size) ]
with open(os.path.join(outdir, 'config.txt'), 'w') as fh:
fh.writelines([config])
with open(os.path.join(outdir, 'men.list'), 'w') as fh:
fh.writelines(menlist)
with open(os.path.join(outdir, 'women.list'), 'w') as fh:
fh.writelines(womenlist)
|
Add utility to translate input data format.
|
Add utility to translate input data format.
|
Python
|
apache-2.0
|
gaomy3832/spark-smti,gaomy3832/spark-smti
|
Add utility to translate input data format.
|
#! /usr/bin/python
import sys, os
if len(sys.argv) < 3:
print('Translate input data in AAAI\'15 format to separate config, men, women files.')
print('Usage: {} <input file> <output directory>'.format(sys.argv[0]))
sys.exit(1)
infile = sys.argv[1]
outdir = sys.argv[2]
if os.path.exists(outdir):
raise OSError('Output directory {} already exists!'.format(outdir))
os.makedirs(outdir)
with open(infile, 'r') as fh:
ctx = [ line.strip() for line in fh ]
# Filter empty lines
ctx = [ line for line in ctx if line ]
# First line
config = ctx[0]
size = int(config.split()[0])
assert size * 2 + 1 == len(ctx)
# Men preference matrix, index starts from 1
menlist = ctx[1:1+size]
menlist = [ '{}: {}\n'.format(idx+1, menlist[idx]) for idx in xrange(size) ]
# Women preference matrix, index starts from 1
womenlist = ctx[1+size:1+2*size]
womenlist = [ '{}: {}\n'.format(idx+1, womenlist[idx]) for idx in xrange(size) ]
with open(os.path.join(outdir, 'config.txt'), 'w') as fh:
fh.writelines([config])
with open(os.path.join(outdir, 'men.list'), 'w') as fh:
fh.writelines(menlist)
with open(os.path.join(outdir, 'women.list'), 'w') as fh:
fh.writelines(womenlist)
|
<commit_before><commit_msg>Add utility to translate input data format.<commit_after>
|
#! /usr/bin/python
import sys, os
if len(sys.argv) < 3:
print('Translate input data in AAAI\'15 format to separate config, men, women files.')
print('Usage: {} <input file> <output directory>'.format(sys.argv[0]))
sys.exit(1)
infile = sys.argv[1]
outdir = sys.argv[2]
if os.path.exists(outdir):
raise OSError('Output directory {} already exists!'.format(outdir))
os.makedirs(outdir)
with open(infile, 'r') as fh:
ctx = [ line.strip() for line in fh ]
# Filter empty lines
ctx = [ line for line in ctx if line ]
# First line
config = ctx[0]
size = int(config.split()[0])
assert size * 2 + 1 == len(ctx)
# Men preference matrix, index starts from 1
menlist = ctx[1:1+size]
menlist = [ '{}: {}\n'.format(idx+1, menlist[idx]) for idx in xrange(size) ]
# Women preference matrix, index starts from 1
womenlist = ctx[1+size:1+2*size]
womenlist = [ '{}: {}\n'.format(idx+1, womenlist[idx]) for idx in xrange(size) ]
with open(os.path.join(outdir, 'config.txt'), 'w') as fh:
fh.writelines([config])
with open(os.path.join(outdir, 'men.list'), 'w') as fh:
fh.writelines(menlist)
with open(os.path.join(outdir, 'women.list'), 'w') as fh:
fh.writelines(womenlist)
|
Add utility to translate input data format.#! /usr/bin/python
import sys, os
if len(sys.argv) < 3:
print('Translate input data in AAAI\'15 format to separate config, men, women files.')
print('Usage: {} <input file> <output directory>'.format(sys.argv[0]))
sys.exit(1)
infile = sys.argv[1]
outdir = sys.argv[2]
if os.path.exists(outdir):
raise OSError('Output directory {} already exists!'.format(outdir))
os.makedirs(outdir)
with open(infile, 'r') as fh:
ctx = [ line.strip() for line in fh ]
# Filter empty lines
ctx = [ line for line in ctx if line ]
# First line
config = ctx[0]
size = int(config.split()[0])
assert size * 2 + 1 == len(ctx)
# Men preference matrix, index starts from 1
menlist = ctx[1:1+size]
menlist = [ '{}: {}\n'.format(idx+1, menlist[idx]) for idx in xrange(size) ]
# Women preference matrix, index starts from 1
womenlist = ctx[1+size:1+2*size]
womenlist = [ '{}: {}\n'.format(idx+1, womenlist[idx]) for idx in xrange(size) ]
with open(os.path.join(outdir, 'config.txt'), 'w') as fh:
fh.writelines([config])
with open(os.path.join(outdir, 'men.list'), 'w') as fh:
fh.writelines(menlist)
with open(os.path.join(outdir, 'women.list'), 'w') as fh:
fh.writelines(womenlist)
|
<commit_before><commit_msg>Add utility to translate input data format.<commit_after>#! /usr/bin/python
import sys, os
if len(sys.argv) < 3:
print('Translate input data in AAAI\'15 format to separate config, men, women files.')
print('Usage: {} <input file> <output directory>'.format(sys.argv[0]))
sys.exit(1)
infile = sys.argv[1]
outdir = sys.argv[2]
if os.path.exists(outdir):
raise OSError('Output directory {} already exists!'.format(outdir))
os.makedirs(outdir)
with open(infile, 'r') as fh:
ctx = [ line.strip() for line in fh ]
# Filter empty lines
ctx = [ line for line in ctx if line ]
# First line
config = ctx[0]
size = int(config.split()[0])
assert size * 2 + 1 == len(ctx)
# Men preference matrix, index starts from 1
menlist = ctx[1:1+size]
menlist = [ '{}: {}\n'.format(idx+1, menlist[idx]) for idx in xrange(size) ]
# Women preference matrix, index starts from 1
womenlist = ctx[1+size:1+2*size]
womenlist = [ '{}: {}\n'.format(idx+1, womenlist[idx]) for idx in xrange(size) ]
with open(os.path.join(outdir, 'config.txt'), 'w') as fh:
fh.writelines([config])
with open(os.path.join(outdir, 'men.list'), 'w') as fh:
fh.writelines(menlist)
with open(os.path.join(outdir, 'women.list'), 'w') as fh:
fh.writelines(womenlist)
|
|
201d5ac1385f0800c6386e564483add3229ded2a
|
contrib/afl-fuzz.py
|
contrib/afl-fuzz.py
|
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1+
import argparse
import sys
import subprocess
import os
def main():
parser = argparse.ArgumentParser(description='Run afl-fuzz on all cores')
parser.add_argument('--input', '-i', help='fuzzing input directory')
parser.add_argument('--output', '-o', help='findings output directory')
parser.add_argument('path', type=str, help='the fuzzer tool')
args = parser.parse_args()
if not args.input and not args.output:
print('-i and -o required')
return 1
if not args.path:
print('tool name required')
return 1
# create if not already exists
if not os.path.exists(args.output):
os.makedirs(args.output)
# run the main instance
envp = None
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-M', 'fuzzer00', args.path, '@@']
print(argv)
p = subprocess.Popen(argv, env=envp)
# run the secondary instances
cs = []
for i in range(1, os.cpu_count()):
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-S', 'fuzzer%02i' % i, args.path, '@@']
print(argv)
cs.append(subprocess.Popen(argv, env=envp, stdout=subprocess.DEVNULL))
# wait for the main instance
try:
p.wait()
except KeyboardInterrupt as _:
pass
for c in cs:
c.terminate()
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add a parallel fuzzing script
|
trivial: Add a parallel fuzzing script
This approximately doubles performace of fuzzing for every two cores you have.
|
Python
|
lgpl-2.1
|
hughsie/fwupd,hughsie/fwupd,fwupd/fwupd,fwupd/fwupd,fwupd/fwupd,hughsie/fwupd,hughsie/fwupd,fwupd/fwupd
|
trivial: Add a parallel fuzzing script
This approximately doubles performace of fuzzing for every two cores you have.
|
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1+
import argparse
import sys
import subprocess
import os
def main():
parser = argparse.ArgumentParser(description='Run afl-fuzz on all cores')
parser.add_argument('--input', '-i', help='fuzzing input directory')
parser.add_argument('--output', '-o', help='findings output directory')
parser.add_argument('path', type=str, help='the fuzzer tool')
args = parser.parse_args()
if not args.input and not args.output:
print('-i and -o required')
return 1
if not args.path:
print('tool name required')
return 1
# create if not already exists
if not os.path.exists(args.output):
os.makedirs(args.output)
# run the main instance
envp = None
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-M', 'fuzzer00', args.path, '@@']
print(argv)
p = subprocess.Popen(argv, env=envp)
# run the secondary instances
cs = []
for i in range(1, os.cpu_count()):
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-S', 'fuzzer%02i' % i, args.path, '@@']
print(argv)
cs.append(subprocess.Popen(argv, env=envp, stdout=subprocess.DEVNULL))
# wait for the main instance
try:
p.wait()
except KeyboardInterrupt as _:
pass
for c in cs:
c.terminate()
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>trivial: Add a parallel fuzzing script
This approximately doubles performace of fuzzing for every two cores you have.<commit_after>
|
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1+
import argparse
import sys
import subprocess
import os
def main():
parser = argparse.ArgumentParser(description='Run afl-fuzz on all cores')
parser.add_argument('--input', '-i', help='fuzzing input directory')
parser.add_argument('--output', '-o', help='findings output directory')
parser.add_argument('path', type=str, help='the fuzzer tool')
args = parser.parse_args()
if not args.input and not args.output:
print('-i and -o required')
return 1
if not args.path:
print('tool name required')
return 1
# create if not already exists
if not os.path.exists(args.output):
os.makedirs(args.output)
# run the main instance
envp = None
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-M', 'fuzzer00', args.path, '@@']
print(argv)
p = subprocess.Popen(argv, env=envp)
# run the secondary instances
cs = []
for i in range(1, os.cpu_count()):
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-S', 'fuzzer%02i' % i, args.path, '@@']
print(argv)
cs.append(subprocess.Popen(argv, env=envp, stdout=subprocess.DEVNULL))
# wait for the main instance
try:
p.wait()
except KeyboardInterrupt as _:
pass
for c in cs:
c.terminate()
return 0
if __name__ == '__main__':
sys.exit(main())
|
trivial: Add a parallel fuzzing script
This approximately doubles performace of fuzzing for every two cores you have.#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1+
import argparse
import sys
import subprocess
import os
def main():
parser = argparse.ArgumentParser(description='Run afl-fuzz on all cores')
parser.add_argument('--input', '-i', help='fuzzing input directory')
parser.add_argument('--output', '-o', help='findings output directory')
parser.add_argument('path', type=str, help='the fuzzer tool')
args = parser.parse_args()
if not args.input and not args.output:
print('-i and -o required')
return 1
if not args.path:
print('tool name required')
return 1
# create if not already exists
if not os.path.exists(args.output):
os.makedirs(args.output)
# run the main instance
envp = None
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-M', 'fuzzer00', args.path, '@@']
print(argv)
p = subprocess.Popen(argv, env=envp)
# run the secondary instances
cs = []
for i in range(1, os.cpu_count()):
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-S', 'fuzzer%02i' % i, args.path, '@@']
print(argv)
cs.append(subprocess.Popen(argv, env=envp, stdout=subprocess.DEVNULL))
# wait for the main instance
try:
p.wait()
except KeyboardInterrupt as _:
pass
for c in cs:
c.terminate()
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>trivial: Add a parallel fuzzing script
This approximately doubles performace of fuzzing for every two cores you have.<commit_after>#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1+
import argparse
import sys
import subprocess
import os
def main():
parser = argparse.ArgumentParser(description='Run afl-fuzz on all cores')
parser.add_argument('--input', '-i', help='fuzzing input directory')
parser.add_argument('--output', '-o', help='findings output directory')
parser.add_argument('path', type=str, help='the fuzzer tool')
args = parser.parse_args()
if not args.input and not args.output:
print('-i and -o required')
return 1
if not args.path:
print('tool name required')
return 1
# create if not already exists
if not os.path.exists(args.output):
os.makedirs(args.output)
# run the main instance
envp = None
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-M', 'fuzzer00', args.path, '@@']
print(argv)
p = subprocess.Popen(argv, env=envp)
# run the secondary instances
cs = []
for i in range(1, os.cpu_count()):
argv = ['afl-fuzz', '-m300', '-i', args.input, '-o', args.output,
'-S', 'fuzzer%02i' % i, args.path, '@@']
print(argv)
cs.append(subprocess.Popen(argv, env=envp, stdout=subprocess.DEVNULL))
# wait for the main instance
try:
p.wait()
except KeyboardInterrupt as _:
pass
for c in cs:
c.terminate()
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
0057ea58b488f2c92b621f1312c5088a7395987d
|
indexer.py
|
indexer.py
|
#-*- coding:utf-8 -*-
import redis
import simplejson
import mmseg
r = redis.Redis ("localhost")
mmseg.Dictionary.load_dictionaries()
database='database:book'
indexbase='index:book'
def rebuild_index (items):
del_index ()
build_index (items)
def del_index ():
prefixs = r.smembers (indexbase)
for prefix in prefixs:
r.delete('%s:%s'%(indexbase,prefix))
r.delete(indexbase)
r.delete(database)
def build_index (items):
"""
Build index for items.
"""
for item in items:
add_item (item)
def add_item (item):
"""
Create index for ITEM.
"""
r.hset (database, item['id'], simplejson.dumps(item))
for prefix in prefixs_for_term (item['term']):
r.sadd (indexbase, prefix)
r.zadd ('%s:%s'%(indexbase,prefix),item['id'], item['score'])
def del_item (item):
"""
Delete ITEM from the index
"""
pass
def prefixs_for_term (term):
"""
Get prefixs for TERM.
"""
prefixs=[]
tokens=mmseg.Algorithm(term)
for token in tokens:
word = token.text
for i in xrange (1,len(word)+1):
prefixs.append(word[:i])
return prefixs
def search_query (prefix):
ids=r.zrange ('%s:%s'%(indexbase,prefix), 0, 5)
if not ids: return ids
return r.hmget(database, *ids)
def load_items ():
items= (
{'id':'1', 'term': u'轻轻地你走了', 'score': '9'},
{'id':'2', 'term': u'正如你轻轻地来', 'score': '8'},
{'id':'3', 'term': u'你挥一挥衣袖,不带走一片云彩', 'score': '8.5'},
)
rebuild_index (items)
|
Build index ready for autocomplete.
|
Build index ready for autocomplete.
* indexer.py (rebuild_index, del_index, build_index,
add_item, del_item, prefixs_for_term, search_query,
load_items): New.
|
Python
|
mit
|
fengli/autocomplete-redis
|
Build index ready for autocomplete.
* indexer.py (rebuild_index, del_index, build_index,
add_item, del_item, prefixs_for_term, search_query,
load_items): New.
|
#-*- coding:utf-8 -*-
import redis
import simplejson
import mmseg
r = redis.Redis ("localhost")
mmseg.Dictionary.load_dictionaries()
database='database:book'
indexbase='index:book'
def rebuild_index (items):
del_index ()
build_index (items)
def del_index ():
prefixs = r.smembers (indexbase)
for prefix in prefixs:
r.delete('%s:%s'%(indexbase,prefix))
r.delete(indexbase)
r.delete(database)
def build_index (items):
"""
Build index for items.
"""
for item in items:
add_item (item)
def add_item (item):
"""
Create index for ITEM.
"""
r.hset (database, item['id'], simplejson.dumps(item))
for prefix in prefixs_for_term (item['term']):
r.sadd (indexbase, prefix)
r.zadd ('%s:%s'%(indexbase,prefix),item['id'], item['score'])
def del_item (item):
"""
Delete ITEM from the index
"""
pass
def prefixs_for_term (term):
"""
Get prefixs for TERM.
"""
prefixs=[]
tokens=mmseg.Algorithm(term)
for token in tokens:
word = token.text
for i in xrange (1,len(word)+1):
prefixs.append(word[:i])
return prefixs
def search_query (prefix):
ids=r.zrange ('%s:%s'%(indexbase,prefix), 0, 5)
if not ids: return ids
return r.hmget(database, *ids)
def load_items ():
items= (
{'id':'1', 'term': u'轻轻地你走了', 'score': '9'},
{'id':'2', 'term': u'正如你轻轻地来', 'score': '8'},
{'id':'3', 'term': u'你挥一挥衣袖,不带走一片云彩', 'score': '8.5'},
)
rebuild_index (items)
|
<commit_before><commit_msg> Build index ready for autocomplete.
* indexer.py (rebuild_index, del_index, build_index,
add_item, del_item, prefixs_for_term, search_query,
load_items): New.<commit_after>
|
#-*- coding:utf-8 -*-
import redis
import simplejson
import mmseg
r = redis.Redis ("localhost")
mmseg.Dictionary.load_dictionaries()
database='database:book'
indexbase='index:book'
def rebuild_index (items):
del_index ()
build_index (items)
def del_index ():
prefixs = r.smembers (indexbase)
for prefix in prefixs:
r.delete('%s:%s'%(indexbase,prefix))
r.delete(indexbase)
r.delete(database)
def build_index (items):
"""
Build index for items.
"""
for item in items:
add_item (item)
def add_item (item):
"""
Create index for ITEM.
"""
r.hset (database, item['id'], simplejson.dumps(item))
for prefix in prefixs_for_term (item['term']):
r.sadd (indexbase, prefix)
r.zadd ('%s:%s'%(indexbase,prefix),item['id'], item['score'])
def del_item (item):
"""
Delete ITEM from the index
"""
pass
def prefixs_for_term (term):
"""
Get prefixs for TERM.
"""
prefixs=[]
tokens=mmseg.Algorithm(term)
for token in tokens:
word = token.text
for i in xrange (1,len(word)+1):
prefixs.append(word[:i])
return prefixs
def search_query (prefix):
ids=r.zrange ('%s:%s'%(indexbase,prefix), 0, 5)
if not ids: return ids
return r.hmget(database, *ids)
def load_items ():
items= (
{'id':'1', 'term': u'轻轻地你走了', 'score': '9'},
{'id':'2', 'term': u'正如你轻轻地来', 'score': '8'},
{'id':'3', 'term': u'你挥一挥衣袖,不带走一片云彩', 'score': '8.5'},
)
rebuild_index (items)
|
Build index ready for autocomplete.
* indexer.py (rebuild_index, del_index, build_index,
add_item, del_item, prefixs_for_term, search_query,
load_items): New.#-*- coding:utf-8 -*-
import redis
import simplejson
import mmseg
r = redis.Redis ("localhost")
mmseg.Dictionary.load_dictionaries()
database='database:book'
indexbase='index:book'
def rebuild_index (items):
del_index ()
build_index (items)
def del_index ():
prefixs = r.smembers (indexbase)
for prefix in prefixs:
r.delete('%s:%s'%(indexbase,prefix))
r.delete(indexbase)
r.delete(database)
def build_index (items):
"""
Build index for items.
"""
for item in items:
add_item (item)
def add_item (item):
"""
Create index for ITEM.
"""
r.hset (database, item['id'], simplejson.dumps(item))
for prefix in prefixs_for_term (item['term']):
r.sadd (indexbase, prefix)
r.zadd ('%s:%s'%(indexbase,prefix),item['id'], item['score'])
def del_item (item):
"""
Delete ITEM from the index
"""
pass
def prefixs_for_term (term):
"""
Get prefixs for TERM.
"""
prefixs=[]
tokens=mmseg.Algorithm(term)
for token in tokens:
word = token.text
for i in xrange (1,len(word)+1):
prefixs.append(word[:i])
return prefixs
def search_query (prefix):
ids=r.zrange ('%s:%s'%(indexbase,prefix), 0, 5)
if not ids: return ids
return r.hmget(database, *ids)
def load_items ():
items= (
{'id':'1', 'term': u'轻轻地你走了', 'score': '9'},
{'id':'2', 'term': u'正如你轻轻地来', 'score': '8'},
{'id':'3', 'term': u'你挥一挥衣袖,不带走一片云彩', 'score': '8.5'},
)
rebuild_index (items)
|
<commit_before><commit_msg> Build index ready for autocomplete.
* indexer.py (rebuild_index, del_index, build_index,
add_item, del_item, prefixs_for_term, search_query,
load_items): New.<commit_after>#-*- coding:utf-8 -*-
import redis
import simplejson
import mmseg
r = redis.Redis ("localhost")
mmseg.Dictionary.load_dictionaries()
database='database:book'
indexbase='index:book'
def rebuild_index (items):
del_index ()
build_index (items)
def del_index ():
prefixs = r.smembers (indexbase)
for prefix in prefixs:
r.delete('%s:%s'%(indexbase,prefix))
r.delete(indexbase)
r.delete(database)
def build_index (items):
"""
Build index for items.
"""
for item in items:
add_item (item)
def add_item (item):
"""
Create index for ITEM.
"""
r.hset (database, item['id'], simplejson.dumps(item))
for prefix in prefixs_for_term (item['term']):
r.sadd (indexbase, prefix)
r.zadd ('%s:%s'%(indexbase,prefix),item['id'], item['score'])
def del_item (item):
"""
Delete ITEM from the index
"""
pass
def prefixs_for_term (term):
"""
Get prefixs for TERM.
"""
prefixs=[]
tokens=mmseg.Algorithm(term)
for token in tokens:
word = token.text
for i in xrange (1,len(word)+1):
prefixs.append(word[:i])
return prefixs
def search_query (prefix):
ids=r.zrange ('%s:%s'%(indexbase,prefix), 0, 5)
if not ids: return ids
return r.hmget(database, *ids)
def load_items ():
items= (
{'id':'1', 'term': u'轻轻地你走了', 'score': '9'},
{'id':'2', 'term': u'正如你轻轻地来', 'score': '8'},
{'id':'3', 'term': u'你挥一挥衣袖,不带走一片云彩', 'score': '8.5'},
)
rebuild_index (items)
|
|
b0a74635399c86694b12587e91334886b434bca5
|
corehq/ex-submodules/couchforms/management/commands/one_off_resave_forms.py
|
corehq/ex-submodules/couchforms/management/commands/one_off_resave_forms.py
|
"""
ATTENTION!
If you see this file after the year 2015, please delete it immediately.
"""
import sys
from django.core.management.base import BaseCommand
from datetime import datetime
from corehq.apps.sofabed.models import FormData
from corehq.pillows.xform import XFormPillow
from corehq.pillows.reportxform import ReportXFormPillow
from pillowtop.feed.interface import Change
class Command(BaseCommand):
help = ("Send all form submitted between Oct 28 7:30am UTC and 8:10am UTC"
"to elasticsearch. I checked, there are 4697 of 'em")
def handle(self, *args, **options):
start = datetime(2015, 10, 28, 7, 0)
end = datetime(2015, 10, 28, 9, 0)
# I didn't see any couch views which can get forms in a date range
# without a domain, so I'm using FormData.
form_ids = (FormData.objects.filter(received_on__range=(start, end))
.values_list('instance_id', flat=True))
msg = "Really resave {} forms? (y/n)\n".format(len(form_ids))
if raw_input(msg) != "y":
print "cancelling"
sys.exit()
for form_id in form_ids:
XFormPillow().processor(Change(id=form_id, sequence_id=None), None)
ReportXFormPillow().processor(Change(id=form_id, sequence_id=None), None)
|
Add one off mgmt cmd to resubmit some forms to ES
|
Add one off mgmt cmd to resubmit some forms to ES
http://manage.dimagi.com/default.asp?187526
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
Add one off mgmt cmd to resubmit some forms to ES
http://manage.dimagi.com/default.asp?187526
|
"""
ATTENTION!
If you see this file after the year 2015, please delete it immediately.
"""
import sys
from django.core.management.base import BaseCommand
from datetime import datetime
from corehq.apps.sofabed.models import FormData
from corehq.pillows.xform import XFormPillow
from corehq.pillows.reportxform import ReportXFormPillow
from pillowtop.feed.interface import Change
class Command(BaseCommand):
help = ("Send all form submitted between Oct 28 7:30am UTC and 8:10am UTC"
"to elasticsearch. I checked, there are 4697 of 'em")
def handle(self, *args, **options):
start = datetime(2015, 10, 28, 7, 0)
end = datetime(2015, 10, 28, 9, 0)
# I didn't see any couch views which can get forms in a date range
# without a domain, so I'm using FormData.
form_ids = (FormData.objects.filter(received_on__range=(start, end))
.values_list('instance_id', flat=True))
msg = "Really resave {} forms? (y/n)\n".format(len(form_ids))
if raw_input(msg) != "y":
print "cancelling"
sys.exit()
for form_id in form_ids:
XFormPillow().processor(Change(id=form_id, sequence_id=None), None)
ReportXFormPillow().processor(Change(id=form_id, sequence_id=None), None)
|
<commit_before><commit_msg>Add one off mgmt cmd to resubmit some forms to ES
http://manage.dimagi.com/default.asp?187526<commit_after>
|
"""
ATTENTION!
If you see this file after the year 2015, please delete it immediately.
"""
import sys
from django.core.management.base import BaseCommand
from datetime import datetime
from corehq.apps.sofabed.models import FormData
from corehq.pillows.xform import XFormPillow
from corehq.pillows.reportxform import ReportXFormPillow
from pillowtop.feed.interface import Change
class Command(BaseCommand):
help = ("Send all form submitted between Oct 28 7:30am UTC and 8:10am UTC"
"to elasticsearch. I checked, there are 4697 of 'em")
def handle(self, *args, **options):
start = datetime(2015, 10, 28, 7, 0)
end = datetime(2015, 10, 28, 9, 0)
# I didn't see any couch views which can get forms in a date range
# without a domain, so I'm using FormData.
form_ids = (FormData.objects.filter(received_on__range=(start, end))
.values_list('instance_id', flat=True))
msg = "Really resave {} forms? (y/n)\n".format(len(form_ids))
if raw_input(msg) != "y":
print "cancelling"
sys.exit()
for form_id in form_ids:
XFormPillow().processor(Change(id=form_id, sequence_id=None), None)
ReportXFormPillow().processor(Change(id=form_id, sequence_id=None), None)
|
Add one off mgmt cmd to resubmit some forms to ES
http://manage.dimagi.com/default.asp?187526"""
ATTENTION!
If you see this file after the year 2015, please delete it immediately.
"""
import sys
from django.core.management.base import BaseCommand
from datetime import datetime
from corehq.apps.sofabed.models import FormData
from corehq.pillows.xform import XFormPillow
from corehq.pillows.reportxform import ReportXFormPillow
from pillowtop.feed.interface import Change
class Command(BaseCommand):
help = ("Send all form submitted between Oct 28 7:30am UTC and 8:10am UTC"
"to elasticsearch. I checked, there are 4697 of 'em")
def handle(self, *args, **options):
start = datetime(2015, 10, 28, 7, 0)
end = datetime(2015, 10, 28, 9, 0)
# I didn't see any couch views which can get forms in a date range
# without a domain, so I'm using FormData.
form_ids = (FormData.objects.filter(received_on__range=(start, end))
.values_list('instance_id', flat=True))
msg = "Really resave {} forms? (y/n)\n".format(len(form_ids))
if raw_input(msg) != "y":
print "cancelling"
sys.exit()
for form_id in form_ids:
XFormPillow().processor(Change(id=form_id, sequence_id=None), None)
ReportXFormPillow().processor(Change(id=form_id, sequence_id=None), None)
|
<commit_before><commit_msg>Add one off mgmt cmd to resubmit some forms to ES
http://manage.dimagi.com/default.asp?187526<commit_after>"""
ATTENTION!
If you see this file after the year 2015, please delete it immediately.
"""
import sys
from django.core.management.base import BaseCommand
from datetime import datetime
from corehq.apps.sofabed.models import FormData
from corehq.pillows.xform import XFormPillow
from corehq.pillows.reportxform import ReportXFormPillow
from pillowtop.feed.interface import Change
class Command(BaseCommand):
help = ("Send all form submitted between Oct 28 7:30am UTC and 8:10am UTC"
"to elasticsearch. I checked, there are 4697 of 'em")
def handle(self, *args, **options):
start = datetime(2015, 10, 28, 7, 0)
end = datetime(2015, 10, 28, 9, 0)
# I didn't see any couch views which can get forms in a date range
# without a domain, so I'm using FormData.
form_ids = (FormData.objects.filter(received_on__range=(start, end))
.values_list('instance_id', flat=True))
msg = "Really resave {} forms? (y/n)\n".format(len(form_ids))
if raw_input(msg) != "y":
print "cancelling"
sys.exit()
for form_id in form_ids:
XFormPillow().processor(Change(id=form_id, sequence_id=None), None)
ReportXFormPillow().processor(Change(id=form_id, sequence_id=None), None)
|
|
0251a4a4670ee7ae28fe58cbd313688bf20ba986
|
tbmodels/_kdotp.py
|
tbmodels/_kdotp.py
|
import numpy as np
import scipy.linalg as la
from fsc.export import export
from fsc.hdf5_io import subscribe_hdf5, SimpleHDF5Mapping
@export
@subscribe_hdf5('tbmodels.model', check_on_load=False)
class KdotpModel(SimpleHDF5Mapping):
HDF5_ATTRIBUTES = ['taylor_coefficients']
def __init__(self, taylor_coefficients):
self.taylor_coefficients = {
tuple(key): np.array(mat, dtype=complex)
for key, mat in taylor_coefficients.items()
}
def hamilton(self, k):
return sum(sum(kval**p for kval, p in zip(k, pow)) * mat for pow, mat in self.taylor_coefficients.items())
def eigenval(self, k):
return la.eigvalsh(self.hamilton(k))
|
Add prototype for k.p class.
|
Add prototype for k.p class.
|
Python
|
apache-2.0
|
Z2PackDev/TBmodels,Z2PackDev/TBmodels
|
Add prototype for k.p class.
|
import numpy as np
import scipy.linalg as la
from fsc.export import export
from fsc.hdf5_io import subscribe_hdf5, SimpleHDF5Mapping
@export
@subscribe_hdf5('tbmodels.model', check_on_load=False)
class KdotpModel(SimpleHDF5Mapping):
HDF5_ATTRIBUTES = ['taylor_coefficients']
def __init__(self, taylor_coefficients):
self.taylor_coefficients = {
tuple(key): np.array(mat, dtype=complex)
for key, mat in taylor_coefficients.items()
}
def hamilton(self, k):
return sum(sum(kval**p for kval, p in zip(k, pow)) * mat for pow, mat in self.taylor_coefficients.items())
def eigenval(self, k):
return la.eigvalsh(self.hamilton(k))
|
<commit_before><commit_msg>Add prototype for k.p class.<commit_after>
|
import numpy as np
import scipy.linalg as la
from fsc.export import export
from fsc.hdf5_io import subscribe_hdf5, SimpleHDF5Mapping
@export
@subscribe_hdf5('tbmodels.model', check_on_load=False)
class KdotpModel(SimpleHDF5Mapping):
HDF5_ATTRIBUTES = ['taylor_coefficients']
def __init__(self, taylor_coefficients):
self.taylor_coefficients = {
tuple(key): np.array(mat, dtype=complex)
for key, mat in taylor_coefficients.items()
}
def hamilton(self, k):
return sum(sum(kval**p for kval, p in zip(k, pow)) * mat for pow, mat in self.taylor_coefficients.items())
def eigenval(self, k):
return la.eigvalsh(self.hamilton(k))
|
Add prototype for k.p class.import numpy as np
import scipy.linalg as la
from fsc.export import export
from fsc.hdf5_io import subscribe_hdf5, SimpleHDF5Mapping
@export
@subscribe_hdf5('tbmodels.model', check_on_load=False)
class KdotpModel(SimpleHDF5Mapping):
HDF5_ATTRIBUTES = ['taylor_coefficients']
def __init__(self, taylor_coefficients):
self.taylor_coefficients = {
tuple(key): np.array(mat, dtype=complex)
for key, mat in taylor_coefficients.items()
}
def hamilton(self, k):
return sum(sum(kval**p for kval, p in zip(k, pow)) * mat for pow, mat in self.taylor_coefficients.items())
def eigenval(self, k):
return la.eigvalsh(self.hamilton(k))
|
<commit_before><commit_msg>Add prototype for k.p class.<commit_after>import numpy as np
import scipy.linalg as la
from fsc.export import export
from fsc.hdf5_io import subscribe_hdf5, SimpleHDF5Mapping
@export
@subscribe_hdf5('tbmodels.model', check_on_load=False)
class KdotpModel(SimpleHDF5Mapping):
HDF5_ATTRIBUTES = ['taylor_coefficients']
def __init__(self, taylor_coefficients):
self.taylor_coefficients = {
tuple(key): np.array(mat, dtype=complex)
for key, mat in taylor_coefficients.items()
}
def hamilton(self, k):
return sum(sum(kval**p for kval, p in zip(k, pow)) * mat for pow, mat in self.taylor_coefficients.items())
def eigenval(self, k):
return la.eigvalsh(self.hamilton(k))
|
|
f9a185fa48785b1c39d861fc13742356fe832313
|
Tests/ConfTest.py
|
Tests/ConfTest.py
|
import requests
import json
CLOUD_END_POINT = "http://211.249.49.198:5000"
def test_IsHomePageWorking():
homePageRequest = requests.get(CLOUD_END_POINT)
assert homePageRequest.text == "Hello world"
def test_UploadTestData():
testUploadData = {
"Data": [20.0, 30.0, 401.0, 50.0, 60.0],
"Date": ["2017-08-11", "2017-08-12", "2017-08-13", "2017-08-14", "2017-08-15"],
"Day": 4
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/upload/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Result"] >= 0, "Wrong process id returned"
return
def test_DownloadWorkingForecastData():
testUploadData = {
"ProcessId": 9
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Working", "Is it really process done?"
def test_DownloadDoneForecastData():
testUploadData = {
"ProcessId": 2
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Done", "That process id must return status done"
|
Test module : some test scenario added
|
Test module : some test scenario added
|
Python
|
mit
|
I2MAX-LearningProject/Flask-server,I2MAX-LearningProject/Flask-server,I2MAX-LearningProject/Flask-server,I2MAX-LearningProject/Flask-server
|
Test module : some test scenario added
|
import requests
import json
CLOUD_END_POINT = "http://211.249.49.198:5000"
def test_IsHomePageWorking():
homePageRequest = requests.get(CLOUD_END_POINT)
assert homePageRequest.text == "Hello world"
def test_UploadTestData():
testUploadData = {
"Data": [20.0, 30.0, 401.0, 50.0, 60.0],
"Date": ["2017-08-11", "2017-08-12", "2017-08-13", "2017-08-14", "2017-08-15"],
"Day": 4
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/upload/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Result"] >= 0, "Wrong process id returned"
return
def test_DownloadWorkingForecastData():
testUploadData = {
"ProcessId": 9
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Working", "Is it really process done?"
def test_DownloadDoneForecastData():
testUploadData = {
"ProcessId": 2
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Done", "That process id must return status done"
|
<commit_before><commit_msg>Test module : some test scenario added<commit_after>
|
import requests
import json
CLOUD_END_POINT = "http://211.249.49.198:5000"
def test_IsHomePageWorking():
homePageRequest = requests.get(CLOUD_END_POINT)
assert homePageRequest.text == "Hello world"
def test_UploadTestData():
testUploadData = {
"Data": [20.0, 30.0, 401.0, 50.0, 60.0],
"Date": ["2017-08-11", "2017-08-12", "2017-08-13", "2017-08-14", "2017-08-15"],
"Day": 4
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/upload/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Result"] >= 0, "Wrong process id returned"
return
def test_DownloadWorkingForecastData():
testUploadData = {
"ProcessId": 9
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Working", "Is it really process done?"
def test_DownloadDoneForecastData():
testUploadData = {
"ProcessId": 2
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Done", "That process id must return status done"
|
Test module : some test scenario addedimport requests
import json
CLOUD_END_POINT = "http://211.249.49.198:5000"
def test_IsHomePageWorking():
homePageRequest = requests.get(CLOUD_END_POINT)
assert homePageRequest.text == "Hello world"
def test_UploadTestData():
testUploadData = {
"Data": [20.0, 30.0, 401.0, 50.0, 60.0],
"Date": ["2017-08-11", "2017-08-12", "2017-08-13", "2017-08-14", "2017-08-15"],
"Day": 4
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/upload/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Result"] >= 0, "Wrong process id returned"
return
def test_DownloadWorkingForecastData():
testUploadData = {
"ProcessId": 9
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Working", "Is it really process done?"
def test_DownloadDoneForecastData():
testUploadData = {
"ProcessId": 2
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Done", "That process id must return status done"
|
<commit_before><commit_msg>Test module : some test scenario added<commit_after>import requests
import json
CLOUD_END_POINT = "http://211.249.49.198:5000"
def test_IsHomePageWorking():
homePageRequest = requests.get(CLOUD_END_POINT)
assert homePageRequest.text == "Hello world"
def test_UploadTestData():
testUploadData = {
"Data": [20.0, 30.0, 401.0, 50.0, 60.0],
"Date": ["2017-08-11", "2017-08-12", "2017-08-13", "2017-08-14", "2017-08-15"],
"Day": 4
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/upload/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Result"] >= 0, "Wrong process id returned"
return
def test_DownloadWorkingForecastData():
testUploadData = {
"ProcessId": 9
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Working", "Is it really process done?"
def test_DownloadDoneForecastData():
testUploadData = {
"ProcessId": 2
}
uploadTestDataRequest = requests.post(CLOUD_END_POINT + "/forecast/", json=testUploadData)
serverResponse = json.loads(uploadTestDataRequest.text)
assert serverResponse["Status"] == "Done", "That process id must return status done"
|
|
ef48d9fa9b15704c0fd4be897da1e29f7e359cc7
|
app/textutils.py
|
app/textutils.py
|
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utilities for processing text files, including patching and diffing,
# as well as semantic splitting of ghilbert proof files.
def count_char(line, ch, limit):
count = 0
i = 0
while True:
i = line.find(ch, i, limit)
if i < 0: return count
count += 1
i += 1
def count_parens(line):
limit = line.find('#')
if limit == -1: limit = len(line)
return count_char(line, '(', limit) - count_char(line, ')', limit)
def tokenize(line):
line = line.split('#')[0]
line = line.replace('(', ' ( ')
line = line.replace(')', ' ) ')
return line.split()
# Returns a list of (name, startline, endline) tuples, where lines are
# 0-based, and exclusive of endline. Ignore comment blocks for now
def split_gh_file(lines):
level = 0
result = []
startline = 0
name = None
for i, line in enumerate(lines):
delta = count_parens(line)
if level == 0 and name is None:
if line.startswith('#!'):
startline = i + 1
elif startline == i and line.rstrip() == '':
startline = i + 1
else:
toks = tokenize(line)
if len(toks) >= 3 and toks[0] == 'thm' and toks[1] == '(':
name = toks[2]
elif level + delta == 0 and len(toks) and toks[-1] == ')':
startline = i + 1
level += delta
if name is not None and level == 0:
result.append((name, startline, i + 1))
name = None
startline = i + 1
return result
# for testing
if __name__ == '__main__':
import sys
lines = file(sys.argv[1]).read().split('\n')
annot = split_gh_file(lines)
print annot
ix = 0
pref = ''
for i, line in enumerate(lines):
if ix < len(annot) and i == annot[ix][2]:
pref = ''
ix += 1
if ix < len(annot) and i == annot[ix][1]:
pref = annot[ix][0] + ':'
print '%10s %s' % (pref, line)
|
Add utility function to split .gh file into thms
|
Add utility function to split .gh file into thms
This commit starts some new text utilities which will be useful for
extracting an individual thm from a .gh, for editing.
|
Python
|
apache-2.0
|
raphlinus/ghilbert,raphlinus/ghilbert,jkingdon/ghilbert,kryptine/ghilbert,raphlinus/ghilbert,kryptine/ghilbert,jkingdon/ghilbert,raphlinus/ghilbert
|
Add utility function to split .gh file into thms
This commit starts some new text utilities which will be useful for
extracting an individual thm from a .gh, for editing.
|
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utilities for processing text files, including patching and diffing,
# as well as semantic splitting of ghilbert proof files.
def count_char(line, ch, limit):
count = 0
i = 0
while True:
i = line.find(ch, i, limit)
if i < 0: return count
count += 1
i += 1
def count_parens(line):
limit = line.find('#')
if limit == -1: limit = len(line)
return count_char(line, '(', limit) - count_char(line, ')', limit)
def tokenize(line):
line = line.split('#')[0]
line = line.replace('(', ' ( ')
line = line.replace(')', ' ) ')
return line.split()
# Returns a list of (name, startline, endline) tuples, where lines are
# 0-based, and exclusive of endline. Ignore comment blocks for now
def split_gh_file(lines):
level = 0
result = []
startline = 0
name = None
for i, line in enumerate(lines):
delta = count_parens(line)
if level == 0 and name is None:
if line.startswith('#!'):
startline = i + 1
elif startline == i and line.rstrip() == '':
startline = i + 1
else:
toks = tokenize(line)
if len(toks) >= 3 and toks[0] == 'thm' and toks[1] == '(':
name = toks[2]
elif level + delta == 0 and len(toks) and toks[-1] == ')':
startline = i + 1
level += delta
if name is not None and level == 0:
result.append((name, startline, i + 1))
name = None
startline = i + 1
return result
# for testing
if __name__ == '__main__':
import sys
lines = file(sys.argv[1]).read().split('\n')
annot = split_gh_file(lines)
print annot
ix = 0
pref = ''
for i, line in enumerate(lines):
if ix < len(annot) and i == annot[ix][2]:
pref = ''
ix += 1
if ix < len(annot) and i == annot[ix][1]:
pref = annot[ix][0] + ':'
print '%10s %s' % (pref, line)
|
<commit_before><commit_msg>Add utility function to split .gh file into thms
This commit starts some new text utilities which will be useful for
extracting an individual thm from a .gh, for editing.<commit_after>
|
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utilities for processing text files, including patching and diffing,
# as well as semantic splitting of ghilbert proof files.
def count_char(line, ch, limit):
count = 0
i = 0
while True:
i = line.find(ch, i, limit)
if i < 0: return count
count += 1
i += 1
def count_parens(line):
limit = line.find('#')
if limit == -1: limit = len(line)
return count_char(line, '(', limit) - count_char(line, ')', limit)
def tokenize(line):
line = line.split('#')[0]
line = line.replace('(', ' ( ')
line = line.replace(')', ' ) ')
return line.split()
# Returns a list of (name, startline, endline) tuples, where lines are
# 0-based, and exclusive of endline. Ignore comment blocks for now
def split_gh_file(lines):
level = 0
result = []
startline = 0
name = None
for i, line in enumerate(lines):
delta = count_parens(line)
if level == 0 and name is None:
if line.startswith('#!'):
startline = i + 1
elif startline == i and line.rstrip() == '':
startline = i + 1
else:
toks = tokenize(line)
if len(toks) >= 3 and toks[0] == 'thm' and toks[1] == '(':
name = toks[2]
elif level + delta == 0 and len(toks) and toks[-1] == ')':
startline = i + 1
level += delta
if name is not None and level == 0:
result.append((name, startline, i + 1))
name = None
startline = i + 1
return result
# for testing
if __name__ == '__main__':
import sys
lines = file(sys.argv[1]).read().split('\n')
annot = split_gh_file(lines)
print annot
ix = 0
pref = ''
for i, line in enumerate(lines):
if ix < len(annot) and i == annot[ix][2]:
pref = ''
ix += 1
if ix < len(annot) and i == annot[ix][1]:
pref = annot[ix][0] + ':'
print '%10s %s' % (pref, line)
|
Add utility function to split .gh file into thms
This commit starts some new text utilities which will be useful for
extracting an individual thm from a .gh, for editing.# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utilities for processing text files, including patching and diffing,
# as well as semantic splitting of ghilbert proof files.
def count_char(line, ch, limit):
count = 0
i = 0
while True:
i = line.find(ch, i, limit)
if i < 0: return count
count += 1
i += 1
def count_parens(line):
limit = line.find('#')
if limit == -1: limit = len(line)
return count_char(line, '(', limit) - count_char(line, ')', limit)
def tokenize(line):
line = line.split('#')[0]
line = line.replace('(', ' ( ')
line = line.replace(')', ' ) ')
return line.split()
# Returns a list of (name, startline, endline) tuples, where lines are
# 0-based, and exclusive of endline. Ignore comment blocks for now
def split_gh_file(lines):
level = 0
result = []
startline = 0
name = None
for i, line in enumerate(lines):
delta = count_parens(line)
if level == 0 and name is None:
if line.startswith('#!'):
startline = i + 1
elif startline == i and line.rstrip() == '':
startline = i + 1
else:
toks = tokenize(line)
if len(toks) >= 3 and toks[0] == 'thm' and toks[1] == '(':
name = toks[2]
elif level + delta == 0 and len(toks) and toks[-1] == ')':
startline = i + 1
level += delta
if name is not None and level == 0:
result.append((name, startline, i + 1))
name = None
startline = i + 1
return result
# for testing
if __name__ == '__main__':
import sys
lines = file(sys.argv[1]).read().split('\n')
annot = split_gh_file(lines)
print annot
ix = 0
pref = ''
for i, line in enumerate(lines):
if ix < len(annot) and i == annot[ix][2]:
pref = ''
ix += 1
if ix < len(annot) and i == annot[ix][1]:
pref = annot[ix][0] + ':'
print '%10s %s' % (pref, line)
|
<commit_before><commit_msg>Add utility function to split .gh file into thms
This commit starts some new text utilities which will be useful for
extracting an individual thm from a .gh, for editing.<commit_after># Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utilities for processing text files, including patching and diffing,
# as well as semantic splitting of ghilbert proof files.
def count_char(line, ch, limit):
count = 0
i = 0
while True:
i = line.find(ch, i, limit)
if i < 0: return count
count += 1
i += 1
def count_parens(line):
limit = line.find('#')
if limit == -1: limit = len(line)
return count_char(line, '(', limit) - count_char(line, ')', limit)
def tokenize(line):
line = line.split('#')[0]
line = line.replace('(', ' ( ')
line = line.replace(')', ' ) ')
return line.split()
# Returns a list of (name, startline, endline) tuples, where lines are
# 0-based, and exclusive of endline. Ignore comment blocks for now
def split_gh_file(lines):
level = 0
result = []
startline = 0
name = None
for i, line in enumerate(lines):
delta = count_parens(line)
if level == 0 and name is None:
if line.startswith('#!'):
startline = i + 1
elif startline == i and line.rstrip() == '':
startline = i + 1
else:
toks = tokenize(line)
if len(toks) >= 3 and toks[0] == 'thm' and toks[1] == '(':
name = toks[2]
elif level + delta == 0 and len(toks) and toks[-1] == ')':
startline = i + 1
level += delta
if name is not None and level == 0:
result.append((name, startline, i + 1))
name = None
startline = i + 1
return result
# for testing
if __name__ == '__main__':
import sys
lines = file(sys.argv[1]).read().split('\n')
annot = split_gh_file(lines)
print annot
ix = 0
pref = ''
for i, line in enumerate(lines):
if ix < len(annot) and i == annot[ix][2]:
pref = ''
ix += 1
if ix < len(annot) and i == annot[ix][1]:
pref = annot[ix][0] + ':'
print '%10s %s' % (pref, line)
|
|
f0d5dae0823f083dfe66caae12960ee32fc32967
|
autohost_manager.py
|
autohost_manager.py
|
"""
TODO Mon?: Autohost manager for AliCatFiberarts (and others).
* Have a list of high priority streams, in order (or with priorities)
* Listen for, or poll for, streams going live
* If (a) a high priority stream has just gone live, and (b) you are currently hosting, and (c) the hosted stream has lower priority
* Then send "/unhost" to the channel.
* Have a very very simple GUI (tkinter?)
* "Optional: Rename this to autohost_manager.pyw to hide the black box"
"""
|
Create a file with nothing but a TODO in it
|
ahm: Create a file with nothing but a TODO in it
|
Python
|
mit
|
Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed,Rosuav/shed
|
ahm: Create a file with nothing but a TODO in it
|
"""
TODO Mon?: Autohost manager for AliCatFiberarts (and others).
* Have a list of high priority streams, in order (or with priorities)
* Listen for, or poll for, streams going live
* If (a) a high priority stream has just gone live, and (b) you are currently hosting, and (c) the hosted stream has lower priority
* Then send "/unhost" to the channel.
* Have a very very simple GUI (tkinter?)
* "Optional: Rename this to autohost_manager.pyw to hide the black box"
"""
|
<commit_before><commit_msg>ahm: Create a file with nothing but a TODO in it<commit_after>
|
"""
TODO Mon?: Autohost manager for AliCatFiberarts (and others).
* Have a list of high priority streams, in order (or with priorities)
* Listen for, or poll for, streams going live
* If (a) a high priority stream has just gone live, and (b) you are currently hosting, and (c) the hosted stream has lower priority
* Then send "/unhost" to the channel.
* Have a very very simple GUI (tkinter?)
* "Optional: Rename this to autohost_manager.pyw to hide the black box"
"""
|
ahm: Create a file with nothing but a TODO in it"""
TODO Mon?: Autohost manager for AliCatFiberarts (and others).
* Have a list of high priority streams, in order (or with priorities)
* Listen for, or poll for, streams going live
* If (a) a high priority stream has just gone live, and (b) you are currently hosting, and (c) the hosted stream has lower priority
* Then send "/unhost" to the channel.
* Have a very very simple GUI (tkinter?)
* "Optional: Rename this to autohost_manager.pyw to hide the black box"
"""
|
<commit_before><commit_msg>ahm: Create a file with nothing but a TODO in it<commit_after>"""
TODO Mon?: Autohost manager for AliCatFiberarts (and others).
* Have a list of high priority streams, in order (or with priorities)
* Listen for, or poll for, streams going live
* If (a) a high priority stream has just gone live, and (b) you are currently hosting, and (c) the hosted stream has lower priority
* Then send "/unhost" to the channel.
* Have a very very simple GUI (tkinter?)
* "Optional: Rename this to autohost_manager.pyw to hide the black box"
"""
|