Column statistics (from the dataset viewer): commit (stringlengths 40–40), old_file (4–118), new_file (4–118), old_contents (0–2.94k), new_contents (1–4.43k), subject (15–444), message (16–3.45k), lang (1 class), license (13 classes), repos (5–43.2k), prompt (17–4.58k), response (1–4.43k), prompt_tagged (58–4.62k), response_tagged (1–4.43k), text (132–7.29k), text_tagged (173–7.33k).

| commit | old_file | new_file | old_contents | new_contents | subject | message | lang | license | repos | prompt | response | prompt_tagged | response_tagged | text | text_tagged |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4e73cc77f8353bf18b15d704fe58336018cb68a8
|
tests/test_lc.py
|
tests/test_lc.py
|
from __future__ import division, print_function
import cPickle as pickle
import sys
import matplotlib.pyplot as plt
print('Loading lc file...')
lcdata = pickle.load(open(sys.argv[1]))
time = lcdata['time']
lc = lcdata['lc']
gti = lcdata['gti']
plt.plot(time, lc, drawstyle='steps-mid', color='k')
for g in gti:
plt.axvline(g[0], ls='--', color='red')
plt.axvline(g[1], ls='--', color='red')
plt.show()
|
Test if light curves are extracted correctly
|
Test if light curves are extracted correctly
|
Python
|
bsd-3-clause
|
matteobachetti/MaLTPyNT
|
Test if light curves are extracted correctly
|
from __future__ import division, print_function
import cPickle as pickle
import sys
import matplotlib.pyplot as plt
print('Loading lc file...')
lcdata = pickle.load(open(sys.argv[1]))
time = lcdata['time']
lc = lcdata['lc']
gti = lcdata['gti']
plt.plot(time, lc, drawstyle='steps-mid', color='k')
for g in gti:
plt.axvline(g[0], ls='--', color='red')
plt.axvline(g[1], ls='--', color='red')
plt.show()
|
<commit_before><commit_msg>Test if light curves are extracted correctly<commit_after>
|
from __future__ import division, print_function
import cPickle as pickle
import sys
import matplotlib.pyplot as plt
print('Loading lc file...')
lcdata = pickle.load(open(sys.argv[1]))
time = lcdata['time']
lc = lcdata['lc']
gti = lcdata['gti']
plt.plot(time, lc, drawstyle='steps-mid', color='k')
for g in gti:
plt.axvline(g[0], ls='--', color='red')
plt.axvline(g[1], ls='--', color='red')
plt.show()
|
Test if light curves are extracted correctlyfrom __future__ import division, print_function
import cPickle as pickle
import sys
import matplotlib.pyplot as plt
print('Loading lc file...')
lcdata = pickle.load(open(sys.argv[1]))
time = lcdata['time']
lc = lcdata['lc']
gti = lcdata['gti']
plt.plot(time, lc, drawstyle='steps-mid', color='k')
for g in gti:
plt.axvline(g[0], ls='--', color='red')
plt.axvline(g[1], ls='--', color='red')
plt.show()
|
<commit_before><commit_msg>Test if light curves are extracted correctly<commit_after>from __future__ import division, print_function
import cPickle as pickle
import sys
import matplotlib.pyplot as plt
print('Loading lc file...')
lcdata = pickle.load(open(sys.argv[1]))
time = lcdata['time']
lc = lcdata['lc']
gti = lcdata['gti']
plt.plot(time, lc, drawstyle='steps-mid', color='k')
for g in gti:
plt.axvline(g[0], ls='--', color='red')
plt.axvline(g[1], ls='--', color='red')
plt.show()
|
|
3a27fd06b182966a03c6ed92ed9b5e17e804fb24
|
src/sentry/management/commands/check_notifications.py
|
src/sentry/management/commands/check_notifications.py
|
"""
sentry.management.commands.check_notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
def find_mail_plugin():
from sentry.plugins import plugins
for plugin in plugins.all():
if type(plugin).__name__.endswith('MailPlugin'):
return plugin
assert False, 'MailPlugin cannot be found'
def handle_project(plugin, project, stream):
stream.write('# Project: %s\n' % project)
from sentry.utils.email import MessageBuilder
msg = MessageBuilder('test')
msg.add_users(plugin.get_sendable_users(project), project)
for email in msg._send_to:
stream.write(email + '\n')
class Command(BaseCommand):
help = 'Dump addresses that would get an email notification'
option_list = BaseCommand.option_list + (
make_option('--organization',
action='store',
type='int',
dest='organization',
default=0,
help='',
),
make_option('--project',
action='store',
type='int',
dest='project',
default=0,
help='',
),
)
def handle(self, *args, **options):
if not (options['project'] or options['organization']):
raise CommandError('Must specify either a project or organization')
from sentry.models import Project, Organization
if options['organization']:
projects = list(Organization.objects.get(pk=options['organization']).project_set.all())
else:
projects = [Project.objects.get(pk=options['project'])]
plugin = find_mail_plugin()
for project in projects:
handle_project(plugin, project, self.stdout)
self.stdout.write('\n')
|
Add management command to check who gets email notifications
|
Add management command to check who gets email notifications
|
Python
|
bsd-3-clause
|
ifduyue/sentry,alexm92/sentry,ifduyue/sentry,zenefits/sentry,jean/sentry,mvaled/sentry,ifduyue/sentry,mvaled/sentry,JackDanger/sentry,gencer/sentry,fotinakis/sentry,zenefits/sentry,mvaled/sentry,looker/sentry,beeftornado/sentry,JamesMura/sentry,nicholasserra/sentry,looker/sentry,fotinakis/sentry,mvaled/sentry,looker/sentry,JackDanger/sentry,JamesMura/sentry,beeftornado/sentry,mitsuhiko/sentry,BuildingLink/sentry,daevaorn/sentry,JamesMura/sentry,nicholasserra/sentry,looker/sentry,zenefits/sentry,mvaled/sentry,mitsuhiko/sentry,gencer/sentry,BuildingLink/sentry,alexm92/sentry,jean/sentry,zenefits/sentry,ifduyue/sentry,jean/sentry,ifduyue/sentry,jean/sentry,mvaled/sentry,nicholasserra/sentry,BuildingLink/sentry,JamesMura/sentry,fotinakis/sentry,zenefits/sentry,gencer/sentry,gencer/sentry,BuildingLink/sentry,fotinakis/sentry,JackDanger/sentry,beeftornado/sentry,daevaorn/sentry,JamesMura/sentry,gencer/sentry,daevaorn/sentry,BuildingLink/sentry,jean/sentry,looker/sentry,alexm92/sentry,daevaorn/sentry
|
Add management command to check who gets email notifications
|
"""
sentry.management.commands.check_notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
def find_mail_plugin():
from sentry.plugins import plugins
for plugin in plugins.all():
if type(plugin).__name__.endswith('MailPlugin'):
return plugin
assert False, 'MailPlugin cannot be found'
def handle_project(plugin, project, stream):
stream.write('# Project: %s\n' % project)
from sentry.utils.email import MessageBuilder
msg = MessageBuilder('test')
msg.add_users(plugin.get_sendable_users(project), project)
for email in msg._send_to:
stream.write(email + '\n')
class Command(BaseCommand):
help = 'Dump addresses that would get an email notification'
option_list = BaseCommand.option_list + (
make_option('--organization',
action='store',
type='int',
dest='organization',
default=0,
help='',
),
make_option('--project',
action='store',
type='int',
dest='project',
default=0,
help='',
),
)
def handle(self, *args, **options):
if not (options['project'] or options['organization']):
raise CommandError('Must specify either a project or organization')
from sentry.models import Project, Organization
if options['organization']:
projects = list(Organization.objects.get(pk=options['organization']).project_set.all())
else:
projects = [Project.objects.get(pk=options['project'])]
plugin = find_mail_plugin()
for project in projects:
handle_project(plugin, project, self.stdout)
self.stdout.write('\n')
|
<commit_before><commit_msg>Add management command to check who gets email notifications<commit_after>
|
"""
sentry.management.commands.check_notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
def find_mail_plugin():
from sentry.plugins import plugins
for plugin in plugins.all():
if type(plugin).__name__.endswith('MailPlugin'):
return plugin
assert False, 'MailPlugin cannot be found'
def handle_project(plugin, project, stream):
stream.write('# Project: %s\n' % project)
from sentry.utils.email import MessageBuilder
msg = MessageBuilder('test')
msg.add_users(plugin.get_sendable_users(project), project)
for email in msg._send_to:
stream.write(email + '\n')
class Command(BaseCommand):
help = 'Dump addresses that would get an email notification'
option_list = BaseCommand.option_list + (
make_option('--organization',
action='store',
type='int',
dest='organization',
default=0,
help='',
),
make_option('--project',
action='store',
type='int',
dest='project',
default=0,
help='',
),
)
def handle(self, *args, **options):
if not (options['project'] or options['organization']):
raise CommandError('Must specify either a project or organization')
from sentry.models import Project, Organization
if options['organization']:
projects = list(Organization.objects.get(pk=options['organization']).project_set.all())
else:
projects = [Project.objects.get(pk=options['project'])]
plugin = find_mail_plugin()
for project in projects:
handle_project(plugin, project, self.stdout)
self.stdout.write('\n')
|
Add management command to check who gets email notifications"""
sentry.management.commands.check_notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
def find_mail_plugin():
from sentry.plugins import plugins
for plugin in plugins.all():
if type(plugin).__name__.endswith('MailPlugin'):
return plugin
assert False, 'MailPlugin cannot be found'
def handle_project(plugin, project, stream):
stream.write('# Project: %s\n' % project)
from sentry.utils.email import MessageBuilder
msg = MessageBuilder('test')
msg.add_users(plugin.get_sendable_users(project), project)
for email in msg._send_to:
stream.write(email + '\n')
class Command(BaseCommand):
help = 'Dump addresses that would get an email notification'
option_list = BaseCommand.option_list + (
make_option('--organization',
action='store',
type='int',
dest='organization',
default=0,
help='',
),
make_option('--project',
action='store',
type='int',
dest='project',
default=0,
help='',
),
)
def handle(self, *args, **options):
if not (options['project'] or options['organization']):
raise CommandError('Must specify either a project or organization')
from sentry.models import Project, Organization
if options['organization']:
projects = list(Organization.objects.get(pk=options['organization']).project_set.all())
else:
projects = [Project.objects.get(pk=options['project'])]
plugin = find_mail_plugin()
for project in projects:
handle_project(plugin, project, self.stdout)
self.stdout.write('\n')
|
<commit_before><commit_msg>Add management command to check who gets email notifications<commit_after>"""
sentry.management.commands.check_notifications
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2015 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
def find_mail_plugin():
from sentry.plugins import plugins
for plugin in plugins.all():
if type(plugin).__name__.endswith('MailPlugin'):
return plugin
assert False, 'MailPlugin cannot be found'
def handle_project(plugin, project, stream):
stream.write('# Project: %s\n' % project)
from sentry.utils.email import MessageBuilder
msg = MessageBuilder('test')
msg.add_users(plugin.get_sendable_users(project), project)
for email in msg._send_to:
stream.write(email + '\n')
class Command(BaseCommand):
help = 'Dump addresses that would get an email notification'
option_list = BaseCommand.option_list + (
make_option('--organization',
action='store',
type='int',
dest='organization',
default=0,
help='',
),
make_option('--project',
action='store',
type='int',
dest='project',
default=0,
help='',
),
)
def handle(self, *args, **options):
if not (options['project'] or options['organization']):
raise CommandError('Must specify either a project or organization')
from sentry.models import Project, Organization
if options['organization']:
projects = list(Organization.objects.get(pk=options['organization']).project_set.all())
else:
projects = [Project.objects.get(pk=options['project'])]
plugin = find_mail_plugin()
for project in projects:
handle_project(plugin, project, self.stdout)
self.stdout.write('\n')
|
|
8a6e47c3e9412c29cb709fdba2026724246a30e2
|
scripts/patches/appflow.py
|
scripts/patches/appflow.py
|
patches = [
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/Type",
},
]
|
Add spec patch for AppFlow
|
Add spec patch for AppFlow
|
Python
|
bsd-2-clause
|
cloudtools/troposphere,cloudtools/troposphere
|
Add spec patch for AppFlow
|
patches = [
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/Type",
},
]
|
<commit_before><commit_msg>Add spec patch for AppFlow<commit_after>
|
patches = [
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/Type",
},
]
|
Add spec patch for AppFlowpatches = [
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/Type",
},
]
|
<commit_before><commit_msg>Add spec patch for AppFlow<commit_after>patches = [
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomAuthCredentials/Properties/CredentialsMap/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.OAuth2Properties/Properties/TokenUrlCustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::ConnectorProfile.CustomConnectorProfileProperties/Properties/ProfileProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorSourceProperties/Properties/CustomProperties/Type",
},
{
"op": "add",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/PrimitiveType",
"value": "Json",
},
{
"op": "remove",
"path": "/PropertyTypes/AWS::AppFlow::Flow.CustomConnectorDestinationProperties/Properties/CustomProperties/Type",
},
]
|
|
f6b8cf05b025bec12e8fe82f8bfdb56558ca1b14
|
tools/test/toolchains/api.py
|
tools/test/toolchains/api.py
|
import sys
import os
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, ROOT)
from tools.toolchains import TOOLCHAIN_CLASSES, LEGACY_TOOLCHAIN_NAMES
from tools.targets import TARGET_MAP
def test_instantiation():
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(TARGET_MAP["K64F"])
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
|
Add a test that exercises the abstract base class checking
|
Add a test that exercises the abstract base class checking
The test simply tries to instantiate each class exposed as part of the
TOOLCHAIN_CLASSES map. This is a test that each toolchain class
implements the required API, as the mbedToolchain will not allow the
instantiation of a subclass without a part of that API.
|
Python
|
apache-2.0
|
NXPmicro/mbed,karsev/mbed-os,screamerbg/mbed,nRFMesh/mbed-os,netzimme/mbed-os,netzimme/mbed-os,ryankurte/mbed-os,nRFMesh/mbed-os,YarivCol/mbed-os,infinnovation/mbed-os,fvincenzo/mbed-os,RonEld/mbed,bcostm/mbed-os,adustm/mbed,infinnovation/mbed-os,theotherjimmy/mbed,cvtsi2sd/mbed-os,tung7970/mbed-os,fahhem/mbed-os,svogl/mbed-os,RonEld/mbed,c1728p9/mbed-os,HeadsUpDisplayInc/mbed,svastm/mbed,CalSol/mbed,catiedev/mbed-os,svogl/mbed-os,fvincenzo/mbed-os,nvlsianpu/mbed,kl-cruz/mbed-os,HeadsUpDisplayInc/mbed,ryankurte/mbed-os,adustm/mbed,c1728p9/mbed-os,netzimme/mbed-os,maximmbed/mbed,mikaleppanen/mbed-os,screamerbg/mbed,jeremybrodt/mbed,arostm/mbed-os,rgrover/mbed,andreaslarssonublox/mbed,nvlsianpu/mbed,tung7970/mbed-os-1,catiedev/mbed-os,Archcady/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,cvtsi2sd/mbed-os,kl-cruz/mbed-os,maximmbed/mbed,CalSol/mbed,pradeep-gr/mbed-os5-onsemi,rgrover/mbed,kl-cruz/mbed-os,nvlsianpu/mbed,screamerbg/mbed,betzw/mbed-os,pradeep-gr/mbed-os5-onsemi,andreaslarssonublox/mbed,catiedev/mbed-os,mmorenobarm/mbed-os,nvlsianpu/mbed,c1728p9/mbed-os,andcor02/mbed-os,ryankurte/mbed-os,andcor02/mbed-os,adamgreen/mbed,Archcady/mbed-os,monkiineko/mbed-os,bulislaw/mbed-os,cvtsi2sd/mbed-os,pradeep-gr/mbed-os5-onsemi,radhika-raghavendran/mbed-os5.1-onsemi,tung7970/mbed-os-1,ryankurte/mbed-os,arostm/mbed-os,screamerbg/mbed,kjbracey-arm/mbed,geky/mbed,HeadsUpDisplayInc/mbed,RonEld/mbed,fanghuaqi/mbed,svastm/mbed,j-greffe/mbed-os,netzimme/mbed-os,adamgreen/mbed,mikaleppanen/mbed-os,adamgreen/mbed,RonEld/mbed,fahhem/mbed-os,maximmbed/mbed,svastm/mbed,j-greffe/mbed-os,fahhem/mbed-os,rgrover/mbed,screamerbg/mbed,pradeep-gr/mbed-os5-onsemi,svastm/mbed,CalSol/mbed,fanghuaqi/mbed,rgrover/mbed,karsev/mbed-os,NXPmicro/mbed,mazimkhan/mbed-os,ryankurte/mbed-os,svogl/mbed-os,adustm/mbed,nRFMesh/mbed-os,kjbracey-arm/mbed,j-greffe/mbed-os,betzw/mbed-os,arostm/mbed-os,svogl/mbed-os,HeadsUpDisplayInc/mbed,bulislaw/mbed-os,cvtsi2sd/mbed-os,mmorenobarm/mbed-os,geky/mbed,YarivCol/mbed-os,monkiineko/mbed-os,fahhem/mbed-os,bcostm/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,YarivCol/mbed-os,NXPmicro/mbed,jeremybrodt/mbed,bulislaw/mbed-os,fvincenzo/mbed-os,tung7970/mbed-os,YarivCol/mbed-os,c1728p9/mbed-os,fahhem/mbed-os,bcostm/mbed-os,betzw/mbed-os,bulislaw/mbed-os,geky/mbed,karsev/mbed-os,adamgreen/mbed,mmorenobarm/mbed-os,adamgreen/mbed,mazimkhan/mbed-os,fanghuaqi/mbed,bcostm/mbed-os,andcor02/mbed-os,Archcady/mbed-os,CalSol/mbed,bcostm/mbed-os,arostm/mbed-os,mikaleppanen/mbed-os,maximmbed/mbed,geky/mbed,screamerbg/mbed,rgrover/mbed,theotherjimmy/mbed,mmorenobarm/mbed-os,catiedev/mbed-os,Archcady/mbed-os,bulislaw/mbed-os,fvincenzo/mbed-os,fvincenzo/mbed-os,netzimme/mbed-os,NXPmicro/mbed,mazimkhan/mbed-os,theotherjimmy/mbed,jeremybrodt/mbed,kl-cruz/mbed-os,mikaleppanen/mbed-os,tung7970/mbed-os-1,adamgreen/mbed,andcor02/mbed-os,andreaslarssonublox/mbed,bcostm/mbed-os,nRFMesh/mbed-os,kl-cruz/mbed-os,geky/mbed,RonEld/mbed,monkiineko/mbed-os,mikaleppanen/mbed-os,maximmbed/mbed,c1728p9/mbed-os,CalSol/mbed,YarivCol/mbed-os,infinnovation/mbed-os,mbedmicro/mbed,tung7970/mbed-os-1,nRFMesh/mbed-os,fahhem/mbed-os,tung7970/mbed-os,tung7970/mbed-os,betzw/mbed-os,mmorenobarm/mbed-os,mbedmicro/mbed,mbedmicro/mbed,NXPmicro/mbed,jeremybrodt/mbed,NXPmicro/mbed,karsev/mbed-os,mbedmicro/mbed,bulislaw/mbed-os,nRFMesh/mbed-os,jeremybrodt/mbed,adustm/mbed,RonEld/mbed,betzw/mbed-os,fanghuaqi/mbed,infinnovation/mbed-os,catiedev/mbed-os,fanghuaqi/mbed,andreaslarssonublox/mbed,mmorenobarm/mbed-os,cvtsi2sd/mbed-os,andcor02/mbed-os,a
ndreaslarssonublox/mbed,adustm/mbed,pradeep-gr/mbed-os5-onsemi,monkiineko/mbed-os,adustm/mbed,nvlsianpu/mbed,betzw/mbed-os,monkiineko/mbed-os,ryankurte/mbed-os,Archcady/mbed-os,svogl/mbed-os,arostm/mbed-os,svogl/mbed-os,theotherjimmy/mbed,andcor02/mbed-os,theotherjimmy/mbed,catiedev/mbed-os,monkiineko/mbed-os,pradeep-gr/mbed-os5-onsemi,infinnovation/mbed-os,cvtsi2sd/mbed-os,j-greffe/mbed-os,theotherjimmy/mbed,YarivCol/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,mikaleppanen/mbed-os,tung7970/mbed-os,arostm/mbed-os,mazimkhan/mbed-os,j-greffe/mbed-os,kl-cruz/mbed-os,mbedmicro/mbed,Archcady/mbed-os,HeadsUpDisplayInc/mbed,karsev/mbed-os,nvlsianpu/mbed,radhika-raghavendran/mbed-os5.1-onsemi,c1728p9/mbed-os,svastm/mbed,tung7970/mbed-os-1,netzimme/mbed-os,mazimkhan/mbed-os,kjbracey-arm/mbed,karsev/mbed-os,HeadsUpDisplayInc/mbed,CalSol/mbed,maximmbed/mbed,j-greffe/mbed-os,infinnovation/mbed-os,kjbracey-arm/mbed,radhika-raghavendran/mbed-os5.1-onsemi,mazimkhan/mbed-os
|
Add a test that exercises the abstract base class checking
The test simply tries to instantiate each class exposed as part of the
TOOLCHAIN_CLASSES map. This is a test that each toolchain class
implements the required API, as the mbedToolchain will not allow the
instantiation of a subclass without a part of that API.
|
import sys
import os
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, ROOT)
from tools.toolchains import TOOLCHAIN_CLASSES, LEGACY_TOOLCHAIN_NAMES
from tools.targets import TARGET_MAP
def test_instantiation():
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(TARGET_MAP["K64F"])
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
|
<commit_before><commit_msg>Add a test that exercises the abstract base class checking
The test simply tries to instantiate each class exposed as part of the
TOOLCHAIN_CLASSES map. This is a test that each toolchain class
implements the required API, as the mbedToolchain will not allow the
instantiation of a subclass without a part of that API.<commit_after>
|
import sys
import os
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, ROOT)
from tools.toolchains import TOOLCHAIN_CLASSES, LEGACY_TOOLCHAIN_NAMES
from tools.targets import TARGET_MAP
def test_instantiation():
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(TARGET_MAP["K64F"])
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
|
Add a test that exercises the abstract base class checking
The test simply tries to instantiate each class exposed as part of the
TOOLCHAIN_CLASSES map. This is a test that each toolchain class
implements the required API, as the mbedToolchain will not allow the
instantiation of a subclass without a part of that API.import sys
import os
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, ROOT)
from tools.toolchains import TOOLCHAIN_CLASSES, LEGACY_TOOLCHAIN_NAMES
from tools.targets import TARGET_MAP
def test_instantiation():
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(TARGET_MAP["K64F"])
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
|
<commit_before><commit_msg>Add a test that exercises the abstract base class checking
The test simply tries to instantiate each class exposed as part of the
TOOLCHAIN_CLASSES map. This is a test that each toolchain class
implements the required API, as the mbedToolchain will not allow the
instantiation of a subclass without a part of that API.<commit_after>import sys
import os
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
sys.path.insert(0, ROOT)
from tools.toolchains import TOOLCHAIN_CLASSES, LEGACY_TOOLCHAIN_NAMES
from tools.targets import TARGET_MAP
def test_instantiation():
for name, Class in TOOLCHAIN_CLASSES.items():
CLS = Class(TARGET_MAP["K64F"])
assert name == CLS.name or name == LEGACY_TOOLCHAIN_NAMES[CLS.name]
|
|
420319884771cd12e3c2034bdf5d9308144ffc0c
|
tests/ldap_sync/test_db_fetch.py
|
tests/ldap_sync/test_db_fetch.py
|
# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import pytest
from ldap_sync.db import fetch_users_to_sync, UserProxyType
from tests import factories
@pytest.fixture(scope='module')
def group(module_session):
return factories.PropertyGroupFactory.create(granted={'ldap', 'ldap_login_enabled'})
@pytest.fixture(scope='module')
def deny_group(module_session):
return factories.PropertyGroupFactory.create(denied={'ldap_login_enabled'})
@pytest.fixture
def deny_membership(session, deny_group, user):
return factories.MembershipFactory(
user=user, group=deny_group, includes_today=True,
)
@pytest.fixture(scope='module')
def user(module_session, group):
return factories.UserFactory.create(
with_unix_account=True,
with_membership=True,
membership__group=group,
)
def test_one_user_fetch(session, user):
assert fetch_users_to_sync(session) == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_property(session, user):
assert fetch_users_to_sync(session, required_property="nonexistent") == []
def test_one_user_fetch_with_existent_property(session, user):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_blockage(session, user, deny_membership):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=True))
]
|
Add rudimentary tests for `fetch_users_to_sync`
|
Add rudimentary tests for `fetch_users_to_sync`
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
|
Add rudimentary tests for `fetch_users_to_sync`
|
# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import pytest
from ldap_sync.db import fetch_users_to_sync, UserProxyType
from tests import factories
@pytest.fixture(scope='module')
def group(module_session):
return factories.PropertyGroupFactory.create(granted={'ldap', 'ldap_login_enabled'})
@pytest.fixture(scope='module')
def deny_group(module_session):
return factories.PropertyGroupFactory.create(denied={'ldap_login_enabled'})
@pytest.fixture
def deny_membership(session, deny_group, user):
return factories.MembershipFactory(
user=user, group=deny_group, includes_today=True,
)
@pytest.fixture(scope='module')
def user(module_session, group):
return factories.UserFactory.create(
with_unix_account=True,
with_membership=True,
membership__group=group,
)
def test_one_user_fetch(session, user):
assert fetch_users_to_sync(session) == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_property(session, user):
assert fetch_users_to_sync(session, required_property="nonexistent") == []
def test_one_user_fetch_with_existent_property(session, user):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_blockage(session, user, deny_membership):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=True))
]
|
<commit_before><commit_msg>Add rudimentary tests for `fetch_users_to_sync`<commit_after>
|
# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import pytest
from ldap_sync.db import fetch_users_to_sync, UserProxyType
from tests import factories
@pytest.fixture(scope='module')
def group(module_session):
return factories.PropertyGroupFactory.create(granted={'ldap', 'ldap_login_enabled'})
@pytest.fixture(scope='module')
def deny_group(module_session):
return factories.PropertyGroupFactory.create(denied={'ldap_login_enabled'})
@pytest.fixture
def deny_membership(session, deny_group, user):
return factories.MembershipFactory(
user=user, group=deny_group, includes_today=True,
)
@pytest.fixture(scope='module')
def user(module_session, group):
return factories.UserFactory.create(
with_unix_account=True,
with_membership=True,
membership__group=group,
)
def test_one_user_fetch(session, user):
assert fetch_users_to_sync(session) == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_property(session, user):
assert fetch_users_to_sync(session, required_property="nonexistent") == []
def test_one_user_fetch_with_existent_property(session, user):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_blockage(session, user, deny_membership):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=True))
]
|
Add rudimentary tests for `fetch_users_to_sync`# Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import pytest
from ldap_sync.db import fetch_users_to_sync, UserProxyType
from tests import factories
@pytest.fixture(scope='module')
def group(module_session):
return factories.PropertyGroupFactory.create(granted={'ldap', 'ldap_login_enabled'})
@pytest.fixture(scope='module')
def deny_group(module_session):
return factories.PropertyGroupFactory.create(denied={'ldap_login_enabled'})
@pytest.fixture
def deny_membership(session, deny_group, user):
return factories.MembershipFactory(
user=user, group=deny_group, includes_today=True,
)
@pytest.fixture(scope='module')
def user(module_session, group):
return factories.UserFactory.create(
with_unix_account=True,
with_membership=True,
membership__group=group,
)
def test_one_user_fetch(session, user):
assert fetch_users_to_sync(session) == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_property(session, user):
assert fetch_users_to_sync(session, required_property="nonexistent") == []
def test_one_user_fetch_with_existent_property(session, user):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_blockage(session, user, deny_membership):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=True))
]
|
<commit_before><commit_msg>Add rudimentary tests for `fetch_users_to_sync`<commit_after># Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import pytest
from ldap_sync.db import fetch_users_to_sync, UserProxyType
from tests import factories
@pytest.fixture(scope='module')
def group(module_session):
return factories.PropertyGroupFactory.create(granted={'ldap', 'ldap_login_enabled'})
@pytest.fixture(scope='module')
def deny_group(module_session):
return factories.PropertyGroupFactory.create(denied={'ldap_login_enabled'})
@pytest.fixture
def deny_membership(session, deny_group, user):
return factories.MembershipFactory(
user=user, group=deny_group, includes_today=True,
)
@pytest.fixture(scope='module')
def user(module_session, group):
return factories.UserFactory.create(
with_unix_account=True,
with_membership=True,
membership__group=group,
)
def test_one_user_fetch(session, user):
assert fetch_users_to_sync(session) == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_property(session, user):
assert fetch_users_to_sync(session, required_property="nonexistent") == []
def test_one_user_fetch_with_existent_property(session, user):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=False))
]
def test_one_user_fetch_with_blockage(session, user, deny_membership):
assert fetch_users_to_sync(session, required_property='ldap') == [
tuple(UserProxyType(user, should_be_blocked=True))
]
|
|
e1a132dce80a530051abc4c51edc36901dbf5e85
|
tests/pytests/unit/modules/test_dpkg_lowpkg.py
|
tests/pytests/unit/modules/test_dpkg_lowpkg.py
|
import os
import salt.modules.dpkg_lowpkg as dpkg
from tests.support.mock import MagicMock, mock_open, patch
def test_get_pkg_license():
"""
Test _get_pkg_license for ignore errors on reading license from copyright files
"""
license_read_mock = mock_open(read_data="")
with patch.object(os.path, "exists", MagicMock(return_value=True)), patch(
"salt.utils.files.fopen", license_read_mock
):
dpkg._get_pkg_license("bash")
assert license_read_mock.calls[0].args[0] == "/usr/share/doc/bash/copyright"
assert license_read_mock.calls[0].kwargs["errors"] == "ignore"
|
Add test for license reading with dpkg_lowpkg
|
Add test for license reading with dpkg_lowpkg
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add test for license reading with dpkg_lowpkg
|
import os
import salt.modules.dpkg_lowpkg as dpkg
from tests.support.mock import MagicMock, mock_open, patch
def test_get_pkg_license():
"""
Test _get_pkg_license for ignore errors on reading license from copyright files
"""
license_read_mock = mock_open(read_data="")
with patch.object(os.path, "exists", MagicMock(return_value=True)), patch(
"salt.utils.files.fopen", license_read_mock
):
dpkg._get_pkg_license("bash")
assert license_read_mock.calls[0].args[0] == "/usr/share/doc/bash/copyright"
assert license_read_mock.calls[0].kwargs["errors"] == "ignore"
|
<commit_before><commit_msg>Add test for license reading with dpkg_lowpkg<commit_after>
|
import os
import salt.modules.dpkg_lowpkg as dpkg
from tests.support.mock import MagicMock, mock_open, patch
def test_get_pkg_license():
"""
Test _get_pkg_license for ignore errors on reading license from copyright files
"""
license_read_mock = mock_open(read_data="")
with patch.object(os.path, "exists", MagicMock(return_value=True)), patch(
"salt.utils.files.fopen", license_read_mock
):
dpkg._get_pkg_license("bash")
assert license_read_mock.calls[0].args[0] == "/usr/share/doc/bash/copyright"
assert license_read_mock.calls[0].kwargs["errors"] == "ignore"
|
Add test for license reading with dpkg_lowpkgimport os
import salt.modules.dpkg_lowpkg as dpkg
from tests.support.mock import MagicMock, mock_open, patch
def test_get_pkg_license():
"""
Test _get_pkg_license for ignore errors on reading license from copyright files
"""
license_read_mock = mock_open(read_data="")
with patch.object(os.path, "exists", MagicMock(return_value=True)), patch(
"salt.utils.files.fopen", license_read_mock
):
dpkg._get_pkg_license("bash")
assert license_read_mock.calls[0].args[0] == "/usr/share/doc/bash/copyright"
assert license_read_mock.calls[0].kwargs["errors"] == "ignore"
|
<commit_before><commit_msg>Add test for license reading with dpkg_lowpkg<commit_after>import os
import salt.modules.dpkg_lowpkg as dpkg
from tests.support.mock import MagicMock, mock_open, patch
def test_get_pkg_license():
"""
Test _get_pkg_license for ignore errors on reading license from copyright files
"""
license_read_mock = mock_open(read_data="")
with patch.object(os.path, "exists", MagicMock(return_value=True)), patch(
"salt.utils.files.fopen", license_read_mock
):
dpkg._get_pkg_license("bash")
assert license_read_mock.calls[0].args[0] == "/usr/share/doc/bash/copyright"
assert license_read_mock.calls[0].kwargs["errors"] == "ignore"
|
|
fff9bb1bd61e43124d1de7ef1b6e004758c38a7f
|
tests/system/python/api/test_authentication.py
|
tests/system/python/api/test_authentication.py
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test authentication REST API """
import http.client
import json
import time
import pytest
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
TOKEN = None
# TODO: Cover scenario when auth is optional and negative scenarios
@pytest.fixture
def change_to_auth_mandatory(reset_and_start_foglamp, foglamp_url, wait_time):
# Wait for foglamp server to start
time.sleep(wait_time)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"authentication": "mandatory"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "mandatory" == jdoc['authentication']['value']
conn.request("PUT", '/foglamp/restart', json.dumps({}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "FogLAMP restart has been scheduled." == jdoc['message']
class TestAuthenticationAPI:
def test_login_username_regular_user(self, change_to_auth_mandatory, foglamp_url, wait_time):
time.sleep(wait_time * 2)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert not jdoc['admin']
global TOKEN
TOKEN = jdoc["token"]
def test_logout_me(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
print(TOKEN)
conn.request("PUT", '/foglamp/logout', headers={"authorization": TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
|
Test added for login/logout pwd
|
Test added for login/logout pwd
|
Python
|
apache-2.0
|
foglamp/FogLAMP,foglamp/FogLAMP,foglamp/FogLAMP,foglamp/FogLAMP
|
Test added for login/logout pwd
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test authentication REST API """
import http.client
import json
import time
import pytest
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
TOKEN = None
# TODO: Cover scenario when auth is optional and negative scenarios
@pytest.fixture
def change_to_auth_mandatory(reset_and_start_foglamp, foglamp_url, wait_time):
# Wait for foglamp server to start
time.sleep(wait_time)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"authentication": "mandatory"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "mandatory" == jdoc['authentication']['value']
conn.request("PUT", '/foglamp/restart', json.dumps({}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "FogLAMP restart has been scheduled." == jdoc['message']
class TestAuthenticationAPI:
def test_login_username_regular_user(self, change_to_auth_mandatory, foglamp_url, wait_time):
time.sleep(wait_time * 2)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert not jdoc['admin']
global TOKEN
TOKEN = jdoc["token"]
def test_logout_me(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
print(TOKEN)
conn.request("PUT", '/foglamp/logout', headers={"authorization": TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
|
<commit_before><commit_msg>Test added for login/logout pwd<commit_after>
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test authentication REST API """
import http.client
import json
import time
import pytest
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
TOKEN = None
# TODO: Cover scenario when auth is optional and negative scenarios
@pytest.fixture
def change_to_auth_mandatory(reset_and_start_foglamp, foglamp_url, wait_time):
# Wait for foglamp server to start
time.sleep(wait_time)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"authentication": "mandatory"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "mandatory" == jdoc['authentication']['value']
conn.request("PUT", '/foglamp/restart', json.dumps({}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "FogLAMP restart has been scheduled." == jdoc['message']
class TestAuthenticationAPI:
def test_login_username_regular_user(self, change_to_auth_mandatory, foglamp_url, wait_time):
time.sleep(wait_time * 2)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert not jdoc['admin']
global TOKEN
TOKEN = jdoc["token"]
def test_logout_me(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
print(TOKEN)
conn.request("PUT", '/foglamp/logout', headers={"authorization": TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
|
Test added for login/logout pwd# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test authentication REST API """
import http.client
import json
import time
import pytest
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
TOKEN = None
# TODO: Cover scenario when auth is optional and negative scenarios
@pytest.fixture
def change_to_auth_mandatory(reset_and_start_foglamp, foglamp_url, wait_time):
# Wait for foglamp server to start
time.sleep(wait_time)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"authentication": "mandatory"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "mandatory" == jdoc['authentication']['value']
conn.request("PUT", '/foglamp/restart', json.dumps({}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "FogLAMP restart has been scheduled." == jdoc['message']
class TestAuthenticationAPI:
def test_login_username_regular_user(self, change_to_auth_mandatory, foglamp_url, wait_time):
time.sleep(wait_time * 2)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert not jdoc['admin']
global TOKEN
TOKEN = jdoc["token"]
def test_logout_me(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
print(TOKEN)
conn.request("PUT", '/foglamp/logout', headers={"authorization": TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
|
<commit_before><commit_msg>Test added for login/logout pwd<commit_after># -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Test authentication REST API """
import http.client
import json
import time
import pytest
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2019 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
TOKEN = None
# TODO: Cover scenario when auth is optional and negative scenarios
@pytest.fixture
def change_to_auth_mandatory(reset_and_start_foglamp, foglamp_url, wait_time):
# Wait for foglamp server to start
time.sleep(wait_time)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("PUT", '/foglamp/category/rest_api', json.dumps({"authentication": "mandatory"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "mandatory" == jdoc['authentication']['value']
conn.request("PUT", '/foglamp/restart', json.dumps({}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "FogLAMP restart has been scheduled." == jdoc['message']
class TestAuthenticationAPI:
def test_login_username_regular_user(self, change_to_auth_mandatory, foglamp_url, wait_time):
time.sleep(wait_time * 2)
conn = http.client.HTTPConnection(foglamp_url)
conn.request("POST", "/foglamp/login", json.dumps({"username": "user", "password": "foglamp"}))
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert "Logged in successfully" == jdoc['message']
assert "token" in jdoc
assert not jdoc['admin']
global TOKEN
TOKEN = jdoc["token"]
def test_logout_me(self, foglamp_url):
conn = http.client.HTTPConnection(foglamp_url)
print(TOKEN)
conn.request("PUT", '/foglamp/logout', headers={"authorization": TOKEN})
r = conn.getresponse()
assert 200 == r.status
r = r.read().decode()
jdoc = json.loads(r)
assert jdoc['logout']
|
|
2698d0d846f061e3560d6e87f43c57ed10517006
|
zou/migrations/versions/3d5c93bafb9d_.py
|
zou/migrations/versions/3d5c93bafb9d_.py
|
"""empty message
Revision ID: 3d5c93bafb9d
Revises: 7dc79d4ed7cd
Create Date: 2018-05-30 20:04:38.461178
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '3d5c93bafb9d'
down_revision = '7dc79d4ed7cd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('has_avatar', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('project', 'has_avatar')
# ### end Alembic commands ###
|
Add has avatar column to project table
|
Add has avatar column to project table
|
Python
|
agpl-3.0
|
cgwire/zou
|
Add has avatar column to project table
|
"""empty message
Revision ID: 3d5c93bafb9d
Revises: 7dc79d4ed7cd
Create Date: 2018-05-30 20:04:38.461178
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '3d5c93bafb9d'
down_revision = '7dc79d4ed7cd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('has_avatar', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('project', 'has_avatar')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add has avatar column to project table<commit_after>
|
"""empty message
Revision ID: 3d5c93bafb9d
Revises: 7dc79d4ed7cd
Create Date: 2018-05-30 20:04:38.461178
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '3d5c93bafb9d'
down_revision = '7dc79d4ed7cd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('has_avatar', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('project', 'has_avatar')
# ### end Alembic commands ###
|
Add has avatar column to project table"""empty message
Revision ID: 3d5c93bafb9d
Revises: 7dc79d4ed7cd
Create Date: 2018-05-30 20:04:38.461178
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '3d5c93bafb9d'
down_revision = '7dc79d4ed7cd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('has_avatar', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('project', 'has_avatar')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add has avatar column to project table<commit_after>"""empty message
Revision ID: 3d5c93bafb9d
Revises: 7dc79d4ed7cd
Create Date: 2018-05-30 20:04:38.461178
"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = '3d5c93bafb9d'
down_revision = '7dc79d4ed7cd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('project', sa.Column('has_avatar', sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('project', 'has_avatar')
# ### end Alembic commands ###
|
|
8a6995d4ff89554325144115ea199d0e02086824
|
Graphs/inorder_and_preorder.py
|
Graphs/inorder_and_preorder.py
|
import unittest
"""
Construct tree from given inorder and preorder traversals.
Input:
Inorder: D B E A F C
Preorder: A B D E C F
In a preorder sequence, leftmost element is root of tree. So, we know 'A' is root from given preorder sequence.
By searching 'A' in inorder sequence, we can find out all elements on left side of 'A' are in left subtree
and all elements on right are in right subtree.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def build_tree_helper(inorder, preorder, in_start, in_end):
if in_start > in_end:
return None
tree_node = Node(preorder[build_tree_helper.pre_index])
build_tree_helper.pre_index += 1
if in_start == in_end:
return tree_node
in_index = search(inorder, in_start, in_end, tree_node.data)
tree_node.left = build_tree_helper(inorder, preorder, in_start, in_index - 1)
tree_node.right = build_tree_helper(inorder, preorder, in_index + 1, in_end)
return tree_node
def search(arr, start, end, ele):
for i in range(start, end+1):
if arr[i] == ele:
return i
return -1
def build_tree(inorder, preorder):
build_tree_helper.pre_index = 0
n = len(inorder)
return build_tree_helper(inorder, preorder, 0, n-1)
class TestBuildTree(unittest.TestCase):
def test_build_tree(self):
inorder = ['D', 'B', 'E', 'A', 'F', 'C']
preorder = ['A', 'B', 'D', 'E', 'C', 'F']
root = build_tree(inorder, preorder)
self.assertEqual(root.data, 'A')
self.assertEqual(root.left.data, 'B')
self.assertEqual(root.right.data, 'C')
self.assertEqual(root.left.left.data, 'D')
self.assertEqual(root.left.right.data, 'E')
self.assertEqual(root.right.left.data, 'F')
|
Build a tree from inorder and preorder traversals
|
Build a tree from inorder and preorder traversals
|
Python
|
mit
|
prathamtandon/g4gproblems
|
Build a tree from inorder and preorder traversals
|
import unittest
"""
Construct tree from given inorder and preorder traversals.
Input:
Inorder: D B E A F C
Preorder: A B D E C F
In a preorder sequence, leftmost element is root of tree. So, we know 'A' is root from given preorder sequence.
By searching 'A' in inorder sequence, we can find out all elements on left side of 'A' are in left subtree
and all elements on right are in right subtree.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def build_tree_helper(inorder, preorder, in_start, in_end):
if in_start > in_end:
return None
tree_node = Node(preorder[build_tree_helper.pre_index])
build_tree_helper.pre_index += 1
if in_start == in_end:
return tree_node
in_index = search(inorder, in_start, in_end, tree_node.data)
tree_node.left = build_tree_helper(inorder, preorder, in_start, in_index - 1)
tree_node.right = build_tree_helper(inorder, preorder, in_index + 1, in_end)
return tree_node
def search(arr, start, end, ele):
for i in range(start, end+1):
if arr[i] == ele:
return i
return -1
def build_tree(inorder, preorder):
build_tree_helper.pre_index = 0
n = len(inorder)
return build_tree_helper(inorder, preorder, 0, n-1)
class TestBuildTree(unittest.TestCase):
def test_build_tree(self):
inorder = ['D', 'B', 'E', 'A', 'F', 'C']
preorder = ['A', 'B', 'D', 'E', 'C', 'F']
root = build_tree(inorder, preorder)
self.assertEqual(root.data, 'A')
self.assertEqual(root.left.data, 'B')
self.assertEqual(root.right.data, 'C')
self.assertEqual(root.left.left.data, 'D')
self.assertEqual(root.left.right.data, 'E')
self.assertEqual(root.right.left.data, 'F')
|
<commit_before><commit_msg>Build a tree from inorder and preorder traversals<commit_after>
|
import unittest
"""
Construct tree from given inorder and preorder traversals.
Input:
Inorder: D B E A F C
Preorder: A B D E C F
In a preorder sequence, leftmost element is root of tree. So, we know 'A' is root from given preorder sequence.
By searching 'A' in inorder sequence, we can find out all elements on left side of 'A' are in left subtree
and all elements on right are in right subtree.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def build_tree_helper(inorder, preorder, in_start, in_end):
if in_start > in_end:
return None
tree_node = Node(preorder[build_tree_helper.pre_index])
build_tree_helper.pre_index += 1
if in_start == in_end:
return tree_node
in_index = search(inorder, in_start, in_end, tree_node.data)
tree_node.left = build_tree_helper(inorder, preorder, in_start, in_index - 1)
tree_node.right = build_tree_helper(inorder, preorder, in_index + 1, in_end)
return tree_node
def search(arr, start, end, ele):
for i in range(start, end+1):
if arr[i] == ele:
return i
return -1
def build_tree(inorder, preorder):
build_tree_helper.pre_index = 0
n = len(inorder)
return build_tree_helper(inorder, preorder, 0, n-1)
class TestBuildTree(unittest.TestCase):
def test_build_tree(self):
inorder = ['D', 'B', 'E', 'A', 'F', 'C']
preorder = ['A', 'B', 'D', 'E', 'C', 'F']
root = build_tree(inorder, preorder)
self.assertEqual(root.data, 'A')
self.assertEqual(root.left.data, 'B')
self.assertEqual(root.right.data, 'C')
self.assertEqual(root.left.left.data, 'D')
self.assertEqual(root.left.right.data, 'E')
self.assertEqual(root.right.left.data, 'F')
|
Build a tree from inorder and preorder traversalsimport unittest
"""
Construct tree from given inorder and preorder traversals.
Input:
Inorder: D B E A F C
Preorder: A B D E C F
In a preorder sequence, leftmost element is root of tree. So, we know 'A' is root from given preorder sequence.
By searching 'A' in inorder sequence, we can find out all elements on left side of 'A' are in left subtree
and all elements on right are in right subtree.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def build_tree_helper(inorder, preorder, in_start, in_end):
if in_start > in_end:
return None
tree_node = Node(preorder[build_tree_helper.pre_index])
build_tree_helper.pre_index += 1
if in_start == in_end:
return tree_node
in_index = search(inorder, in_start, in_end, tree_node.data)
tree_node.left = build_tree_helper(inorder, preorder, in_start, in_index - 1)
tree_node.right = build_tree_helper(inorder, preorder, in_index + 1, in_end)
return tree_node
def search(arr, start, end, ele):
for i in range(start, end+1):
if arr[i] == ele:
return i
return -1
def build_tree(inorder, preorder):
build_tree_helper.pre_index = 0
n = len(inorder)
return build_tree_helper(inorder, preorder, 0, n-1)
class TestBuildTree(unittest.TestCase):
def test_build_tree(self):
inorder = ['D', 'B', 'E', 'A', 'F', 'C']
preorder = ['A', 'B', 'D', 'E', 'C', 'F']
root = build_tree(inorder, preorder)
self.assertEqual(root.data, 'A')
self.assertEqual(root.left.data, 'B')
self.assertEqual(root.right.data, 'C')
self.assertEqual(root.left.left.data, 'D')
self.assertEqual(root.left.right.data, 'E')
self.assertEqual(root.right.left.data, 'F')
|
<commit_before><commit_msg>Build a tree from inorder and preorder traversals<commit_after>import unittest
"""
Construct tree from given inorder and preorder traversals.
Input:
Inorder: D B E A F C
Preorder: A B D E C F
In a preorder sequence, leftmost element is root of tree. So, we know 'A' is root from given preorder sequence.
By searching 'A' in inorder sequence, we can find out all elements on left side of 'A' are in left subtree
and all elements on right are in right subtree.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def build_tree_helper(inorder, preorder, in_start, in_end):
if in_start > in_end:
return None
tree_node = Node(preorder[build_tree_helper.pre_index])
build_tree_helper.pre_index += 1
if in_start == in_end:
return tree_node
in_index = search(inorder, in_start, in_end, tree_node.data)
tree_node.left = build_tree_helper(inorder, preorder, in_start, in_index - 1)
tree_node.right = build_tree_helper(inorder, preorder, in_index + 1, in_end)
return tree_node
def search(arr, start, end, ele):
for i in range(start, end+1):
if arr[i] == ele:
return i
return -1
def build_tree(inorder, preorder):
build_tree_helper.pre_index = 0
n = len(inorder)
return build_tree_helper(inorder, preorder, 0, n-1)
class TestBuildTree(unittest.TestCase):
def test_build_tree(self):
inorder = ['D', 'B', 'E', 'A', 'F', 'C']
preorder = ['A', 'B', 'D', 'E', 'C', 'F']
root = build_tree(inorder, preorder)
self.assertEqual(root.data, 'A')
self.assertEqual(root.left.data, 'B')
self.assertEqual(root.right.data, 'C')
self.assertEqual(root.left.left.data, 'D')
self.assertEqual(root.left.right.data, 'E')
self.assertEqual(root.right.left.data, 'F')
|
|
55d9a2067dbb8875638ad2ea6bd340272a51c432
|
test/command_line/test_cosym.py
|
test/command_line/test_cosym.py
|
from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_cosym(regression_data, run_in_tmpdir):
reg_path = regression_data("multi_crystal_proteinase_k").strpath
command = ['dials.cosym']
for i in [1, 2, 3, 4, 5, 7, 8, 10]:
command.append(os.path.join(reg_path, "experiments_"+str(i)+".json"))
command.append(os.path.join(reg_path, "reflections_"+str(i)+".pickle"))
result = procrunner.run_process(command)
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
Add command line test for cosym.py
|
Add command line test for cosym.py
|
Python
|
bsd-3-clause
|
dials/dials,dials/dials,dials/dials,dials/dials,dials/dials
|
Add command line test for cosym.py
|
from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_cosym(regression_data, run_in_tmpdir):
reg_path = regression_data("multi_crystal_proteinase_k").strpath
command = ['dials.cosym']
for i in [1, 2, 3, 4, 5, 7, 8, 10]:
command.append(os.path.join(reg_path, "experiments_"+str(i)+".json"))
command.append(os.path.join(reg_path, "reflections_"+str(i)+".pickle"))
result = procrunner.run_process(command)
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
<commit_before><commit_msg>Add command line test for cosym.py<commit_after>
|
from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_cosym(regression_data, run_in_tmpdir):
reg_path = regression_data("multi_crystal_proteinase_k").strpath
command = ['dials.cosym']
for i in [1, 2, 3, 4, 5, 7, 8, 10]:
command.append(os.path.join(reg_path, "experiments_"+str(i)+".json"))
command.append(os.path.join(reg_path, "reflections_"+str(i)+".pickle"))
result = procrunner.run_process(command)
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
Add command line test for cosym.pyfrom __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_cosym(regression_data, run_in_tmpdir):
reg_path = regression_data("multi_crystal_proteinase_k").strpath
command = ['dials.cosym']
for i in [1, 2, 3, 4, 5, 7, 8, 10]:
command.append(os.path.join(reg_path, "experiments_"+str(i)+".json"))
command.append(os.path.join(reg_path, "reflections_"+str(i)+".pickle"))
result = procrunner.run_process(command)
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
<commit_before><commit_msg>Add command line test for cosym.py<commit_after>from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_cosym(regression_data, run_in_tmpdir):
reg_path = regression_data("multi_crystal_proteinase_k").strpath
command = ['dials.cosym']
for i in [1, 2, 3, 4, 5, 7, 8, 10]:
command.append(os.path.join(reg_path, "experiments_"+str(i)+".json"))
command.append(os.path.join(reg_path, "reflections_"+str(i)+".pickle"))
result = procrunner.run_process(command)
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
|
35185be5f28e70f510954bc0352ec6ee625d8e98
|
genmake/tests/config_test.py
|
genmake/tests/config_test.py
|
#!/usr/bin/env python3
"""
Unit testing suite for config module.
"""
# --------------------------------- MODULES -----------------------------------
import unittest
# Avoid import globbing: each function is imported separately instead.
import genmake
# --------------------------------- MODULES -----------------------------------
class TestConfig(unittest.TestCase):
"""
Main unit testing suite, which is a subclass of 'unittest.TestCase'.
"""
pass
if __name__ == "__main__":
unittest.main()
|
Add unit test module template for 'config' module
|
Add unit test module template for 'config' module
|
Python
|
bsd-2-clause
|
jhxie/skaff,jhxie/skaff,jhxie/skaff,jhxie/genmake
|
Add unit test module template for 'config' module
|
#!/usr/bin/env python3
"""
Unit testing suite for config module.
"""
# --------------------------------- MODULES -----------------------------------
import unittest
# Avoid import globbing: each function is imported separately instead.
import genmake
# --------------------------------- MODULES -----------------------------------
class TestConfig(unittest.TestCase):
"""
Main unit testing suite, which is a subclass of 'unittest.TestCase'.
"""
pass
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test module template for 'config' module<commit_after>
|
#!/usr/bin/env python3
"""
Unit testing suite for config module.
"""
# --------------------------------- MODULES -----------------------------------
import unittest
# Avoid import globbing: each function is imported separately instead.
import genmake
# --------------------------------- MODULES -----------------------------------
class TestConfig(unittest.TestCase):
"""
Main unit testing suite, which is a subclass of 'unittest.TestCase'.
"""
pass
if __name__ == "__main__":
unittest.main()
|
Add unit test module template for 'config' module#!/usr/bin/env python3
"""
Unit testing suite for config module.
"""
# --------------------------------- MODULES -----------------------------------
import unittest
# Avoid import globbing: each function is imported separately instead.
import genmake
# --------------------------------- MODULES -----------------------------------
class TestConfig(unittest.TestCase):
"""
Main unit testing suite, which is a subclass of 'unittest.TestCase'.
"""
pass
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unit test module template for 'config' module<commit_after>#!/usr/bin/env python3
"""
Unit testing suite for config module.
"""
# --------------------------------- MODULES -----------------------------------
import unittest
# Avoid import globbing: each function is imported separately instead.
import genmake
# --------------------------------- MODULES -----------------------------------
class TestConfig(unittest.TestCase):
"""
Main unit testing suite, which is a subclass of 'unittest.TestCase'.
"""
pass
if __name__ == "__main__":
unittest.main()
|
|
d162d77611be23b55c2efd9c902765467c1162fe
|
plyer/tests/test_cpu.py
|
plyer/tests/test_cpu.py
|
'''
TestCPU
=======
Tested platforms:
* Linux - nproc
'''
import unittest
from plyer.tests.common import PlatformTest, platform_import
class MockedNProc(object):
'''
Mocked object used instead of 'nproc' binary in the Linux specific API
plyer.platforms.linux.cpu. The same output structure is tested for
the range of <min_version, max_version>.
.. note:: Extend the object with another data sample if it does not match.
'''
min_version = '8.21'
max_version = '8.21'
logical_cores = 99
def __init__(self, *args, **kwargs):
# only to ignore all args, kwargs
pass
@staticmethod
def communicate():
'''
Mock Popen.communicate, so that 'nproc' isn't used.
'''
return (str(MockedNProc.logical_cores).encode('utf-8'), )
@staticmethod
def whereis_exe(binary):
'''
Mock whereis_exe, so that it looks like
Linux NProc binary is present on the system.
'''
return binary == 'nproc'
@staticmethod
def logical():
'''
Return percentage from mocked data.
'''
return int(MockedNProc.logical_cores)
class TestCPU(unittest.TestCase):
'''
TestCase for plyer.cpu.
'''
@PlatformTest('linux')
def test_cpu_linux_logical(self):
'''
Test mocked Linux NProc for plyer.cpu.
'''
cpu = platform_import(
platform='linux',
module_name='cpu',
whereis_exe=MockedNProc.whereis_exe
)
cpu.Popen = MockedNProc
cpu = cpu.instance()
self.assertEqual(
cpu.logical, MockedNProc.logical()
)
if __name__ == '__main__':
unittest.main()
|
Add test for GNU/Linux CPU logical cores
|
Add test for GNU/Linux CPU logical cores
|
Python
|
mit
|
kivy/plyer,KeyWeeUsr/plyer,KeyWeeUsr/plyer,kivy/plyer,KeyWeeUsr/plyer,kivy/plyer
|
Add test for GNU/Linux CPU logical cores
|
'''
TestCPU
=======
Tested platforms:
* Linux - nproc
'''
import unittest
from plyer.tests.common import PlatformTest, platform_import
class MockedNProc(object):
'''
Mocked object used instead of 'nproc' binary in the Linux specific API
plyer.platforms.linux.cpu. The same output structure is tested for
the range of <min_version, max_version>.
.. note:: Extend the object with another data sample if it does not match.
'''
min_version = '8.21'
max_version = '8.21'
logical_cores = 99
def __init__(self, *args, **kwargs):
# only to ignore all args, kwargs
pass
@staticmethod
def communicate():
'''
Mock Popen.communicate, so that 'nproc' isn't used.
'''
return (str(MockedNProc.logical_cores).encode('utf-8'), )
@staticmethod
def whereis_exe(binary):
'''
Mock whereis_exe, so that it looks like
Linux NProc binary is present on the system.
'''
return binary == 'nproc'
@staticmethod
def logical():
'''
Return percentage from mocked data.
'''
return int(MockedNProc.logical_cores)
class TestCPU(unittest.TestCase):
'''
TestCase for plyer.cpu.
'''
@PlatformTest('linux')
def test_cpu_linux_logical(self):
'''
Test mocked Linux NProc for plyer.cpu.
'''
cpu = platform_import(
platform='linux',
module_name='cpu',
whereis_exe=MockedNProc.whereis_exe
)
cpu.Popen = MockedNProc
cpu = cpu.instance()
self.assertEqual(
cpu.logical, MockedNProc.logical()
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for GNU/Linux CPU logical cores<commit_after>
|
'''
TestCPU
=======
Tested platforms:
* Linux - nproc
'''
import unittest
from plyer.tests.common import PlatformTest, platform_import
class MockedNProc(object):
'''
Mocked object used instead of 'nproc' binary in the Linux specific API
plyer.platforms.linux.cpu. The same output structure is tested for
the range of <min_version, max_version>.
.. note:: Extend the object with another data sample if it does not match.
'''
min_version = '8.21'
max_version = '8.21'
logical_cores = 99
def __init__(self, *args, **kwargs):
# only to ignore all args, kwargs
pass
@staticmethod
def communicate():
'''
Mock Popen.communicate, so that 'nproc' isn't used.
'''
return (str(MockedNProc.logical_cores).encode('utf-8'), )
@staticmethod
def whereis_exe(binary):
'''
Mock whereis_exe, so that it looks like
Linux NProc binary is present on the system.
'''
return binary == 'nproc'
@staticmethod
def logical():
'''
Return percentage from mocked data.
'''
return int(MockedNProc.logical_cores)
class TestCPU(unittest.TestCase):
'''
TestCase for plyer.cpu.
'''
@PlatformTest('linux')
def test_cpu_linux_logical(self):
'''
Test mocked Linux NProc for plyer.cpu.
'''
cpu = platform_import(
platform='linux',
module_name='cpu',
whereis_exe=MockedNProc.whereis_exe
)
cpu.Popen = MockedNProc
cpu = cpu.instance()
self.assertEqual(
cpu.logical, MockedNProc.logical()
)
if __name__ == '__main__':
unittest.main()
|
Add test for GNU/Linux CPU logical cores'''
TestCPU
=======
Tested platforms:
* Linux - nproc
'''
import unittest
from plyer.tests.common import PlatformTest, platform_import
class MockedNProc(object):
'''
Mocked object used instead of 'nproc' binary in the Linux specific API
plyer.platforms.linux.cpu. The same output structure is tested for
the range of <min_version, max_version>.
.. note:: Extend the object with another data sample if it does not match.
'''
min_version = '8.21'
max_version = '8.21'
logical_cores = 99
def __init__(self, *args, **kwargs):
# only to ignore all args, kwargs
pass
@staticmethod
def communicate():
'''
Mock Popen.communicate, so that 'nproc' isn't used.
'''
return (str(MockedNProc.logical_cores).encode('utf-8'), )
@staticmethod
def whereis_exe(binary):
'''
Mock whereis_exe, so that it looks like
Linux NProc binary is present on the system.
'''
return binary == 'nproc'
@staticmethod
def logical():
'''
Return percentage from mocked data.
'''
return int(MockedNProc.logical_cores)
class TestCPU(unittest.TestCase):
'''
TestCase for plyer.cpu.
'''
@PlatformTest('linux')
def test_cpu_linux_logical(self):
'''
Test mocked Linux NProc for plyer.cpu.
'''
cpu = platform_import(
platform='linux',
module_name='cpu',
whereis_exe=MockedNProc.whereis_exe
)
cpu.Popen = MockedNProc
cpu = cpu.instance()
self.assertEqual(
cpu.logical, MockedNProc.logical()
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for GNU/Linux CPU logical cores<commit_after>'''
TestCPU
=======
Tested platforms:
* Linux - nproc
'''
import unittest
from plyer.tests.common import PlatformTest, platform_import
class MockedNProc(object):
'''
Mocked object used instead of 'nproc' binary in the Linux specific API
plyer.platforms.linux.cpu. The same output structure is tested for
the range of <min_version, max_version>.
.. note:: Extend the object with another data sample if it does not match.
'''
min_version = '8.21'
max_version = '8.21'
logical_cores = 99
def __init__(self, *args, **kwargs):
# only to ignore all args, kwargs
pass
@staticmethod
def communicate():
'''
Mock Popen.communicate, so that 'nproc' isn't used.
'''
return (str(MockedNProc.logical_cores).encode('utf-8'), )
@staticmethod
def whereis_exe(binary):
'''
Mock whereis_exe, so that it looks like
Linux NProc binary is present on the system.
'''
return binary == 'nproc'
@staticmethod
def logical():
'''
Return percentage from mocked data.
'''
return int(MockedNProc.logical_cores)
class TestCPU(unittest.TestCase):
'''
TestCase for plyer.cpu.
'''
@PlatformTest('linux')
def test_cpu_linux_logical(self):
'''
Test mocked Linux NProc for plyer.cpu.
'''
cpu = platform_import(
platform='linux',
module_name='cpu',
whereis_exe=MockedNProc.whereis_exe
)
cpu.Popen = MockedNProc
cpu = cpu.instance()
self.assertEqual(
cpu.logical, MockedNProc.logical()
)
if __name__ == '__main__':
unittest.main()
|
|
5c24579621efbf9f95f32b2feb86d507e112c26c
|
aids/sorting_and_searching/intersection_sorted_arrays.py
|
aids/sorting_and_searching/intersection_sorted_arrays.py
|
'''
In this module, we implement a function which gets the intersection
of two sorted arrays.
'''
def intersection_sorted_arrays(arr_1, arr_2):
'''
Return the intersection of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
i += 1
else:
j += 1
return result
|
Return intersection of two sorted arrays
|
Return intersection of two sorted arrays
|
Python
|
mit
|
ueg1990/aids
|
Return intersection of two sorted arrays
|
'''
In this module, we implement a function which gets the intersection
of two sorted arrays.
'''
def intersection_sorted_arrays(arr_1, arr_2):
'''
Return the intersection of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
i += 1
else:
j += 1
return result
|
<commit_before><commit_msg>Return intersection of two sorted arrays<commit_after>
|
'''
In this module, we implement a function which gets the intersection
of two sorted arrays.
'''
def intersection_sorted_arrays(arr_1, arr_2):
'''
Return the intersection of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
i += 1
else:
j += 1
return result
|
Return intersection of two sorted arrays'''
In this module, we implement a function which gets the intersection
of two sorted arrays.
'''
def intersection_sorted_arrays(arr_1, arr_2):
'''
Return the intersection of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
i += 1
else:
j += 1
return result
|
<commit_before><commit_msg>Return intersection of two sorted arrays<commit_after>'''
In this module, we implement a function which gets the intersection
of two sorted arrays.
'''
def intersection_sorted_arrays(arr_1, arr_2):
'''
Return the intersection of two sorted arrays
'''
result = []
i,j = 0,0
while i < len(arr_1) and j < len(arr_2):
if arr_1[i] == arr_2[j]:
result.append(arr_1[i])
i += 1
j+=1
elif arr_1[i] < arr_2[j]:
i += 1
else:
j += 1
return result
|
|
2d08ba191396a5e18142adb3131791d6b19373eb
|
nagios/check_n0q.py
|
nagios/check_n0q.py
|
"""
Check the production of N0Q data!
"""
import json
import datetime
import sys
j = json.load( open('/home/ldm/data/gis/images/4326/USCOMP/n0q_0.json') )
prodtime = datetime.datetime.strptime(j['meta']['valid'], '%Y-%m-%dT%H:%M:%SZ')
radarson = int(j['meta']['radar_quorum'].split("/")[0])
gentime = j['meta']['processing_time_secs']
utcnow = datetime.datetime.utcnow()
latency = (utcnow - prodtime).seconds
stats = "gentime=%s;180;240;300 radarson=%s;100;75;50" % (gentime, radarson)
if gentime < 300 and radarson > 50 and latency < 60*10:
print 'OK |%s' % (stats)
sys.exit(0)
if gentime > 300:
print 'CRITICAL - gentime %s|%s' % (gentime, stats)
sys.exit(2)
if latency > 600:
print 'CRITICAL - latency %s|%s' % (prodtime, stats)
sys.exit(2)
if radarson < 50:
print 'CRITICAL - radarson %s|%s' % (radarson, stats)
sys.exit(2)
|
Add N0Q check for iem21 to run
|
Add N0Q check for iem21 to run
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add N0Q check for iem21 to run
|
"""
Check the production of N0Q data!
"""
import json
import datetime
import sys
j = json.load( open('/home/ldm/data/gis/images/4326/USCOMP/n0q_0.json') )
prodtime = datetime.datetime.strptime(j['meta']['valid'], '%Y-%m-%dT%H:%M:%SZ')
radarson = int(j['meta']['radar_quorum'].split("/")[0])
gentime = j['meta']['processing_time_secs']
utcnow = datetime.datetime.utcnow()
latency = (utcnow - prodtime).seconds
stats = "gentime=%s;180;240;300 radarson=%s;100;75;50" % (gentime, radarson)
if gentime < 300 and radarson > 50 and latency < 60*10:
print 'OK |%s' % (stats)
sys.exit(0)
if gentime > 300:
print 'CRITICAL - gentime %s|%s' % (gentime, stats)
sys.exit(2)
if latency > 600:
print 'CRITICAL - latency %s|%s' % (prodtime, stats)
sys.exit(2)
if radarson < 50:
print 'CRITICAL - radarson %s|%s' % (radarson, stats)
sys.exit(2)
|
<commit_before><commit_msg>Add N0Q check for iem21 to run<commit_after>
|
"""
Check the production of N0Q data!
"""
import json
import datetime
import sys
j = json.load( open('/home/ldm/data/gis/images/4326/USCOMP/n0q_0.json') )
prodtime = datetime.datetime.strptime(j['meta']['valid'], '%Y-%m-%dT%H:%M:%SZ')
radarson = int(j['meta']['radar_quorum'].split("/")[0])
gentime = j['meta']['processing_time_secs']
utcnow = datetime.datetime.utcnow()
latency = (utcnow - prodtime).seconds
stats = "gentime=%s;180;240;300 radarson=%s;100;75;50" % (gentime, radarson)
if gentime < 300 and radarson > 50 and latency < 60*10:
print 'OK |%s' % (stats)
sys.exit(0)
if gentime > 300:
print 'CRITICAL - gentime %s|%s' % (gentime, stats)
sys.exit(2)
if latency > 600:
print 'CRITICAL - latency %s|%s' % (prodtime, stats)
sys.exit(2)
if radarson < 50:
print 'CRITICAL - radarson %s|%s' % (radarson, stats)
sys.exit(2)
|
Add N0Q check for iem21 to run"""
Check the production of N0Q data!
"""
import json
import datetime
import sys
j = json.load( open('/home/ldm/data/gis/images/4326/USCOMP/n0q_0.json') )
prodtime = datetime.datetime.strptime(j['meta']['valid'], '%Y-%m-%dT%H:%M:%SZ')
radarson = int(j['meta']['radar_quorum'].split("/")[0])
gentime = j['meta']['processing_time_secs']
utcnow = datetime.datetime.utcnow()
latency = (utcnow - prodtime).seconds
stats = "gentime=%s;180;240;300 radarson=%s;100;75;50" % (gentime, radarson)
if gentime < 300 and radarson > 50 and latency < 60*10:
print 'OK |%s' % (stats)
sys.exit(0)
if gentime > 300:
print 'CRITICAL - gentime %s|%s' % (gentime, stats)
sys.exit(2)
if latency > 600:
print 'CRITICAL - latency %s|%s' % (prodtime, stats)
sys.exit(2)
if radarson < 50:
print 'CRITICAL - radarson %s|%s' % (radarson, stats)
sys.exit(2)
|
<commit_before><commit_msg>Add N0Q check for iem21 to run<commit_after>"""
Check the production of N0Q data!
"""
import json
import datetime
import sys
j = json.load( open('/home/ldm/data/gis/images/4326/USCOMP/n0q_0.json') )
prodtime = datetime.datetime.strptime(j['meta']['valid'], '%Y-%m-%dT%H:%M:%SZ')
radarson = int(j['meta']['radar_quorum'].split("/")[0])
gentime = j['meta']['processing_time_secs']
utcnow = datetime.datetime.utcnow()
latency = (utcnow - prodtime).seconds
stats = "gentime=%s;180;240;300 radarson=%s;100;75;50" % (gentime, radarson)
if gentime < 300 and radarson > 50 and latency < 60*10:
print 'OK |%s' % (stats)
sys.exit(0)
if gentime > 300:
print 'CRITICAL - gentime %s|%s' % (gentime, stats)
sys.exit(2)
if latency > 600:
print 'CRITICAL - latency %s|%s' % (prodtime, stats)
sys.exit(2)
if radarson < 50:
print 'CRITICAL - radarson %s|%s' % (radarson, stats)
sys.exit(2)
|
|
63ec9d509b301b69737e95517de30af70e13a51d
|
cycle_context.py
|
cycle_context.py
|
import sublime, sublime_plugin
values = {}
class CycleContextTracker(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
prefix = "cycle_"
if key.startswith(prefix):
number = int(key[len(prefix):len(prefix)+1])
actual_key = key[len(prefix)+1]
if actual_key not in values:
values[actual_key] = 0
if int(operand) == values[actual_key]:
values[actual_key] = (values[actual_key] + 1) % number
return True
else:
return False
return None
|
Add a convenience way to have a context key to cycle between values
|
Add a convenience way to have a context key to cycle between values
|
Python
|
mit
|
ktuan89/sublimeplugins
|
Add a convenience way to have a context key to cycle between values
|
import sublime, sublime_plugin
values = {}
class CycleContextTracker(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
prefix = "cycle_"
if key.startswith(prefix):
number = int(key[len(prefix):len(prefix)+1])
actual_key = key[len(prefix)+1]
if actual_key not in values:
values[actual_key] = 0
if int(operand) == values[actual_key]:
values[actual_key] = (values[actual_key] + 1) % number
return True
else:
return False
return None
|
<commit_before><commit_msg>Add a convenience way to have a context key to cycle between values<commit_after>
|
import sublime, sublime_plugin
values = {}
class CycleContextTracker(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
prefix = "cycle_"
if key.startswith(prefix):
number = int(key[len(prefix):len(prefix)+1])
actual_key = key[len(prefix)+1]
if actual_key not in values:
values[actual_key] = 0
if int(operand) == values[actual_key]:
values[actual_key] = (values[actual_key] + 1) % number
return True
else:
return False
return None
|
Add a convenience way to have a context key to cycle between valuesimport sublime, sublime_plugin
values = {}
class CycleContextTracker(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
prefix = "cycle_"
if key.startswith(prefix):
number = int(key[len(prefix):len(prefix)+1])
actual_key = key[len(prefix)+1]
if actual_key not in values:
values[actual_key] = 0
if int(operand) == values[actual_key]:
values[actual_key] = (values[actual_key] + 1) % number
return True
else:
return False
return None
|
<commit_before><commit_msg>Add a convenience way to have a context key to cycle between values<commit_after>import sublime, sublime_plugin
values = {}
class CycleContextTracker(sublime_plugin.EventListener):
def on_query_context(self, view, key, operator, operand, match_all):
prefix = "cycle_"
if key.startswith(prefix):
number = int(key[len(prefix):len(prefix)+1])
actual_key = key[len(prefix)+1]
if actual_key not in values:
values[actual_key] = 0
if int(operand) == values[actual_key]:
values[actual_key] = (values[actual_key] + 1) % number
return True
else:
return False
return None
|
|
bd4f8f67283a8b5a9dc5a83e943e7a59b85999e0
|
corehq/messaging/management/commands/resync_case_type.py
|
corehq/messaging/management/commands/resync_case_type.py
|
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.messaging.tasks import sync_case_for_messaging
from corehq.util.log import with_progress_bar
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Sync messaging models for cases"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('case_type')
parser.add_argument('--limit', type=int, default=-1)
def handle(self, domain, case_type, limit, **options):
print("Fetching case ids for %s/%s ..." % (domain, case_type))
case_ids = CaseAccessors(domain).get_case_ids_in_domain(case_type)
print("Creating tasks...")
if limit > 0:
case_ids = case_ids[:limit]
print("Limiting to %s tasks..." % limit)
for case_id in with_progress_bar(case_ids):
sync_case_for_messaging.delay(domain, case_id)
|
Add command for resyncing cases for messaging
|
Add command for resyncing cases for messaging
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add command for resyncing cases for messaging
|
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.messaging.tasks import sync_case_for_messaging
from corehq.util.log import with_progress_bar
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Sync messaging models for cases"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('case_type')
parser.add_argument('--limit', type=int, default=-1)
def handle(self, domain, case_type, limit, **options):
print("Fetching case ids for %s/%s ..." % (domain, case_type))
case_ids = CaseAccessors(domain).get_case_ids_in_domain(case_type)
print("Creating tasks...")
if limit > 0:
case_ids = case_ids[:limit]
print("Limiting to %s tasks..." % limit)
for case_id in with_progress_bar(case_ids):
sync_case_for_messaging.delay(domain, case_id)
|
<commit_before><commit_msg>Add command for resyncing cases for messaging<commit_after>
|
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.messaging.tasks import sync_case_for_messaging
from corehq.util.log import with_progress_bar
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Sync messaging models for cases"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('case_type')
parser.add_argument('--limit', type=int, default=-1)
def handle(self, domain, case_type, limit, **options):
print("Fetching case ids for %s/%s ..." % (domain, case_type))
case_ids = CaseAccessors(domain).get_case_ids_in_domain(case_type)
print("Creating tasks...")
if limit > 0:
case_ids = case_ids[:limit]
print("Limiting to %s tasks..." % limit)
for case_id in with_progress_bar(case_ids):
sync_case_for_messaging.delay(domain, case_id)
|
Add command for resyncing cases for messagingfrom corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.messaging.tasks import sync_case_for_messaging
from corehq.util.log import with_progress_bar
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Sync messaging models for cases"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('case_type')
parser.add_argument('--limit', type=int, default=-1)
def handle(self, domain, case_type, limit, **options):
print("Fetching case ids for %s/%s ..." % (domain, case_type))
case_ids = CaseAccessors(domain).get_case_ids_in_domain(case_type)
print("Creating tasks...")
if limit > 0:
case_ids = case_ids[:limit]
print("Limiting to %s tasks..." % limit)
for case_id in with_progress_bar(case_ids):
sync_case_for_messaging.delay(domain, case_id)
|
<commit_before><commit_msg>Add command for resyncing cases for messaging<commit_after>from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.messaging.tasks import sync_case_for_messaging
from corehq.util.log import with_progress_bar
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Sync messaging models for cases"
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('case_type')
parser.add_argument('--limit', type=int, default=-1)
def handle(self, domain, case_type, limit, **options):
print("Fetching case ids for %s/%s ..." % (domain, case_type))
case_ids = CaseAccessors(domain).get_case_ids_in_domain(case_type)
print("Creating tasks...")
if limit > 0:
case_ids = case_ids[:limit]
print("Limiting to %s tasks..." % limit)
for case_id in with_progress_bar(case_ids):
sync_case_for_messaging.delay(domain, case_id)
|
|
99bcf23c86c3ba2a1b06e5c0dc90f5c3111f5c92
|
data_collection/social_media/twitter/sentiment_analysis/stanford_nlp.py
|
data_collection/social_media/twitter/sentiment_analysis/stanford_nlp.py
|
#!/usr/bin/env python2
import re
import os
import time
import subprocess as s
tweet_file = "../tweets.csv"
# Change it!!!
stanford_nlp_dir = "/home/tonyo/EPFL/big_data/sentiment_analysis/stanford-corenlp-full-2014-01-04"
def process_tweets(tweets):
os.chdir(stanford_nlp_dir)
cmd_str = 'java -cp "*" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -stdin'
proc = s.Popen([cmd_str], stdin=s.PIPE, stdout=s.PIPE, shell=True)
# Initial probe, to avoid time measurements skews
proc.stdin.write("\n")
proc.stdout.readline()
total_time = 0.0
tweet_number = 20
start_tweet = 100
responses = []
i = 1
print "Number of tweets loaded:", tweet_number
for t in tweets[start_tweet:start_tweet + tweet_number]:
print "Tweet", i
i += 1
proc.stdin.write(t + "\n")
t1 = time.time()
resp = proc.stdout.readline().strip()
print ' ', resp
responses.append(resp)
t2 = time.time()
elapsed = t2 - t1
print t2-t1
total_time += elapsed
avg_per_tweet = total_time / tweet_number
print "Elapsed time:", total_time
print "Average time per tweet:", avg_per_tweet
print "Average speed:", 60 / avg_per_tweet, "(tweets/min)", \
3600 / avg_per_tweet, "(tweets/hour)"
return
def preprocess_tweet(tweet):
tweet = tweet.replace("\n", '')
tweet = tweet.replace("\r", '')
return tweet
def get_tweets():
tweets = []
with open(tweet_file, 'r') as f:
f.readline()
regex = re.compile(r'^(?:[^,]+,){10}(.*)$')
for line in f:
match = re.search(regex, line)
if match:
tweet_text = preprocess_tweet(match.group(1))
tweets.append(tweet_text)
return tweets
tweets = get_tweets()
process_tweets(tweets)
#for t in tweets:
# print t
|
Test sentiment analysis with Stanford NLP
|
Test sentiment analysis with Stanford NLP
|
Python
|
bsd-3-clause
|
FAB4D/humanitas,FAB4D/humanitas,FAB4D/humanitas
|
Test sentiment analysis with Stanford NLP
|
#!/usr/bin/env python2
import re
import os
import time
import subprocess as s
tweet_file = "../tweets.csv"
# Change it!!!
stanford_nlp_dir = "/home/tonyo/EPFL/big_data/sentiment_analysis/stanford-corenlp-full-2014-01-04"
def process_tweets(tweets):
os.chdir(stanford_nlp_dir)
cmd_str = 'java -cp "*" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -stdin'
proc = s.Popen([cmd_str], stdin=s.PIPE, stdout=s.PIPE, shell=True)
# Initial probe, to avoid time measurements skews
proc.stdin.write("\n")
proc.stdout.readline()
total_time = 0.0
tweet_number = 20
start_tweet = 100
responses = []
i = 1
print "Number of tweets loaded:", tweet_number
for t in tweets[start_tweet:start_tweet + tweet_number]:
print "Tweet", i
i += 1
proc.stdin.write(t + "\n")
t1 = time.time()
resp = proc.stdout.readline().strip()
print ' ', resp
responses.append(resp)
t2 = time.time()
elapsed = t2 - t1
print t2-t1
total_time += elapsed
avg_per_tweet = total_time / tweet_number
print "Elapsed time:", total_time
print "Average time per tweet:", avg_per_tweet
print "Average speed:", 60 / avg_per_tweet, "(tweets/min)", \
3600 / avg_per_tweet, "(tweets/hour)"
return
def preprocess_tweet(tweet):
tweet = tweet.replace("\n", '')
tweet = tweet.replace("\r", '')
return tweet
def get_tweets():
tweets = []
with open(tweet_file, 'r') as f:
f.readline()
regex = re.compile(r'^(?:[^,]+,){10}(.*)$')
for line in f:
match = re.search(regex, line)
if match:
tweet_text = preprocess_tweet(match.group(1))
tweets.append(tweet_text)
return tweets
tweets = get_tweets()
process_tweets(tweets)
#for t in tweets:
# print t
|
<commit_before><commit_msg>Test sentiment analysis with Stanford NLP<commit_after>
|
#!/usr/bin/env python2
import re
import os
import time
import subprocess as s
tweet_file = "../tweets.csv"
# Change it!!!
stanford_nlp_dir = "/home/tonyo/EPFL/big_data/sentiment_analysis/stanford-corenlp-full-2014-01-04"
def process_tweets(tweets):
os.chdir(stanford_nlp_dir)
cmd_str = 'java -cp "*" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -stdin'
proc = s.Popen([cmd_str], stdin=s.PIPE, stdout=s.PIPE, shell=True)
# Initial probe, to avoid time measurements skews
proc.stdin.write("\n")
proc.stdout.readline()
total_time = 0.0
tweet_number = 20
start_tweet = 100
responses = []
i = 1
print "Number of tweets loaded:", tweet_number
for t in tweets[start_tweet:start_tweet + tweet_number]:
print "Tweet", i
i += 1
proc.stdin.write(t + "\n")
t1 = time.time()
resp = proc.stdout.readline().strip()
print ' ', resp
responses.append(resp)
t2 = time.time()
elapsed = t2 - t1
print t2-t1
total_time += elapsed
avg_per_tweet = total_time / tweet_number
print "Elapsed time:", total_time
print "Average time per tweet:", avg_per_tweet
print "Average speed:", 60 / avg_per_tweet, "(tweets/min)", \
3600 / avg_per_tweet, "(tweets/hour)"
return
def preprocess_tweet(tweet):
tweet = tweet.replace("\n", '')
tweet = tweet.replace("\r", '')
return tweet
def get_tweets():
tweets = []
with open(tweet_file, 'r') as f:
f.readline()
regex = re.compile(r'^(?:[^,]+,){10}(.*)$')
for line in f:
match = re.search(regex, line)
if match:
tweet_text = preprocess_tweet(match.group(1))
tweets.append(tweet_text)
return tweets
tweets = get_tweets()
process_tweets(tweets)
#for t in tweets:
# print t
|
Test sentiment analysis with Stanford NLP#!/usr/bin/env python2
import re
import os
import time
import subprocess as s
tweet_file = "../tweets.csv"
# Change it!!!
stanford_nlp_dir = "/home/tonyo/EPFL/big_data/sentiment_analysis/stanford-corenlp-full-2014-01-04"
def process_tweets(tweets):
os.chdir(stanford_nlp_dir)
cmd_str = 'java -cp "*" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -stdin'
proc = s.Popen([cmd_str], stdin=s.PIPE, stdout=s.PIPE, shell=True)
# Initial probe, to avoid time measurements skews
proc.stdin.write("\n")
proc.stdout.readline()
total_time = 0.0
tweet_number = 20
start_tweet = 100
responses = []
i = 1
print "Number of tweets loaded:", tweet_number
for t in tweets[start_tweet:start_tweet + tweet_number]:
print "Tweet", i
i += 1
proc.stdin.write(t + "\n")
t1 = time.time()
resp = proc.stdout.readline().strip()
print ' ', resp
responses.append(resp)
t2 = time.time()
elapsed = t2 - t1
print t2-t1
total_time += elapsed
avg_per_tweet = total_time / tweet_number
print "Elapsed time:", total_time
print "Average time per tweet:", avg_per_tweet
print "Average speed:", 60 / avg_per_tweet, "(tweets/min)", \
3600 / avg_per_tweet, "(tweets/hour)"
return
def preprocess_tweet(tweet):
tweet = tweet.replace("\n", '')
tweet = tweet.replace("\r", '')
return tweet
def get_tweets():
tweets = []
with open(tweet_file, 'r') as f:
f.readline()
regex = re.compile(r'^(?:[^,]+,){10}(.*)$')
for line in f:
match = re.search(regex, line)
if match:
tweet_text = preprocess_tweet(match.group(1))
tweets.append(tweet_text)
return tweets
tweets = get_tweets()
process_tweets(tweets)
#for t in tweets:
# print t
|
<commit_before><commit_msg>Test sentiment analysis with Stanford NLP<commit_after>#!/usr/bin/env python2
import re
import os
import time
import subprocess as s
tweet_file = "../tweets.csv"
# Change it!!!
stanford_nlp_dir = "/home/tonyo/EPFL/big_data/sentiment_analysis/stanford-corenlp-full-2014-01-04"
def process_tweets(tweets):
os.chdir(stanford_nlp_dir)
cmd_str = 'java -cp "*" -mx5g edu.stanford.nlp.sentiment.SentimentPipeline -stdin'
proc = s.Popen([cmd_str], stdin=s.PIPE, stdout=s.PIPE, shell=True)
# Initial probe, to avoid time measurements skews
proc.stdin.write("\n")
proc.stdout.readline()
total_time = 0.0
tweet_number = 20
start_tweet = 100
responses = []
i = 1
print "Number of tweets loaded:", tweet_number
for t in tweets[start_tweet:start_tweet + tweet_number]:
print "Tweet", i
i += 1
proc.stdin.write(t + "\n")
t1 = time.time()
resp = proc.stdout.readline().strip()
print ' ', resp
responses.append(resp)
t2 = time.time()
elapsed = t2 - t1
print t2-t1
total_time += elapsed
avg_per_tweet = total_time / tweet_number
print "Elapsed time:", total_time
print "Average time per tweet:", avg_per_tweet
print "Average speed:", 60 / avg_per_tweet, "(tweets/min)", \
3600 / avg_per_tweet, "(tweets/hour)"
return
def preprocess_tweet(tweet):
tweet = tweet.replace("\n", '')
tweet = tweet.replace("\r", '')
return tweet
def get_tweets():
tweets = []
with open(tweet_file, 'r') as f:
f.readline()
regex = re.compile(r'^(?:[^,]+,){10}(.*)$')
for line in f:
match = re.search(regex, line)
if match:
tweet_text = preprocess_tweet(match.group(1))
tweets.append(tweet_text)
return tweets
tweets = get_tweets()
process_tweets(tweets)
#for t in tweets:
# print t
|
|
aed52fd9cbb489548457cb65f135e64370ea8229
|
tests/test_config.py
|
tests/test_config.py
|
import httpretty
from zipa import test_com as t
def test_config_zipa():
t.config.host = 'random'
t.config.prefix = 'prefix'
t.config.append_slash = True
t.config.secure = False
t.config.headers = {
'x-custom-header': 'custom-value'
}
assert t.config['host'] == 'random'
assert t.config['prefix'] == 'prefix'
assert t.config['append_slash']
assert t.config['headers']['x-custom-header'] == 'custom-value'
httpretty.enable()
httpretty.register_uri(httpretty.GET, 'http://randomprefix/a/', status=200,
content_type='application/json', body=u'{"name": "a"}')
assert t.a().name == 'a'
assert httpretty.last_request().headers['x-custom-header'] == 'custom-value'
|
Add tests for custom headers
|
Add tests for custom headers
|
Python
|
apache-2.0
|
PressLabs/zipa
|
Add tests for custom headers
|
import httpretty
from zipa import test_com as t
def test_config_zipa():
t.config.host = 'random'
t.config.prefix = 'prefix'
t.config.append_slash = True
t.config.secure = False
t.config.headers = {
'x-custom-header': 'custom-value'
}
assert t.config['host'] == 'random'
assert t.config['prefix'] == 'prefix'
assert t.config['append_slash']
assert t.config['headers']['x-custom-header'] == 'custom-value'
httpretty.enable()
httpretty.register_uri(httpretty.GET, 'http://randomprefix/a/', status=200,
content_type='application/json', body=u'{"name": "a"}')
assert t.a().name == 'a'
assert httpretty.last_request().headers['x-custom-header'] == 'custom-value'
|
<commit_before><commit_msg>Add tests for custom headers<commit_after>
|
import httpretty
from zipa import test_com as t
def test_config_zipa():
t.config.host = 'random'
t.config.prefix = 'prefix'
t.config.append_slash = True
t.config.secure = False
t.config.headers = {
'x-custom-header': 'custom-value'
}
assert t.config['host'] == 'random'
assert t.config['prefix'] == 'prefix'
assert t.config['append_slash']
assert t.config['headers']['x-custom-header'] == 'custom-value'
httpretty.enable()
httpretty.register_uri(httpretty.GET, 'http://randomprefix/a/', status=200,
content_type='application/json', body=u'{"name": "a"}')
assert t.a().name == 'a'
assert httpretty.last_request().headers['x-custom-header'] == 'custom-value'
|
Add tests for custom headersimport httpretty
from zipa import test_com as t
def test_config_zipa():
t.config.host = 'random'
t.config.prefix = 'prefix'
t.config.append_slash = True
t.config.secure = False
t.config.headers = {
'x-custom-header': 'custom-value'
}
assert t.config['host'] == 'random'
assert t.config['prefix'] == 'prefix'
assert t.config['append_slash']
assert t.config['headers']['x-custom-header'] == 'custom-value'
httpretty.enable()
httpretty.register_uri(httpretty.GET, 'http://randomprefix/a/', status=200,
content_type='application/json', body=u'{"name": "a"}')
assert t.a().name == 'a'
assert httpretty.last_request().headers['x-custom-header'] == 'custom-value'
|
<commit_before><commit_msg>Add tests for custom headers<commit_after>import httpretty
from zipa import test_com as t
def test_config_zipa():
t.config.host = 'random'
t.config.prefix = 'prefix'
t.config.append_slash = True
t.config.secure = False
t.config.headers = {
'x-custom-header': 'custom-value'
}
assert t.config['host'] == 'random'
assert t.config['prefix'] == 'prefix'
assert t.config['append_slash']
assert t.config['headers']['x-custom-header'] == 'custom-value'
httpretty.enable()
httpretty.register_uri(httpretty.GET, 'http://randomprefix/a/', status=200,
content_type='application/json', body=u'{"name": "a"}')
assert t.a().name == 'a'
assert httpretty.last_request().headers['x-custom-header'] == 'custom-value'
|
|
451a0773da3aafc525e60e2a222fd4d1613589f6
|
tests/test_splits.py
|
tests/test_splits.py
|
from tests.base import IntegrationTest
class TestBurndown(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
|
import re
from tests.base import IntegrationTest
from tasklib.task import local_zone
from datetime import datetime
class TestBurndownDailySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestBurndownMonthlySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownMonthly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.monthly")
assert "Monthly Burndown" in self.read_buffer()[0]
class TestBurndownWeeklySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownWeekly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.weekly")
assert "Weekly Burndown" in self.read_buffer()[0]
class TestCalendarSimple(IntegrationTest):
def execute(self):
self.command("TaskWikiCalendar")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer calendar")
# Assert each day is displayed at least once.
output = self.read_buffer()
for day in map(str, range(1, 29)):
assert any(day in line for line in output)
class TestGhistorySimple(IntegrationTest):
tasks = [
dict(description="test task"),
dict(description="completed task 1", status="completed", end="now"),
dict(description="completed task 2", status="completed", end="now"),
dict(description="deleted task", status="deleted"),
]
def execute(self):
self.command("TaskWikiGhistoryAnnual")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer ghistory.annual")
output = self.read_buffer()
header_words = ("Year", "Number", "Added", "Completed", "Deleted")
for word in header_words:
assert word in output[0]
legend_words = ("Legend", "Added", "Completed", "Deleted")
for word in legend_words:
assert re.search(word, output[-1], re.IGNORECASE)
current_year = local_zone.localize(datetime.now()).year
assert str(current_year) in '\n'.join(output)
|
Add simple tests for burndown, calendar and ghistory commands
|
tests: Add simple tests for burndown, calendar and ghistory commands
|
Python
|
mit
|
Spirotot/taskwiki,phha/taskwiki
|
from tests.base import IntegrationTest
class TestBurndown(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
tests: Add simple tests for burndown, calendar and ghistory commands
|
import re
from tests.base import IntegrationTest
from tasklib.task import local_zone
from datetime import datetime
class TestBurndownDailySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestBurndownMonthlySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownMonthly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.monthly")
assert "Monthly Burndown" in self.read_buffer()[0]
class TestBurndownWeeklySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownWeekly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.weekly")
assert "Weekly Burndown" in self.read_buffer()[0]
class TestCalendarSimple(IntegrationTest):
def execute(self):
self.command("TaskWikiCalendar")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer calendar")
# Assert each day is displayed at least once.
output = self.read_buffer()
for day in map(str, range(1, 29)):
assert any(day in line for line in output)
class TestGhistorySimple(IntegrationTest):
tasks = [
dict(description="test task"),
dict(description="completed task 1", status="completed", end="now"),
dict(description="completed task 2", status="completed", end="now"),
dict(description="deleted task", status="deleted"),
]
def execute(self):
self.command("TaskWikiGhistoryAnnual")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer ghistory.annual")
output = self.read_buffer()
header_words = ("Year", "Number", "Added", "Completed", "Deleted")
for word in header_words:
assert word in output[0]
legend_words = ("Legend", "Added", "Completed", "Deleted")
for word in legend_words:
assert re.search(word, output[-1], re.IGNORECASE)
current_year = local_zone.localize(datetime.now()).year
assert str(current_year) in '\n'.join(output)
|
<commit_before>from tests.base import IntegrationTest
class TestBurndown(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
<commit_msg>tests: Add simple tests for burndown, calendar and ghistory commands<commit_after>
|
import re
from tests.base import IntegrationTest
from tasklib.task import local_zone
from datetime import datetime
class TestBurndownDailySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestBurndownMonthlySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownMonthly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.monthly")
assert "Monthly Burndown" in self.read_buffer()[0]
class TestBurndownWeeklySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownWeekly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.weekly")
assert "Weekly Burndown" in self.read_buffer()[0]
class TestCalendarSimple(IntegrationTest):
def execute(self):
self.command("TaskWikiCalendar")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer calendar")
# Assert each day is displayed at least once.
output = self.read_buffer()
for day in map(str, range(1, 29)):
assert any(day in line for line in output)
class TestGhistorySimple(IntegrationTest):
tasks = [
dict(description="test task"),
dict(description="completed task 1", status="completed", end="now"),
dict(description="completed task 2", status="completed", end="now"),
dict(description="deleted task", status="deleted"),
]
def execute(self):
self.command("TaskWikiGhistoryAnnual")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer ghistory.annual")
output = self.read_buffer()
header_words = ("Year", "Number", "Added", "Completed", "Deleted")
for word in header_words:
assert word in output[0]
legend_words = ("Legend", "Added", "Completed", "Deleted")
for word in legend_words:
assert re.search(word, output[-1], re.IGNORECASE)
current_year = local_zone.localize(datetime.now()).year
assert str(current_year) in '\n'.join(output)
|
from tests.base import IntegrationTest
class TestBurndown(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
tests: Add simple tests for burndown, calendar and ghistory commandsimport re
from tests.base import IntegrationTest
from tasklib.task import local_zone
from datetime import datetime
class TestBurndownDailySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestBurndownMonthlySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownMonthly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.monthly")
assert "Monthly Burndown" in self.read_buffer()[0]
class TestBurndownWeeklySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownWeekly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.weekly")
assert "Weekly Burndown" in self.read_buffer()[0]
class TestCalendarSimple(IntegrationTest):
def execute(self):
self.command("TaskWikiCalendar")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer calendar")
# Assert each day is displayed at least once.
output = self.read_buffer()
for day in map(str, range(1, 29)):
assert any(day in line for line in output)
class TestGhistorySimple(IntegrationTest):
tasks = [
dict(description="test task"),
dict(description="completed task 1", status="completed", end="now"),
dict(description="completed task 2", status="completed", end="now"),
dict(description="deleted task", status="deleted"),
]
def execute(self):
self.command("TaskWikiGhistoryAnnual")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer ghistory.annual")
output = self.read_buffer()
header_words = ("Year", "Number", "Added", "Completed", "Deleted")
for word in header_words:
assert word in output[0]
legend_words = ("Legend", "Added", "Completed", "Deleted")
for word in legend_words:
assert re.search(word, output[-1], re.IGNORECASE)
current_year = local_zone.localize(datetime.now()).year
assert str(current_year) in '\n'.join(output)
|
<commit_before>from tests.base import IntegrationTest
class TestBurndown(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
<commit_msg>tests: Add simple tests for burndown, calendar and ghistory commands<commit_after>import re
from tests.base import IntegrationTest
from tasklib.task import local_zone
from datetime import datetime
class TestBurndownDailySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownDaily")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.daily")
assert "Daily Burndown" in self.read_buffer()[0]
class TestBurndownMonthlySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownMonthly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.monthly")
assert "Monthly Burndown" in self.read_buffer()[0]
class TestBurndownWeeklySimple(IntegrationTest):
def execute(self):
self.command("TaskWikiBurndownWeekly")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer burndown.weekly")
assert "Weekly Burndown" in self.read_buffer()[0]
class TestCalendarSimple(IntegrationTest):
def execute(self):
self.command("TaskWikiCalendar")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer calendar")
# Assert each day is displayed at least once.
output = self.read_buffer()
for day in map(str, range(1, 29)):
assert any(day in line for line in output)
class TestGhistorySimple(IntegrationTest):
tasks = [
dict(description="test task"),
dict(description="completed task 1", status="completed", end="now"),
dict(description="completed task 2", status="completed", end="now"),
dict(description="deleted task", status="deleted"),
]
def execute(self):
self.command("TaskWikiGhistoryAnnual")
assert self.command(":py print vim.current.buffer", silent=False).startswith("<buffer ghistory.annual")
output = self.read_buffer()
header_words = ("Year", "Number", "Added", "Completed", "Deleted")
for word in header_words:
assert word in output[0]
legend_words = ("Legend", "Added", "Completed", "Deleted")
for word in legend_words:
assert re.search(word, output[-1], re.IGNORECASE)
current_year = local_zone.localize(datetime.now()).year
assert str(current_year) in '\n'.join(output)
|
f5629d4fde2b2fddd84cce281443b8aa9dd5da76
|
python-practice/arrays.py
|
python-practice/arrays.py
|
def reverse_array(input_array):
left = 0
right = len(input_array) - 1 # Array is 0 indexed
tmp_array = input_array
while right > left:
tmp_array[left], tmp_array[right] = tmp_array[right], tmp_array[left]
left += 1
right -= 1
return tmp_array
def binary_search(input_array, target):
# Assumes an incrementally sorted array
array_length = len(input_array)
# Basic checks
if array_length == 0: # Empty array
return -1
if array_length == 1: # Singleton array
if input_array[0] == target:
return 0
return -1
if target > input_array[-1]: # Too big
return -1
if target < input_array[0]: # Too small
return -1
lo = 0
hi = array_length - 1
guess = -1
while lo <= hi:
guess = lo + int((hi - lo) / 2)
if input_array[guess] == target:
return guess
if input_array[guess] < target:
lo = guess + 1
else:
hi = guess - 1
return guess
if __name__ == "__main__":
print("Consider running tests instead")
input = [1, 4, 8, 10]
print(binary_search(input, 4))
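A few sanity checks for the two functions above (a sketch, not part of the recorded commit; it assumes reverse_array and binary_search are in scope, e.g. appended to the same file). Note that reverse_array modifies its argument in place and returns the same list object.
def _sanity_checks():
    data = [1, 2, 3]
    assert reverse_array(data) == [3, 2, 1]
    assert data == [3, 2, 1]  # the input list itself was reversed in place
    assert binary_search([1, 4, 8, 10], 8) == 2    # present element -> its index
    assert binary_search([1, 4, 8, 10], 99) == -1  # above the last element -> -1
    assert binary_search([], 5) == -1              # empty array -> -1
_sanity_checks()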
|
Create file - reverse in place, binary search
|
Create file - reverse in place, binary search
|
Python
|
mit
|
daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various
|
Create file - reverse in place, binary search
|
def reverse_array(input_array):
left = 0
right = len(input_array) - 1 # Array is 0 indexed
tmp_array = input_array
while right > left:
tmp_array[left], tmp_array[right] = tmp_array[right], tmp_array[left]
left += 1
right -= 1
return tmp_array
def binary_search(input_array, target):
# Assumes an incrementally sorted array
array_length = len(input_array)
# Basic checks
if array_length == 0: # Empty array
return -1
if array_length == 1: # Singleton array
if input_array[0] == target:
return 0
return -1
if target > input_array[-1]: # Too big
return -1
if target < input_array[0]: # Too small
return -1
lo = 0
hi = array_length - 1
guess = -1
while lo <= hi:
guess = lo + int((hi - lo) / 2)
if input_array[guess] == target:
return guess
if input_array[guess] < target:
lo = guess + 1
else:
hi = guess - 1
return guess
if __name__ == "__main__":
print("Consider running tests instead")
input = [1, 4, 8, 10]
print(binary_search(input, 4))
|
<commit_before><commit_msg>Create file - reverse in place, binary search<commit_after>
|
def reverse_array(input_array):
left = 0
right = len(input_array) - 1 # Array is 0 indexed
tmp_array = input_array
while right > left:
tmp_array[left], tmp_array[right] = tmp_array[right], tmp_array[left]
left += 1
right -= 1
return tmp_array
def binary_search(input_array, target):
# Assumes an incrementally sorted array
array_length = len(input_array)
# Basic checks
if array_length == 0: # Empty array
return -1
if array_length == 1: # Singleton array
if input_array[0] == target:
return 0
return -1
if target > input_array[-1]: # Too big
return -1
if target < input_array[0]: # Too small
return -1
lo = 0
hi = array_length - 1
guess = -1
while lo <= hi:
guess = lo + int((hi - lo) / 2)
if input_array[guess] == target:
return guess
if input_array[guess] < target:
lo = guess + 1
else:
hi = guess - 1
return guess
if __name__ == "__main__":
print("Consider running tests instead")
input = [1, 4, 8, 10]
print(binary_search(input, 4))
|
Create file - reverse in place, binary searchdef reverse_array(input_array):
left = 0
right = len(input_array) - 1 # Array is 0 indexed
tmp_array = input_array
while right > left:
tmp_array[left], tmp_array[right] = tmp_array[right], tmp_array[left]
left += 1
right -= 1
return tmp_array
def binary_search(input_array, target):
# Assumes an incrementally sorted array
array_length = len(input_array)
# Basic checks
if array_length == 0: # Empty array
return -1
if array_length == 1: # Singleton array
if input_array[0] == target:
return 0
return -1
if target > input_array[-1]: # Too big
return -1
if target < input_array[0]: # Too small
return -1
lo = 0
hi = array_length - 1
guess = -1
while lo <= hi:
guess = lo + int((hi - lo) / 2)
if input_array[guess] == target:
return guess
if input_array[guess] < target:
lo = guess + 1
else:
hi = guess - 1
return guess
if __name__ == "__main__":
print("Consider running tests instead")
input = [1, 4, 8, 10]
print(binary_search(input, 4))
|
<commit_before><commit_msg>Create file - reverse in place, binary search<commit_after>def reverse_array(input_array):
left = 0
right = len(input_array) - 1 # Array is 0 indexed
tmp_array = input_array
while right > left:
tmp_array[left], tmp_array[right] = tmp_array[right], tmp_array[left]
left += 1
right -= 1
return tmp_array
def binary_search(input_array, target):
# Assumes an incrementally sorted array
array_length = len(input_array)
# Basic checks
if array_length == 0: # Empty array
return -1
if array_length == 1: # Singleton array
if input_array[0] == target:
return 0
return -1
if target > input_array[-1]: # Too big
return -1
if target < input_array[0]: # Too small
return -1
lo = 0
hi = array_length - 1
guess = -1
while lo <= hi:
guess = lo + int((hi - lo) / 2)
if input_array[guess] == target:
return guess
if input_array[guess] < target:
lo = guess + 1
else:
hi = guess - 1
return guess
if __name__ == "__main__":
print("Consider running tests instead")
input = [1, 4, 8, 10]
print(binary_search(input, 4))
|
|
0d80c81bbc6280e13d1702a9df210980e5852174
|
utils/clear_redis.py
|
utils/clear_redis.py
|
"""Utility for clearing all keys out of redis -- do not use in production!"""
import sys
from optparse import OptionParser
import redis
def option_parser():
parser = OptionParser()
parser.add_option("-f", "--force",
action="store_true", dest="force", default=False,
help="Don't ask for confirmation.")
return parser
def main():
parser = option_parser()
options, args = parser.parse_args()
if args:
parser.print_help()
return 1
if not options.force:
confirm = raw_input("About to delete ALL redis keys. "
"Press Y to confirm, N to exit: ")
if confirm.lower() != 'y':
return 1
r_server = redis.Redis()
keys = r_server.keys()
for key in keys:
r_server.delete(key)
print "Deleted %i keys." % len(keys)
return 0
if __name__ == "__main__":
sys.exit(main())
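For comparison, a shorter alternative sketch (not part of the commit): redis-py also exposes flushdb(), which drops every key in the currently selected database in a single call; the same warning about production use applies.
import redis
def clear_current_db():
    r_server = redis.Redis()
    n_keys = r_server.dbsize()   # count keys before the flush, for reporting
    r_server.flushdb()           # drop every key in the selected database
    print("Deleted %i keys." % n_keys)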
|
Add utility for clearing out redis keys from tests.
|
Add utility for clearing out redis keys from tests.
|
Python
|
bsd-3-clause
|
harrissoerja/vumi,TouK/vumi,harrissoerja/vumi,TouK/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,TouK/vumi,vishwaprakashmishra/xmatrix,vishwaprakashmishra/xmatrix
|
Add utility for clearing out redis keys from tests.
|
"""Utility for clearing all keys out of redis -- do not use in production!"""
import sys
from optparse import OptionParser
import redis
def option_parser():
parser = OptionParser()
parser.add_option("-f", "--force",
action="store_true", dest="force", default=False,
help="Don't ask for confirmation.")
return parser
def main():
parser = option_parser()
options, args = parser.parse_args()
if args:
parser.print_help()
return 1
if not options.force:
confirm = raw_input("About to delete ALL redis keys. "
"Press Y to confirm, N to exit: ")
if confirm.lower() != 'y':
return 1
r_server = redis.Redis()
keys = r_server.keys()
for key in keys:
r_server.delete(key)
print "Deleted %i keys." % len(keys)
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add utility for clearing out redis keys from tests.<commit_after>
|
"""Utility for clearing all keys out of redis -- do not use in production!"""
import sys
from optparse import OptionParser
import redis
def option_parser():
parser = OptionParser()
parser.add_option("-f", "--force",
action="store_true", dest="force", default=False,
help="Don't ask for confirmation.")
return parser
def main():
parser = option_parser()
options, args = parser.parse_args()
if args:
parser.print_help()
return 1
if not options.force:
confirm = raw_input("About to delete ALL redis keys. "
"Press Y to confirm, N to exit: ")
if confirm.lower() != 'y':
return 1
r_server = redis.Redis()
keys = r_server.keys()
for key in keys:
r_server.delete(key)
print "Deleted %i keys." % len(keys)
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add utility for clearing out redis keys from tests."""Utility for clearing all keys out of redis -- do not use in production!"""
import sys
from optparse import OptionParser
import redis
def option_parser():
parser = OptionParser()
parser.add_option("-f", "--force",
action="store_true", dest="force", default=False,
help="Don't ask for confirmation.")
return parser
def main():
parser = option_parser()
options, args = parser.parse_args()
if args:
parser.print_help()
return 1
if not options.force:
confirm = raw_input("About to delete ALL redis keys. "
"Press Y to confirm, N to exit: ")
if confirm.lower() != 'y':
return 1
r_server = redis.Redis()
keys = r_server.keys()
for key in keys:
r_server.delete(key)
print "Deleted %i keys." % len(keys)
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add utility for clearing out redis keys from tests.<commit_after>"""Utility for clearing all keys out of redis -- do not use in production!"""
import sys
from optparse import OptionParser
import redis
def option_parser():
parser = OptionParser()
parser.add_option("-f", "--force",
action="store_true", dest="force", default=False,
help="Don't ask for confirmation.")
return parser
def main():
parser = option_parser()
options, args = parser.parse_args()
if args:
parser.print_help()
return 1
if not options.force:
confirm = raw_input("About to delete ALL redis keys. "
"Press Y to confirm, N to exit: ")
if confirm.lower() != 'y':
return 1
r_server = redis.Redis()
keys = r_server.keys()
for key in keys:
r_server.delete(key)
print "Deleted %i keys." % len(keys)
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
fb8c9def3c2de6af29bf98a2cecf13c9da48d65b
|
components/breakpad/tools/dmp2minidump.py
|
components/breakpad/tools/dmp2minidump.py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract minidumps from dmp crash dumps."""
import os
import sys
from cgi import parse_multipart
def ProcessDump(dump_file, minidump_file):
"""Extracts the part of the dump file that minidump_stackwalk can read.
The dump files generated by the breakpad integration multi-part form data
that include the minidump as file attachment.
Args:
dump_file: the dump file that needs to be processed.
minidump_file: the file to write the minidump to.
"""
try:
dump = open(dump_file, 'rb')
boundary = dump.readline().strip()[2:]
data = parse_multipart(dump, {'boundary': boundary})
except:
print 'Failed to read dmp file %s' % dump_file
return
if not 'upload_file_minidump' in data:
print 'Could not find minidump file in dump.'
return
f = open(minidump_file, 'w')
f.write("\r\n".join(data['upload_file_minidump']))
f.close()
def main():
if len(sys.argv) != 3:
print 'Usage: %s [dmp file] [minidump]' % sys.argv[0]
print ''
print 'Extracts the minidump stored in the crash dump file'
return 1
ProcessDump(sys.argv[1], sys.argv[2])
if '__main__' == __name__:
sys.exit(main())
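A brief usage sketch (file names are hypothetical; assumes ProcessDump from the script above is in scope):
ProcessDump('renderer_crash.dmp', 'renderer_crash.minidump')
# The extracted file can then be fed to Breakpad's stackwalker outside Python,
# e.g.:  minidump_stackwalk renderer_crash.minidump /path/to/symbols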
|
Add a script to extract a minidump from a crash dump
|
Add a script to extract a minidump from a crash dump
minidump_stackwalk doesn't know how to read dmp files generated by our
breakpad implementation. This tool makes it easier to analyze local
crashes.
BUG=247431
R=bauerb@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/40153002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230772 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,dednal/chromium.src,markYoungH/chromium.src,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,patrickm/chromium.src,anirudhSK/chromium,M4sse/chromium.src,Chilledheart/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,Chilledheart/chromium,hgl888/chromium-crosswalk,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,jaruba/chromium.src,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,dednal/chromium.src,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,Chilledheart/chromium,ChromiumWebApps/chromium,Jonekee/chromium.src,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,bright-sparks/chromium-spacewalk,littlstar/chromium.src,M4sse/chromium.src,anirudhSK/chromium,dednal/chromium.src,ltilve/chromium,ChromiumWebApps/chromium,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,Fireblend/chromium-crosswalk,M4sse/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,patrickm/chromium.src,Fireblend/chromium-crosswalk,anirudhSK/chromium,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,ChromiumWebApps/chromium,jaruba/chromium.src,ondra-novak/chromium.src,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,ltilve/chromium,PeterWangIntel/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,crosswalk-project/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,Just-D/chromium-1,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,patrickm/chromium.src,chuan9/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,patrickm/chromium.src,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,M4sse/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,axinging/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,jaruba/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,patrickm/chromium.src,ltilve/chromium,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,mohamed--abdel-maksoud/chromium.src,PeterWangIntel/chromium-crosswalk,dushu1203/chromium.src,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,krieger-od/nwjs_chromium.src,Chilledheart/chromium,dushu1203/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,hgl888/chromium-crosswalk-efl,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,ondra-novak/chromium.src,Just-D/chromium-1,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,dednal/chromium.
src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,ChromiumWebApps/chromium,jaruba/chromium.src,markYoungH/chromium.src,littlstar/chromium.src,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,anirudhSK/chromium,ltilve/chromium,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,Jonekee/chromium.src,hgl888/chromium-crosswalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk-efl,ltilve/chromium,littlstar/chromium.src,Just-D/chromium-1,anirudhSK/chromium,Just-D/chromium-1,ChromiumWebApps/chromium,jaruba/chromium.src,hgl888/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,markYoungH/chromium.src,markYoungH/chromium.src,Fireblend/chromium-crosswalk,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,bright-sparks/chromium-spacewalk,ltilve/chromium,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,Fireblend/chromium-crosswalk,Chilledheart/chromium,ondra-novak/chromium.src,M4sse/chromium.src,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,patrickm/chromium.src,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,chuan9/chromium-crosswalk,Just-D/chromium-1,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,ondra-novak/chromium.src,Jonekee/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,dednal/chromium.src,Pluto-tv/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,markYoungH/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,dushu1203/chromium.src,Fireblend/chromium-crosswalk,ondra-novak/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,anirudhSK/chromium,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,littlstar/chromium.src,jaruba/chromium.src,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,ChromiumWebApps/chromium,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,TheTypoMaster/chromium-crosswalk,patrickm/chromium.src,ltilve/chromium,bright-sparks/chromium-spacewalk,ltilve/chromium,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,anirudhSK/chromium,axinging/chromium-crosswalk,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,ondra-novak/chromium.src
|
Add a script to extract a minidump from a crash dump
minidump_stackwalk doesn't know how to read dmp files generated by our
breakpad implementation. This tool makes it easier to analyze local
crashes.
BUG=247431
R=bauerb@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/40153002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230772 0039d316-1c4b-4281-b951-d872f2087c98
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract minidumps from dmp crash dumps."""
import os
import sys
from cgi import parse_multipart
def ProcessDump(dump_file, minidump_file):
"""Extracts the part of the dump file that minidump_stackwalk can read.
The dump files generated by the breakpad integration multi-part form data
that include the minidump as file attachment.
Args:
dump_file: the dump file that needs to be processed.
minidump_file: the file to write the minidump to.
"""
try:
dump = open(dump_file, 'rb')
boundary = dump.readline().strip()[2:]
data = parse_multipart(dump, {'boundary': boundary})
except:
print 'Failed to read dmp file %s' % dump_file
return
if not 'upload_file_minidump' in data:
print 'Could not find minidump file in dump.'
return
f = open(minidump_file, 'w')
f.write("\r\n".join(data['upload_file_minidump']))
f.close()
def main():
if len(sys.argv) != 3:
print 'Usage: %s [dmp file] [minidump]' % sys.argv[0]
print ''
print 'Extracts the minidump stored in the crash dump file'
return 1
ProcessDump(sys.argv[1], sys.argv[2])
if '__main__' == __name__:
sys.exit(main())
|
<commit_before><commit_msg>Add a script to extract a minidump from a crash dump
minidump_stackwalk doesn't know how to read dmp files generated by our
breakpad implementation. This tool makes it easier to analyze local
crashes.
BUG=247431
R=bauerb@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/40153002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230772 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract minidumps from dmp crash dumps."""
import os
import sys
from cgi import parse_multipart
def ProcessDump(dump_file, minidump_file):
"""Extracts the part of the dump file that minidump_stackwalk can read.
The dump files generated by the breakpad integration multi-part form data
that include the minidump as file attachment.
Args:
dump_file: the dump file that needs to be processed.
minidump_file: the file to write the minidump to.
"""
try:
dump = open(dump_file, 'rb')
boundary = dump.readline().strip()[2:]
data = parse_multipart(dump, {'boundary': boundary})
except:
print 'Failed to read dmp file %s' % dump_file
return
if not 'upload_file_minidump' in data:
print 'Could not find minidump file in dump.'
return
f = open(minidump_file, 'w')
f.write("\r\n".join(data['upload_file_minidump']))
f.close()
def main():
if len(sys.argv) != 3:
print 'Usage: %s [dmp file] [minidump]' % sys.argv[0]
print ''
print 'Extracts the minidump stored in the crash dump file'
return 1
ProcessDump(sys.argv[1], sys.argv[2])
if '__main__' == __name__:
sys.exit(main())
|
Add a script to extract a minidump from a crash dump
minidump_stackwalk doesn't know how to read dmp files generated by our
breakpad implementation. This tool makes it easier to analyze local
crashes.
BUG=247431
R=bauerb@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/40153002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230772 0039d316-1c4b-4281-b951-d872f2087c98#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract minidumps from dmp crash dumps."""
import os
import sys
from cgi import parse_multipart
def ProcessDump(dump_file, minidump_file):
"""Extracts the part of the dump file that minidump_stackwalk can read.
The dump files generated by the breakpad integration multi-part form data
that include the minidump as file attachment.
Args:
dump_file: the dump file that needs to be processed.
minidump_file: the file to write the minidump to.
"""
try:
dump = open(dump_file, 'rb')
boundary = dump.readline().strip()[2:]
data = parse_multipart(dump, {'boundary': boundary})
except:
print 'Failed to read dmp file %s' % dump_file
return
if not 'upload_file_minidump' in data:
print 'Could not find minidump file in dump.'
return
f = open(minidump_file, 'w')
f.write("\r\n".join(data['upload_file_minidump']))
f.close()
def main():
if len(sys.argv) != 3:
print 'Usage: %s [dmp file] [minidump]' % sys.argv[0]
print ''
print 'Extracts the minidump stored in the crash dump file'
return 1
ProcessDump(sys.argv[1], sys.argv[2])
if '__main__' == __name__:
sys.exit(main())
|
<commit_before><commit_msg>Add a script to extract a minidump from a crash dump
minidump_stackwalk doesn't know how to read dmp files generated by our
breakpad implementation. This tool makes it easier to analyze local
crashes.
BUG=247431
R=bauerb@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/40153002
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@230772 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract minidumps from dmp crash dumps."""
import os
import sys
from cgi import parse_multipart
def ProcessDump(dump_file, minidump_file):
"""Extracts the part of the dump file that minidump_stackwalk can read.
The dump files generated by the breakpad integration multi-part form data
that include the minidump as file attachment.
Args:
dump_file: the dump file that needs to be processed.
minidump_file: the file to write the minidump to.
"""
try:
dump = open(dump_file, 'rb')
boundary = dump.readline().strip()[2:]
data = parse_multipart(dump, {'boundary': boundary})
except:
print 'Failed to read dmp file %s' % dump_file
return
if not 'upload_file_minidump' in data:
print 'Could not find minidump file in dump.'
return
f = open(minidump_file, 'w')
f.write("\r\n".join(data['upload_file_minidump']))
f.close()
def main():
if len(sys.argv) != 3:
print 'Usage: %s [dmp file] [minidump]' % sys.argv[0]
print ''
print 'Extracts the minidump stored in the crash dump file'
return 1
ProcessDump(sys.argv[1], sys.argv[2])
if '__main__' == __name__:
sys.exit(main())
|
|
6aa0627b35dfa46c0b5c777ef1fd111ea263bcb2
|
PunchCard-test.py
|
PunchCard-test.py
|
import unittest
import PunchCard
class CaclWorkTimeTests(unittest.TestCase):
def test_returnsTwo_givenEightAndTenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '10:00'), 2)
def test_returnsZero_givenEightAndEightOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '8:00'), 0)
def test_returnsFour_givenTenAndTwoOclock(self):
self.assertEqual(PunchCard.calcWorkTime('10:00', '2:00'), 4)
def test_returnsTwoAndHalf_givenEightThirtyAndElevenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:30', '11:00'), 2.5)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calcWorkTime('a', '10:00')
class CalculateDayTests(unittest.TestCase):
def test_returnsZero_givenEmptyArray(self):
self.assertEqual(PunchCard.calculateDay([]), 0.0)
def test_returnsZero_givenTwoEqualTimeEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '8:00']), 0.0)
def test_returnsEight_givenFourEntries(self):
dayEntry = ['8:00', '12:00', '1:00', '5:00']
self.assertEqual(PunchCard.calculateDay(dayEntry), 8.0)
def test_returnsEight_givenTwoEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '4:00']), 8.0)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calculateDay(['a', '10:00'])
if __name__ == '__main__':
unittest.main()
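For context, one possible PunchCard implementation that would satisfy the tests above -- a hypothetical sketch only, since the real module is not shown in this record; it assumes a 12-hour clock, so an end time smaller than the start time wraps around (e.g. '10:00' to '2:00' is 4 hours).
def _to_hours(time_str):
    hours, minutes = time_str.split(':')   # malformed input raises ValueError here
    return int(hours) + int(minutes) / 60.0
def calcWorkTime(start, end):
    diff = _to_hours(end) - _to_hours(start)
    if diff < 0:          # 12-hour clock wrap, e.g. '10:00' -> '2:00'
        diff += 12
    return diff
def calculateDay(entries):
    total = 0.0
    for i in range(0, len(entries) - 1, 2):   # consecutive (punch-in, punch-out) pairs
        total += calcWorkTime(entries[i], entries[i + 1])
    return total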
|
Add test file with tests using unittest module
|
Add test file with tests using unittest module
Test calcWorkTime method. Test calculateDay method.
|
Python
|
mit
|
NLSteveO/PunchCard,NLSteveO/PunchCard
|
Add test file with tests using unittest module
Test calcWorkTime method. Test calculateDay method.
|
import unittest
import PunchCard
class CaclWorkTimeTests(unittest.TestCase):
def test_returnsTwo_givenEightAndTenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '10:00'), 2)
def test_returnsZero_givenEightAndEightOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '8:00'), 0)
def test_returnsFour_givenTenAndTwoOclock(self):
self.assertEqual(PunchCard.calcWorkTime('10:00', '2:00'), 4)
def test_returnsTwoAndHalf_givenEightThirtyAndElevenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:30', '11:00'), 2.5)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calcWorkTime('a', '10:00')
class CalculateDayTests(unittest.TestCase):
def test_returnsZero_givenEmptyArray(self):
self.assertEqual(PunchCard.calculateDay([]), 0.0)
def test_returnsZero_givenTwoEqualTimeEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '8:00']), 0.0)
def test_returnsEight_givenFourEntries(self):
dayEntry = ['8:00', '12:00', '1:00', '5:00']
self.assertEqual(PunchCard.calculateDay(dayEntry), 8.0)
def test_returnsEight_givenTwoEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '4:00']), 8.0)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calculateDay(['a', '10:00'])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test file with tests using unittest module
Test calcWorkTime method. Test calculateDay method.<commit_after>
|
import unittest
import PunchCard
class CaclWorkTimeTests(unittest.TestCase):
def test_returnsTwo_givenEightAndTenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '10:00'), 2)
def test_returnsZero_givenEightAndEightOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '8:00'), 0)
def test_returnsFour_givenTenAndTwoOclock(self):
self.assertEqual(PunchCard.calcWorkTime('10:00', '2:00'), 4)
def test_returnsTwoAndHalf_givenEightThirtyAndElevenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:30', '11:00'), 2.5)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calcWorkTime('a', '10:00')
class CalculateDayTests(unittest.TestCase):
def test_returnsZero_givenEmptyArray(self):
self.assertEqual(PunchCard.calculateDay([]), 0.0)
def test_returnsZero_givenTwoEqualTimeEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '8:00']), 0.0)
def test_returnsEight_givenFourEntries(self):
dayEntry = ['8:00', '12:00', '1:00', '5:00']
self.assertEqual(PunchCard.calculateDay(dayEntry), 8.0)
def test_returnsEight_givenTwoEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '4:00']), 8.0)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calculateDay(['a', '10:00'])
if __name__ == '__main__':
unittest.main()
|
Add test file with tests using unittest module
Test calcWorkTime method. Test calculateDay method.import unittest
import PunchCard
class CaclWorkTimeTests(unittest.TestCase):
def test_returnsTwo_givenEightAndTenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '10:00'), 2)
def test_returnsZero_givenEightAndEightOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '8:00'), 0)
def test_returnsFour_givenTenAndTwoOclock(self):
self.assertEqual(PunchCard.calcWorkTime('10:00', '2:00'), 4)
def test_returnsTwoAndHalf_givenEightThirtyAndElevenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:30', '11:00'), 2.5)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calcWorkTime('a', '10:00')
class CalculateDayTests(unittest.TestCase):
def test_returnsZero_givenEmptyArray(self):
self.assertEqual(PunchCard.calculateDay([]), 0.0)
def test_returnsZero_givenTwoEqualTimeEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '8:00']), 0.0)
def test_returnsEight_givenFourEntries(self):
dayEntry = ['8:00', '12:00', '1:00', '5:00']
self.assertEqual(PunchCard.calculateDay(dayEntry), 8.0)
def test_returnsEight_givenTwoEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '4:00']), 8.0)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calculateDay(['a', '10:00'])
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test file with tests using unittest module
Test calcWorkTime method. Test calculateDay method.<commit_after>import unittest
import PunchCard
class CaclWorkTimeTests(unittest.TestCase):
def test_returnsTwo_givenEightAndTenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '10:00'), 2)
def test_returnsZero_givenEightAndEightOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:00', '8:00'), 0)
def test_returnsFour_givenTenAndTwoOclock(self):
self.assertEqual(PunchCard.calcWorkTime('10:00', '2:00'), 4)
def test_returnsTwoAndHalf_givenEightThirtyAndElevenOclock(self):
self.assertEqual(PunchCard.calcWorkTime('8:30', '11:00'), 2.5)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calcWorkTime('a', '10:00')
class CalculateDayTests(unittest.TestCase):
def test_returnsZero_givenEmptyArray(self):
self.assertEqual(PunchCard.calculateDay([]), 0.0)
def test_returnsZero_givenTwoEqualTimeEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '8:00']), 0.0)
def test_returnsEight_givenFourEntries(self):
dayEntry = ['8:00', '12:00', '1:00', '5:00']
self.assertEqual(PunchCard.calculateDay(dayEntry), 8.0)
def test_returnsEight_givenTwoEntries(self):
self.assertEqual(PunchCard.calculateDay(['8:00', '4:00']), 8.0)
def test_throwsError_givenAnIncorrectTime(self):
with self.assertRaises(ValueError):
PunchCard.calculateDay(['a', '10:00'])
if __name__ == '__main__':
unittest.main()
|
|
93f385349d0adfd145eeb0ee72a0b6b132286900
|
lib/tarSupport.py
|
lib/tarSupport.py
|
import os
import sys
import tarfile
import cStringIO
class GlideinTar:
"""
potential exception needs to be caught by calling routine
"""
def __init__(self):
self.strings = {}
self.files = []
def add_file(self, filename, arc_dirname):
if os.path.exists(filename):
            self.files.append((filename, arc_dirname))
def add_string(self, name, string_data):
self.strings[name] = string_data
def create_tar(self, tf):
for file in self.files:
file, dirname = file
if dirname:
tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
else:
tf.add(file)
for filename, string in self.strings.items():
fd_str = cStringIO.StringIO(string)
fd_str.seek(0)
ti = tarfile.TarInfo()
ti.size = len(string)
ti.name = filename
ti.type = tarfile.REGTYPE
tf.addfile(ti, fd_str)
def create_tar_file(self, fd):
tf = tarfile.open(fileobj=fd, mode="w:gz")
self.create_tar(tf)
tf.close()
def create_tar_blob(self):
from cStringIO import StringIO
file_out = StringIO()
tf = tarfile.open(fileobj=file_out, mode="w:gz")
self.create_tar(tf)
tf.close()
return file_out.getvalue()
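A minimal usage sketch (not part of the commit; the member name and payload are made up, and it assumes the GlideinTar class above is importable):
tar = GlideinTar()
tar.add_string("glidein_startup.sh", "#!/bin/sh\necho hello\n")  # in-memory member
blob = tar.create_tar_blob()          # gzipped tarball as a byte string
with open("payload.tar.gz", "wb") as out:
    out.write(blob)                   # equivalent to calling create_tar_file(out)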
|
Add the ability to create Tar files
|
Add the ability to create Tar files
|
Python
|
bsd-3-clause
|
holzman/glideinwms-old,holzman/glideinwms-old,holzman/glideinwms-old
|
Add the ability to create Tar files
|
import os
import sys
import tarfile
import cStringIO
class GlideinTar:
"""
potential exception needs to be caught by calling routine
"""
def __init__(self):
self.strings = {}
self.files = []
def add_file(self, filename, arc_dirname):
if os.path.exists(filename):
            self.files.append((filename, arc_dirname))
def add_string(self, name, string_data):
self.strings[name] = string_data
def create_tar(self, tf):
for file in self.files:
file, dirname = file
if dirname:
tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
else:
tf.add(file)
for filename, string in self.strings.items():
fd_str = cStringIO.StringIO(string)
fd_str.seek(0)
ti = tarfile.TarInfo()
ti.size = len(string)
ti.name = filename
ti.type = tarfile.REGTYPE
tf.addfile(ti, fd_str)
def create_tar_file(self, fd):
tf = tarfile.open(fileobj=fd, mode="w:gz")
self.create_tar(tf)
tf.close()
def create_tar_blob(self):
from cStringIO import StringIO
file_out = StringIO()
tf = tarfile.open(fileobj=file_out, mode="w:gz")
self.create_tar(tf)
tf.close()
return file_out.getvalue()
|
<commit_before><commit_msg>Add the ability to create Tar files<commit_after>
|
import os
import sys
import tarfile
import cStringIO
class GlideinTar:
"""
potential exception needs to be caught by calling routine
"""
def __init__(self):
self.strings = {}
self.files = []
def add_file(self, filename, arc_dirname):
if os.path.exists(filename):
            self.files.append((filename, arc_dirname))
def add_string(self, name, string_data):
self.strings[name] = string_data
def create_tar(self, tf):
for file in self.files:
file, dirname = file
if dirname:
tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
else:
tf.add(file)
for filename, string in self.strings.items():
fd_str = cStringIO.StringIO(string)
fd_str.seek(0)
ti = tarfile.TarInfo()
ti.size = len(string)
ti.name = filename
ti.type = tarfile.REGTYPE
tf.addfile(ti, fd_str)
def create_tar_file(self, fd):
tf = tarfile.open(fileobj=fd, mode="w:gz")
self.create_tar(tf)
tf.close()
def create_tar_blob(self):
from cStringIO import StringIO
file_out = StringIO()
tf = tarfile.open(fileobj=file_out, mode="w:gz")
self.create_tar(tf)
tf.close()
return file_out.getvalue()
|
Add the ability to create Tar filesimport os
import sys
import tarfile
import cStringIO
class GlideinTar:
"""
potential exception needs to be caught by calling routine
"""
def __init__(self):
self.strings = {}
self.files = []
def add_file(self, filename, arc_dirname):
if os.path.exists(filename):
            self.files.append((filename, arc_dirname))
def add_string(self, name, string_data):
self.strings[name] = string_data
def create_tar(self, tf):
for file in self.files:
file, dirname = file
if dirname:
tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
else:
tf.add(file)
for filename, string in self.strings.items():
fd_str = cStringIO.StringIO(string)
fd_str.seek(0)
ti = tarfile.TarInfo()
ti.size = len(string)
ti.name = filename
ti.type = tarfile.REGTYPE
tf.addfile(ti, fd_str)
def create_tar_file(self, fd):
tf = tarfile.open(fileobj=fd, mode="w:gz")
self.create_tar(tf)
tf.close()
def create_tar_blob(self):
from cStringIO import StringIO
file_out = StringIO()
tf = tarfile.open(fileobj=file_out, mode="w:gz")
self.create_tar(tf)
tf.close()
return file_out.getvalue()
|
<commit_before><commit_msg>Add the ability to create Tar files<commit_after>import os
import sys
import tarfile
import cStringIO
class GlideinTar:
"""
potential exception needs to be caught by calling routine
"""
def __init__(self):
self.strings = {}
self.files = []
def add_file(self, filename, arc_dirname):
if os.path.exists(filename):
            self.files.append((filename, arc_dirname))
def add_string(self, name, string_data):
self.strings[name] = string_data
def create_tar(self, tf):
for file in self.files:
file, dirname = file
if dirname:
tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
else:
tf.add(file)
for filename, string in self.strings.items():
fd_str = cStringIO.StringIO(string)
fd_str.seek(0)
ti = tarfile.TarInfo()
ti.size = len(string)
ti.name = filename
ti.type = tarfile.REGTYPE
tf.addfile(ti, fd_str)
def create_tar_file(self, fd):
tf = tarfile.open(fileobj=fd, mode="w:gz")
self.create_tar(tf)
tf.close()
def create_tar_blob(self):
from cStringIO import StringIO
file_out = StringIO()
tf = tarfile.open(fileobj=file_out, mode="w:gz")
self.create_tar(tf)
tf.close()
return file_out.getvalue()
|
|
04b4594054dd2cb2161ce52947e0f29d97320cf2
|
scripts/check_finished.py
|
scripts/check_finished.py
|
import os
import sys
def check_finished(sim_dir, file_name='lammps_out.txt'):
dir_list = os.listdir(sim_dir)
if file_name in dir_list:
with open(os.path.join(sim_dir, file_name), 'r') as lout:
lammps_lines = lout.readlines()
if len(lammps_lines) > 0:
if 'Total wall time' in lammps_lines[-1]:
print('%-20s -> finished' % os.path.basename(sim_dir))
else:
print('%-20s -> running' % os.path.basename(sim_dir))
else:
print('%-20s -> NOT started' % os.path.basename(sim_dir))
elif all([os.path.isdir(os.path.join(sim_dir, f)) for f in dir_list]):
print('########## %s ##########' % sim_dir)
for sdir in dir_list:
check_finished(os.path.join(sim_dir, sdir))
else:
print('Something wrong with the formatting of directories...')
check_finished(sys.argv[-1])
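A self-contained sketch of how the checker can be exercised (directory names and file contents are invented for illustration; assumes check_finished above is in scope):
import os
import tempfile
root = tempfile.mkdtemp()                      # stands in for a simulation directory
run_dir = os.path.join(root, 'run_01')
os.makedirs(run_dir)
with open(os.path.join(run_dir, 'lammps_out.txt'), 'w') as lout:
    lout.write('...\nTotal wall time: 0:42:17\n')
check_finished(root)    # prints a header for root, then "run_01 ... -> finished"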
|
Add script to check if cluster jobs are finished.
|
Add script to check if cluster jobs are finished.
|
Python
|
mit
|
kbsezginel/tee_mof,kbsezginel/tee_mof
|
Add script to check if cluster jobs are finished.
|
import os
import sys
def check_finished(sim_dir, file_name='lammps_out.txt'):
dir_list = os.listdir(sim_dir)
if file_name in dir_list:
with open(os.path.join(sim_dir, file_name), 'r') as lout:
lammps_lines = lout.readlines()
if len(lammps_lines) > 0:
if 'Total wall time' in lammps_lines[-1]:
print('%-20s -> finished' % os.path.basename(sim_dir))
else:
print('%-20s -> running' % os.path.basename(sim_dir))
else:
print('%-20s -> NOT started' % os.path.basename(sim_dir))
elif all([os.path.isdir(os.path.join(sim_dir, f)) for f in dir_list]):
print('########## %s ##########' % sim_dir)
for sdir in dir_list:
check_finished(os.path.join(sim_dir, sdir))
else:
print('Something wrong with the formatting of directories...')
check_finished(sys.argv[-1])
|
<commit_before><commit_msg>Add script to check if cluster jobs are finished.<commit_after>
|
import os
import sys
def check_finished(sim_dir, file_name='lammps_out.txt'):
dir_list = os.listdir(sim_dir)
if file_name in dir_list:
with open(os.path.join(sim_dir, file_name), 'r') as lout:
lammps_lines = lout.readlines()
if len(lammps_lines) > 0:
if 'Total wall time' in lammps_lines[-1]:
print('%-20s -> finished' % os.path.basename(sim_dir))
else:
print('%-20s -> running' % os.path.basename(sim_dir))
else:
print('%-20s -> NOT started' % os.path.basename(sim_dir))
elif all([os.path.isdir(os.path.join(sim_dir, f)) for f in dir_list]):
print('########## %s ##########' % sim_dir)
for sdir in dir_list:
check_finished(os.path.join(sim_dir, sdir))
else:
print('Something wrong with the formatting of directories...')
check_finished(sys.argv[-1])
|
Add script to check if cluster jobs are finished.import os
import sys
def check_finished(sim_dir, file_name='lammps_out.txt'):
dir_list = os.listdir(sim_dir)
if file_name in dir_list:
with open(os.path.join(sim_dir, file_name), 'r') as lout:
lammps_lines = lout.readlines()
if len(lammps_lines) > 0:
if 'Total wall time' in lammps_lines[-1]:
print('%-20s -> finished' % os.path.basename(sim_dir))
else:
print('%-20s -> running' % os.path.basename(sim_dir))
else:
print('%-20s -> NOT started' % os.path.basename(sim_dir))
elif all([os.path.isdir(os.path.join(sim_dir, f)) for f in dir_list]):
print('########## %s ##########' % sim_dir)
for sdir in dir_list:
check_finished(os.path.join(sim_dir, sdir))
else:
print('Something wrong with the formatting of directories...')
check_finished(sys.argv[-1])
|
<commit_before><commit_msg>Add script to check if cluster jobs are finished.<commit_after>import os
import sys
def check_finished(sim_dir, file_name='lammps_out.txt'):
dir_list = os.listdir(sim_dir)
if file_name in dir_list:
with open(os.path.join(sim_dir, file_name), 'r') as lout:
lammps_lines = lout.readlines()
if len(lammps_lines) > 0:
if 'Total wall time' in lammps_lines[-1]:
print('%-20s -> finished' % os.path.basename(sim_dir))
else:
print('%-20s -> running' % os.path.basename(sim_dir))
else:
print('%-20s -> NOT started' % os.path.basename(sim_dir))
elif all([os.path.isdir(os.path.join(sim_dir, f)) for f in dir_list]):
print('########## %s ##########' % sim_dir)
for sdir in dir_list:
check_finished(os.path.join(sim_dir, sdir))
else:
print('Something wrong with the formatting of directories...')
check_finished(sys.argv[-1])
|
|
88fb394d69214fea35a5c68cb0497c45e7b1987f
|
tests/test_request_handler.py
|
tests/test_request_handler.py
|
from fake_webapp import EXAMPLE_APP
from nose.tools import assert_equals
from splinter.request_handler.request_handler import RequestHandler
class RequestHandlerTest(object):
def setUp(self):
self.request_handler = RequestHandler(EXAMPLE_APP)
def test_should_receive_an_url_and_get_an_200_response(self):
assert_equals(self.request_handler.status_code, 200)
def test_should_start_a_request_and_with_localhost_and_get_localhost_as_hostname(self):
assert_equals(self.request_handler.host, "localhost")
|
Test file for created package
|
Test file for created package
|
Python
|
bsd-3-clause
|
bubenkoff/splinter,cobrateam/splinter,bmcculley/splinter,bmcculley/splinter,lrowe/splinter,gjvis/splinter,myself659/splinter,nikolas/splinter,nikolas/splinter,underdogio/splinter,myself659/splinter,drptbl/splinter,myself659/splinter,objarni/splinter,objarni/splinter,drptbl/splinter,bubenkoff/splinter,objarni/splinter,underdogio/splinter,gjvis/splinter,nikolas/splinter,drptbl/splinter,lrowe/splinter,lrowe/splinter,gjvis/splinter,cobrateam/splinter,underdogio/splinter,bmcculley/splinter,cobrateam/splinter
|
Test file for created package
|
from fake_webapp import EXAMPLE_APP
from nose.tools import assert_equals
from splinter.request_handler.request_handler import RequestHandler
class RequestHandlerTest(object):
def setUp(self):
self.request_handler = RequestHandler(EXAMPLE_APP)
def test_should_receive_an_url_and_get_an_200_response(self):
assert_equals(self.request_handler.status_code, 200)
def test_should_start_a_request_and_with_localhost_and_get_localhost_as_hostname(self):
assert_equals(self.request_handler.host, "localhost")
|
<commit_before><commit_msg>Test file for created package<commit_after>
|
from fake_webapp import EXAMPLE_APP
from nose.tools import assert_equals
from splinter.request_handler.request_handler import RequestHandler
class RequestHandlerTest(object):
def setUp(self):
self.request_handler = RequestHandler(EXAMPLE_APP)
def test_should_receive_an_url_and_get_an_200_response(self):
assert_equals(self.request_handler.status_code, 200)
def test_should_start_a_request_and_with_localhost_and_get_localhost_as_hostname(self):
assert_equals(self.request_handler.host, "localhost")
|
Test file for created packagefrom fake_webapp import EXAMPLE_APP
from nose.tools import assert_equals
from splinter.request_handler.request_handler import RequestHandler
class RequestHandlerTest(object):
def setUp(self):
self.request_handler = RequestHandler(EXAMPLE_APP)
def test_should_receive_an_url_and_get_an_200_response(self):
assert_equals(self.request_handler.status_code, 200)
def test_should_start_a_request_and_with_localhost_and_get_localhost_as_hostname(self):
assert_equals(self.request_handler.host, "localhost")
|
<commit_before><commit_msg>Test file for created package<commit_after>from fake_webapp import EXAMPLE_APP
from nose.tools import assert_equals
from splinter.request_handler.request_handler import RequestHandler
class RequestHandlerTest(object):
def setUp(self):
self.request_handler = RequestHandler(EXAMPLE_APP)
def test_should_receive_an_url_and_get_an_200_response(self):
assert_equals(self.request_handler.status_code, 200)
def test_should_start_a_request_and_with_localhost_and_get_localhost_as_hostname(self):
assert_equals(self.request_handler.host, "localhost")
|
|
64b76d0ee7fa28d5338048a9eff7d62e3713e9ca
|
2017/bfjit/plots/plot-runtime.py
|
2017/bfjit/plots/plot-runtime.py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn
mandelbrot_runtime = (38.6, 18.4)
factor_runtime = (16.5, 6.7)
N = len(mandelbrot_runtime)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, mandelbrot_runtime, width)
rects2 = ax.bar(ind + width, factor_runtime, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Run-time (sec)', fontsize=14)
ax.set_xticks(ind + width)
ax.set_xticklabels(('mandelbrot', 'factor'), fontsize=14)
ax.legend((rects1[0], rects2[0]), ('simpleinterp', 'optinterp'), fontsize=14)
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-runtime.png', dpi=80)
plt.show()
|
Add a script to plot runtimes with matplotlib
|
Add a script to plot runtimes with matplotlib
|
Python
|
unlicense
|
eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog
|
Add a script to plot runtimes with matplotlib
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn
mandelbrot_runtime = (38.6, 18.4)
factor_runtime = (16.5, 6.7)
N = len(mandelbrot_runtime)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, mandelbrot_runtime, width)
rects2 = ax.bar(ind + width, factor_runtime, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Run-time (sec)', fontsize=14)
ax.set_xticks(ind + width)
ax.set_xticklabels(('mandelbrot', 'factor'), fontsize=14)
ax.legend((rects1[0], rects2[0]), ('simpleinterp', 'optinterp'), fontsize=14)
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-runtime.png', dpi=80)
plt.show()
|
<commit_before><commit_msg>Add a script to plot runtimes with matplotlib<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn
mandelbrot_runtime = (38.6, 18.4)
factor_runtime = (16.5, 6.7)
N = len(mandelbrot_runtime)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, mandelbrot_runtime, width)
rects2 = ax.bar(ind + width, factor_runtime, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Run-time (sec)', fontsize=14)
ax.set_xticks(ind + width)
ax.set_xticklabels(('mandelbrot', 'factor'), fontsize=14)
ax.legend((rects1[0], rects2[0]), ('simpleinterp', 'optinterp'), fontsize=14)
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-runtime.png', dpi=80)
plt.show()
|
Add a script to plot runtimes with matplotlibimport numpy as np
import matplotlib.pyplot as plt
import seaborn
mandelbrot_runtime = (38.6, 18.4)
factor_runtime = (16.5, 6.7)
N = len(mandelbrot_runtime)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, mandelbrot_runtime, width)
rects2 = ax.bar(ind + width, factor_runtime, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Run-time (sec)', fontsize=14)
ax.set_xticks(ind + width)
ax.set_xticklabels(('mandelbrot', 'factor'), fontsize=14)
ax.legend((rects1[0], rects2[0]), ('simpleinterp', 'optinterp'), fontsize=14)
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-runtime.png', dpi=80)
plt.show()
|
<commit_before><commit_msg>Add a script to plot runtimes with matplotlib<commit_after>import numpy as np
import matplotlib.pyplot as plt
import seaborn
mandelbrot_runtime = (38.6, 18.4)
factor_runtime = (16.5, 6.7)
N = len(mandelbrot_runtime)
ind = np.arange(N) # the x locations for the groups
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, mandelbrot_runtime, width)
rects2 = ax.bar(ind + width, factor_runtime, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Run-time (sec)', fontsize=14)
ax.set_xticks(ind + width)
ax.set_xticklabels(('mandelbrot', 'factor'), fontsize=14)
ax.legend((rects1[0], rects2[0]), ('simpleinterp', 'optinterp'), fontsize=14)
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-runtime.png', dpi=80)
plt.show()
|
|
fcf7dce14ae430fd6710aa55d37523386ace6489
|
CASA_functions/extract_from_ms.py
|
CASA_functions/extract_from_ms.py
|
import numpy as np
from taskinit import mstool
def extract_ms_data(vis, columns, field="", **kwargs):
'''
Extract data points from an MS.
'''
myms = mstool()
myms.open(vis)
myms.selectinit(0)
selection_dict = dict(field=field)
selection_dict.update(kwargs)
assert myms.msselect(selection_dict), "Data selection has failed"
if not isinstance(columns, list):
columns = list(columns)
datadict = myms.getdata(columns)
myms.close()
for column in columns:
np.save("{0}_{1}.npy".format(vis.rstrip(".ms"), column.lower()),
datadict[column])
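A usage sketch (to be run inside a CASA session, where mstool is available; the measurement set and field names are hypothetical):
# Pass column names as a list -- a bare string would be split into single
# characters by the list() call above.
extract_ms_data('3c286_band_l.ms', ['DATA', 'UVW'], field='3C286')
# -> writes 3c286_band_l_data.npy and 3c286_band_l_uvw.npy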
|
Save data in an MS into a numpy file
|
Save data in an MS into a numpy file
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Save data in an MS into a numpy file
|
import numpy as np
from taskinit import mstool
def extract_ms_data(vis, columns, field="", **kwargs):
'''
Extract data points from an MS.
'''
myms = mstool()
myms.open(vis)
myms.selectinit(0)
selection_dict = dict(field=field)
selection_dict.update(kwargs)
assert myms.msselect(selection_dict), "Data selection has failed"
if not isinstance(columns, list):
columns = list(columns)
datadict = myms.getdata(columns)
myms.close()
for column in columns:
np.save("{0}_{1}.npy".format(vis.rstrip(".ms"), column.lower()),
datadict[column])
|
<commit_before><commit_msg>Save data in an MS into a numpy file<commit_after>
|
import numpy as np
from taskinit import mstool
def extract_ms_data(vis, columns, field="", **kwargs):
'''
Extract data points from an MS.
'''
myms = mstool()
myms.open(vis)
myms.selectinit(0)
selection_dict = dict(field=field)
selection_dict.update(kwargs)
assert myms.msselect(selection_dict), "Data selection has failed"
if not isinstance(columns, list):
columns = list(columns)
datadict = myms.getdata(columns)
myms.close()
for column in columns:
np.save("{0}_{1}.npy".format(vis.rstrip(".ms"), column.lower()),
datadict[column])
|
Save data in an MS into a numpy file
import numpy as np
from taskinit import mstool
def extract_ms_data(vis, columns, field="", **kwargs):
'''
Extract data points from an MS.
'''
myms = mstool()
myms.open(vis)
myms.selectinit(0)
selection_dict = dict(field=field)
selection_dict.update(kwargs)
assert myms.msselect(selection_dict), "Data selection has failed"
if not isinstance(columns, list):
columns = list(columns)
datadict = myms.getdata(columns)
myms.close()
for column in columns:
np.save("{0}_{1}.npy".format(vis.rstrip(".ms"), column.lower()),
datadict[column])
|
<commit_before><commit_msg>Save data in an MS into a numpy file<commit_after>
import numpy as np
from taskinit import mstool
def extract_ms_data(vis, columns, field="", **kwargs):
'''
Extract data points from an MS.
'''
myms = mstool()
myms.open(vis)
myms.selectinit(0)
selection_dict = dict(field=field)
selection_dict.update(kwargs)
assert myms.msselect(selection_dict), "Data selection has failed"
if not isinstance(columns, list):
columns = list(columns)
datadict = myms.getdata(columns)
myms.close()
for column in columns:
np.save("{0}_{1}.npy".format(vis.rstrip(".ms"), column.lower()),
datadict[column])
|
|
894c05c24de3e4b1d47b97f42c39389fdb73d2ed
|
alembic/versions/542cdf68faa5_salt_passwords.py
|
alembic/versions/542cdf68faa5_salt_passwords.py
|
"""Salt passwords
Revision ID: 542cdf68faa5
Revises: 26745016c3ce
Create Date: 2014-04-16 15:15:05.393213
"""
import string
import random
from hashlib import new as new_hash
# revision identifiers, used by Alembic.
revision = '542cdf68faa5'
down_revision = '26745016c3ce'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
def create_salted_password(password):
alphabet = string.ascii_letters + string.digits
CHARS = 6
random_str = ""
for _ in range(CHARS):
random_str += random.choice(alphabet)
salted_password = unicode(new_hash("sha", random_str + password).hexdigest())
return random_str + "::" + salted_password
metadata = sa.MetaData()
user = sa.Table('Users', metadata,
sa.Column('id', sa.Integer()),
sa.Column('auth_system', sa.Unicode(20)),
sa.Column('auth_data', sa.String(255)),
)
def upgrade():
users_data = sql.select([user.c.id, user.c.auth_data], user.c.auth_system == "userpass")
user_passwords = {}
for row in op.get_bind().execute(users_data):
user_passwords[row[user.c.id]] = row[user.c.auth_data]
for user_id in user_passwords:
new_password = create_salted_password( user_passwords[user_id] )
update_stmt = user.update().where(user.c.id == user_id).values(auth_data = new_password)
op.execute(update_stmt)
def downgrade():
pass
|
Add script for salting the existing passwords
|
Add script for salting the existing passwords
|
Python
|
bsd-2-clause
|
go-lab/appcomposer,porduna/appcomposer,porduna/appcomposer,morelab/appcomposer,go-lab/appcomposer,morelab/appcomposer,morelab/appcomposer,porduna/appcomposer,morelab/appcomposer,go-lab/appcomposer,go-lab/appcomposer,porduna/appcomposer
|
Add script for salting the existing passwords
|
"""Salt passwords
Revision ID: 542cdf68faa5
Revises: 26745016c3ce
Create Date: 2014-04-16 15:15:05.393213
"""
import string
import random
from hashlib import new as new_hash
# revision identifiers, used by Alembic.
revision = '542cdf68faa5'
down_revision = '26745016c3ce'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
def create_salted_password(password):
alphabet = string.ascii_letters + string.digits
CHARS = 6
random_str = ""
for _ in range(CHARS):
random_str += random.choice(alphabet)
salted_password = unicode(new_hash("sha", random_str + password).hexdigest())
return random_str + "::" + salted_password
metadata = sa.MetaData()
user = sa.Table('Users', metadata,
sa.Column('id', sa.Integer()),
sa.Column('auth_system', sa.Unicode(20)),
sa.Column('auth_data', sa.String(255)),
)
def upgrade():
users_data = sql.select([user.c.id, user.c.auth_data], user.c.auth_system == "userpass")
user_passwords = {}
for row in op.get_bind().execute(users_data):
user_passwords[row[user.c.id]] = row[user.c.auth_data]
for user_id in user_passwords:
new_password = create_salted_password( user_passwords[user_id] )
update_stmt = user.update().where(user.c.id == user_id).values(auth_data = new_password)
op.execute(update_stmt)
def downgrade():
pass
|
<commit_before><commit_msg>Add script for salting the existing passwords<commit_after>
|
"""Salt passwords
Revision ID: 542cdf68faa5
Revises: 26745016c3ce
Create Date: 2014-04-16 15:15:05.393213
"""
import string
import random
from hashlib import new as new_hash
# revision identifiers, used by Alembic.
revision = '542cdf68faa5'
down_revision = '26745016c3ce'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
def create_salted_password(password):
alphabet = string.ascii_letters + string.digits
CHARS = 6
random_str = ""
for _ in range(CHARS):
random_str += random.choice(alphabet)
salted_password = unicode(new_hash("sha", random_str + password).hexdigest())
return random_str + "::" + salted_password
metadata = sa.MetaData()
user = sa.Table('Users', metadata,
sa.Column('id', sa.Integer()),
sa.Column('auth_system', sa.Unicode(20)),
sa.Column('auth_data', sa.String(255)),
)
def upgrade():
users_data = sql.select([user.c.id, user.c.auth_data], user.c.auth_system == "userpass")
user_passwords = {}
for row in op.get_bind().execute(users_data):
user_passwords[row[user.c.id]] = row[user.c.auth_data]
for user_id in user_passwords:
new_password = create_salted_password( user_passwords[user_id] )
update_stmt = user.update().where(user.c.id == user_id).values(auth_data = new_password)
op.execute(update_stmt)
def downgrade():
pass
|
Add script for salting the existing passwords"""Salt passwords
Revision ID: 542cdf68faa5
Revises: 26745016c3ce
Create Date: 2014-04-16 15:15:05.393213
"""
import string
import random
from hashlib import new as new_hash
# revision identifiers, used by Alembic.
revision = '542cdf68faa5'
down_revision = '26745016c3ce'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
def create_salted_password(password):
alphabet = string.ascii_letters + string.digits
CHARS = 6
random_str = ""
for _ in range(CHARS):
random_str += random.choice(alphabet)
salted_password = unicode(new_hash("sha", random_str + password).hexdigest())
return random_str + "::" + salted_password
metadata = sa.MetaData()
user = sa.Table('Users', metadata,
sa.Column('id', sa.Integer()),
sa.Column('auth_system', sa.Unicode(20)),
sa.Column('auth_data', sa.String(255)),
)
def upgrade():
users_data = sql.select([user.c.id, user.c.auth_data], user.c.auth_system == "userpass")
user_passwords = {}
for row in op.get_bind().execute(users_data):
user_passwords[row[user.c.id]] = row[user.c.auth_data]
for user_id in user_passwords:
new_password = create_salted_password( user_passwords[user_id] )
update_stmt = user.update().where(user.c.id == user_id).values(auth_data = new_password)
op.execute(update_stmt)
def downgrade():
pass
|
<commit_before><commit_msg>Add script for salting the existing passwords<commit_after>"""Salt passwords
Revision ID: 542cdf68faa5
Revises: 26745016c3ce
Create Date: 2014-04-16 15:15:05.393213
"""
import string
import random
from hashlib import new as new_hash
# revision identifiers, used by Alembic.
revision = '542cdf68faa5'
down_revision = '26745016c3ce'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.sql as sql
def create_salted_password(password):
alphabet = string.ascii_letters + string.digits
CHARS = 6
random_str = ""
for _ in range(CHARS):
random_str += random.choice(alphabet)
salted_password = unicode(new_hash("sha", random_str + password).hexdigest())
return random_str + "::" + salted_password
metadata = sa.MetaData()
user = sa.Table('Users', metadata,
sa.Column('id', sa.Integer()),
sa.Column('auth_system', sa.Unicode(20)),
sa.Column('auth_data', sa.String(255)),
)
def upgrade():
users_data = sql.select([user.c.id, user.c.auth_data], user.c.auth_system == "userpass")
user_passwords = {}
for row in op.get_bind().execute(users_data):
user_passwords[row[user.c.id]] = row[user.c.auth_data]
for user_id in user_passwords:
new_password = create_salted_password( user_passwords[user_id] )
update_stmt = user.update().where(user.c.id == user_id).values(auth_data = new_password)
op.execute(update_stmt)
def downgrade():
pass
|
|
9011e9e7e85804b532cb9e5ccbaa64425798c018
|
anillo/utils/structures.py
|
anillo/utils/structures.py
|
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
|
Add case insensitive dict implementation into utils package.
|
Add case insensitive dict implementation into utils package.
|
Python
|
bsd-2-clause
|
hirunatan/anillo,niwinz/anillo,hirunatan/anillo,jespino/anillo,niwinz/anillo,jespino/anillo
|
Add case insensitive dict implementation into utils package.
|
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
|
<commit_before><commit_msg>Add case insensitive dict implementation into utils package.<commit_after>
|
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
|
Add case insensitive dict implementation into utils package.import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
|
<commit_before><commit_msg>Add case insensitive dict implementation into utils package.<commit_after>import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
|
|
ebec193c624ef9db69d8ea260e13c41459f6fa57
|
Week01/Problem05/cyu_05.py
|
Week01/Problem05/cyu_05.py
|
#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 5
from fractions import gcd
def lcm(int1, int2):
"""Calculate the least common multiple of two integers, a and b."""
return int(int1*int2/gcd(int1, int2))
from functools import reduce
print(reduce(lcm, range(1, 20+1)))
|
Add Chuanping Yu's solutions to Problem05
|
Add Chuanping Yu's solutions to Problem05
|
Python
|
bsd-3-clause
|
GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017,GT-IDEaS/SkillsWorkshop2017
|
Add Chuanping Yu's solutions to Problem05
|
#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 5
from fractions import gcd
def lcm(int1, int2):
"""Calculate the least common multiple of two integers, a and b."""
return int(int1*int2/gcd(int1, int2))
from functools import reduce
print(reduce(lcm, range(1, 20+1)))
|
<commit_before><commit_msg>Add Chuanping Yu's solutions to Problem05<commit_after>
|
#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 5
from fractions import gcd
def lcm(int1, int2):
"""Calculate the least common multiple of two integers, a and b."""
return int(int1*int2/gcd(int1, int2))
from functools import reduce
print(reduce(lcm, range(1, 20+1)))
|
Add Chuanping Yu's solutions to Problem05#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 5
from fractions import gcd
def lcm(int1, int2):
"""Calculate the least common multiple of two integers, a and b."""
return int(int1*int2/gcd(int1, int2))
from functools import reduce
print(reduce(lcm, range(1, 20+1)))
|
<commit_before><commit_msg>Add Chuanping Yu's solutions to Problem05<commit_after>#!/usr/bin/env python3
"""This script is written by Chuanping Yu, on Jul 24, 2017,
for the Assignment#1 in IDEaS workshop"""
#Problem 5
from fractions import gcd
def lcm(int1, int2):
"""Calculate the least common multiple of two integers, a and b."""
return int(int1*int2/gcd(int1, int2))
from functools import reduce
print(reduce(lcm, range(1, 20+1)))
|
|
9e218caa6c9c65e89079c3e0c2f3ff8834389832
|
st2common/tests/unit/test_transport.py
|
st2common/tests/unit/test_transport.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import unittest2
from st2common.transport.utils import _get_ssl_kwargs
__all__ = [
'TransportUtilsTestCase'
]
class TransportUtilsTestCase(unittest2.TestCase):
def test_get_ssl_kwargs(self):
# 1. No SSL kwargs provided
ssl_kwargs = _get_ssl_kwargs()
self.assertEqual(ssl_kwargs, {})
# 2. ssl kwarg provided
ssl_kwargs = _get_ssl_kwargs(ssl=True)
self.assertEqual(ssl_kwargs, {
'ssl': True
})
# 3. ssl_keyfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_keyfile='/tmp/keyfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'keyfile': '/tmp/keyfile'
})
# 4. ssl_certfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_certfile='/tmp/certfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'certfile': '/tmp/certfile'
})
# 5. ssl_ca_certs provided
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs'
})
# 6. ssl_ca_certs and ssl_cert_reqs combinations
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='none')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_NONE
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='optional')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_OPTIONAL
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='required')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_REQUIRED
})
|
Add tests for st2common.transport.utils._get_ssl_kwargs function.
|
Add tests for st2common.transport.utils._get_ssl_kwargs function.
|
Python
|
apache-2.0
|
nzlosh/st2,nzlosh/st2,StackStorm/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,Plexxi/st2,StackStorm/st2,StackStorm/st2,nzlosh/st2
|
Add tests for st2common.transport.utils._get_ssl_kwargs function.
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import unittest2
from st2common.transport.utils import _get_ssl_kwargs
__all__ = [
'TransportUtilsTestCase'
]
class TransportUtilsTestCase(unittest2.TestCase):
def test_get_ssl_kwargs(self):
# 1. No SSL kwargs provided
ssl_kwargs = _get_ssl_kwargs()
self.assertEqual(ssl_kwargs, {})
# 2. ssl kwarg provided
ssl_kwargs = _get_ssl_kwargs(ssl=True)
self.assertEqual(ssl_kwargs, {
'ssl': True
})
# 3. ssl_keyfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_keyfile='/tmp/keyfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'keyfile': '/tmp/keyfile'
})
# 4. ssl_certfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_certfile='/tmp/certfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'certfile': '/tmp/certfile'
})
# 5. ssl_ca_certs provided
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs'
})
# 6. ssl_ca_certs and ssl_cert_reqs combinations
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='none')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_NONE
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='optional')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_OPTIONAL
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='required')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_REQUIRED
})
|
<commit_before><commit_msg>Add tests for st2common.transport.utils._get_ssl_kwargs function.<commit_after>
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import unittest2
from st2common.transport.utils import _get_ssl_kwargs
__all__ = [
'TransportUtilsTestCase'
]
class TransportUtilsTestCase(unittest2.TestCase):
def test_get_ssl_kwargs(self):
# 1. No SSL kwargs provided
ssl_kwargs = _get_ssl_kwargs()
self.assertEqual(ssl_kwargs, {})
# 2. ssl kwarg provided
ssl_kwargs = _get_ssl_kwargs(ssl=True)
self.assertEqual(ssl_kwargs, {
'ssl': True
})
# 3. ssl_keyfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_keyfile='/tmp/keyfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'keyfile': '/tmp/keyfile'
})
# 4. ssl_certfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_certfile='/tmp/certfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'certfile': '/tmp/certfile'
})
# 5. ssl_ca_certs provided
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs'
})
# 6. ssl_ca_certs and ssl_cert_reqs combinations
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='none')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_NONE
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='optional')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_OPTIONAL
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='required')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_REQUIRED
})
|
Add tests for st2common.transport.utils._get_ssl_kwargs function.# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import unittest2
from st2common.transport.utils import _get_ssl_kwargs
__all__ = [
'TransportUtilsTestCase'
]
class TransportUtilsTestCase(unittest2.TestCase):
def test_get_ssl_kwargs(self):
# 1. No SSL kwargs provided
ssl_kwargs = _get_ssl_kwargs()
self.assertEqual(ssl_kwargs, {})
# 2. ssl kwarg provided
ssl_kwargs = _get_ssl_kwargs(ssl=True)
self.assertEqual(ssl_kwargs, {
'ssl': True
})
# 3. ssl_keyfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_keyfile='/tmp/keyfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'keyfile': '/tmp/keyfile'
})
# 4. ssl_certfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_certfile='/tmp/certfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'certfile': '/tmp/certfile'
})
# 5. ssl_ca_certs provided
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs'
})
# 6. ssl_ca_certs and ssl_cert_reqs combinations
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='none')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_NONE
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='optional')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_OPTIONAL
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='required')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_REQUIRED
})
|
<commit_before><commit_msg>Add tests for st2common.transport.utils._get_ssl_kwargs function.<commit_after># Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ssl
import unittest2
from st2common.transport.utils import _get_ssl_kwargs
__all__ = [
'TransportUtilsTestCase'
]
class TransportUtilsTestCase(unittest2.TestCase):
def test_get_ssl_kwargs(self):
# 1. No SSL kwargs provided
ssl_kwargs = _get_ssl_kwargs()
self.assertEqual(ssl_kwargs, {})
# 2. ssl kwarg provided
ssl_kwargs = _get_ssl_kwargs(ssl=True)
self.assertEqual(ssl_kwargs, {
'ssl': True
})
# 3. ssl_keyfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_keyfile='/tmp/keyfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'keyfile': '/tmp/keyfile'
})
# 4. ssl_certfile provided
ssl_kwargs = _get_ssl_kwargs(ssl_certfile='/tmp/certfile')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'certfile': '/tmp/certfile'
})
# 5. ssl_ca_certs provided
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs'
})
# 6. ssl_ca_certs and ssl_cert_reqs combinations
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='none')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_NONE
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='optional')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_OPTIONAL
})
ssl_kwargs = _get_ssl_kwargs(ssl_ca_certs='/tmp/ca_certs', ssl_cert_reqs='required')
self.assertEqual(ssl_kwargs, {
'ssl': True,
'ca_certs': '/tmp/ca_certs',
'cert_reqs': ssl.CERT_REQUIRED
})
|
|
ca0037e4e7d5983017c169a3d8ae0a5d7a31cddf
|
tests/tasks/thrift/test_multiplexed.py
|
tests/tasks/thrift/test_multiplexed.py
|
# Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import absolute_import
from sparts.tests.base import MultiTaskTestCase, Skip
from sparts.thrift import compiler
from sparts.thrift.client import ThriftClient
# Make sure we have the thrift-runtime related sparts tasks
try:
from sparts.tasks.thrift.handler import ThriftHandlerTask
from sparts.tasks.thrift.nbserver import NBServerTask
except ImportError:
raise Skip("Need thrift language bindings to run this test")
# Make sure we have the thrift compiler
try:
compiler._require_executable('thrift1', 'thrift')
except AssertionError:
raise Skip("Need thrift compiler to run this test")
# String containing .thrift file contents for some example services
EXAMPLE_SERVICES = """
service FooService {
string makeFoos(1: i16 numfoos),
}
service BarService {
string makeBars(1: i16 numbars),
}
"""
# Compile the above service
SERVICES = compiler.CompileContext().importThriftStr(EXAMPLE_SERVICES)
class FooHandler(ThriftHandlerTask):
MODULE = SERVICES.FooService
def makeFoos(self, numfoos):
return "foo" * numfoos
class BarHandler(ThriftHandlerTask):
MODULE = SERVICES.BarService
SERVICE_NAME = 'bar'
def makeBars(self, numbars):
return "bar" * numbars
class MultiplexedServer(NBServerTask):
MULTIPLEX = True
class NonMultiplexedServer(NBServerTask):
MULTIPLEX = False
class TestMultiplexedServer(MultiTaskTestCase):
TASKS = [FooHandler, BarHandler, MultiplexedServer]
def testClientWorks(self):
server = self.service.requireTask(MultiplexedServer)
# Verify the client and service for FooService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.FooService,
multiplex_service='FooHandler',
)
self.assertEqual(
client.makeFoos(3),
"foofoofoo",
)
# Make sure makeBars does not work for FooService
with self.assertRaises(Exception):
client.makeBars(1)
# Verify the client and service for BarService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.BarService,
multiplex_service='bar',
)
self.assertEqual(
client.makeBars(2),
"barbar",
)
# Make sure makeFoos does not work for BarService
with self.assertRaises(Exception):
client.makeFoos(1)
|
Add a test case to verify Multiplexed server/client functionality
|
Add a test case to verify Multiplexed server/client functionality
|
Python
|
bsd-3-clause
|
facebook/sparts,djipko/sparts,fmoo/sparts,djipko/sparts,fmoo/sparts,facebook/sparts
|
Add a test case to verify Multiplexed server/client functionality
|
# Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import absolute_import
from sparts.tests.base import MultiTaskTestCase, Skip
from sparts.thrift import compiler
from sparts.thrift.client import ThriftClient
# Make sure we have the thrift-runtime related sparts tasks
try:
from sparts.tasks.thrift.handler import ThriftHandlerTask
from sparts.tasks.thrift.nbserver import NBServerTask
except ImportError:
raise Skip("Need thrift language bindings to run this test")
# Make sure we have the thrift compiler
try:
compiler._require_executable('thrift1', 'thrift')
except AssertionError:
raise Skip("Need thrift compiler to run this test")
# String containing .thrift file contents for some example services
EXAMPLE_SERVICES = """
service FooService {
string makeFoos(1: i16 numfoos),
}
service BarService {
string makeBars(1: i16 numbars),
}
"""
# Compile the above service
SERVICES = compiler.CompileContext().importThriftStr(EXAMPLE_SERVICES)
class FooHandler(ThriftHandlerTask):
MODULE = SERVICES.FooService
def makeFoos(self, numfoos):
return "foo" * numfoos
class BarHandler(ThriftHandlerTask):
MODULE = SERVICES.BarService
SERVICE_NAME = 'bar'
def makeBars(self, numbars):
return "bar" * numbars
class MultiplexedServer(NBServerTask):
MULTIPLEX = True
class NonMultiplexedServer(NBServerTask):
MULTIPLEX = False
class TestMultiplexedServer(MultiTaskTestCase):
TASKS = [FooHandler, BarHandler, MultiplexedServer]
def testClientWorks(self):
server = self.service.requireTask(MultiplexedServer)
# Verify the client and service for FooService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.FooService,
multiplex_service='FooHandler',
)
self.assertEqual(
client.makeFoos(3),
"foofoofoo",
)
# Make sure makeBars does not work for FooService
with self.assertRaises(Exception):
client.makeBars(1)
# Verify the client and service for BarService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.BarService,
multiplex_service='bar',
)
self.assertEqual(
client.makeBars(2),
"barbar",
)
# Make sure makeFoos does not work for BarService
with self.assertRaises(Exception):
client.makeFoos(1)
|
<commit_before><commit_msg>Add a test case to verify Multiplexed server/client functionality<commit_after>
|
# Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import absolute_import
from sparts.tests.base import MultiTaskTestCase, Skip
from sparts.thrift import compiler
from sparts.thrift.client import ThriftClient
# Make sure we have the thrift-runtime related sparts tasks
try:
from sparts.tasks.thrift.handler import ThriftHandlerTask
from sparts.tasks.thrift.nbserver import NBServerTask
except ImportError:
raise Skip("Need thrift language bindings to run this test")
# Make sure we have the thrift compiler
try:
compiler._require_executable('thrift1', 'thrift')
except AssertionError:
raise Skip("Need thrift compiler to run this test")
# String containing .thrift file contents for some example services
EXAMPLE_SERVICES = """
service FooService {
string makeFoos(1: i16 numfoos),
}
service BarService {
string makeBars(1: i16 numbars),
}
"""
# Compile the above service
SERVICES = compiler.CompileContext().importThriftStr(EXAMPLE_SERVICES)
class FooHandler(ThriftHandlerTask):
MODULE = SERVICES.FooService
def makeFoos(self, numfoos):
return "foo" * numfoos
class BarHandler(ThriftHandlerTask):
MODULE = SERVICES.BarService
SERVICE_NAME = 'bar'
def makeBars(self, numbars):
return "bar" * numbars
class MultiplexedServer(NBServerTask):
MULTIPLEX = True
class NonMultiplexedServer(NBServerTask):
MULTIPLEX = False
class TestMultiplexedServer(MultiTaskTestCase):
TASKS = [FooHandler, BarHandler, MultiplexedServer]
def testClientWorks(self):
server = self.service.requireTask(MultiplexedServer)
# Verify the client and service for FooService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.FooService,
multiplex_service='FooHandler',
)
self.assertEqual(
client.makeFoos(3),
"foofoofoo",
)
# Make sure makeBars does not work for FooService
with self.assertRaises(Exception):
client.makeBars(1)
# Verify the client and service for BarService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.BarService,
multiplex_service='bar',
)
self.assertEqual(
client.makeBars(2),
"barbar",
)
# Make sure makeFoos does not work for BarService
with self.assertRaises(Exception):
client.makeFoos(1)
|
Add a test case to verify Multiplexed server/client functionality# Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import absolute_import
from sparts.tests.base import MultiTaskTestCase, Skip
from sparts.thrift import compiler
from sparts.thrift.client import ThriftClient
# Make sure we have the thrift-runtime related sparts tasks
try:
from sparts.tasks.thrift.handler import ThriftHandlerTask
from sparts.tasks.thrift.nbserver import NBServerTask
except ImportError:
raise Skip("Need thrift language bindings to run this test")
# Make sure we have the thrift compiler
try:
compiler._require_executable('thrift1', 'thrift')
except AssertionError:
raise Skip("Need thrift compiler to run this test")
# String containing .thrift file contents for some example services
EXAMPLE_SERVICES = """
service FooService {
string makeFoos(1: i16 numfoos),
}
service BarService {
string makeBars(1: i16 numbars),
}
"""
# Compile the above service
SERVICES = compiler.CompileContext().importThriftStr(EXAMPLE_SERVICES)
class FooHandler(ThriftHandlerTask):
MODULE = SERVICES.FooService
def makeFoos(self, numfoos):
return "foo" * numfoos
class BarHandler(ThriftHandlerTask):
MODULE = SERVICES.BarService
SERVICE_NAME = 'bar'
def makeBars(self, numbars):
return "bar" * numbars
class MultiplexedServer(NBServerTask):
MULTIPLEX = True
class NonMultiplexedServer(NBServerTask):
MULTIPLEX = False
class TestMultiplexedServer(MultiTaskTestCase):
TASKS = [FooHandler, BarHandler, MultiplexedServer]
def testClientWorks(self):
server = self.service.requireTask(MultiplexedServer)
# Verify the client and service for FooService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.FooService,
multiplex_service='FooHandler',
)
self.assertEqual(
client.makeFoos(3),
"foofoofoo",
)
# Make sure makeBars does not work for FooService
with self.assertRaises(Exception):
client.makeBars(1)
# Verify the client and service for BarService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.BarService,
multiplex_service='bar',
)
self.assertEqual(
client.makeBars(2),
"barbar",
)
# Make sure makeFoos does not work for BarService
with self.assertRaises(Exception):
client.makeFoos(1)
|
<commit_before><commit_msg>Add a test case to verify Multiplexed server/client functionality<commit_after># Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from __future__ import absolute_import
from sparts.tests.base import MultiTaskTestCase, Skip
from sparts.thrift import compiler
from sparts.thrift.client import ThriftClient
# Make sure we have the thrift-runtime related sparts tasks
try:
from sparts.tasks.thrift.handler import ThriftHandlerTask
from sparts.tasks.thrift.nbserver import NBServerTask
except ImportError:
raise Skip("Need thrift language bindings to run this test")
# Make sure we have the thrift compiler
try:
compiler._require_executable('thrift1', 'thrift')
except AssertionError:
raise Skip("Need thrift compiler to run this test")
# String containing .thrift file contents for some example services
EXAMPLE_SERVICES = """
service FooService {
string makeFoos(1: i16 numfoos),
}
service BarService {
string makeBars(1: i16 numbars),
}
"""
# Compile the above service
SERVICES = compiler.CompileContext().importThriftStr(EXAMPLE_SERVICES)
class FooHandler(ThriftHandlerTask):
MODULE = SERVICES.FooService
def makeFoos(self, numfoos):
return "foo" * numfoos
class BarHandler(ThriftHandlerTask):
MODULE = SERVICES.BarService
SERVICE_NAME = 'bar'
def makeBars(self, numbars):
return "bar" * numbars
class MultiplexedServer(NBServerTask):
MULTIPLEX = True
class NonMultiplexedServer(NBServerTask):
MULTIPLEX = False
class TestMultiplexedServer(MultiTaskTestCase):
TASKS = [FooHandler, BarHandler, MultiplexedServer]
def testClientWorks(self):
server = self.service.requireTask(MultiplexedServer)
# Verify the client and service for FooService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.FooService,
multiplex_service='FooHandler',
)
self.assertEqual(
client.makeFoos(3),
"foofoofoo",
)
# Make sure makeBars does not work for FooService
with self.assertRaises(Exception):
client.makeBars(1)
# Verify the client and service for BarService/Handler
client = ThriftClient.for_localhost(
server.bound_port,
module=SERVICES.BarService,
multiplex_service='bar',
)
self.assertEqual(
client.makeBars(2),
"barbar",
)
# Make sure makeFoos does not work for BarService
with self.assertRaises(Exception):
client.makeFoos(1)
|
|
31ccca324d87badba17d46d66868d86a6ca684e9
|
Gui/find_files_with_no_license.py
|
Gui/find_files_with_no_license.py
|
import os
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('LGPL' in filecontents or 'Lesser' in filecontents):
print(fpath + ' LGPL')
if (('Copyright' in filecontents or 'License' in filecontents) and
not 'Stanford' in filecontents):
print(fpath)
print('Files with the old OpenSim license:')
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('non-commercial' in filecontents or
'simtk.org/home/opensim' in filecontents):
print(" " + fpath)
|
Add python script for finding licenses.
|
Add python script for finding licenses.
|
Python
|
apache-2.0
|
opensim-org/opensim-gui,opensim-org/opensim-gui,opensim-org/opensim-gui,opensim-org/opensim-gui
|
Add python script for finding licenses.
|
import os
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('LGPL' in filecontents or 'Lesser' in filecontents):
print(fpath + ' LGPL')
if (('Copyright' in filecontents or 'License' in filecontents) and
not 'Stanford' in filecontents):
print(fpath)
print('Files with the old OpenSim license:')
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('non-commercial' in filecontents or
'simtk.org/home/opensim' in filecontents):
print(" " + fpath)
|
<commit_before><commit_msg>Add python script for finding licenses.<commit_after>
|
import os
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('LGPL' in filecontents or 'Lesser' in filecontents):
print(fpath + ' LGPL')
if (('Copyright' in filecontents or 'License' in filecontents) and
not 'Stanford' in filecontents):
print(fpath)
print('Files with the old OpenSim license:')
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('non-commercial' in filecontents or
'simtk.org/home/opensim' in filecontents):
print(" " + fpath)
|
Add python script for finding licenses.
import os
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('LGPL' in filecontents or 'Lesser' in filecontents):
print(fpath + ' LGPL')
if (('Copyright' in filecontents or 'License' in filecontents) and
not 'Stanford' in filecontents):
print(fpath)
print('Files with the old OpenSim license:')
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('non-commercial' in filecontents or
'simtk.org/home/opensim' in filecontents):
print(" " + fpath)
|
<commit_before><commit_msg>Add python script for finding licenses.<commit_after>
import os
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('LGPL' in filecontents or 'Lesser' in filecontents):
print(fpath + ' LGPL')
if (('Copyright' in filecontents or 'License' in filecontents) and
not 'Stanford' in filecontents):
print(fpath)
print('Files with the old OpenSim license:')
for dirpath, dirnames, filenames in os.walk('./opensim'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if ('non-commercial' in filecontents or
'simtk.org/home/opensim' in filecontents):
print(" " + fpath)
|
|
cf02ad4621412fec5a1d6d999d7ec12a52683cf1
|
custom/enikshay/management/commands/get_patients_for_bets.py
|
custom/enikshay/management/commands/get_patients_for_bets.py
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.util.log import with_progress_bar
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.const import ENROLLED_IN_PRIVATE
from custom.enikshay.case_utils import CASE_TYPE_PERSON
class Command(BaseCommand):
field_names = [
]
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('parent_location_id')
def handle(self, domain, parent_location_id, **options):
self.domain = domain
self.accessor = CaseAccessors(domain)
self.location = SQLLocation.objects.get(domain=domain, location_id=parent_location_id)
owner_ids = self.location.get_descendants(include_self=True).location_ids()
filename = 'patients.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.field_names)
person_ids = self.accessor.get_open_case_ids_in_domain_by_type(CASE_TYPE_PERSON, owner_ids)
for person in with_progress_bar(self.accessor.iter_cases(person_ids)):
if person.get_case_property(ENROLLED_IN_PRIVATE) == 'true':
self.add_person(person, writer)
print "Wrote to {}".format(filename)
def add_person(self, person, writer):
return
|
Add skeleton of Patient dump command
|
Add skeleton of Patient dump command
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add skeleton of Patient dump command
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.util.log import with_progress_bar
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.const import ENROLLED_IN_PRIVATE
from custom.enikshay.case_utils import CASE_TYPE_PERSON
class Command(BaseCommand):
field_names = [
]
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('parent_location_id')
def handle(self, domain, parent_location_id, **options):
self.domain = domain
self.accessor = CaseAccessors(domain)
self.location = SQLLocation.objects.get(domain=domain, location_id=parent_location_id)
owner_ids = self.location.get_descendants(include_self=True).location_ids()
filename = 'patients.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.field_names)
person_ids = self.accessor.get_open_case_ids_in_domain_by_type(CASE_TYPE_PERSON, owner_ids)
for person in with_progress_bar(self.accessor.iter_cases(person_ids)):
if person.get_case_property(ENROLLED_IN_PRIVATE) == 'true':
self.add_person(person, writer)
print "Wrote to {}".format(filename)
def add_person(self, person, writer):
return
|
<commit_before><commit_msg>Add skeleton of Patient dump command<commit_after>
|
import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.util.log import with_progress_bar
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.const import ENROLLED_IN_PRIVATE
from custom.enikshay.case_utils import CASE_TYPE_PERSON
class Command(BaseCommand):
field_names = [
]
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('parent_location_id')
def handle(self, domain, parent_location_id, **options):
self.domain = domain
self.accessor = CaseAccessors(domain)
self.location = SQLLocation.objects.get(domain=domain, location_id=parent_location_id)
owner_ids = self.location.get_descendants(include_self=True).location_ids()
filename = 'patients.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.field_names)
person_ids = self.accessor.get_open_case_ids_in_domain_by_type(CASE_TYPE_PERSON, owner_ids)
for person in with_progress_bar(self.accessor.iter_cases(person_ids)):
if person.get_case_property(ENROLLED_IN_PRIVATE) == 'true':
self.add_person(person, writer)
print "Wrote to {}".format(filename)
def add_person(self, person, writer):
return
|
Add skeleton of Patient dump commandimport csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.util.log import with_progress_bar
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.const import ENROLLED_IN_PRIVATE
from custom.enikshay.case_utils import CASE_TYPE_PERSON
class Command(BaseCommand):
field_names = [
]
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('parent_location_id')
def handle(self, domain, parent_location_id, **options):
self.domain = domain
self.accessor = CaseAccessors(domain)
self.location = SQLLocation.objects.get(domain=domain, location_id=parent_location_id)
owner_ids = self.location.get_descendants(include_self=True).location_ids()
filename = 'patients.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.field_names)
person_ids = self.accessor.get_open_case_ids_in_domain_by_type(CASE_TYPE_PERSON, owner_ids)
for person in with_progress_bar(self.accessor.iter_cases(person_ids)):
if person.get_case_property(ENROLLED_IN_PRIVATE) == 'true':
self.add_person(person, writer)
print "Wrote to {}".format(filename)
def add_person(self, person, writer):
return
|
<commit_before><commit_msg>Add skeleton of Patient dump command<commit_after>import csv
from django.core.management.base import BaseCommand
from corehq.apps.locations.models import SQLLocation
from corehq.util.log import with_progress_bar
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from custom.enikshay.const import ENROLLED_IN_PRIVATE
from custom.enikshay.case_utils import CASE_TYPE_PERSON
class Command(BaseCommand):
field_names = [
]
def add_arguments(self, parser):
parser.add_argument('domain')
parser.add_argument('parent_location_id')
def handle(self, domain, parent_location_id, **options):
self.domain = domain
self.accessor = CaseAccessors(domain)
self.location = SQLLocation.objects.get(domain=domain, location_id=parent_location_id)
owner_ids = self.location.get_descendants(include_self=True).location_ids()
filename = 'patients.csv'
with open(filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(self.field_names)
person_ids = self.accessor.get_open_case_ids_in_domain_by_type(CASE_TYPE_PERSON, owner_ids)
for person in with_progress_bar(self.accessor.iter_cases(person_ids)):
if person.get_case_property(ENROLLED_IN_PRIVATE) == 'true':
self.add_person(person, writer)
print "Wrote to {}".format(filename)
def add_person(self, person, writer):
return
|
|
d66cdb7b1171abd96fa92c89535c3785df5d0a43
|
packages/syft/src/syft/core/node/common/node_table/ledger.py
|
packages/syft/src/syft/core/node/common/node_table/ledger.py
|
# stdlib
from typing import Any
# third party
from sqlalchemy import Column
from sqlalchemy import LargeBinary
from sqlalchemy import String
from sqlalchemy import Float
# syft absolute
from syft import deserialize
from syft import serialize
# relative
from . import Base
class Ledger(Base):
__tablename__ = "ledger"
id = Column(String(256), primary_key=True)
entity_name = Column(String(256))
mechanism_name = Column(String(256))
max_budget = Column(Float())
delta = Column(Float())
mechanism_bin = Column(LargeBinary(3072))
entity_bin = Column(LargeBinary(3072))
@property
def mechanism(self) -> Any:
return deserialize(self.mechanism_bin, from_bytes=True) # TODO: techdebt fix
@mechanism.setter
def mechanism(self, value: Any) -> None:
self.mechanism_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
@property
def entity(self) -> Any:
return deserialize(self.entity_bin, from_bytes=True) # TODO: techdebt fix
@entity.setter
def entity(self, value: Any) -> None:
self.entity_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
|
Create new database table: Ledger
|
Create new database table: Ledger
|
Python
|
apache-2.0
|
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
|
Create new database table: Ledger
|
# stdlib
from typing import Any
# third party
from sqlalchemy import Column
from sqlalchemy import LargeBinary
from sqlalchemy import String
from sqlalchemy import Float
# syft absolute
from syft import deserialize
from syft import serialize
# relative
from . import Base
class Ledger(Base):
__tablename__ = "ledger"
id = Column(String(256), primary_key=True)
entity_name = Column(String(256))
mechanism_name = Column(String(256))
max_budget = Column(Float())
delta = Column(Float())
mechanism_bin = Column(LargeBinary(3072))
entity_bin = Column(LargeBinary(3072))
@property
def mechanism(self) -> Any:
return deserialize(self.mechanism_bin, from_bytes=True) # TODO: techdebt fix
@mechanism.setter
def mechanism(self, value: Any) -> None:
self.mechanism_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
@property
def entity(self) -> Any:
return deserialize(self.entity_bin, from_bytes=True) # TODO: techdebt fix
@entity.setter
def entity(self, value: Any) -> None:
self.entity_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
|
<commit_before><commit_msg>Create new database table: Ledger<commit_after>
|
# stdlib
from typing import Any
# third party
from sqlalchemy import Column
from sqlalchemy import LargeBinary
from sqlalchemy import String
from sqlalchemy import Float
# syft absolute
from syft import deserialize
from syft import serialize
# relative
from . import Base
class Ledger(Base):
__tablename__ = "ledger"
id = Column(String(256), primary_key=True)
entity_name = Column(String(256))
mechanism_name = Column(String(256))
max_budget = Column(Float())
delta = Column(Float())
mechanism_bin = Column(LargeBinary(3072))
entity_bin = Column(LargeBinary(3072))
@property
def mechanism(self) -> Any:
return deserialize(self.mechanism_bin, from_bytes=True) # TODO: techdebt fix
@mechanism.setter
def mechanism(self, value: Any) -> None:
self.mechanism_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
@property
def entity(self) -> Any:
return deserialize(self.entity_bin, from_bytes=True) # TODO: techdebt fix
@entity.setter
def entity(self, value: Any) -> None:
self.entity_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
|
Create new database table: Ledger
# stdlib
from typing import Any
# third party
from sqlalchemy import Column
from sqlalchemy import LargeBinary
from sqlalchemy import String
from sqlalchemy import Float
# syft absolute
from syft import deserialize
from syft import serialize
# relative
from . import Base
class Ledger(Base):
__tablename__ = "ledger"
id = Column(String(256), primary_key=True)
entity_name = Column(String(256))
mechanism_name = Column(String(256))
max_budget = Column(Float())
delta = Column(Float())
mechanism_bin = Column(LargeBinary(3072))
entity_bin = Column(LargeBinary(3072))
@property
def mechanism(self) -> Any:
return deserialize(self.mechanism_bin, from_bytes=True) # TODO: techdebt fix
@mechanism.setter
def mechanism(self, value: Any) -> None:
self.mechanism_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
@property
def entity(self) -> Any:
return deserialize(self.entity_bin, from_bytes=True) # TODO: techdebt fix
@entity.setter
def entity(self, value: Any) -> None:
self.entity_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
|
<commit_before><commit_msg>Create new database table: Ledger<commit_after># stdlib
from typing import Any
# third party
from sqlalchemy import Column
from sqlalchemy import LargeBinary
from sqlalchemy import String
from sqlalchemy import Float
# syft absolute
from syft import deserialize
from syft import serialize
# relative
from . import Base
class Ledger(Base):
__tablename__ = "ledger"
id = Column(String(256), primary_key=True)
entity_name = Column(String(256))
mechanism_name = Column(String(256))
max_budget = Column(Float())
delta = Column(Float())
mechanism_bin = Column(LargeBinary(3072))
entity_bin = Column(LargeBinary(3072))
@property
def mechanism(self) -> Any:
return deserialize(self.mechanism_bin, from_bytes=True) # TODO: techdebt fix
@mechanism.setter
def mechanism(self, value: Any) -> None:
self.mechanism_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
@property
def entity(self) -> Any:
return deserialize(self.entity_bin, from_bytes=True) # TODO: techdebt fix
@entity.setter
def entity(self, value: Any) -> None:
self.entity_bin = serialize(value, to_bytes=True) # TODO: techdebt fix
|
|
5d3559fed567f14cf0efa09f7077154eb61ccee0
|
glowing-lines2.py
|
glowing-lines2.py
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well
|
Python
|
mit
|
redpig2/pilhacks
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
<commit_before><commit_msg>Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well<commit_after>
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
<commit_before><commit_msg>Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well<commit_after>from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
|
8ca7478ceb377374dd824a9e112b9a943b8d6e0c
|
firmware/test_serial.py
|
firmware/test_serial.py
|
from tamproxy import Sketch, Timer
class SerialConnect(Sketch):
def setup(self):
self.timer = Timer()
def loop(self):
if (self.timer.millis() > 5000):
self.stop()
if __name__ == "__main__":
sketch = SerialConnect()
sketch.run()
|
Test whether or not the tablet is connected to the teensy.
|
Test whether or not the tablet is connected to the teensy.
|
Python
|
mit
|
pravinas/et-maslab-2016
|
Test whether or not the tablet is connected to the teensy.
|
from tamproxy import Sketch, Timer
class SerialConnect(Sketch):
def setup(self):
self.timer = Timer()
def loop(self):
if (self.timer.millis() > 5000):
self.stop()
if __name__ == "__main__":
sketch = SerialConnect()
sketch.run()
|
<commit_before><commit_msg>Test whether or not the tablet is connected to the teensy.<commit_after>
|
from tamproxy import Sketch, Timer
class SerialConnect(Sketch):
def setup(self):
self.timer = Timer()
def loop(self):
if (self.timer.millis() > 5000):
self.stop()
if __name__ == "__main__":
sketch = SerialConnect()
sketch.run()
|
Test whether or not the tablet is connected to the teensy.
from tamproxy import Sketch, Timer
class SerialConnect(Sketch):
def setup(self):
self.timer = Timer()
def loop(self):
if (self.timer.millis() > 5000):
self.stop()
if __name__ == "__main__":
sketch = SerialConnect()
sketch.run()
|
<commit_before><commit_msg>Test whether or not the tablet is connected to the teensy.<commit_after>from tamproxy import Sketch, Timer
class SerialConnect(Sketch):
def setup(self):
self.timer = Timer()
def loop(self):
if (self.timer.millis() > 5000):
self.stop()
if __name__ == "__main__":
sketch = SerialConnect()
sketch.run()
|
|
e85190dc3d17f24678d04563dc0d402bb6b330a6
|
conman/routes/migrations/0002_remove_slug_parent.py
|
conman/routes/migrations/0002_remove_slug_parent.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, unique=True),
),
migrations.AlterUniqueTogether(
name='route',
unique_together=set([]),
),
migrations.RemoveField(
model_name='route',
name='parent',
),
migrations.RemoveField(
model_name='route',
name='slug',
),
]
|
Add migration for slug and parent removal
|
Add migration for slug and parent removal
|
Python
|
bsd-2-clause
|
meshy/django-conman,meshy/django-conman,Ian-Foote/django-conman
|
Add migration for slug and parent removal
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, unique=True),
),
migrations.AlterUniqueTogether(
name='route',
unique_together=set([]),
),
migrations.RemoveField(
model_name='route',
name='parent',
),
migrations.RemoveField(
model_name='route',
name='slug',
),
]
|
<commit_before><commit_msg>Add migration for slug and parent removal<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, unique=True),
),
migrations.AlterUniqueTogether(
name='route',
unique_together=set([]),
),
migrations.RemoveField(
model_name='route',
name='parent',
),
migrations.RemoveField(
model_name='route',
name='slug',
),
]
|
Add migration for slug and parent removal
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, unique=True),
),
migrations.AlterUniqueTogether(
name='route',
unique_together=set([]),
),
migrations.RemoveField(
model_name='route',
name='parent',
),
migrations.RemoveField(
model_name='route',
name='slug',
),
]
|
<commit_before><commit_msg>Add migration for slug and parent removal<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('routes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='route',
name='url',
field=models.TextField(db_index=True, unique=True),
),
migrations.AlterUniqueTogether(
name='route',
unique_together=set([]),
),
migrations.RemoveField(
model_name='route',
name='parent',
),
migrations.RemoveField(
model_name='route',
name='slug',
),
]
|
|
2a939a567990b9fe8efce1430c5686fb008ffb4c
|
cineapp/slack.py
|
cineapp/slack.py
|
from slackclient import SlackClient
import json
class SlackChannel:
"""Class describing a slack channel on which one we can send some notifications """
def __init__(self,slack_token,channel_name):
self.channel_id = None
self.channel_name = None
self.slack_token = None
# This is here we are going to send the Slack notification
self.slack_token = SlackClient(slack_token)
# Let's try to find a match with the channel name
response=self.slack_token.api_call("channels.list")
if response["ok"] == True:
for cur_channel in response["channels"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
# If there is no matching with the channels list, let's try with the private groups
if self.channel_id == None:
response=self.slack_token.api_call("groups.list")
for cur_channel in response["groups"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
def send_message(self,message,attachment=None):
""" Function that sends a message using SLACK API"""
# Send the message
response=self.slack_token.api_call(
"chat.postMessage",
channel=self.channel_id,
text=message,
attachments=attachment,
link_names=1,
unfurl_links=True
)
# Return the result
if response["ok"] == False:
raise SystemError("Slack API Error : %s" % response["error"])
def slack_mark_notification(mark,app):
# Create a Slack object
if app.config.has_key("SLACK_TOKEN") and app.config["SLACK_NOTIFICATION_CHANNEL"]:
slack_channel = SlackChannel(app.config["SLACK_TOKEN"],app.config["SLACK_NOTIFICATION_CHANNEL"])
app.logger.info("Notification sur SLACK pour la note de %s sur le film %s" % (mark.user.nickname,mark.movie.name))
try:
attachment = json.dumps([
{
"text": mark.comment
}
])
# We encode as str in order to avoid SLACK Api Parsing when ufurling the URL
slack_channel.send_message(message="<" + mark.movie.url.encode("utf-8") + "?language=fr|" + mark.movie.name.encode("utf-8") + ">")
slack_channel.send_message(message="Note de @%s: *%s*" % (mark.user.nickname, str(mark.mark)) ,attachment=attachment)
return 0
except Exception as e:
app.logger.error("Impossible d'envoyer l'URL du film sur SLACK: %s",str(e))
return 1
else:
return -1
|
Add communication layer with Slack API
|
Add communication layer with Slack API
- New class SlackChannel : Allows to send message on a specific channel using
Slack Web API
- New method : Wraps SlackChannel objets and generates a Cineapp notification
which sends a notification containing the movie, the mark and the comment by
the logged user.
Related to #100
|
Python
|
mit
|
ptitoliv/cineapp,ptitoliv/cineapp,ptitoliv/cineapp
|
Add communication layer with Slack API
- New class SlackChannel : Allows to send message on a specific channel using
Slack Web API
- New method : Wraps SlackChannel objets and generates a Cineapp notification
which sends a notification containing the movie, the mark and the comment by
the logged user.
Related to #100
|
from slackclient import SlackClient
import json
class SlackChannel:
"""Class describing a slack channel on which one we can send some notifications """
def __init__(self,slack_token,channel_name):
self.channel_id = None
self.channel_name = None
self.slack_token = None
# This is here we are going to send the Slack notification
self.slack_token = SlackClient(slack_token)
# Let's try to find a match with the channel name
response=self.slack_token.api_call("channels.list")
if response["ok"] == True:
for cur_channel in response["channels"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
# If there is no matching with the channels list, let's try with the private groups
if self.channel_id == None:
response=self.slack_token.api_call("groups.list")
for cur_channel in response["groups"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
def send_message(self,message,attachment=None):
""" Function that sends a message using SLACK API"""
# Send the message
response=self.slack_token.api_call(
"chat.postMessage",
channel=self.channel_id,
text=message,
attachments=attachment,
link_names=1,
unfurl_links=True
)
# Return the result
if response["ok"] == False:
raise SystemError("Slack API Error : %s" % response["error"])
def slack_mark_notification(mark,app):
# Create a Slack object
if app.config.has_key("SLACK_TOKEN") and app.config["SLACK_NOTIFICATION_CHANNEL"]:
slack_channel = SlackChannel(app.config["SLACK_TOKEN"],app.config["SLACK_NOTIFICATION_CHANNEL"])
app.logger.info("Notification sur SLACK pour la note de %s sur le film %s" % (mark.user.nickname,mark.movie.name))
try:
attachment = json.dumps([
{
"text": mark.comment
}
])
# We encode as str in order to avoid SLACK Api Parsing when ufurling the URL
slack_channel.send_message(message="<" + mark.movie.url.encode("utf-8") + "?language=fr|" + mark.movie.name.encode("utf-8") + ">")
slack_channel.send_message(message="Note de @%s: *%s*" % (mark.user.nickname, str(mark.mark)) ,attachment=attachment)
return 0
except Exception as e:
app.logger.error("Impossible d'envoyer l'URL du film sur SLACK: %s",str(e))
return 1
else:
return -1
|
<commit_before><commit_msg>Add communication layer with Slack API
- New class SlackChannel : Allows to send message on a specific channel using
Slack Web API
- New method : Wraps SlackChannel objets and generates a Cineapp notification
which sends a notification containing the movie, the mark and the comment by
the logged user.
Related to #100<commit_after>
|
from slackclient import SlackClient
import json
class SlackChannel:
"""Class describing a slack channel on which one we can send some notifications """
def __init__(self,slack_token,channel_name):
self.channel_id = None
self.channel_name = None
self.slack_token = None
# This is here we are going to send the Slack notification
self.slack_token = SlackClient(slack_token)
# Let's try to find a match with the channel name
response=self.slack_token.api_call("channels.list")
if response["ok"] == True:
for cur_channel in response["channels"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
# If there is no matching with the channels list, let's try with the private groups
if self.channel_id == None:
response=self.slack_token.api_call("groups.list")
for cur_channel in response["groups"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
def send_message(self,message,attachment=None):
""" Function that sends a message using SLACK API"""
# Send the message
response=self.slack_token.api_call(
"chat.postMessage",
channel=self.channel_id,
text=message,
attachments=attachment,
link_names=1,
unfurl_links=True
)
# Return the result
if response["ok"] == False:
raise SystemError("Slack API Error : %s" % response["error"])
def slack_mark_notification(mark,app):
# Create a Slack object
if app.config.has_key("SLACK_TOKEN") and app.config["SLACK_NOTIFICATION_CHANNEL"]:
slack_channel = SlackChannel(app.config["SLACK_TOKEN"],app.config["SLACK_NOTIFICATION_CHANNEL"])
app.logger.info("Notification sur SLACK pour la note de %s sur le film %s" % (mark.user.nickname,mark.movie.name))
try:
attachment = json.dumps([
{
"text": mark.comment
}
])
# We encode as str in order to avoid SLACK Api Parsing when ufurling the URL
slack_channel.send_message(message="<" + mark.movie.url.encode("utf-8") + "?language=fr|" + mark.movie.name.encode("utf-8") + ">")
slack_channel.send_message(message="Note de @%s: *%s*" % (mark.user.nickname, str(mark.mark)) ,attachment=attachment)
return 0
except Exception as e:
app.logger.error("Impossible d'envoyer l'URL du film sur SLACK: %s",str(e))
return 1
else:
return -1
|
Add communication layer with Slack API
- New class SlackChannel : Allows to send message on a specific channel using
Slack Web API
- New method : Wraps SlackChannel objets and generates a Cineapp notification
which sends a notification containing the movie, the mark and the comment by
the logged user.
Related to #100
from slackclient import SlackClient
import json
class SlackChannel:
"""Class describing a slack channel on which one we can send some notifications """
def __init__(self,slack_token,channel_name):
self.channel_id = None
self.channel_name = None
self.slack_token = None
# This is here we are going to send the Slack notification
self.slack_token = SlackClient(slack_token)
# Let's try to find a match with the channel name
response=self.slack_token.api_call("channels.list")
if response["ok"] == True:
for cur_channel in response["channels"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
# If there is no matching with the channels list, let's try with the private groups
if self.channel_id == None:
response=self.slack_token.api_call("groups.list")
for cur_channel in response["groups"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
def send_message(self,message,attachment=None):
""" Function that sends a message using SLACK API"""
# Send the message
response=self.slack_token.api_call(
"chat.postMessage",
channel=self.channel_id,
text=message,
attachments=attachment,
link_names=1,
unfurl_links=True
)
# Return the result
if response["ok"] == False:
raise SystemError("Slack API Error : %s" % response["error"])
def slack_mark_notification(mark,app):
# Create a Slack object
if app.config.has_key("SLACK_TOKEN") and app.config["SLACK_NOTIFICATION_CHANNEL"]:
slack_channel = SlackChannel(app.config["SLACK_TOKEN"],app.config["SLACK_NOTIFICATION_CHANNEL"])
app.logger.info("Notification sur SLACK pour la note de %s sur le film %s" % (mark.user.nickname,mark.movie.name))
try:
attachment = json.dumps([
{
"text": mark.comment
}
])
# We encode as str in order to avoid SLACK Api Parsing when ufurling the URL
slack_channel.send_message(message="<" + mark.movie.url.encode("utf-8") + "?language=fr|" + mark.movie.name.encode("utf-8") + ">")
slack_channel.send_message(message="Note de @%s: *%s*" % (mark.user.nickname, str(mark.mark)) ,attachment=attachment)
return 0
except Exception as e:
app.logger.error("Impossible d'envoyer l'URL du film sur SLACK: %s",str(e))
return 1
else:
return -1
|
<commit_before><commit_msg>Add communication layer with Slack API
- New class SlackChannel : Allows to send message on a specific channel using
Slack Web API
- New method : Wraps SlackChannel objets and generates a Cineapp notification
which sends a notification containing the movie, the mark and the comment by
the logged user.
Related to #100<commit_after>from slackclient import SlackClient
import json
class SlackChannel:
"""Class describing a slack channel on which one we can send some notifications """
def __init__(self,slack_token,channel_name):
self.channel_id = None
self.channel_name = None
self.slack_token = None
# This is here we are going to send the Slack notification
self.slack_token = SlackClient(slack_token)
# Let's try to find a match with the channel name
response=self.slack_token.api_call("channels.list")
if response["ok"] == True:
for cur_channel in response["channels"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
# If there is no matching with the channels list, let's try with the private groups
if self.channel_id == None:
response=self.slack_token.api_call("groups.list")
for cur_channel in response["groups"]:
if cur_channel["name"] == channel_name:
self.channel_id = cur_channel["name"]
self.channel_name = cur_channel["id"]
break;
def send_message(self,message,attachment=None):
""" Function that sends a message using SLACK API"""
# Send the message
response=self.slack_token.api_call(
"chat.postMessage",
channel=self.channel_id,
text=message,
attachments=attachment,
link_names=1,
unfurl_links=True
)
# Return the result
if response["ok"] == False:
raise SystemError("Slack API Error : %s" % response["error"])
def slack_mark_notification(mark,app):
# Create a Slack object
if app.config.has_key("SLACK_TOKEN") and app.config["SLACK_NOTIFICATION_CHANNEL"]:
slack_channel = SlackChannel(app.config["SLACK_TOKEN"],app.config["SLACK_NOTIFICATION_CHANNEL"])
app.logger.info("Notification sur SLACK pour la note de %s sur le film %s" % (mark.user.nickname,mark.movie.name))
try:
attachment = json.dumps([
{
"text": mark.comment
}
])
# We encode as str in order to avoid SLACK Api Parsing when ufurling the URL
slack_channel.send_message(message="<" + mark.movie.url.encode("utf-8") + "?language=fr|" + mark.movie.name.encode("utf-8") + ">")
slack_channel.send_message(message="Note de @%s: *%s*" % (mark.user.nickname, str(mark.mark)) ,attachment=attachment)
return 0
except Exception as e:
app.logger.error("Impossible d'envoyer l'URL du film sur SLACK: %s",str(e))
return 1
else:
return -1
|
|
f7c2d3891164ad365061a0df3c89bdced6a95b24
|
flexget/plugins/search_btn.py
|
flexget/plugins/search_btn.py
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import validator
from flexget.entry import Entry
from flexget.plugin import register_plugin
from flexget.utils import requests, json
from flexget.utils.search import torrent_availability
session = requests.Session()
log = logging.getLogger('search_btn')
# TODO: btn has a limit of 150 searches per hour
class SearchBTN(object):
def validator(self):
return validator.factory('text')
def search(self, entry, config):
api_key = config
searches = entry.get('search_strings', [entry['title']])
if 'series_name' in entry:
search = {'series': entry['series_name']}
if 'series_id' in entry:
search['name'] = entry['series_id']
searches = [search]
results = []
for search in searches:
data = json.dumps({'method': 'getTorrents', 'params': [api_key, search], 'id': 1})
try:
r = session.post('http://api.btnapps.net/', data=data, headers={'Content-type': 'application/json'})
except requests.RequestException as e:
log.error('Error searching btn: %s' % e)
continue
content = r.json()
if content['result']['results']:
for item in content['result']['torrents'].itervalues():
if item['Category'] != 'Episode':
continue
entry = Entry()
entry['title'] = item['ReleaseName']
entry['url'] = item['DownloadURL']
entry['torrent_seeds'] = int(item['Seeders'])
entry['torrent_leeches'] = int(item['Leechers'])
entry['torrent_info_hash'] = item['InfoHash']
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
if item['TvdbID']:
entry['tvdb_id'] = int(item['TvdbID'])
results.append(entry)
return results
register_plugin(SearchBTN, 'btn', groups=['search'], debug=True)
|
Add preliminary btn search plugin
|
Add preliminary btn search plugin
|
Python
|
mit
|
Pretagonist/Flexget,ZefQ/Flexget,asm0dey/Flexget,qk4l/Flexget,Danfocus/Flexget,jawilson/Flexget,tobinjt/Flexget,ibrahimkarahan/Flexget,tarzasai/Flexget,tvcsantos/Flexget,qk4l/Flexget,ZefQ/Flexget,patsissons/Flexget,ianstalk/Flexget,offbyone/Flexget,patsissons/Flexget,antivirtel/Flexget,sean797/Flexget,ratoaq2/Flexget,tvcsantos/Flexget,spencerjanssen/Flexget,tobinjt/Flexget,poulpito/Flexget,malkavi/Flexget,jacobmetrick/Flexget,oxc/Flexget,gazpachoking/Flexget,LynxyssCZ/Flexget,gazpachoking/Flexget,offbyone/Flexget,cvium/Flexget,grrr2/Flexget,ianstalk/Flexget,asm0dey/Flexget,tobinjt/Flexget,dsemi/Flexget,ratoaq2/Flexget,spencerjanssen/Flexget,oxc/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,OmgOhnoes/Flexget,crawln45/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,oxc/Flexget,Pretagonist/Flexget,crawln45/Flexget,Flexget/Flexget,jawilson/Flexget,drwyrm/Flexget,tarzasai/Flexget,xfouloux/Flexget,crawln45/Flexget,thalamus/Flexget,poulpito/Flexget,lildadou/Flexget,Danfocus/Flexget,tsnoam/Flexget,JorisDeRieck/Flexget,antivirtel/Flexget,LynxyssCZ/Flexget,tsnoam/Flexget,malkavi/Flexget,patsissons/Flexget,ibrahimkarahan/Flexget,camon/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,X-dark/Flexget,vfrc2/Flexget,spencerjanssen/Flexget,Flexget/Flexget,voriux/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,sean797/Flexget,thalamus/Flexget,OmgOhnoes/Flexget,qvazzler/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,malkavi/Flexget,v17al/Flexget,lildadou/Flexget,ratoaq2/Flexget,lildadou/Flexget,vfrc2/Flexget,xfouloux/Flexget,offbyone/Flexget,antivirtel/Flexget,qvazzler/Flexget,Danfocus/Flexget,grrr2/Flexget,v17al/Flexget,dsemi/Flexget,camon/Flexget,jacobmetrick/Flexget,X-dark/Flexget,tsnoam/Flexget,tarzasai/Flexget,jacobmetrick/Flexget,X-dark/Flexget,voriux/Flexget,Flexget/Flexget,xfouloux/Flexget,sean797/Flexget,drwyrm/Flexget,qvazzler/Flexget,v17al/Flexget,ibrahimkarahan/Flexget,grrr2/Flexget,poulpito/Flexget,drwyrm/Flexget,Pretagonist/Flexget,Danfocus/Flexget,ZefQ/Flexget,malkavi/Flexget,asm0dey/Flexget,dsemi/Flexget,cvium/Flexget,cvium/Flexget,jawilson/Flexget,vfrc2/Flexget,qk4l/Flexget,JorisDeRieck/Flexget,thalamus/Flexget
|
Add preliminary btn search plugin
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import validator
from flexget.entry import Entry
from flexget.plugin import register_plugin
from flexget.utils import requests, json
from flexget.utils.search import torrent_availability
session = requests.Session()
log = logging.getLogger('search_btn')
# TODO: btn has a limit of 150 searches per hour
class SearchBTN(object):
def validator(self):
return validator.factory('text')
def search(self, entry, config):
api_key = config
searches = entry.get('search_strings', [entry['title']])
if 'series_name' in entry:
search = {'series': entry['series_name']}
if 'series_id' in entry:
search['name'] = entry['series_id']
searches = [search]
results = []
for search in searches:
data = json.dumps({'method': 'getTorrents', 'params': [api_key, search], 'id': 1})
try:
r = session.post('http://api.btnapps.net/', data=data, headers={'Content-type': 'application/json'})
except requests.RequestException as e:
log.error('Error searching btn: %s' % e)
continue
content = r.json()
if content['result']['results']:
for item in content['result']['torrents'].itervalues():
if item['Category'] != 'Episode':
continue
entry = Entry()
entry['title'] = item['ReleaseName']
entry['url'] = item['DownloadURL']
entry['torrent_seeds'] = int(item['Seeders'])
entry['torrent_leeches'] = int(item['Leechers'])
entry['torrent_info_hash'] = item['InfoHash']
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
if item['TvdbID']:
entry['tvdb_id'] = int(item['TvdbID'])
results.append(entry)
return results
register_plugin(SearchBTN, 'btn', groups=['search'], debug=True)
|
<commit_before><commit_msg>Add preliminary btn search plugin<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import validator
from flexget.entry import Entry
from flexget.plugin import register_plugin
from flexget.utils import requests, json
from flexget.utils.search import torrent_availability
session = requests.Session()
log = logging.getLogger('search_btn')
# TODO: btn has a limit of 150 searches per hour
class SearchBTN(object):
def validator(self):
return validator.factory('text')
def search(self, entry, config):
api_key = config
searches = entry.get('search_strings', [entry['title']])
if 'series_name' in entry:
search = {'series': entry['series_name']}
if 'series_id' in entry:
search['name'] = entry['series_id']
searches = [search]
results = []
for search in searches:
data = json.dumps({'method': 'getTorrents', 'params': [api_key, search], 'id': 1})
try:
r = session.post('http://api.btnapps.net/', data=data, headers={'Content-type': 'application/json'})
except requests.RequestException as e:
log.error('Error searching btn: %s' % e)
continue
content = r.json()
if content['result']['results']:
for item in content['result']['torrents'].itervalues():
if item['Category'] != 'Episode':
continue
entry = Entry()
entry['title'] = item['ReleaseName']
entry['url'] = item['DownloadURL']
entry['torrent_seeds'] = int(item['Seeders'])
entry['torrent_leeches'] = int(item['Leechers'])
entry['torrent_info_hash'] = item['InfoHash']
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
if item['TvdbID']:
entry['tvdb_id'] = int(item['TvdbID'])
results.append(entry)
return results
register_plugin(SearchBTN, 'btn', groups=['search'], debug=True)
|
Add preliminary btn search plugin
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import validator
from flexget.entry import Entry
from flexget.plugin import register_plugin
from flexget.utils import requests, json
from flexget.utils.search import torrent_availability
session = requests.Session()
log = logging.getLogger('search_btn')
# TODO: btn has a limit of 150 searches per hour
class SearchBTN(object):
def validator(self):
return validator.factory('text')
def search(self, entry, config):
api_key = config
searches = entry.get('search_strings', [entry['title']])
if 'series_name' in entry:
search = {'series': entry['series_name']}
if 'series_id' in entry:
search['name'] = entry['series_id']
searches = [search]
results = []
for search in searches:
data = json.dumps({'method': 'getTorrents', 'params': [api_key, search], 'id': 1})
try:
r = session.post('http://api.btnapps.net/', data=data, headers={'Content-type': 'application/json'})
except requests.RequestException as e:
log.error('Error searching btn: %s' % e)
continue
content = r.json()
if content['result']['results']:
for item in content['result']['torrents'].itervalues():
if item['Category'] != 'Episode':
continue
entry = Entry()
entry['title'] = item['ReleaseName']
entry['url'] = item['DownloadURL']
entry['torrent_seeds'] = int(item['Seeders'])
entry['torrent_leeches'] = int(item['Leechers'])
entry['torrent_info_hash'] = item['InfoHash']
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
if item['TvdbID']:
entry['tvdb_id'] = int(item['TvdbID'])
results.append(entry)
return results
register_plugin(SearchBTN, 'btn', groups=['search'], debug=True)
|
<commit_before><commit_msg>Add preliminary btn search plugin<commit_after>from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import validator
from flexget.entry import Entry
from flexget.plugin import register_plugin
from flexget.utils import requests, json
from flexget.utils.search import torrent_availability
session = requests.Session()
log = logging.getLogger('search_btn')
# TODO: btn has a limit of 150 searches per hour
class SearchBTN(object):
def validator(self):
return validator.factory('text')
def search(self, entry, config):
api_key = config
searches = entry.get('search_strings', [entry['title']])
if 'series_name' in entry:
search = {'series': entry['series_name']}
if 'series_id' in entry:
search['name'] = entry['series_id']
searches = [search]
results = []
for search in searches:
data = json.dumps({'method': 'getTorrents', 'params': [api_key, search], 'id': 1})
try:
r = session.post('http://api.btnapps.net/', data=data, headers={'Content-type': 'application/json'})
except requests.RequestException as e:
log.error('Error searching btn: %s' % e)
continue
content = r.json()
if content['result']['results']:
for item in content['result']['torrents'].itervalues():
if item['Category'] != 'Episode':
continue
entry = Entry()
entry['title'] = item['ReleaseName']
entry['url'] = item['DownloadURL']
entry['torrent_seeds'] = int(item['Seeders'])
entry['torrent_leeches'] = int(item['Leechers'])
entry['torrent_info_hash'] = item['InfoHash']
entry['search_sort'] = torrent_availability(entry['torrent_seeds'], entry['torrent_leeches'])
if item['TvdbID']:
entry['tvdb_id'] = int(item['TvdbID'])
results.append(entry)
return results
register_plugin(SearchBTN, 'btn', groups=['search'], debug=True)
|
|
3a05589a6b423ed27dd4c3a10fdc0f7cc2cd8387
|
usr/examples/09-Feature-Detection/lines.py
|
usr/examples/09-Feature-Detection/lines.py
|
# Hough Transform Example:
#
# This example demonstrates using the Hough transform to find lines in an image.
import sensor, image, time
kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
-1, +8, -1,\
-1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(200, 255)] # grayscale thresholds
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(30) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
sensor.__write_reg(0xAC, 0xDF)
sensor.__write_reg(0x8F, 0xFF)
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
sensor.set_pixformat(sensor.GRAYSCALE)
img = sensor.snapshot() # Take a picture and return the image.
img.morph(kernel_size, kernel)
img.binary(thresholds)
# Erode pixels with less than 2 neighbors using a 3x3 image kernel
img.erode(1, threshold = 2)
img.draw_rectangle((0, 0, 160, 120), color=0x00)
# Find lines.
lines = img.find_lines(threshold=40)
# Switch back to RGB to draw red lines.
sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565
img = sensor.snapshot()
for l in lines:
img.draw_line(l, color=(0xFF, 0x00, 0x00))
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
img = sensor.snapshot() # Take a picture and return the image.
|
Add Hough Transform example script.
|
Add Hough Transform example script.
|
Python
|
mit
|
kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,kwagyeman/openmv,iabdalkader/openmv,iabdalkader/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv
|
Add Hough Transform example script.
|
# Hough Transform Example:
#
# This example demonstrates using the Hough transform to find lines in an image.
import sensor, image, time
kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
-1, +8, -1,\
-1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(200, 255)] # grayscale thresholds
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(30) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
sensor.__write_reg(0xAC, 0xDF)
sensor.__write_reg(0x8F, 0xFF)
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
sensor.set_pixformat(sensor.GRAYSCALE)
img = sensor.snapshot() # Take a picture and return the image.
img.morph(kernel_size, kernel)
img.binary(thresholds)
# Erode pixels with less than 2 neighbors using a 3x3 image kernel
img.erode(1, threshold = 2)
img.draw_rectangle((0, 0, 160, 120), color=0x00)
# Find lines.
lines = img.find_lines(threshold=40)
# Switch back to RGB to draw red lines.
sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565
img = sensor.snapshot()
for l in lines:
img.draw_line(l, color=(0xFF, 0x00, 0x00))
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
img = sensor.snapshot() # Take a picture and return the image.
|
<commit_before><commit_msg>Add Hough Transform example script.<commit_after>
|
# Hough Transform Example:
#
# This example demonstrates using the Hough transform to find lines in an image.
import sensor, image, time
kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
-1, +8, -1,\
-1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(200, 255)] # grayscale thresholds
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(30) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
sensor.__write_reg(0xAC, 0xDF)
sensor.__write_reg(0x8F, 0xFF)
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
sensor.set_pixformat(sensor.GRAYSCALE)
img = sensor.snapshot() # Take a picture and return the image.
img.morph(kernel_size, kernel)
img.binary(thresholds)
# Erode pixels with less than 2 neighbors using a 3x3 image kernel
img.erode(1, threshold = 2)
img.draw_rectangle((0, 0, 160, 120), color=0x00)
# Find lines.
lines = img.find_lines(threshold=40)
# Switch back to RGB to draw red lines.
sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565
img = sensor.snapshot()
for l in lines:
img.draw_line(l, color=(0xFF, 0x00, 0x00))
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
img = sensor.snapshot() # Take a picture and return the image.
|
Add Hough Transform example script.
# Hough Transform Example:
#
# This example demonstrates using the Hough transform to find lines in an image.
import sensor, image, time
kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
-1, +8, -1,\
-1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(200, 255)] # grayscale thresholds
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(30) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
sensor.__write_reg(0xAC, 0xDF)
sensor.__write_reg(0x8F, 0xFF)
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
sensor.set_pixformat(sensor.GRAYSCALE)
img = sensor.snapshot() # Take a picture and return the image.
img.morph(kernel_size, kernel)
img.binary(thresholds)
# Erode pixels with less than 2 neighbors using a 3x3 image kernel
img.erode(1, threshold = 2)
img.draw_rectangle((0, 0, 160, 120), color=0x00)
# Find lines.
lines = img.find_lines(threshold=40)
# Switch back to RGB to draw red lines.
sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565
img = sensor.snapshot()
for l in lines:
img.draw_line(l, color=(0xFF, 0x00, 0x00))
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
img = sensor.snapshot() # Take a picture and return the image.
|
<commit_before><commit_msg>Add Hough Transform example script.<commit_after># Hough Transform Example:
#
# This example demonstrates using the Hough transform to find lines in an image.
import sensor, image, time
kernel_size = 1 # kernel width = (size*2)+1, kernel height = (size*2)+1
kernel = [-1, -1, -1,\
-1, +8, -1,\
-1, -1, -1]
# This is a high pass filter kernel. see here for more kernels:
# http://www.fmwconcepts.com/imagemagick/digital_image_filtering.pdf
thresholds = [(200, 255)] # grayscale thresholds
sensor.reset() # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE) # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA) # or sensor.QVGA (or others)
sensor.skip_frames(30) # Let new settings take affect.
clock = time.clock() # Tracks FPS.
# On the OV7725 sensor, edge detection can be enhanced
# significantly by setting the sharpness/edge registers.
# Note: This will be implemented as a function later.
if (sensor.get_id() == sensor.OV7725):
sensor.__write_reg(0xAC, 0xDF)
sensor.__write_reg(0x8F, 0xFF)
while(True):
clock.tick() # Track elapsed milliseconds between snapshots().
sensor.set_pixformat(sensor.GRAYSCALE)
img = sensor.snapshot() # Take a picture and return the image.
img.morph(kernel_size, kernel)
img.binary(thresholds)
# Erode pixels with less than 2 neighbors using a 3x3 image kernel
img.erode(1, threshold = 2)
img.draw_rectangle((0, 0, 160, 120), color=0x00)
# Find lines.
lines = img.find_lines(threshold=40)
# Switch back to RGB to draw red lines.
sensor.set_pixformat(sensor.RGB565) # or sensor.RGB565
img = sensor.snapshot()
for l in lines:
img.draw_line(l, color=(0xFF, 0x00, 0x00))
print(clock.fps()) # Note: Your OpenMV Cam runs about half as fast while
img = sensor.snapshot() # Take a picture and return the image.
|
|
8986051aa3dd292af60fe652a639e63092337d4b
|
scripts/start_baxter.py
|
scripts/start_baxter.py
|
#!/usr/bin/python
from baxter_myo.ArmController import ArmController
def main():
s = ArmController()
s.loop()
if __name__ == "__main__":
main()
|
Create starting scripts for the baxter
|
Create starting scripts for the baxter
|
Python
|
mit
|
ipab-rad/myo_baxter_pc,ipab-rad/myo_baxter_pc,ipab-rad/baxter_myo,ipab-rad/baxter_myo
|
Create starting scripts for the baxter
|
#!/usr/bin/python
from baxter_myo.ArmController import ArmController
def main():
s = ArmController()
s.loop()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create starting scripts for the baxter<commit_after>
|
#!/usr/bin/python
from baxter_myo.ArmController import ArmController
def main():
s = ArmController()
s.loop()
if __name__ == "__main__":
main()
|
Create starting scripts for the baxter
#!/usr/bin/python
from baxter_myo.ArmController import ArmController
def main():
s = ArmController()
s.loop()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Create starting scripts for the baxter<commit_after>#!/usr/bin/python
from baxter_myo.ArmController import ArmController
def main():
s = ArmController()
s.loop()
if __name__ == "__main__":
main()
|
|
255f2e95954682f20c9ab2383d769ff1a656cbdd
|
CodeFights/differentSquares.py
|
CodeFights/differentSquares.py
|
#!/usr/local/bin/python
# Code Fights Different D Problem
def differentSquares(matrix):
rows, cols = len(matrix), len(matrix[0])
if rows < 2 or cols < 2:
return 0
s = [[matrix[r + i][c + j] for i in [0, 1] for j in [0, 1]] for r in
range(rows - 1) for c in range(cols - 1)]
return len(set([tuple(x) for x in s]))
def main():
tests = [
[
[[3]],
0
],
[
[[1, 2, 1],
[2, 2, 2],
[2, 2, 2],
[1, 2, 3],
[2, 2, 1]],
6
],
[
[[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9]],
1
],
[
[[3, 4, 5, 6, 7, 8, 9]],
0
],
[
[[3], [4], [5], [6], [7]],
0
],
[
[[2, 5, 3, 4, 3, 1, 3, 2],
[4, 5, 4, 1, 2, 4, 1, 3],
[1, 1, 2, 1, 4, 1, 1, 5],
[1, 3, 4, 2, 3, 4, 2, 4],
[1, 5, 5, 2, 1, 3, 1, 1],
[1, 2, 3, 3, 5, 1, 2, 4],
[3, 1, 4, 4, 4, 1, 5, 5],
[5, 1, 3, 3, 1, 5, 3, 5],
[5, 4, 4, 3, 5, 4, 4, 4]],
54
],
[[[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 9, 9, 9, 2, 3, 9]], 0]
]
for t in tests:
res = differentSquares(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSquares({}) returned {}"
.format(t[0], res))
else:
print("FAILED: differentSquares({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights different squares problem
|
Solve Code Fights different squares problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights different squares problem
|
#!/usr/local/bin/python
# Code Fights Different D Problem
def differentSquares(matrix):
rows, cols = len(matrix), len(matrix[0])
if rows < 2 or cols < 2:
return 0
s = [[matrix[r + i][c + j] for i in [0, 1] for j in [0, 1]] for r in
range(rows - 1) for c in range(cols - 1)]
return len(set([tuple(x) for x in s]))
def main():
tests = [
[
[[3]],
0
],
[
[[1, 2, 1],
[2, 2, 2],
[2, 2, 2],
[1, 2, 3],
[2, 2, 1]],
6
],
[
[[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9]],
1
],
[
[[3, 4, 5, 6, 7, 8, 9]],
0
],
[
[[3], [4], [5], [6], [7]],
0
],
[
[[2, 5, 3, 4, 3, 1, 3, 2],
[4, 5, 4, 1, 2, 4, 1, 3],
[1, 1, 2, 1, 4, 1, 1, 5],
[1, 3, 4, 2, 3, 4, 2, 4],
[1, 5, 5, 2, 1, 3, 1, 1],
[1, 2, 3, 3, 5, 1, 2, 4],
[3, 1, 4, 4, 4, 1, 5, 5],
[5, 1, 3, 3, 1, 5, 3, 5],
[5, 4, 4, 3, 5, 4, 4, 4]],
54
],
[[[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 9, 9, 9, 2, 3, 9]], 0]
]
for t in tests:
res = differentSquares(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSquares({}) returned {}"
.format(t[0], res))
else:
print("FAILED: differentSquares({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights different squares problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Different D Problem
def differentSquares(matrix):
rows, cols = len(matrix), len(matrix[0])
if rows < 2 or cols < 2:
return 0
s = [[matrix[r + i][c + j] for i in [0, 1] for j in [0, 1]] for r in
range(rows - 1) for c in range(cols - 1)]
return len(set([tuple(x) for x in s]))
def main():
tests = [
[
[[3]],
0
],
[
[[1, 2, 1],
[2, 2, 2],
[2, 2, 2],
[1, 2, 3],
[2, 2, 1]],
6
],
[
[[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9]],
1
],
[
[[3, 4, 5, 6, 7, 8, 9]],
0
],
[
[[3], [4], [5], [6], [7]],
0
],
[
[[2, 5, 3, 4, 3, 1, 3, 2],
[4, 5, 4, 1, 2, 4, 1, 3],
[1, 1, 2, 1, 4, 1, 1, 5],
[1, 3, 4, 2, 3, 4, 2, 4],
[1, 5, 5, 2, 1, 3, 1, 1],
[1, 2, 3, 3, 5, 1, 2, 4],
[3, 1, 4, 4, 4, 1, 5, 5],
[5, 1, 3, 3, 1, 5, 3, 5],
[5, 4, 4, 3, 5, 4, 4, 4]],
54
],
[[[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 9, 9, 9, 2, 3, 9]], 0]
]
for t in tests:
res = differentSquares(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSquares({}) returned {}"
.format(t[0], res))
else:
print("FAILED: differentSquares({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights different squares problem
#!/usr/local/bin/python
# Code Fights Different D Problem
def differentSquares(matrix):
rows, cols = len(matrix), len(matrix[0])
if rows < 2 or cols < 2:
return 0
s = [[matrix[r + i][c + j] for i in [0, 1] for j in [0, 1]] for r in
range(rows - 1) for c in range(cols - 1)]
return len(set([tuple(x) for x in s]))
def main():
tests = [
[
[[3]],
0
],
[
[[1, 2, 1],
[2, 2, 2],
[2, 2, 2],
[1, 2, 3],
[2, 2, 1]],
6
],
[
[[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9]],
1
],
[
[[3, 4, 5, 6, 7, 8, 9]],
0
],
[
[[3], [4], [5], [6], [7]],
0
],
[
[[2, 5, 3, 4, 3, 1, 3, 2],
[4, 5, 4, 1, 2, 4, 1, 3],
[1, 1, 2, 1, 4, 1, 1, 5],
[1, 3, 4, 2, 3, 4, 2, 4],
[1, 5, 5, 2, 1, 3, 1, 1],
[1, 2, 3, 3, 5, 1, 2, 4],
[3, 1, 4, 4, 4, 1, 5, 5],
[5, 1, 3, 3, 1, 5, 3, 5],
[5, 4, 4, 3, 5, 4, 4, 4]],
54
],
[[[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 9, 9, 9, 2, 3, 9]], 0]
]
for t in tests:
res = differentSquares(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSquares({}) returned {}"
.format(t[0], res))
else:
print("FAILED: differentSquares({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights different squares problem<commit_after>#!/usr/local/bin/python
# Code Fights Different D Problem
def differentSquares(matrix):
rows, cols = len(matrix), len(matrix[0])
if rows < 2 or cols < 2:
return 0
s = [[matrix[r + i][c + j] for i in [0, 1] for j in [0, 1]] for r in
range(rows - 1) for c in range(cols - 1)]
return len(set([tuple(x) for x in s]))
def main():
tests = [
[
[[3]],
0
],
[
[[1, 2, 1],
[2, 2, 2],
[2, 2, 2],
[1, 2, 3],
[2, 2, 1]],
6
],
[
[[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 9]],
1
],
[
[[3, 4, 5, 6, 7, 8, 9]],
0
],
[
[[3], [4], [5], [6], [7]],
0
],
[
[[2, 5, 3, 4, 3, 1, 3, 2],
[4, 5, 4, 1, 2, 4, 1, 3],
[1, 1, 2, 1, 4, 1, 1, 5],
[1, 3, 4, 2, 3, 4, 2, 4],
[1, 5, 5, 2, 1, 3, 1, 1],
[1, 2, 3, 3, 5, 1, 2, 4],
[3, 1, 4, 4, 4, 1, 5, 5],
[5, 1, 3, 3, 1, 5, 3, 5],
[5, 4, 4, 3, 5, 4, 4, 4]],
54
],
[[[1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 9, 9, 9, 2, 3, 9]], 0]
]
for t in tests:
res = differentSquares(t[0])
ans = t[1]
if ans == res:
print("PASSED: differentSquares({}) returned {}"
.format(t[0], res))
else:
print("FAILED: differentSquares({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
56cccfa2b7a351f38edee805406085caeee65707
|
src/icp/apps/modeling/migrations/0021_clear_old_results.py
|
src/icp/apps/modeling/migrations/0021_clear_old_results.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def clear_old_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
Scenario.objects.all().update(
results='[]',
modification_hash='',
inputmod_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_auto_20161206_1556'),
]
operations = [
migrations.RunPython(clear_old_results)
]
|
Clear old Pollination Mapper results
|
Clear old Pollination Mapper results
Since we upgraded to the 2017 CDL in #410 and 598ed284,
the current stored results in the database are out of date.
By clearing them out, we ensure they will be recalculated
when those scenarios are revisited.
|
Python
|
apache-2.0
|
project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app,project-icp/bee-pollinator-app
|
Clear old Pollination Mapper results
Since we upgraded to the 2017 CDL in #410 and 598ed284,
the current stored results in the database are out of date.
By clearing them out, we ensure they will be recalculated
when those scenarios are revisited.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def clear_old_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
Scenario.objects.all().update(
results='[]',
modification_hash='',
inputmod_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_auto_20161206_1556'),
]
operations = [
migrations.RunPython(clear_old_results)
]
|
<commit_before><commit_msg>Clear old Pollination Mapper results
Since we upgraded to the 2017 CDL in #410 and 598ed284,
the current stored results in the database are out of date.
By clearing them out, we ensure they will be recalculated
when those scenarios are revisited.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def clear_old_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
Scenario.objects.all().update(
results='[]',
modification_hash='',
inputmod_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_auto_20161206_1556'),
]
operations = [
migrations.RunPython(clear_old_results)
]
|
Clear old Pollination Mapper results
Since we upgraded to the 2017 CDL in #410 and 598ed284,
the current stored results in the database are out of date.
By clearing them out, we ensure they will be recalculated
when those scenarios are revisited.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def clear_old_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
Scenario.objects.all().update(
results='[]',
modification_hash='',
inputmod_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_auto_20161206_1556'),
]
operations = [
migrations.RunPython(clear_old_results)
]
|
<commit_before><commit_msg>Clear old Pollination Mapper results
Since we upgraded to the 2017 CDL in #410 and 598ed284,
the current stored results in the database are out of date.
By clearing them out, we ensure they will be recalculated
when those scenarios are revisited.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def clear_old_results(apps, schema_editor):
Scenario = apps.get_model('modeling', 'Scenario')
Scenario.objects.all().update(
results='[]',
modification_hash='',
inputmod_hash='',
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0020_auto_20161206_1556'),
]
operations = [
migrations.RunPython(clear_old_results)
]
|
|
db9c1affcfda4122b3ebc708581900048af59424
|
django_project/realtime/management/commands/rerunlasthazard.py
|
django_project/realtime/management/commands/rerunlasthazard.py
|
# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from realtime.models.ash import Ash
from realtime.models.earthquake import Earthquake
from realtime.models.flood import Flood
__author__ = 'Rizky Maulana Nugraha "lucernae" <lana.pcfre@gmail.com>'
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""Script to check indicator status. Can be executed via cronjob
"""
help = 'Command to re-execute analysis and report generation for ' \
'last hazard'
def handle(self, *args, **options):
root_logger = logging.getLogger('')
root_logger.setLevel(logging.INFO)
# Regenerate Earthquake
try:
LOGGER.info('Regenerate Last EQ')
event = Earthquake.objects.order_by('id').last()
LOGGER.info('Shake ID: {0}'.format(event.shake_id))
LOGGER.info('Source Type: {0}'.format(event.source_type))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Flood
try:
LOGGER.info('Regenerate Last Flood')
event = Flood.objects.order_by('id').last()
LOGGER.info('Flood ID: {0}'.format(event.event_id))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Ash
try:
LOGGER.info('Regenerate Last Ash')
event = Ash.objects.order_by('id').last()
LOGGER.info('Ash ID: {0}'.format(event.event_id_formatted))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
LOGGER.info('Command finished.')
|
Add management command to rerun last analysis for testing
|
Add management command to rerun last analysis for testing
|
Python
|
bsd-2-clause
|
AIFDR/inasafe-django,AIFDR/inasafe-django,AIFDR/inasafe-django,AIFDR/inasafe-django
|
Add management command to rerun last analysis for testing
|
# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from realtime.models.ash import Ash
from realtime.models.earthquake import Earthquake
from realtime.models.flood import Flood
__author__ = 'Rizky Maulana Nugraha "lucernae" <lana.pcfre@gmail.com>'
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""Script to check indicator status. Can be executed via cronjob
"""
help = 'Command to re-execute analysis and report generation for ' \
'last hazard'
def handle(self, *args, **options):
root_logger = logging.getLogger('')
root_logger.setLevel(logging.INFO)
# Regenerate Earthquake
try:
LOGGER.info('Regenerate Last EQ')
event = Earthquake.objects.order_by('id').last()
LOGGER.info('Shake ID: {0}'.format(event.shake_id))
LOGGER.info('Source Type: {0}'.format(event.source_type))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Flood
try:
LOGGER.info('Regenerate Last Flood')
event = Flood.objects.order_by('id').last()
LOGGER.info('Flood ID: {0}'.format(event.event_id))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Ash
try:
LOGGER.info('Regenerate Last Ash')
event = Ash.objects.order_by('id').last()
LOGGER.info('Ash ID: {0}'.format(event.event_id_formatted))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
LOGGER.info('Command finished.')
|
<commit_before><commit_msg>Add management command to rerun last analysis for testing<commit_after>
|
# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from realtime.models.ash import Ash
from realtime.models.earthquake import Earthquake
from realtime.models.flood import Flood
__author__ = 'Rizky Maulana Nugraha "lucernae" <lana.pcfre@gmail.com>'
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""Script to check indicator status. Can be executed via cronjob
"""
help = 'Command to re-execute analysis and report generation for ' \
'last hazard'
def handle(self, *args, **options):
root_logger = logging.getLogger('')
root_logger.setLevel(logging.INFO)
# Regenerate Earthquake
try:
LOGGER.info('Regenerate Last EQ')
event = Earthquake.objects.order_by('id').last()
LOGGER.info('Shake ID: {0}'.format(event.shake_id))
LOGGER.info('Source Type: {0}'.format(event.source_type))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Flood
try:
LOGGER.info('Regenerate Last Flood')
event = Flood.objects.order_by('id').last()
LOGGER.info('Flood ID: {0}'.format(event.event_id))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Ash
try:
LOGGER.info('Regenerate Last Ash')
event = Ash.objects.order_by('id').last()
LOGGER.info('Ash ID: {0}'.format(event.event_id_formatted))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
LOGGER.info('Command finished.')
|
Add management command to rerun last analysis for testing# coding=utf-8
import logging
from django.core.management.base import BaseCommand
from realtime.models.ash import Ash
from realtime.models.earthquake import Earthquake
from realtime.models.flood import Flood
__author__ = 'Rizky Maulana Nugraha "lucernae" <lana.pcfre@gmail.com>'
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""Script to check indicator status. Can be executed via cronjob
"""
help = 'Command to re-execute analysis and report generation for ' \
'last hazard'
def handle(self, *args, **options):
root_logger = logging.getLogger('')
root_logger.setLevel(logging.INFO)
# Regenerate Earthquake
try:
LOGGER.info('Regenerate Last EQ')
event = Earthquake.objects.order_by('id').last()
LOGGER.info('Shake ID: {0}'.format(event.shake_id))
LOGGER.info('Source Type: {0}'.format(event.source_type))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Flood
try:
LOGGER.info('Regenerate Last Flood')
event = Flood.objects.order_by('id').last()
LOGGER.info('Flood ID: {0}'.format(event.event_id))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Ash
try:
LOGGER.info('Regenerate Last Ash')
event = Ash.objects.order_by('id').last()
LOGGER.info('Ash ID: {0}'.format(event.event_id_formatted))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
LOGGER.info('Command finished.')
|
<commit_before><commit_msg>Add management command to rerun last analysis for testing<commit_after># coding=utf-8
import logging
from django.core.management.base import BaseCommand
from realtime.models.ash import Ash
from realtime.models.earthquake import Earthquake
from realtime.models.flood import Flood
__author__ = 'Rizky Maulana Nugraha "lucernae" <lana.pcfre@gmail.com>'
LOGGER = logging.getLogger(__name__)
class Command(BaseCommand):
"""Script to check indicator status. Can be executed via cronjob
"""
help = 'Command to re-execute analysis and report generation for ' \
'last hazard'
def handle(self, *args, **options):
root_logger = logging.getLogger('')
root_logger.setLevel(logging.INFO)
# Regenerate Earthquake
try:
LOGGER.info('Regenerate Last EQ')
event = Earthquake.objects.order_by('id').last()
LOGGER.info('Shake ID: {0}'.format(event.shake_id))
LOGGER.info('Source Type: {0}'.format(event.source_type))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Flood
try:
LOGGER.info('Regenerate Last Flood')
event = Flood.objects.order_by('id').last()
LOGGER.info('Flood ID: {0}'.format(event.event_id))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
# Regenerate Ash
try:
LOGGER.info('Regenerate Last Ash')
event = Ash.objects.order_by('id').last()
LOGGER.info('Ash ID: {0}'.format(event.event_id_formatted))
event.rerun_analysis()
LOGGER.info('Delegated analysis rerun')
except BaseException as e:
LOGGER.exception(e)
LOGGER.info('Command finished.')
|
|
9cb93b712c931754c81ec9c89d61d85bf986c6ed
|
tests/bouncer/app_test.py
|
tests/bouncer/app_test.py
|
from unittest import mock
import pytest
from pyramid.config import Configurator
from bouncer.app import app
def test_the_default_settings(config, pyramid):
app()
pyramid.config.Configurator.assert_called_once_with(
settings={
"chrome_extension_id": "bjfhmglciegochdpefhhlphglcehbmek",
"debug": False,
"elasticsearch_index": "hypothesis",
"hypothesis_authority": "localhost",
"hypothesis_url": "https://hypothes.is",
"via_base_url": "https://via.hypothes.is",
}
)
@pytest.fixture
def config():
config = mock.create_autospec(Configurator, instance=True)
config.registry = mock.Mock(spec_set=["settings"], settings={})
return config
@pytest.fixture(autouse=True)
def os(patch):
os = patch("bouncer.app.os")
os.environ = {}
return os
@pytest.fixture(autouse=True)
def pyramid(config, patch):
pyramid = patch("bouncer.app.pyramid")
pyramid.config.Configurator.return_value = config
return pyramid
|
Add the beginnings of app.py tests
|
Add the beginnings of app.py tests
This doesn't test everything in app.py but it's a start.
|
Python
|
bsd-2-clause
|
hypothesis/bouncer,hypothesis/bouncer,hypothesis/bouncer
|
Add the beginnings of app.py tests
This doesn't test everything in app.py but it's a start.
|
from unittest import mock
import pytest
from pyramid.config import Configurator
from bouncer.app import app
def test_the_default_settings(config, pyramid):
app()
pyramid.config.Configurator.assert_called_once_with(
settings={
"chrome_extension_id": "bjfhmglciegochdpefhhlphglcehbmek",
"debug": False,
"elasticsearch_index": "hypothesis",
"hypothesis_authority": "localhost",
"hypothesis_url": "https://hypothes.is",
"via_base_url": "https://via.hypothes.is",
}
)
@pytest.fixture
def config():
config = mock.create_autospec(Configurator, instance=True)
config.registry = mock.Mock(spec_set=["settings"], settings={})
return config
@pytest.fixture(autouse=True)
def os(patch):
os = patch("bouncer.app.os")
os.environ = {}
return os
@pytest.fixture(autouse=True)
def pyramid(config, patch):
pyramid = patch("bouncer.app.pyramid")
pyramid.config.Configurator.return_value = config
return pyramid
|
<commit_before><commit_msg>Add the beginnings of app.py tests
This doesn't test everything in app.py but it's a start.<commit_after>
|
from unittest import mock
import pytest
from pyramid.config import Configurator
from bouncer.app import app
def test_the_default_settings(config, pyramid):
app()
pyramid.config.Configurator.assert_called_once_with(
settings={
"chrome_extension_id": "bjfhmglciegochdpefhhlphglcehbmek",
"debug": False,
"elasticsearch_index": "hypothesis",
"hypothesis_authority": "localhost",
"hypothesis_url": "https://hypothes.is",
"via_base_url": "https://via.hypothes.is",
}
)
@pytest.fixture
def config():
config = mock.create_autospec(Configurator, instance=True)
config.registry = mock.Mock(spec_set=["settings"], settings={})
return config
@pytest.fixture(autouse=True)
def os(patch):
os = patch("bouncer.app.os")
os.environ = {}
return os
@pytest.fixture(autouse=True)
def pyramid(config, patch):
pyramid = patch("bouncer.app.pyramid")
pyramid.config.Configurator.return_value = config
return pyramid
|
Add the beginnings of app.py tests
This doesn't test everything in app.py but it's a start.from unittest import mock
import pytest
from pyramid.config import Configurator
from bouncer.app import app
def test_the_default_settings(config, pyramid):
app()
pyramid.config.Configurator.assert_called_once_with(
settings={
"chrome_extension_id": "bjfhmglciegochdpefhhlphglcehbmek",
"debug": False,
"elasticsearch_index": "hypothesis",
"hypothesis_authority": "localhost",
"hypothesis_url": "https://hypothes.is",
"via_base_url": "https://via.hypothes.is",
}
)
@pytest.fixture
def config():
config = mock.create_autospec(Configurator, instance=True)
config.registry = mock.Mock(spec_set=["settings"], settings={})
return config
@pytest.fixture(autouse=True)
def os(patch):
os = patch("bouncer.app.os")
os.environ = {}
return os
@pytest.fixture(autouse=True)
def pyramid(config, patch):
pyramid = patch("bouncer.app.pyramid")
pyramid.config.Configurator.return_value = config
return pyramid
|
<commit_before><commit_msg>Add the beginnings of app.py tests
This doesn't test everything in app.py but it's a start.<commit_after>from unittest import mock
import pytest
from pyramid.config import Configurator
from bouncer.app import app
def test_the_default_settings(config, pyramid):
app()
pyramid.config.Configurator.assert_called_once_with(
settings={
"chrome_extension_id": "bjfhmglciegochdpefhhlphglcehbmek",
"debug": False,
"elasticsearch_index": "hypothesis",
"hypothesis_authority": "localhost",
"hypothesis_url": "https://hypothes.is",
"via_base_url": "https://via.hypothes.is",
}
)
@pytest.fixture
def config():
config = mock.create_autospec(Configurator, instance=True)
config.registry = mock.Mock(spec_set=["settings"], settings={})
return config
@pytest.fixture(autouse=True)
def os(patch):
os = patch("bouncer.app.os")
os.environ = {}
return os
@pytest.fixture(autouse=True)
def pyramid(config, patch):
pyramid = patch("bouncer.app.pyramid")
pyramid.config.Configurator.return_value = config
return pyramid
|
|
b490ca24c7f9a3f2f0a645be75b0f70d6fd15471
|
openfisca_france/scripts/parameters/reduce_parameters_paths.py
|
openfisca_france/scripts/parameters/reduce_parameters_paths.py
|
# -*- coding: utf-8 -*-
import os
PARENT_DIRECTORY = os.path.realpath('../..')
PATH_LENGTH_TO_IGNORE = len(PARENT_DIRECTORY)
PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'parameters')
PATH_MAX_LENGTH = 150
CLEANED_PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'new_parameters')
def list_long_paths(absolute_directory_path):
# print absolute_directory_path
long_paths = []
for directory, sub_directories, files in os.walk(absolute_directory_path):
for f in files:
file_path = os.path.join(directory, f)
sub_path_length = len(file_path) - PATH_LENGTH_TO_IGNORE
if(sub_path_length > PATH_MAX_LENGTH):
# print(str(sub_path_length) + " " + os.path.relpath(file_path, PARENT_DIRECTORY))
long_paths.append(file_path)
return long_paths
def reduce_long_paths(long_paths_list):
for file_path in long_paths_list:
print os.path.dirname(file_path)
long_parameters_paths = list_long_paths(PARAMETERS_DIRECTORY)
print str(len(long_parameters_paths)) + " files have more than " + str(PATH_MAX_LENGTH) + \
" characters in their paths starting from this directory: " + PARENT_DIRECTORY
reduce_long_paths(long_parameters_paths)
|
Add script detection of long parameters files' paths
|
Add script detection of long parameters files' paths
|
Python
|
agpl-3.0
|
sgmap/openfisca-france,antoinearnoud/openfisca-france,antoinearnoud/openfisca-france,sgmap/openfisca-france
|
Add script detection of long parameters files' paths
|
# -*- coding: utf-8 -*-
import os
PARENT_DIRECTORY = os.path.realpath('../..')
PATH_LENGTH_TO_IGNORE = len(PARENT_DIRECTORY)
PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'parameters')
PATH_MAX_LENGTH = 150
CLEANED_PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'new_parameters')
def list_long_paths(absolute_directory_path):
# print absolute_directory_path
long_paths = []
for directory, sub_directories, files in os.walk(absolute_directory_path):
for f in files:
file_path = os.path.join(directory, f)
sub_path_length = len(file_path) - PATH_LENGTH_TO_IGNORE
if(sub_path_length > PATH_MAX_LENGTH):
# print(str(sub_path_length) + " " + os.path.relpath(file_path, PARENT_DIRECTORY))
long_paths.append(file_path)
return long_paths
def reduce_long_paths(long_paths_list):
for file_path in long_paths_list:
print os.path.dirname(file_path)
long_parameters_paths = list_long_paths(PARAMETERS_DIRECTORY)
print str(len(long_parameters_paths)) + " files have more than " + str(PATH_MAX_LENGTH) + \
" characters in their paths starting from this directory: " + PARENT_DIRECTORY
reduce_long_paths(long_parameters_paths)
|
<commit_before><commit_msg>Add script detection of long parameters files' paths<commit_after>
|
# -*- coding: utf-8 -*-
import os
PARENT_DIRECTORY = os.path.realpath('../..')
PATH_LENGTH_TO_IGNORE = len(PARENT_DIRECTORY)
PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'parameters')
PATH_MAX_LENGTH = 150
CLEANED_PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'new_parameters')
def list_long_paths(absolute_directory_path):
# print absolute_directory_path
long_paths = []
for directory, sub_directories, files in os.walk(absolute_directory_path):
for f in files:
file_path = os.path.join(directory, f)
sub_path_length = len(file_path) - PATH_LENGTH_TO_IGNORE
if(sub_path_length > PATH_MAX_LENGTH):
# print(str(sub_path_length) + " " + os.path.relpath(file_path, PARENT_DIRECTORY))
long_paths.append(file_path)
return long_paths
def reduce_long_paths(long_paths_list):
for file_path in long_paths_list:
print os.path.dirname(file_path)
long_parameters_paths = list_long_paths(PARAMETERS_DIRECTORY)
print str(len(long_parameters_paths)) + " files have more than " + str(PATH_MAX_LENGTH) + \
" characters in their paths starting from this directory: " + PARENT_DIRECTORY
reduce_long_paths(long_parameters_paths)
|
Add script detection of long parameters files' paths# -*- coding: utf-8 -*-
import os
PARENT_DIRECTORY = os.path.realpath('../..')
PATH_LENGTH_TO_IGNORE = len(PARENT_DIRECTORY)
PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'parameters')
PATH_MAX_LENGTH = 150
CLEANED_PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'new_parameters')
def list_long_paths(absolute_directory_path):
# print absolute_directory_path
long_paths = []
for directory, sub_directories, files in os.walk(absolute_directory_path):
for f in files:
file_path = os.path.join(directory, f)
sub_path_length = len(file_path) - PATH_LENGTH_TO_IGNORE
if(sub_path_length > PATH_MAX_LENGTH):
# print(str(sub_path_length) + " " + os.path.relpath(file_path, PARENT_DIRECTORY))
long_paths.append(file_path)
return long_paths
def reduce_long_paths(long_paths_list):
for file_path in long_paths_list:
print os.path.dirname(file_path)
long_parameters_paths = list_long_paths(PARAMETERS_DIRECTORY)
print str(len(long_parameters_paths)) + " files have more than " + str(PATH_MAX_LENGTH) + \
" characters in their paths starting from this directory: " + PARENT_DIRECTORY
reduce_long_paths(long_parameters_paths)
|
<commit_before><commit_msg>Add script detection of long parameters files' paths<commit_after># -*- coding: utf-8 -*-
import os
PARENT_DIRECTORY = os.path.realpath('../..')
PATH_LENGTH_TO_IGNORE = len(PARENT_DIRECTORY)
PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'parameters')
PATH_MAX_LENGTH = 150
CLEANED_PARAMETERS_DIRECTORY = os.path.join(PARENT_DIRECTORY, 'new_parameters')
def list_long_paths(absolute_directory_path):
# print absolute_directory_path
long_paths = []
for directory, sub_directories, files in os.walk(absolute_directory_path):
for f in files:
file_path = os.path.join(directory, f)
sub_path_length = len(file_path) - PATH_LENGTH_TO_IGNORE
if(sub_path_length > PATH_MAX_LENGTH):
# print(str(sub_path_length) + " " + os.path.relpath(file_path, PARENT_DIRECTORY))
long_paths.append(file_path)
return long_paths
def reduce_long_paths(long_paths_list):
for file_path in long_paths_list:
print os.path.dirname(file_path)
long_parameters_paths = list_long_paths(PARAMETERS_DIRECTORY)
print str(len(long_parameters_paths)) + " files have more than " + str(PATH_MAX_LENGTH) + \
" characters in their paths starting from this directory: " + PARENT_DIRECTORY
reduce_long_paths(long_parameters_paths)
|
|
b382a6568131dfec818a284fbb805df6efc2748d
|
examples/network_wordcount.py
|
examples/network_wordcount.py
|
#!/usr/bin/env python
""" a recreation of spark-streaming's network_wordcount
https://spark.apache.org/docs/2.2.0/streaming-programming-guide.html#a-quick-example
Run this within an interactive session, or with
> python -i network_wordcount.py
so that python won't exit before producing output.
"""
from streamz import Stream
# absolute port on localhost for now
s = Stream.from_tcp(9999)
s.map(bytes.split).flatten().frequencies().sink(print)
print(
"""In another terminal executs
> nc 127.0.0.1 9999
and then start typing content
"""
)
s.start()
|
Add wordcount example from spark
|
Add wordcount example from spark
|
Python
|
bsd-3-clause
|
mrocklin/streams
|
Add wordcount example from spark
|
#!/usr/bin/env python
""" a recreation of spark-streaming's network_wordcount
https://spark.apache.org/docs/2.2.0/streaming-programming-guide.html#a-quick-example
Run this within an interactive session, or with
> python -i network_wordcount.py
so that python won't exit before producing output.
"""
from streamz import Stream
# absolute port on localhost for now
s = Stream.from_tcp(9999)
s.map(bytes.split).flatten().frequencies().sink(print)
print(
"""In another terminal executs
> nc 127.0.0.1 9999
and then start typing content
"""
)
s.start()
|
<commit_before><commit_msg>Add wordcount example from spark<commit_after>
|
#!/usr/bin/env python
""" a recreation of spark-streaming's network_wordcount
https://spark.apache.org/docs/2.2.0/streaming-programming-guide.html#a-quick-example
Run this within an interactive session, or with
> python -i network_wordcount.py
so that python won't exit before producing output.
"""
from streamz import Stream
# absolute port on localhost for now
s = Stream.from_tcp(9999)
s.map(bytes.split).flatten().frequencies().sink(print)
print(
"""In another terminal executs
> nc 127.0.0.1 9999
and then start typing content
"""
)
s.start()
|
Add wordcount example from spark#!/usr/bin/env python
""" a recreation of spark-streaming's network_wordcount
https://spark.apache.org/docs/2.2.0/streaming-programming-guide.html#a-quick-example
Run this within an interactive session, or with
> python -i network_wordcount.py
so that python won't exit before producing output.
"""
from streamz import Stream
# absolute port on localhost for now
s = Stream.from_tcp(9999)
s.map(bytes.split).flatten().frequencies().sink(print)
print(
"""In another terminal executs
> nc 127.0.0.1 9999
and then start typing content
"""
)
s.start()
|
<commit_before><commit_msg>Add wordcount example from spark<commit_after>#!/usr/bin/env python
""" a recreation of spark-streaming's network_wordcount
https://spark.apache.org/docs/2.2.0/streaming-programming-guide.html#a-quick-example
Run this within an interactive session, or with
> python -i network_wordcount.py
so that python won't exit before producing output.
"""
from streamz import Stream
# absolute port on localhost for now
s = Stream.from_tcp(9999)
s.map(bytes.split).flatten().frequencies().sink(print)
print(
"""In another terminal executs
> nc 127.0.0.1 9999
and then start typing content
"""
)
s.start()
|
|
974c52a13df6c9c53ea46140a7496d7c774d277e
|
osf/migrations/0144_add_preprint_contenttype_to_collections.py
|
osf/migrations/0144_add_preprint_contenttype_to_collections.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-08 16:56
from __future__ import unicode_literals
from django.db import migrations
from osf.models import Collection
from django.contrib.contenttypes.models import ContentType
def reverse_func(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.filter(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.remove(preprint_content_type)
def add_preprint_type_to_collections(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.exclude(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.add(preprint_content_type)
class Migration(migrations.Migration):
dependencies = [
('osf', '0143_merge_20181023_1807'),
]
operations = [
migrations.RunPython(add_preprint_type_to_collections, reverse_func)
]
|
Add data migration so preprints can be added to existing collections.
|
Add data migration so preprints can be added to existing collections.
|
Python
|
apache-2.0
|
mfraezz/osf.io,baylee-d/osf.io,cslzchen/osf.io,saradbowman/osf.io,aaxelb/osf.io,saradbowman/osf.io,mattclark/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,felliott/osf.io,cslzchen/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,pattisdr/osf.io,adlius/osf.io,brianjgeiger/osf.io,felliott/osf.io,Johnetordoff/osf.io,felliott/osf.io,mfraezz/osf.io,baylee-d/osf.io,pattisdr/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,mattclark/osf.io,adlius/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,cslzchen/osf.io,mfraezz/osf.io
|
Add data migration so preprints can be added to existing collections.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-08 16:56
from __future__ import unicode_literals
from django.db import migrations
from osf.models import Collection
from django.contrib.contenttypes.models import ContentType
def reverse_func(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.filter(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.remove(preprint_content_type)
def add_preprint_type_to_collections(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.exclude(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.add(preprint_content_type)
class Migration(migrations.Migration):
dependencies = [
('osf', '0143_merge_20181023_1807'),
]
operations = [
migrations.RunPython(add_preprint_type_to_collections, reverse_func)
]
|
<commit_before><commit_msg>Add data migration so preprints can be added to existing collections.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-08 16:56
from __future__ import unicode_literals
from django.db import migrations
from osf.models import Collection
from django.contrib.contenttypes.models import ContentType
def reverse_func(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.filter(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.remove(preprint_content_type)
def add_preprint_type_to_collections(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.exclude(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.add(preprint_content_type)
class Migration(migrations.Migration):
dependencies = [
('osf', '0143_merge_20181023_1807'),
]
operations = [
migrations.RunPython(add_preprint_type_to_collections, reverse_func)
]
|
Add data migration so preprints can be added to existing collections.# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-08 16:56
from __future__ import unicode_literals
from django.db import migrations
from osf.models import Collection
from django.contrib.contenttypes.models import ContentType
def reverse_func(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.filter(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.remove(preprint_content_type)
def add_preprint_type_to_collections(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.exclude(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.add(preprint_content_type)
class Migration(migrations.Migration):
dependencies = [
('osf', '0143_merge_20181023_1807'),
]
operations = [
migrations.RunPython(add_preprint_type_to_collections, reverse_func)
]
|
<commit_before><commit_msg>Add data migration so preprints can be added to existing collections.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-08 16:56
from __future__ import unicode_literals
from django.db import migrations
from osf.models import Collection
from django.contrib.contenttypes.models import ContentType
def reverse_func(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.filter(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.remove(preprint_content_type)
def add_preprint_type_to_collections(state, schema):
preprint_content_type = ContentType.objects.get(app_label='osf', model='preprint')
collections = Collection.objects.exclude(collected_types__in=[preprint_content_type.id])
for collection in collections:
collection.collected_types.add(preprint_content_type)
class Migration(migrations.Migration):
dependencies = [
('osf', '0143_merge_20181023_1807'),
]
operations = [
migrations.RunPython(add_preprint_type_to_collections, reverse_func)
]
|
|
74e1dad9a639233be379574e6abebf02dff323ef
|
tests/test_approxest.py
|
tests/test_approxest.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for approximate estimators."""
import numpy as np
from bayeslite import bayesdb_open
from stochastic import stochastic
@stochastic(max_runs=2, min_passes=1)
def test_mutinf__ci_slow(seed):
with bayesdb_open(':memory:', seed=seed) as bdb:
npr = bdb.np_prng
bdb.sql_execute('create table t(x, y, z)')
D0_XY = npr.multivariate_normal([10,10], [[0,1],[2,0]], size=50)
D1_XY = npr.multivariate_normal([0,0], [[0,-1],[2,0]], size=50)
D_XY = np.concatenate([D0_XY, D1_XY])
D_Z = npr.multivariate_normal([5], [[0.5]], size=100)
D = np.hstack([D_XY, D_Z])
for d in D:
bdb.sql_execute('INSERT INTO t VALUES(?,?,?)', d)
bdb.execute(
'create population p for t(x numerical; y numerical; z numerical)')
bdb.execute('create metamodel m for p with baseline crosscat')
bdb.execute('initialize 10 models for m')
bdb.execute('analyze m for 10 iterations wait (optimized; quiet)')
vars_by_mutinf = bdb.execute('''
estimate * from variables of p
order by probability of (mutual information with x > 0.1) desc
''').fetchall()
vars_by_depprob = bdb.execute('''
estimate * from variables of p
order by dependence probability with x desc
''').fetchall()
assert vars_by_mutinf == [('x',), ('y',), ('z',)]
assert vars_by_depprob == [('x',), ('y',), ('z',)]
|
Add test for approximate estimator of dependence probability by MI.
|
Add test for approximate estimator of dependence probability by MI.
|
Python
|
apache-2.0
|
probcomp/bayeslite,probcomp/bayeslite
|
Add test for approximate estimator of dependence probability by MI.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for approximate estimators."""
import numpy as np
from bayeslite import bayesdb_open
from stochastic import stochastic
@stochastic(max_runs=2, min_passes=1)
def test_mutinf__ci_slow(seed):
with bayesdb_open(':memory:', seed=seed) as bdb:
npr = bdb.np_prng
bdb.sql_execute('create table t(x, y, z)')
D0_XY = npr.multivariate_normal([10,10], [[0,1],[2,0]], size=50)
D1_XY = npr.multivariate_normal([0,0], [[0,-1],[2,0]], size=50)
D_XY = np.concatenate([D0_XY, D1_XY])
D_Z = npr.multivariate_normal([5], [[0.5]], size=100)
D = np.hstack([D_XY, D_Z])
for d in D:
bdb.sql_execute('INSERT INTO t VALUES(?,?,?)', d)
bdb.execute(
'create population p for t(x numerical; y numerical; z numerical)')
bdb.execute('create metamodel m for p with baseline crosscat')
bdb.execute('initialize 10 models for m')
bdb.execute('analyze m for 10 iterations wait (optimized; quiet)')
vars_by_mutinf = bdb.execute('''
estimate * from variables of p
order by probability of (mutual information with x > 0.1) desc
''').fetchall()
vars_by_depprob = bdb.execute('''
estimate * from variables of p
order by dependence probability with x desc
''').fetchall()
assert vars_by_mutinf == [('x',), ('y',), ('z',)]
assert vars_by_depprob == [('x',), ('y',), ('z',)]
|
<commit_before><commit_msg>Add test for approximate estimator of dependence probability by MI.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for approximate estimators."""
import numpy as np
from bayeslite import bayesdb_open
from stochastic import stochastic
@stochastic(max_runs=2, min_passes=1)
def test_mutinf__ci_slow(seed):
with bayesdb_open(':memory:', seed=seed) as bdb:
npr = bdb.np_prng
bdb.sql_execute('create table t(x, y, z)')
D0_XY = npr.multivariate_normal([10,10], [[0,1],[2,0]], size=50)
D1_XY = npr.multivariate_normal([0,0], [[0,-1],[2,0]], size=50)
D_XY = np.concatenate([D0_XY, D1_XY])
D_Z = npr.multivariate_normal([5], [[0.5]], size=100)
D = np.hstack([D_XY, D_Z])
for d in D:
bdb.sql_execute('INSERT INTO t VALUES(?,?,?)', d)
bdb.execute(
'create population p for t(x numerical; y numerical; z numerical)')
bdb.execute('create metamodel m for p with baseline crosscat')
bdb.execute('initialize 10 models for m')
bdb.execute('analyze m for 10 iterations wait (optimized; quiet)')
vars_by_mutinf = bdb.execute('''
estimate * from variables of p
order by probability of (mutual information with x > 0.1) desc
''').fetchall()
vars_by_depprob = bdb.execute('''
estimate * from variables of p
order by dependence probability with x desc
''').fetchall()
assert vars_by_mutinf == [('x',), ('y',), ('z',)]
assert vars_by_depprob == [('x',), ('y',), ('z',)]
|
Add test for approximate estimator of dependence probability by MI.# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for approximate estimators."""
import numpy as np
from bayeslite import bayesdb_open
from stochastic import stochastic
@stochastic(max_runs=2, min_passes=1)
def test_mutinf__ci_slow(seed):
with bayesdb_open(':memory:', seed=seed) as bdb:
npr = bdb.np_prng
bdb.sql_execute('create table t(x, y, z)')
D0_XY = npr.multivariate_normal([10,10], [[0,1],[2,0]], size=50)
D1_XY = npr.multivariate_normal([0,0], [[0,-1],[2,0]], size=50)
D_XY = np.concatenate([D0_XY, D1_XY])
D_Z = npr.multivariate_normal([5], [[0.5]], size=100)
D = np.hstack([D_XY, D_Z])
for d in D:
bdb.sql_execute('INSERT INTO t VALUES(?,?,?)', d)
bdb.execute(
'create population p for t(x numerical; y numerical; z numerical)')
bdb.execute('create metamodel m for p with baseline crosscat')
bdb.execute('initialize 10 models for m')
bdb.execute('analyze m for 10 iterations wait (optimized; quiet)')
vars_by_mutinf = bdb.execute('''
estimate * from variables of p
order by probability of (mutual information with x > 0.1) desc
''').fetchall()
vars_by_depprob = bdb.execute('''
estimate * from variables of p
order by dependence probability with x desc
''').fetchall()
assert vars_by_mutinf == [('x',), ('y',), ('z',)]
assert vars_by_depprob == [('x',), ('y',), ('z',)]
|
<commit_before><commit_msg>Add test for approximate estimator of dependence probability by MI.<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for approximate estimators."""
import numpy as np
from bayeslite import bayesdb_open
from stochastic import stochastic
@stochastic(max_runs=2, min_passes=1)
def test_mutinf__ci_slow(seed):
with bayesdb_open(':memory:', seed=seed) as bdb:
npr = bdb.np_prng
bdb.sql_execute('create table t(x, y, z)')
D0_XY = npr.multivariate_normal([10,10], [[0,1],[2,0]], size=50)
D1_XY = npr.multivariate_normal([0,0], [[0,-1],[2,0]], size=50)
D_XY = np.concatenate([D0_XY, D1_XY])
D_Z = npr.multivariate_normal([5], [[0.5]], size=100)
D = np.hstack([D_XY, D_Z])
for d in D:
bdb.sql_execute('INSERT INTO t VALUES(?,?,?)', d)
bdb.execute(
'create population p for t(x numerical; y numerical; z numerical)')
bdb.execute('create metamodel m for p with baseline crosscat')
bdb.execute('initialize 10 models for m')
bdb.execute('analyze m for 10 iterations wait (optimized; quiet)')
vars_by_mutinf = bdb.execute('''
estimate * from variables of p
order by probability of (mutual information with x > 0.1) desc
''').fetchall()
vars_by_depprob = bdb.execute('''
estimate * from variables of p
order by dependence probability with x desc
''').fetchall()
assert vars_by_mutinf == [('x',), ('y',), ('z',)]
assert vars_by_depprob == [('x',), ('y',), ('z',)]
|
|
a4d3ace2079fd5c6da707525864d865cc52e777c
|
ynr/apps/utils/db.py
|
ynr/apps/utils/db.py
|
from django.db.models import Transform
class LastWord(Transform):
"""
Split a field on space and get the last element
"""
function = "LastWord"
template = """
(regexp_split_to_array(%(field)s, ' '))[
array_upper(regexp_split_to_array(%(field)s, ' '), 1)
]
"""
def __init__(self, column, output_field=None):
super(LastWord, self).__init__(column, output_field=output_field)
def as_postgresql(self, compiler, connection):
return self.as_sql(compiler, connection)
|
Add LastWord DB transformation until
|
Add LastWord DB transformation until
This is useful when used as an annotation, for example:
`qs.annotate(last_name=LastWord('full_name')).order_by('last_name')`
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Add LastWord DB transformation until
This is useful when used as an annotation, for example:
`qs.annotate(last_name=LastWord('full_name')).order_by('last_name')`
|
from django.db.models import Transform
class LastWord(Transform):
"""
Split a field on space and get the last element
"""
function = "LastWord"
template = """
(regexp_split_to_array(%(field)s, ' '))[
array_upper(regexp_split_to_array(%(field)s, ' '), 1)
]
"""
def __init__(self, column, output_field=None):
super(LastWord, self).__init__(column, output_field=output_field)
def as_postgresql(self, compiler, connection):
return self.as_sql(compiler, connection)
|
<commit_before><commit_msg>Add LastWord DB transformation until
This is useful when used as an annotation, for example:
`qs.annotate(last_name=LastWord('full_name')).order_by('last_name')`<commit_after>
|
from django.db.models import Transform
class LastWord(Transform):
"""
Split a field on space and get the last element
"""
function = "LastWord"
template = """
(regexp_split_to_array(%(field)s, ' '))[
array_upper(regexp_split_to_array(%(field)s, ' '), 1)
]
"""
def __init__(self, column, output_field=None):
super(LastWord, self).__init__(column, output_field=output_field)
def as_postgresql(self, compiler, connection):
return self.as_sql(compiler, connection)
|
Add LastWord DB transformation until
This is useful when used as an annotation, for example:
`qs.annotate(last_name=LastWord('full_name')).order_by('last_name')`from django.db.models import Transform
class LastWord(Transform):
"""
Split a field on space and get the last element
"""
function = "LastWord"
template = """
(regexp_split_to_array(%(field)s, ' '))[
array_upper(regexp_split_to_array(%(field)s, ' '), 1)
]
"""
def __init__(self, column, output_field=None):
super(LastWord, self).__init__(column, output_field=output_field)
def as_postgresql(self, compiler, connection):
return self.as_sql(compiler, connection)
|
<commit_before><commit_msg>Add LastWord DB transformation until
This is useful when used as an annotation, for example:
`qs.annotate(last_name=LastWord('full_name')).order_by('last_name')`<commit_after>from django.db.models import Transform
class LastWord(Transform):
"""
Split a field on space and get the last element
"""
function = "LastWord"
template = """
(regexp_split_to_array(%(field)s, ' '))[
array_upper(regexp_split_to_array(%(field)s, ' '), 1)
]
"""
def __init__(self, column, output_field=None):
super(LastWord, self).__init__(column, output_field=output_field)
def as_postgresql(self, compiler, connection):
return self.as_sql(compiler, connection)
|
|
58fff9a7b0abd525f495b44c86521b8240ed3276
|
instruments/bbn.py
|
instruments/bbn.py
|
from .instrument import Instrument, VisaInterface
from types import MethodType
class Attenuator(Instrument):
NUM_CHANNELS = 3
"""BBN 3 Channel Instrument"""
def __init__(self, name, resource_name):
super(Attenuator, self).__init__(name, resource_name, interface_type="VISA")
self.name = name
self.interface._resource.baud_rate = 115200
self.interface._resource.read_termination = u"\r\n"
self.interface._resource.write_termination = u"\n"
#Clear "unknown command" from connect
#TODO: where the heck does this come from
# self.interface.read()
# self.interface.read()
#Override query to look for ``end``
def query(self, query_string):
val = self._resource.query(query_string)
assert self.read() == "END"
return val
self.interface.query = MethodType(query, self.interface, VisaInterface)
def get_attenuation(self, chan):
return float(self.interface.query("GET {:d}".format(chan)))
def set_attenuation(self, chan, val):
self.interface.write("SET {:d} {:.1f}".format(chan, val))
assert self.interface.read() == "Setting channel {:d} to {:.2f}".format(chan, val)
assert self.interface.read() == "END"
|
Add BBN digital attenuator driver
|
Add BBN digital attenuator driver
--CAR and GER
|
Python
|
apache-2.0
|
BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex
|
Add BBN digital attenuator driver
--CAR and GER
|
from .instrument import Instrument, VisaInterface
from types import MethodType
class Attenuator(Instrument):
NUM_CHANNELS = 3
"""BBN 3 Channel Instrument"""
def __init__(self, name, resource_name):
super(Attenuator, self).__init__(name, resource_name, interface_type="VISA")
self.name = name
self.interface._resource.baud_rate = 115200
self.interface._resource.read_termination = u"\r\n"
self.interface._resource.write_termination = u"\n"
#Clear "unknown command" from connect
#TODO: where the heck does this come from
# self.interface.read()
# self.interface.read()
#Override query to look for ``end``
def query(self, query_string):
val = self._resource.query(query_string)
assert self.read() == "END"
return val
self.interface.query = MethodType(query, self.interface, VisaInterface)
def get_attenuation(self, chan):
return float(self.interface.query("GET {:d}".format(chan)))
def set_attenuation(self, chan, val):
self.interface.write("SET {:d} {:.1f}".format(chan, val))
assert self.interface.read() == "Setting channel {:d} to {:.2f}".format(chan, val)
assert self.interface.read() == "END"
|
<commit_before><commit_msg>Add BBN digital attenuator driver
--CAR and GER<commit_after>
|
from .instrument import Instrument, VisaInterface
from types import MethodType
class Attenuator(Instrument):
NUM_CHANNELS = 3
"""BBN 3 Channel Instrument"""
def __init__(self, name, resource_name):
super(Attenuator, self).__init__(name, resource_name, interface_type="VISA")
self.name = name
self.interface._resource.baud_rate = 115200
self.interface._resource.read_termination = u"\r\n"
self.interface._resource.write_termination = u"\n"
#Clear "unknown command" from connect
#TODO: where the heck does this come from
# self.interface.read()
# self.interface.read()
#Override query to look for ``end``
def query(self, query_string):
val = self._resource.query(query_string)
assert self.read() == "END"
return val
self.interface.query = MethodType(query, self.interface, VisaInterface)
def get_attenuation(self, chan):
return float(self.interface.query("GET {:d}".format(chan)))
def set_attenuation(self, chan, val):
self.interface.write("SET {:d} {:.1f}".format(chan, val))
assert self.interface.read() == "Setting channel {:d} to {:.2f}".format(chan, val)
assert self.interface.read() == "END"
|
Add BBN digital attenuator driver
--CAR and GERfrom .instrument import Instrument, VisaInterface
from types import MethodType
class Attenuator(Instrument):
NUM_CHANNELS = 3
"""BBN 3 Channel Instrument"""
def __init__(self, name, resource_name):
super(Attenuator, self).__init__(name, resource_name, interface_type="VISA")
self.name = name
self.interface._resource.baud_rate = 115200
self.interface._resource.read_termination = u"\r\n"
self.interface._resource.write_termination = u"\n"
#Clear "unknown command" from connect
#TODO: where the heck does this come from
# self.interface.read()
# self.interface.read()
#Override query to look for ``end``
def query(self, query_string):
val = self._resource.query(query_string)
assert self.read() == "END"
return val
self.interface.query = MethodType(query, self.interface, VisaInterface)
def get_attenuation(self, chan):
return float(self.interface.query("GET {:d}".format(chan)))
def set_attenuation(self, chan, val):
self.interface.write("SET {:d} {:.1f}".format(chan, val))
assert self.interface.read() == "Setting channel {:d} to {:.2f}".format(chan, val)
assert self.interface.read() == "END"
|
<commit_before><commit_msg>Add BBN digital attenuator driver
--CAR and GER<commit_after>from .instrument import Instrument, VisaInterface
from types import MethodType
class Attenuator(Instrument):
NUM_CHANNELS = 3
"""BBN 3 Channel Instrument"""
def __init__(self, name, resource_name):
super(Attenuator, self).__init__(name, resource_name, interface_type="VISA")
self.name = name
self.interface._resource.baud_rate = 115200
self.interface._resource.read_termination = u"\r\n"
self.interface._resource.write_termination = u"\n"
#Clear "unknown command" from connect
#TODO: where the heck does this come from
# self.interface.read()
# self.interface.read()
#Override query to look for ``end``
def query(self, query_string):
val = self._resource.query(query_string)
assert self.read() == "END"
return val
self.interface.query = MethodType(query, self.interface, VisaInterface)
def get_attenuation(self, chan):
return float(self.interface.query("GET {:d}".format(chan)))
def set_attenuation(self, chan, val):
self.interface.write("SET {:d} {:.1f}".format(chan, val))
assert self.interface.read() == "Setting channel {:d} to {:.2f}".format(chan, val)
assert self.interface.read() == "END"
|
|
727cea8fd531b680625a5dba4928a6658b74545f
|
olympiad/name.py
|
olympiad/name.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Read name from standard input and print the length, the amount of
# uppercase characters, the amount of unique characters and
# the reversed name.
if __name__ == "__main__":
name = input()
print(len(name))
print(len(list(filter(lambda x: x.isupper(), name))))
print(len(set(name)))
print("".join(reversed(name)))
|
Add solution for problem A3
|
Add solution for problem A3
|
Python
|
apache-2.0
|
fabianm/olympiad,fabianm/olympiad,fabianm/olympiad
|
Add solution for problem A3
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Read name from standard input and print the length, the amount of
# uppercase characters, the amount of unique characters and
# the reversed name.
if __name__ == "__main__":
name = input()
print(len(name))
print(len(list(filter(lambda x: x.isupper(), name))))
print(len(set(name)))
print("".join(reversed(name)))
|
<commit_before><commit_msg>Add solution for problem A3<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Read name from standard input and print the length, the amount of
# uppercase characters, the amount of unique characters and
# the reversed name.
if __name__ == "__main__":
name = input()
print(len(name))
print(len(list(filter(lambda x: x.isupper(), name))))
print(len(set(name)))
print("".join(reversed(name)))
|
Add solution for problem A3#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Read name from standard input and print the length, the amount of
# uppercase characters, the amount of unique characters and
# the reversed name.
if __name__ == "__main__":
name = input()
print(len(name))
print(len(list(filter(lambda x: x.isupper(), name))))
print(len(set(name)))
print("".join(reversed(name)))
|
<commit_before><commit_msg>Add solution for problem A3<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Read name from standard input and print the length, the amount of
# uppercase characters, the amount of unique characters and
# the reversed name.
if __name__ == "__main__":
name = input()
print(len(name))
print(len(list(filter(lambda x: x.isupper(), name))))
print(len(set(name)))
print("".join(reversed(name)))
|
|
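A quick, self-contained check of the same logic with a hard-coded sample name (the name and the expected values in the comments are illustrative, not taken from the original problem statement):
name = "Fabian"                                        # stand-in for input()
print(len(name))                                       # 6
print(len(list(filter(lambda x: x.isupper(), name))))  # 1 uppercase letter
print(len(set(name)))                                  # 5 distinct characters
print("".join(reversed(name)))                         # naibaF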
c0f0f685adc66f772921618d37474002634974a7
|
locations/spiders/ey.py
|
locations/spiders/ey.py
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class EySpider(scrapy.Spider):
name = "ey"
allowed_domains = []
start_urls = [
'https://www.ey.com/eydff/services/officeLocations.json',
]
def parse_office(self, office):
properties = {
'name': office["name"],
'ref': office["href"].replace('/locations/', ''),
'addr_full': office["officeAddress"].strip().replace('\r\n', ' '),
'city': office["officeCity"],
'postcode': office["officePostalCode"],
'country': office["officeCountry"],
'phone': office["officePhoneNumber"],
'lat': float(office["officeLatitude"]),
'lon': float(office["officeLongitude"]),
}
return properties
def parse(self, response):
data = json.loads(response.body_as_unicode())
for country in data["countries"]:
for state in country["states"]:
state_abbr = state["stateAbbreviation"]
for city in state["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["state"] = state_abbr
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
for city in country["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
|
Add spider for Ernst & Young
|
Add spider for Ernst & Young
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add spider for Ernst & Young
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class EySpider(scrapy.Spider):
name = "ey"
allowed_domains = []
start_urls = [
'https://www.ey.com/eydff/services/officeLocations.json',
]
def parse_office(self, office):
properties = {
'name': office["name"],
'ref': office["href"].replace('/locations/', ''),
'addr_full': office["officeAddress"].strip().replace('\r\n', ' '),
'city': office["officeCity"],
'postcode': office["officePostalCode"],
'country': office["officeCountry"],
'phone': office["officePhoneNumber"],
'lat': float(office["officeLatitude"]),
'lon': float(office["officeLongitude"]),
}
return properties
def parse(self, response):
data = json.loads(response.body_as_unicode())
for country in data["countries"]:
for state in country["states"]:
state_abbr = state["stateAbbreviation"]
for city in state["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["state"] = state_abbr
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
for city in country["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
|
<commit_before><commit_msg>Add spider for Ernst & Young<commit_after>
|
# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class EySpider(scrapy.Spider):
name = "ey"
allowed_domains = []
start_urls = [
'https://www.ey.com/eydff/services/officeLocations.json',
]
def parse_office(self, office):
properties = {
'name': office["name"],
'ref': office["href"].replace('/locations/', ''),
'addr_full': office["officeAddress"].strip().replace('\r\n', ' '),
'city': office["officeCity"],
'postcode': office["officePostalCode"],
'country': office["officeCountry"],
'phone': office["officePhoneNumber"],
'lat': float(office["officeLatitude"]),
'lon': float(office["officeLongitude"]),
}
return properties
def parse(self, response):
data = json.loads(response.body_as_unicode())
for country in data["countries"]:
for state in country["states"]:
state_abbr = state["stateAbbreviation"]
for city in state["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["state"] = state_abbr
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
for city in country["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
|
Add spider for Ernst & Young# -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class EySpider(scrapy.Spider):
name = "ey"
allowed_domains = []
start_urls = [
'https://www.ey.com/eydff/services/officeLocations.json',
]
def parse_office(self, office):
properties = {
'name': office["name"],
'ref': office["href"].replace('/locations/', ''),
'addr_full': office["officeAddress"].strip().replace('\r\n', ' '),
'city': office["officeCity"],
'postcode': office["officePostalCode"],
'country': office["officeCountry"],
'phone': office["officePhoneNumber"],
'lat': float(office["officeLatitude"]),
'lon': float(office["officeLongitude"]),
}
return properties
def parse(self, response):
data = json.loads(response.body_as_unicode())
for country in data["countries"]:
for state in country["states"]:
state_abbr = state["stateAbbreviation"]
for city in state["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["state"] = state_abbr
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
for city in country["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
|
<commit_before><commit_msg>Add spider for Ernst & Young<commit_after># -*- coding: utf-8 -*-
import json
import re
import scrapy
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
class EySpider(scrapy.Spider):
name = "ey"
allowed_domains = []
start_urls = [
'https://www.ey.com/eydff/services/officeLocations.json',
]
def parse_office(self, office):
properties = {
'name': office["name"],
'ref': office["href"].replace('/locations/', ''),
'addr_full': office["officeAddress"].strip().replace('\r\n', ' '),
'city': office["officeCity"],
'postcode': office["officePostalCode"],
'country': office["officeCountry"],
'phone': office["officePhoneNumber"],
'lat': float(office["officeLatitude"]),
'lon': float(office["officeLongitude"]),
}
return properties
def parse(self, response):
data = json.loads(response.body_as_unicode())
for country in data["countries"]:
for state in country["states"]:
state_abbr = state["stateAbbreviation"]
for city in state["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["state"] = state_abbr
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
for city in country["cities"]:
for office in city["offices"]:
properties = self.parse_office(office)
properties["website"] = response.urljoin(office["href"])
yield GeojsonPointItem(**properties)
|
|
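As a rough sketch of how parse_office() behaves, it can be exercised on a hand-written dict shaped like one entry of officeLocations.json; every value below is invented for illustration, and scrapy plus the EySpider class from the module above are assumed to be importable.
sample_office = {
    "name": "EY Example Office",
    "href": "/locations/example-office",
    "officeAddress": "1 Example Street\r\nSuite 100",
    "officeCity": "Exampleville",
    "officePostalCode": "00000",
    "officeCountry": "US",
    "officePhoneNumber": "+1 555 0100",
    "officeLatitude": "40.0",
    "officeLongitude": "-75.0",
}
properties = EySpider().parse_office(sample_office)
print(properties["ref"], properties["addr_full"], properties["lat"], properties["lon"])
# example-office 1 Example Street Suite 100 40.0 -75.0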
4dcc520d0b1c14980e56d732f25523c2bca795da
|
altair/examples/scatter_with_minimap.py
|
altair/examples/scatter_with_minimap.py
|
"""
Scatter Plot with Minimap
-------------------------
This example shows how to create a miniature version of a plot
such that creating a selection in the miniature version
adjusts the axis limits in another, more detailed view.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
zoom = alt.selection_interval(encodings=["x", "y"])
minimap = (
alt.Chart(source)
.mark_point()
.add_selection(zoom)
.encode(
x="date:T",
y="temp_max:Q",
color=alt.condition(zoom, "weather", alt.value("lightgray")),
)
.properties(
width=200,
height=200,
title="Minimap -- click and drag to zoom in the detail view",
)
)
detail = (
alt.Chart(source)
.mark_point()
.encode(
x=alt.X(
"date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"})
),
y=alt.Y(
"temp_max:Q",
scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}),
),
color="weather",
)
.properties(width=600, height=400, title="Seattle weather -- detail view")
)
detail | minimap
|
Add an example showing how to create a minimap
|
Add an example showing how to create a minimap
Discussed here: https://github.com/altair-viz/altair/issues/2037
|
Python
|
bsd-3-clause
|
altair-viz/altair,jakevdp/altair
|
Add an example showing how to create a minimap
Discussed here: https://github.com/altair-viz/altair/issues/2037
|
"""
Scatter Plot with Minimap
-------------------------
This example shows how to create a miniature version of a plot
such that creating a selection in the miniature version
adjusts the axis limits in another, more detailed view.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
zoom = alt.selection_interval(encodings=["x", "y"])
minimap = (
alt.Chart(source)
.mark_point()
.add_selection(zoom)
.encode(
x="date:T",
y="temp_max:Q",
color=alt.condition(zoom, "weather", alt.value("lightgray")),
)
.properties(
width=200,
height=200,
title="Minimap -- click and drag to zoom in the detail view",
)
)
detail = (
alt.Chart(source)
.mark_point()
.encode(
x=alt.X(
"date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"})
),
y=alt.Y(
"temp_max:Q",
scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}),
),
color="weather",
)
.properties(width=600, height=400, title="Seattle weather -- detail view")
)
detail | minimap
|
<commit_before><commit_msg>Add an example showing how to create a minimap
Discussed here: https://github.com/altair-viz/altair/issues/2037<commit_after>
|
"""
Scatter Plot with Minimap
-------------------------
This example shows how to create a miniature version of a plot
such that creating a selection in the miniature version
adjusts the axis limits in another, more detailed view.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
zoom = alt.selection_interval(encodings=["x", "y"])
minimap = (
alt.Chart(source)
.mark_point()
.add_selection(zoom)
.encode(
x="date:T",
y="temp_max:Q",
color=alt.condition(zoom, "weather", alt.value("lightgray")),
)
.properties(
width=200,
height=200,
title="Minimap -- click and drag to zoom in the detail view",
)
)
detail = (
alt.Chart(source)
.mark_point()
.encode(
x=alt.X(
"date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"})
),
y=alt.Y(
"temp_max:Q",
scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}),
),
color="weather",
)
.properties(width=600, height=400, title="Seattle weather -- detail view")
)
detail | minimap
|
Add an example showing how to create a minimap
Discussed here: https://github.com/altair-viz/altair/issues/2037"""
Scatter Plot with Minimap
-------------------------
This example shows how to create a miniature version of a plot
such that creating a selection in the miniature version
adjusts the axis limits in another, more detailed view.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
zoom = alt.selection_interval(encodings=["x", "y"])
minimap = (
alt.Chart(source)
.mark_point()
.add_selection(zoom)
.encode(
x="date:T",
y="temp_max:Q",
color=alt.condition(zoom, "weather", alt.value("lightgray")),
)
.properties(
width=200,
height=200,
title="Minimap -- click and drag to zoom in the detail view",
)
)
detail = (
alt.Chart(source)
.mark_point()
.encode(
x=alt.X(
"date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"})
),
y=alt.Y(
"temp_max:Q",
scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}),
),
color="weather",
)
.properties(width=600, height=400, title="Seattle weather -- detail view")
)
detail | minimap
|
<commit_before><commit_msg>Add an example showing how to create a minimap
Discussed here: https://github.com/altair-viz/altair/issues/2037<commit_after>"""
Scatter Plot with Minimap
-------------------------
This example shows how to create a miniature version of a plot
such that creating a selection in the miniature version
adjusts the axis limits in another, more detailed view.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
zoom = alt.selection_interval(encodings=["x", "y"])
minimap = (
alt.Chart(source)
.mark_point()
.add_selection(zoom)
.encode(
x="date:T",
y="temp_max:Q",
color=alt.condition(zoom, "weather", alt.value("lightgray")),
)
.properties(
width=200,
height=200,
title="Minimap -- click and drag to zoom in the detail view",
)
)
detail = (
alt.Chart(source)
.mark_point()
.encode(
x=alt.X(
"date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"})
),
y=alt.Y(
"temp_max:Q",
scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}),
),
color="weather",
)
.properties(width=600, height=400, title="Seattle weather -- detail view")
)
detail | minimap
|
|
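To look at the result outside a notebook, the concatenated chart can be written to a standalone HTML file (this assumes a recent Altair release where Chart.save() with an .html target is available):
chart = detail | minimap
chart.save("scatter_with_minimap.html")  # open the file in a browser to interact with the linked views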
d4ebf0d1792be8b74a5c51d4275c7ee6fbc3d7d2
|
pvs_si.py
|
pvs_si.py
|
#!/usr/bin/env python3
import sirius
record_names = sirius.si.record_names.get_record_names()
pvs_list = list(record_names.keys())
pvs_string = ' '.join(pvs_list)
print(pvs_string)
|
Add python script that returns string with PVs
|
Add python script that returns string with PVs
|
Python
|
mit
|
lnls-fac/scripts,lnls-fac/scripts
|
Add python script that returns string with PVs
|
#!/usr/bin/env python3
import sirius
record_names = sirius.si.record_names.get_record_names()
pvs_list = list(record_names.keys())
pvs_string = ' '.join(pvs_list)
print(pvs_string)
|
<commit_before><commit_msg>Add python script that returns string with PVs<commit_after>
|
#!/usr/bin/env python3
import sirius
record_names = sirius.si.record_names.get_record_names()
pvs_list = list(record_names.keys())
pvs_string = ' '.join(pvs_list)
print(pvs_string)
|
Add python script that returns string with PVs#!/usr/bin/env python3
import sirius
record_names = sirius.si.record_names.get_record_names()
pvs_list = list(record_names.keys())
pvs_string = ' '.join(pvs_list)
print(pvs_string)
|
<commit_before><commit_msg>Add python script that returns string with PVs<commit_after>#!/usr/bin/env python3
import sirius
record_names = sirius.si.record_names.get_record_names()
pvs_list = list(record_names.keys())
pvs_string = ' '.join(pvs_list)
print(pvs_string)
|
|
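A small variation on the same idea, assuming the identical sirius API: write one PV name per line to a file instead of printing a single space-separated string.
#!/usr/bin/env python3
import sirius

record_names = sirius.si.record_names.get_record_names()
with open('pvs_si.txt', 'w') as f:
    f.write('\n'.join(sorted(record_names.keys())))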
44d820daf3a56284f3204f4e1d586ddd5938a29f
|
bin/physicsforum_scraper.py
|
bin/physicsforum_scraper.py
|
#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Physicsforum forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.physicsforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
Write scraper script for Physicsforum forum
|
Write scraper script for Physicsforum forum
|
Python
|
mit
|
kemskems/otdet
|
Write scraper script for Physicsforum forum
|
#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Physicsforum forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.physicsforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
<commit_before><commit_msg>Write scraper script for Physicsforum forum<commit_after>
|
#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Physicsforum forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.physicsforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
Write scraper script for Physicsforum forum#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Physicsforum forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.physicsforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
<commit_before><commit_msg>Write scraper script for Physicsforum forum<commit_after>#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
return isinstance(tag['class'], list) and 'messageText' in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Physicsforum forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'https://www.physicsforums.com/threads/{}/page-{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
|
|
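The post_filter predicate can be checked in isolation on a tiny inline snippet; the markup below is a simplified stand-in for a real thread page and assumes post_filter from the script above is in scope.
from bs4 import BeautifulSoup

html = """
<blockquote class="messageText">First post body.</blockquote>
<blockquote class="quote">Quoted text that should be skipped.</blockquote>
"""
soup = BeautifulSoup(html, 'html.parser')
print(len(soup.find_all(post_filter)))  # 1 -- only the messageText blockquote matches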
c571601a2970c9c06b8274f5ad6d1b9b9405569c
|
confluent_server/confluent/forwarder.py
|
confluent_server/confluent/forwarder.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This handles port forwarding for web interfaces on management devices
#It will also hijack port 3900 and do best effort..
import eventlet
import eventlet.green.select as select
import eventlet.green.socket as socket
forwarders = {}
sockhandler = {}
vidtarget = None
vidforwarder = None
def handle_connection(incoming, outgoing):
while True:
r, _, _ = select.select((incoming, outgoing), (), (), 60)
for mysock in r:
data = mysock.recv(32768)
if not data:
return
if mysock == incoming:
outgoing.sendall(data)
elif mysock == outgoing:
incoming.sendall(data)
def forward_port(sock, target):
while True:
conn, _ = sock.accept()
try:
client = socket.create_connection((target, 443))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, client)
def forward_video():
sock = eventlet.listen(('localhost', 3900, 0, 0), family=socket.AF_INET6)
while True:
conn, _ = sock.accept()
if vidtarget is None:
conn.close()
continue
try:
vidclient = socket.create_connection((vidtarget, 3900))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, vidclient)
def get_port(addr):
global vidtarget
global vidforwarder
if addr not in forwarders:
newsock = eventlet.listen(('localhost', 0, 0, 0),
family=socket.AF_INET6)
forwarders[addr] = newsock
sockhandler[newsock] = eventlet.spawn(forward_port, newsock, addr)
if not vidforwarder:
vidforwarder = eventlet.spawn(forward_video)
vidtarget = addr
return forwarders[addr].getsockname()[1]
|
Add utility library to do port forwarding
|
Add utility library to do port forwarding
This will be useful for creating forwarding for
users.
|
Python
|
apache-2.0
|
jjohnson42/confluent,xcat2/confluent,xcat2/confluent,jjohnson42/confluent,whowutwut/confluent,whowutwut/confluent,whowutwut/confluent,whowutwut/confluent,xcat2/confluent,jjohnson42/confluent,xcat2/confluent,xcat2/confluent,jjohnson42/confluent,jjohnson42/confluent
|
Add utility library to do port forwarding
This will be useful for creating forwarding for
users.
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This handles port forwarding for web interfaces on management devices
#It will also hijack port 3900 and do best effort..
import eventlet
import eventlet.green.select as select
import eventlet.green.socket as socket
forwarders = {}
sockhandler = {}
vidtarget = None
vidforwarder = None
def handle_connection(incoming, outgoing):
while True:
r, _, _ = select.select((incoming, outgoing), (), (), 60)
for mysock in r:
data = mysock.recv(32768)
if not data:
return
if mysock == incoming:
outgoing.sendall(data)
elif mysock == outgoing:
incoming.sendall(data)
def forward_port(sock, target):
while True:
conn, _ = sock.accept()
try:
client = socket.create_connection((target, 443))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, client)
def forward_video():
sock = eventlet.listen(('localhost', 3900, 0, 0), family=socket.AF_INET6)
while True:
conn, _ = sock.accept()
if vidtarget is None:
conn.close()
continue
try:
vidclient = socket.create_connection((vidtarget, 3900))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, vidclient)
def get_port(addr):
global vidtarget
global vidforwarder
if addr not in forwarders:
newsock = eventlet.listen(('localhost', 0, 0, 0),
family=socket.AF_INET6)
forwarders[addr] = newsock
sockhandler[newsock] = eventlet.spawn(forward_port, newsock, addr)
if not vidforwarder:
vidforwarder = eventlet.spawn(forward_video)
vidtarget = addr
return forwarders[addr].getsockname()[1]
|
<commit_before><commit_msg>Add utility library to do port forwarding
This will be useful for creating forwarding for
users.<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This handles port forwarding for web interfaces on management devices
#It will also hijack port 3900 and do best effort..
import eventlet
import eventlet.green.select as select
import eventlet.green.socket as socket
forwarders = {}
sockhandler = {}
vidtarget = None
vidforwarder = None
def handle_connection(incoming, outgoing):
while True:
r, _, _ = select.select((incoming, outgoing), (), (), 60)
for mysock in r:
data = mysock.recv(32768)
if not data:
return
if mysock == incoming:
outgoing.sendall(data)
elif mysock == outgoing:
incoming.sendall(data)
def forward_port(sock, target):
while True:
conn, _ = sock.accept()
try:
client = socket.create_connection((target, 443))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, client)
def forward_video():
sock = eventlet.listen(('localhost', 3900, 0, 0), family=socket.AF_INET6)
while True:
conn, _ = sock.accept()
if vidtarget is None:
conn.close()
continue
try:
vidclient = socket.create_connection((vidtarget, 3900))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, vidclient)
def get_port(addr):
global vidtarget
global vidforwarder
if addr not in forwarders:
newsock = eventlet.listen(('localhost', 0, 0, 0),
family=socket.AF_INET6)
forwarders[addr] = newsock
sockhandler[newsock] = eventlet.spawn(forward_port, newsock, addr)
if not vidforwarder:
vidforwarder = eventlet.spawn(forward_video)
vidtarget = addr
return forwarders[addr].getsockname()[1]
|
Add utility library to do port forwarding
This will be useful for creating forwarding for
users.# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This handles port forwarding for web interfaces on management devices
#It will also hijack port 3900 and do best effort..
import eventlet
import eventlet.green.select as select
import eventlet.green.socket as socket
forwarders = {}
sockhandler = {}
vidtarget = None
vidforwarder = None
def handle_connection(incoming, outgoing):
while True:
r, _, _ = select.select((incoming, outgoing), (), (), 60)
for mysock in r:
data = mysock.recv(32768)
if not data:
return
if mysock == incoming:
outgoing.sendall(data)
elif mysock == outgoing:
incoming.sendall(data)
def forward_port(sock, target):
while True:
conn, _ = sock.accept()
try:
client = socket.create_connection((target, 443))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, client)
def forward_video():
sock = eventlet.listen(('localhost', 3900, 0, 0), family=socket.AF_INET6)
while True:
conn, _ = sock.accept()
if vidtarget is None:
conn.close()
continue
try:
vidclient = socket.create_connection((vidtarget, 3900))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, vidclient)
def get_port(addr):
global vidtarget
global vidforwarder
if addr not in forwarders:
newsock = eventlet.listen(('localhost', 0, 0, 0),
family=socket.AF_INET6)
forwarders[addr] = newsock
sockhandler[newsock] = eventlet.spawn(forward_port, newsock, addr)
if not vidforwarder:
vidforwarder = eventlet.spawn(forward_video)
vidtarget = addr
return forwarders[addr].getsockname()[1]
|
<commit_before><commit_msg>Add utility library to do port forwarding
This will be useful for creating forwarding for
users.<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2017 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#This handles port forwarding for web interfaces on management devices
#It will also hijack port 3900 and do best effort..
import eventlet
import eventlet.green.select as select
import eventlet.green.socket as socket
forwarders = {}
sockhandler = {}
vidtarget = None
vidforwarder = None
def handle_connection(incoming, outgoing):
while True:
r, _, _ = select.select((incoming, outgoing), (), (), 60)
for mysock in r:
data = mysock.recv(32768)
if not data:
return
if mysock == incoming:
outgoing.sendall(data)
elif mysock == outgoing:
incoming.sendall(data)
def forward_port(sock, target):
while True:
conn, _ = sock.accept()
try:
client = socket.create_connection((target, 443))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, client)
def forward_video():
sock = eventlet.listen(('localhost', 3900, 0, 0), family=socket.AF_INET6)
while True:
conn, _ = sock.accept()
if vidtarget is None:
conn.close()
continue
try:
vidclient = socket.create_connection((vidtarget, 3900))
except Exception:
conn.close()
continue
eventlet.spawn_n(handle_connection, conn, vidclient)
def get_port(addr):
global vidtarget
global vidforwarder
if addr not in forwarders:
newsock = eventlet.listen(('localhost', 0, 0, 0),
family=socket.AF_INET6)
forwarders[addr] = newsock
sockhandler[newsock] = eventlet.spawn(forward_port, newsock, addr)
if not vidforwarder:
vidforwarder = eventlet.spawn(forward_video)
vidtarget = addr
return forwarders[addr].getsockname()[1]
|
|
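A hypothetical caller inside the confluent server would only need get_port(); the address below is made up and an eventlet hub is assumed to be running.
import confluent.forwarder as forwarder

local_port = forwarder.get_port('10.240.0.11')       # illustrative BMC address
print('https://localhost:{0}/'.format(local_port))   # hand this URL to the client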
cd78318ccb3f54d01dc936eef82ce4757f5fc706
|
ichnaea/tests/test_util.py
|
ichnaea/tests/test_util.py
|
from unittest import TestCase
from ichnaea.util import _is_true
class TestUtils(TestCase):
def test_is_true(self):
self.assertTrue(_is_true('1'))
self.assertTrue(_is_true('true'))
self.assertTrue(_is_true('True'))
self.assertTrue(_is_true(True))
self.assertFalse(_is_true(False))
self.assertFalse(_is_true('false'))
|
Add a test for the util module.
|
Add a test for the util module.
|
Python
|
apache-2.0
|
mozilla/ichnaea,mozilla/ichnaea,therewillbecode/ichnaea,mozilla/ichnaea,therewillbecode/ichnaea,mozilla/ichnaea,therewillbecode/ichnaea
|
Add a test for the util module.
|
from unittest import TestCase
from ichnaea.util import _is_true
class TestUtils(TestCase):
def test_is_true(self):
self.assertTrue(_is_true('1'))
self.assertTrue(_is_true('true'))
self.assertTrue(_is_true('True'))
self.assertTrue(_is_true(True))
self.assertFalse(_is_true(False))
self.assertFalse(_is_true('false'))
|
<commit_before><commit_msg>Add a test for the util module.<commit_after>
|
from unittest import TestCase
from ichnaea.util import _is_true
class TestUtils(TestCase):
def test_is_true(self):
self.assertTrue(_is_true('1'))
self.assertTrue(_is_true('true'))
self.assertTrue(_is_true('True'))
self.assertTrue(_is_true(True))
self.assertFalse(_is_true(False))
self.assertFalse(_is_true('false'))
|
Add a test for the util module.from unittest import TestCase
from ichnaea.util import _is_true
class TestUtils(TestCase):
def test_is_true(self):
self.assertTrue(_is_true('1'))
self.assertTrue(_is_true('true'))
self.assertTrue(_is_true('True'))
self.assertTrue(_is_true(True))
self.assertFalse(_is_true(False))
self.assertFalse(_is_true('false'))
|
<commit_before><commit_msg>Add a test for the util module.<commit_after>from unittest import TestCase
from ichnaea.util import _is_true
class TestUtils(TestCase):
def test_is_true(self):
self.assertTrue(_is_true('1'))
self.assertTrue(_is_true('true'))
self.assertTrue(_is_true('True'))
self.assertTrue(_is_true(True))
self.assertFalse(_is_true(False))
self.assertFalse(_is_true('false'))
|
|
0385ec6f12124ca6e76d35f0c94b67a4fb9edbdd
|
initial-embryo-segmentation.py
|
initial-embryo-segmentation.py
|
# IPython log file
droso = io.imread('/Users/jni/Dropbox/data1/drosophila-embryo/E_z2_512_1um_CONTROL.tif') / 4096
sys.path.append('/Users/jni/projects/mpl-volume-viewer/')
import slice_view as sv
nuclei = droso[..., 0]
nuclei_smooth = filters.gaussian(nuclei, sigma=[0.6, 3, 3])
centroids = feature.peak_local_max(nuclei_smooth, min_distance=3, exclude_border=False, indices=False)
seeds = ndi.label(centroids)[0]
seeds.max()
seg = morphology.watershed(droso[..., 1], seeds, compactness=0.05)
labels = np.arange(np.max(seg) + 1)
np.random.shuffle(labels)
colors = plt.cm.spectral(labels[seg]/labels[seg].max())
#viewer = sv.SliceViewer(colors, spacing=[5, 1, 1])
|
Add embryo segmentation basic analysis
|
Add embryo segmentation basic analysis
|
Python
|
bsd-3-clause
|
jni/useful-histories
|
Add embryo segmentation basic analysis
|
# IPython log file
droso = io.imread('/Users/jni/Dropbox/data1/drosophila-embryo/E_z2_512_1um_CONTROL.tif') / 4096
sys.path.append('/Users/jni/projects/mpl-volume-viewer/')
import slice_view as sv
nuclei = droso[..., 0]
nuclei_smooth = filters.gaussian(nuclei, sigma=[0.6, 3, 3])
centroids = feature.peak_local_max(nuclei_smooth, min_distance=3, exclude_border=False, indices=False)
seeds = ndi.label(centroids)[0]
seeds.max()
seg = morphology.watershed(droso[..., 1], seeds, compactness=0.05)
labels = np.arange(np.max(seg) + 1)
np.random.shuffle(labels)
colors = plt.cm.spectral(labels[seg]/labels[seg].max())
#viewer = sv.SliceViewer(colors, spacing=[5, 1, 1])
|
<commit_before><commit_msg>Add embryo segmentation basic analysis<commit_after>
|
# IPython log file
droso = io.imread('/Users/jni/Dropbox/data1/drosophila-embryo/E_z2_512_1um_CONTROL.tif') / 4096
sys.path.append('/Users/jni/projects/mpl-volume-viewer/')
import slice_view as sv
nuclei = droso[..., 0]
nuclei_smooth = filters.gaussian(nuclei, sigma=[0.6, 3, 3])
centroids = feature.peak_local_max(nuclei_smooth, min_distance=3, exclude_border=False, indices=False)
seeds = ndi.label(centroids)[0]
seeds.max()
seg = morphology.watershed(droso[..., 1], seeds, compactness=0.05)
labels = np.arange(np.max(seg) + 1)
np.random.shuffle(labels)
colors = plt.cm.spectral(labels[seg]/labels[seg].max())
#viewer = sv.SliceViewer(colors, spacing=[5, 1, 1])
|
Add embryo segmentation basic analysis# IPython log file
droso = io.imread('/Users/jni/Dropbox/data1/drosophila-embryo/E_z2_512_1um_CONTROL.tif') / 4096
sys.path.append('/Users/jni/projects/mpl-volume-viewer/')
import slice_view as sv
nuclei = droso[..., 0]
nuclei_smooth = filters.gaussian(nuclei, sigma=[0.6, 3, 3])
centroids = feature.peak_local_max(nuclei_smooth, min_distance=3, exclude_border=False, indices=False)
seeds = ndi.label(centroids)[0]
seeds.max()
seg = morphology.watershed(droso[..., 1], seeds, compactness=0.05)
labels = np.arange(np.max(seg) + 1)
np.random.shuffle(labels)
colors = plt.cm.spectral(labels[seg]/labels[seg].max())
#viewer = sv.SliceViewer(colors, spacing=[5, 1, 1])
|
<commit_before><commit_msg>Add embryo segmentation basic analysis<commit_after># IPython log file
droso = io.imread('/Users/jni/Dropbox/data1/drosophila-embryo/E_z2_512_1um_CONTROL.tif') / 4096
sys.path.append('/Users/jni/projects/mpl-volume-viewer/')
import slice_view as sv
nuclei = droso[..., 0]
nuclei_smooth = filters.gaussian(nuclei, sigma=[0.6, 3, 3])
centroids = feature.peak_local_max(nuclei_smooth, min_distance=3, exclude_border=False, indices=False)
seeds = ndi.label(centroids)[0]
seeds.max()
seg = morphology.watershed(droso[..., 1], seeds, compactness=0.05)
labels = np.arange(np.max(seg) + 1)
np.random.shuffle(labels)
colors = plt.cm.spectral(labels[seg]/labels[seg].max())
#viewer = sv.SliceViewer(colors, spacing=[5, 1, 1])
|
|
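A quick 2-D sanity check of the segmentation, assuming the nuclei and colors arrays from the session above are still in memory; the slice index is arbitrary.
import matplotlib.pyplot as plt

z = nuclei.shape[0] // 2                 # middle z-slice
fig, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(nuclei[z], cmap='gray')       # raw nuclear channel
ax1.imshow(colors[z])                    # watershed labels, randomly coloured
plt.show()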
1c7a308b77f81f965a69ceead00b096375cca271
|
lava_server/bread_crumbs.py
|
lava_server/bread_crumbs.py
|
# Copyright (C) 2010, 2011 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
"""
Bread crumb management for LAVA server
"""
from django.core.urlresolvers import reverse
import logging
class BreadCrumb(object):
def __init__(self, name, parent=None, needs=None):
self.name = name
self.view = None
self.parent = parent
self.needs = needs or []
def __repr__(self):
return "<BreadCrumb name=%r view=%r parent=%r>" % (
self.name, self.view, self.parent)
def __call__(self, view):
self.view = view
view._bread_crumb = self
return view
def get_name(self, kwargs):
try:
return self.name.format(**kwargs)
except:
logging.exception("Unable to construct breadcrumb name for view %r", self.view)
raise
def get_absolute_url(self, kwargs):
try:
return reverse(self.view, args=[kwargs[name] for name in self.needs])
except:
logging.exception("Unable to construct breadcrumb URL for view %r", self.view)
raise
class LiveBreadCrumb(object):
def __init__(self, bread_crumb, kwargs):
self.bread_crumb = bread_crumb
self.kwargs = kwargs
def get_name(self):
return self.bread_crumb.get_name(self.kwargs)
def get_absolute_url(self):
return self.bread_crumb.get_absolute_url(self.kwargs)
class BreadCrumbTrail(object):
def __init__(self, bread_crumb_list, kwargs):
self.bread_crumb_list = bread_crumb_list
self.kwargs = kwargs
def __iter__(self):
for bread_crumb in self.bread_crumb_list:
yield LiveBreadCrumb(bread_crumb, self.kwargs)
@classmethod
def leading_to(cls, view, **kwargs):
lst = []
while view is not None:
lst.append(view._bread_crumb)
view = view._bread_crumb.parent
lst.reverse()
return cls(lst, kwargs or {})
|
Add bread crumb helpers from lava-dashboard
|
Add bread crumb helpers from lava-dashboard
|
Python
|
agpl-3.0
|
OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,OSSystems/lava-server,OSSystems/lava-server,Linaro/lava-server
|
Add bread crumb helpers from lava-dashboard
|
# Copyright (C) 2010, 2011 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
"""
Bread crumb management for LAVA server
"""
from django.core.urlresolvers import reverse
import logging
class BreadCrumb(object):
def __init__(self, name, parent=None, needs=None):
self.name = name
self.view = None
self.parent = parent
self.needs = needs or []
def __repr__(self):
return "<BreadCrumb name=%r view=%r parent=%r>" % (
self.name, self.view, self.parent)
def __call__(self, view):
self.view = view
view._bread_crumb = self
return view
def get_name(self, kwargs):
try:
return self.name.format(**kwargs)
except:
logging.exception("Unable to construct breadcrumb name for view %r", self.view)
raise
def get_absolute_url(self, kwargs):
try:
return reverse(self.view, args=[kwargs[name] for name in self.needs])
except:
logging.exception("Unable to construct breadcrumb URL for view %r", self.view)
raise
class LiveBreadCrumb(object):
def __init__(self, bread_crumb, kwargs):
self.bread_crumb = bread_crumb
self.kwargs = kwargs
def get_name(self):
return self.bread_crumb.get_name(self.kwargs)
def get_absolute_url(self):
return self.bread_crumb.get_absolute_url(self.kwargs)
class BreadCrumbTrail(object):
def __init__(self, bread_crumb_list, kwargs):
self.bread_crumb_list = bread_crumb_list
self.kwargs = kwargs
def __iter__(self):
for bread_crumb in self.bread_crumb_list:
yield LiveBreadCrumb(bread_crumb, self.kwargs)
@classmethod
def leading_to(cls, view, **kwargs):
lst = []
while view is not None:
lst.append(view._bread_crumb)
view = view._bread_crumb.parent
lst.reverse()
return cls(lst, kwargs or {})
|
<commit_before><commit_msg>Add bread crumb helpers from lava-dashboard<commit_after>
|
# Copyright (C) 2010, 2011 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
"""
Bread crumb management for LAVA server
"""
from django.core.urlresolvers import reverse
import logging
class BreadCrumb(object):
def __init__(self, name, parent=None, needs=None):
self.name = name
self.view = None
self.parent = parent
self.needs = needs or []
def __repr__(self):
return "<BreadCrumb name=%r view=%r parent=%r>" % (
self.name, self.view, self.parent)
def __call__(self, view):
self.view = view
view._bread_crumb = self
return view
def get_name(self, kwargs):
try:
return self.name.format(**kwargs)
except:
logging.exception("Unable to construct breadcrumb name for view %r", self.view)
raise
def get_absolute_url(self, kwargs):
try:
return reverse(self.view, args=[kwargs[name] for name in self.needs])
except:
logging.exception("Unable to construct breadcrumb URL for view %r", self.view)
raise
class LiveBreadCrumb(object):
def __init__(self, bread_crumb, kwargs):
self.bread_crumb = bread_crumb
self.kwargs = kwargs
def get_name(self):
return self.bread_crumb.get_name(self.kwargs)
def get_absolute_url(self):
return self.bread_crumb.get_absolute_url(self.kwargs)
class BreadCrumbTrail(object):
def __init__(self, bread_crumb_list, kwargs):
self.bread_crumb_list = bread_crumb_list
self.kwargs = kwargs
def __iter__(self):
for bread_crumb in self.bread_crumb_list:
yield LiveBreadCrumb(bread_crumb, self.kwargs)
@classmethod
def leading_to(cls, view, **kwargs):
lst = []
while view is not None:
lst.append(view._bread_crumb)
view = view._bread_crumb.parent
lst.reverse()
return cls(lst, kwargs or {})
|
Add bread crumb helpers from lava-dashboard# Copyright (C) 2010, 2011 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
"""
Bread crumb management for LAVA server
"""
from django.core.urlresolvers import reverse
import logging
class BreadCrumb(object):
def __init__(self, name, parent=None, needs=None):
self.name = name
self.view = None
self.parent = parent
self.needs = needs or []
def __repr__(self):
return "<BreadCrumb name=%r view=%r parent=%r>" % (
self.name, self.view, self.parent)
def __call__(self, view):
self.view = view
view._bread_crumb = self
return view
def get_name(self, kwargs):
try:
return self.name.format(**kwargs)
except:
logging.exception("Unable to construct breadcrumb name for view %r", self.view)
raise
def get_absolute_url(self, kwargs):
try:
return reverse(self.view, args=[kwargs[name] for name in self.needs])
except:
logging.exception("Unable to construct breadcrumb URL for view %r", self.view)
raise
class LiveBreadCrumb(object):
def __init__(self, bread_crumb, kwargs):
self.bread_crumb = bread_crumb
self.kwargs = kwargs
def get_name(self):
return self.bread_crumb.get_name(self.kwargs)
def get_absolute_url(self):
return self.bread_crumb.get_absolute_url(self.kwargs)
class BreadCrumbTrail(object):
def __init__(self, bread_crumb_list, kwargs):
self.bread_crumb_list = bread_crumb_list
self.kwargs = kwargs
def __iter__(self):
for bread_crumb in self.bread_crumb_list:
yield LiveBreadCrumb(bread_crumb, self.kwargs)
@classmethod
def leading_to(cls, view, **kwargs):
lst = []
while view is not None:
lst.append(view._bread_crumb)
view = view._bread_crumb.parent
lst.reverse()
return cls(lst, kwargs or {})
|
<commit_before><commit_msg>Add bread crumb helpers from lava-dashboard<commit_after># Copyright (C) 2010, 2011 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
"""
Bread crumb management for LAVA server
"""
from django.core.urlresolvers import reverse
import logging
class BreadCrumb(object):
def __init__(self, name, parent=None, needs=None):
self.name = name
self.view = None
self.parent = parent
self.needs = needs or []
def __repr__(self):
return "<BreadCrumb name=%r view=%r parent=%r>" % (
self.name, self.view, self.parent)
def __call__(self, view):
self.view = view
view._bread_crumb = self
return view
def get_name(self, kwargs):
try:
return self.name.format(**kwargs)
except:
logging.exception("Unable to construct breadcrumb name for view %r", self.view)
raise
def get_absolute_url(self, kwargs):
try:
return reverse(self.view, args=[kwargs[name] for name in self.needs])
except:
logging.exception("Unable to construct breadcrumb URL for view %r", self.view)
raise
class LiveBreadCrumb(object):
def __init__(self, bread_crumb, kwargs):
self.bread_crumb = bread_crumb
self.kwargs = kwargs
def get_name(self):
return self.bread_crumb.get_name(self.kwargs)
def get_absolute_url(self):
return self.bread_crumb.get_absolute_url(self.kwargs)
class BreadCrumbTrail(object):
def __init__(self, bread_crumb_list, kwargs):
self.bread_crumb_list = bread_crumb_list
self.kwargs = kwargs
def __iter__(self):
for bread_crumb in self.bread_crumb_list:
yield LiveBreadCrumb(bread_crumb, self.kwargs)
@classmethod
def leading_to(cls, view, **kwargs):
lst = []
while view is not None:
lst.append(view._bread_crumb)
view = view._bread_crumb.parent
lst.reverse()
return cls(lst, kwargs or {})
|
|
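A hypothetical view module shows the intended usage: decorate the views, point children at their parent view, and build the trail with leading_to(). The view names and the pathname kwarg are illustrative.
from lava_server.bread_crumbs import BreadCrumb, BreadCrumbTrail

@BreadCrumb("Dashboard")
def dashboard(request):
    pass

@BreadCrumb("Bundle {pathname}", parent=dashboard, needs=["pathname"])
def bundle_detail(request, pathname):
    trail = BreadCrumbTrail.leading_to(bundle_detail, pathname=pathname)
    print(" -> ".join(crumb.get_name() for crumb in trail))  # Dashboard -> Bundle <pathname>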
9214c09de7d4f9420e77098d2ace87db3191d480
|
apps/challenge/management/commands/find_challengeset_home.py
|
apps/challenge/management/commands/find_challengeset_home.py
|
from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from challenge.models import Challenge, ChallengeSet
class Command(BaseCommand):
args = ""
help = "Update concept total_question counts (post db import)"
def handle(self, *args, **options):
for challengeset in ChallengeSet.objects.filter(challenge=0):
challenges = Challenge.objects.all()
for concept in challengeset.concepts.all():
challenges = challenges.filter(concepts=concept)
if challenges:
challengeset.challenge = challenges[0]
challengeset.save()
|
Add management function for challengeset migration
|
Add management function for challengeset migration
|
Python
|
bsd-3-clause
|
mfitzp/smrtr,mfitzp/smrtr
|
Add management function for challengeset migration
|
from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from challenge.models import Challenge, ChallengeSet
class Command(BaseCommand):
args = ""
help = "Update concept total_question counts (post db import)"
def handle(self, *args, **options):
for challengeset in ChallengeSet.objects.filter(challenge=0):
challenges = Challenge.objects.all()
for concept in challengeset.concepts.all():
challenges = challenges.filter(concepts=concept)
if challenges:
challengeset.challenge = challenges[0]
challengeset.save()
|
<commit_before><commit_msg>Add management function for challengeset migration<commit_after>
|
from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from challenge.models import Challenge, ChallengeSet
class Command(BaseCommand):
args = ""
help = "Update concept total_question counts (post db import)"
def handle(self, *args, **options):
for challengeset in ChallengeSet.objects.filter(challenge=0):
challenges = Challenge.objects.all()
for concept in challengeset.concepts.all():
challenges = challenges.filter(concepts=concept)
if challenges:
challengeset.challenge = challenges[0]
challengeset.save()
|
Add management function for challengeset migrationfrom optparse import make_option
import sys
from django.core.management.base import BaseCommand
from challenge.models import Challenge, ChallengeSet
class Command(BaseCommand):
args = ""
help = "Update concept total_question counts (post db import)"
def handle(self, *args, **options):
for challengeset in ChallengeSet.objects.filter(challenge=0):
challenges = Challenge.objects.all()
for concept in challengeset.concepts.all():
challenges = challenges.filter(concepts=concept)
if challenges:
challengeset.challenge = challenges[0]
challengeset.save()
|
<commit_before><commit_msg>Add management function for challengeset migration<commit_after>from optparse import make_option
import sys
from django.core.management.base import BaseCommand
from challenge.models import Challenge, ChallengeSet
class Command(BaseCommand):
args = ""
help = "Update concept total_question counts (post db import)"
def handle(self, *args, **options):
for challengeset in ChallengeSet.objects.filter(challenge=0):
challenges = Challenge.objects.all()
for concept in challengeset.concepts.all():
challenges = challenges.filter(concepts=concept)
if challenges:
challengeset.challenge = challenges[0]
challengeset.save()
|
|
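Inside a configured Django project the command would normally be run as python manage.py find_challengeset_home (the command name follows the module filename); the programmatic equivalent is:
from django.core.management import call_command

call_command("find_challengeset_home")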
a69004d4db2038fe24ee629f43327a9a3b5ad6e4
|
vtk_simple.py
|
vtk_simple.py
|
import vtk
def setup():
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
return ren,renWin,iren
def create_sphere(ren=None,r=5.0,center=(0,0,0)):
if ren is None:
ren,renWin,iren=setup()
# create source
source = vtk.vtkSphereSource()
    source.SetCenter(*center)
    source.SetRadius(r)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(source.GetOutput())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign actor to the renderer
ren.AddActor(actor)
return source
|
Add simple VTK example (to be put inside an event driver)
|
Add simple VTK example (to be put inside an event driver)
|
Python
|
mit
|
martindurant/misc
|
Add simple VTK example (to be put inside an event driver)
|
import vtk
def setup():
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
return ren,renWin,iren
def create_sphere(ren=None,r=5.0,center=(0,0,0)):
if ren is None:
ren,renWin,iren=setup()
# create source
source = vtk.vtkSphereSource()
    source.SetCenter(*center)
    source.SetRadius(r)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(source.GetOutput())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign actor to the renderer
ren.AddActor(actor)
return source
|
<commit_before><commit_msg>Add simple VTK example (to be put inside an event driver)<commit_after>
|
import vtk
def setup():
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
return ren,renWin,iren
def create_sphere(ren=None,r=5.0,center=(0,0,0)):
if ren is None:
ren,renWin,iren=setup()
# create source
source = vtk.vtkSphereSource()
source.SetCenter(0,0,0)
source.SetRadius(5.0)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(source.GetOutput())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign actor to the renderer
ren.AddActor(actor)
return source
|
Add simple VTK example (to be put inside an event driver)
import vtk
def setup():
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
return ren,renWin,iren
def create_sphere(ren=None,r=5.0,center=(0,0,0)):
if ren is None:
ren,renWin,iren=setup()
# create source
source = vtk.vtkSphereSource()
source.SetCenter(0,0,0)
source.SetRadius(5.0)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(source.GetOutput())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign actor to the renderer
ren.AddActor(actor)
return source
|
<commit_before><commit_msg>Add simple VTK example (to be put inside an event driver)<commit_after>
import vtk
def setup():
# create a rendering window and renderer
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
# create a renderwindowinteractor
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
return ren,renWin,iren
def create_sphere(ren=None,r=5.0,center=(0,0,0)):
if ren is None:
ren,renWin,iren=setup()
# create source
source = vtk.vtkSphereSource()
source.SetCenter(0,0,0)
source.SetRadius(5.0)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInput(source.GetOutput())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign actor to the renderer
ren.AddActor(actor)
return source
|
|
bfe111a760aa88046c0f3d0b70daf7c2f1e88b35
|
third_party/__init__.py
|
third_party/__init__.py
|
import os
import sys
try:
__file__
except NameError:
__file__ = sys.argv[0]
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
Allow third_party modules to be imported at the top-level.
|
Allow third_party modules to be imported at the top-level.
import third_party
import dns
git-svn-id: abd770af2affaf62930f25b244f6edafe8ddae5b@11 164481a5-15cb-f69f-4b93-856c5b7754c5
|
Python
|
apache-2.0
|
bpsinc-native/src_third_party_webpagereplay,andrey-malets/web-page-replay,chromium/web-page-replay,snorp/web-page-replay,snorp/web-page-replay,colin-scott/web-page-replay,chromium/web-page-replay,bpsinc-native/src_third_party_webpagereplay,colin-scott/web-page-replay,bpsinc-native/src_third_party_webpagereplay,andrey-malets/web-page-replay,ting-yuan/web-page-replay,ting-yuan/web-page-replay
|
Allow third_party modules to be imported at the top-level.
import third_party
import dns
git-svn-id: abd770af2affaf62930f25b244f6edafe8ddae5b@11 164481a5-15cb-f69f-4b93-856c5b7754c5
|
import os
import sys
try:
__file__
except NameError:
__file__ = sys.argv[0]
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
<commit_before><commit_msg>Allow third_party modules to be imported at the top-level.
import third_party
import dns
git-svn-id: abd770af2affaf62930f25b244f6edafe8ddae5b@11 164481a5-15cb-f69f-4b93-856c5b7754c5<commit_after>
|
import os
import sys
try:
__file__
except NameError:
__file__ = sys.argv[0]
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
Allow third_party modules to be imported at the top-level.
import third_party
import dns
git-svn-id: abd770af2affaf62930f25b244f6edafe8ddae5b@11 164481a5-15cb-f69f-4b93-856c5b7754c5
import os
import sys
try:
__file__
except NameError:
__file__ = sys.argv[0]
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
<commit_before><commit_msg>Allow third_party modules to be imported at the top-level.
import third_party
import dns
git-svn-id: abd770af2affaf62930f25b244f6edafe8ddae5b@11 164481a5-15cb-f69f-4b93-856c5b7754c5<commit_after>
import os
import sys
try:
__file__
except NameError:
__file__ = sys.argv[0]
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
|
c4ed17ff345ee59f284a2f6d1ec18034eeb311a9
|
tests/cpydiff/module_array_constructor.py
|
tests/cpydiff/module_array_constructor.py
|
"""
categories: Modules,array
description: Overflow checking is not implemented
cause: MicroPython implements implicit truncation in order to reduce code size and execution time
workaround: If CPython compatibility is needed then mask the value explicitly
"""
import array
a = array.array("b", [257])
print(a)
|
Add test for array constructor with overflowing value.
|
tests/cpydiff: Add test for array constructor with overflowing value.
|
Python
|
mit
|
bvernoux/micropython,bvernoux/micropython,bvernoux/micropython,bvernoux/micropython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,adafruit/circuitpython,bvernoux/micropython,adafruit/circuitpython
|
tests/cpydiff: Add test for array constructor with overflowing value.
|
"""
categories: Modules,array
description: Overflow checking is not implemented
cause: MicroPython implements implicit truncation in order to reduce code size and execution time
workaround: If CPython compatibility is needed then mask the value explicitly
"""
import array
a = array.array("b", [257])
print(a)
|
<commit_before><commit_msg>tests/cpydiff: Add test for array constructor with overflowing value.<commit_after>
|
"""
categories: Modules,array
description: Overflow checking is not implemented
cause: MicroPython implements implicit truncation in order to reduce code size and execution time
workaround: If CPython compatibility is needed then mask the value explicitly
"""
import array
a = array.array("b", [257])
print(a)
|
tests/cpydiff: Add test for array constructor with overflowing value.
"""
categories: Modules,array
description: Overflow checking is not implemented
cause: MicroPython implements implicit truncation in order to reduce code size and execution time
workaround: If CPython compatibility is needed then mask the value explicitly
"""
import array
a = array.array("b", [257])
print(a)
|
<commit_before><commit_msg>tests/cpydiff: Add test for array constructor with overflowing value.<commit_after>
"""
categories: Modules,array
description: Overflow checking is not implemented
cause: MicroPython implements implicit truncation in order to reduce code size and execution time
workaround: If CPython compatibility is needed then mask the value explicitly
"""
import array
a = array.array("b", [257])
print(a)
|
|
1f06a9e66ebc74a171d4cde7a15f40454f4f2199
|
calvin/actorstore/systemactors/data/Sink.py
|
calvin/actorstore/systemactors/data/Sink.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class Sink(Actor):
"""
Data sink - usually some form of permanent storage
input:
data: a list of json structures to be saved
"""
@manage([])
def init(self ):
self.sink = calvinsys.open(self, "data.sink")
@stateguard(lambda self: calvinsys.can_write(self.sink))
@condition(["data"], [])
def write(self, data):
calvinsys.write(self.sink, data)
action_priority = (write,)
requires = ['data.sink']
|
Add generic data sink actor
|
Actors: Add generic data sink actor
|
Python
|
apache-2.0
|
EricssonResearch/calvin-base,EricssonResearch/calvin-base,EricssonResearch/calvin-base,EricssonResearch/calvin-base
|
Actors: Add generic data sink actor
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class Sink(Actor):
"""
Data sink - usually some form of permanent storage
input:
data: a list of json structures to be saved
"""
@manage([])
def init(self ):
self.sink = calvinsys.open(self, "data.sink")
@stateguard(lambda self: calvinsys.can_write(self.sink))
@condition(["data"], [])
def write(self, data):
calvinsys.write(self.sink, data)
action_priority = (write,)
requires = ['data.sink']
|
<commit_before><commit_msg>Actors: Add generic data sink actor<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class Sink(Actor):
"""
Data sink - usually some form of permanent storage
input:
data: a list of json structures to be saved
"""
@manage([])
def init(self ):
self.sink = calvinsys.open(self, "data.sink")
@stateguard(lambda self: calvinsys.can_write(self.sink))
@condition(["data"], [])
def write(self, data):
calvinsys.write(self.sink, data)
action_priority = (write,)
requires = ['data.sink']
|
Actors: Add generic data sink actor
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class Sink(Actor):
"""
Data sink - usually some form of permanent storage
input:
data: a list of json structures to be saved
"""
@manage([])
def init(self ):
self.sink = calvinsys.open(self, "data.sink")
@stateguard(lambda self: calvinsys.can_write(self.sink))
@condition(["data"], [])
def write(self, data):
calvinsys.write(self.sink, data)
action_priority = (write,)
requires = ['data.sink']
|
<commit_before><commit_msg>Actors: Add generic data sink actor<commit_after>
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class Sink(Actor):
"""
Data sink - usually some form of permanent storage
input:
data: a list of json structures to be saved
"""
@manage([])
def init(self ):
self.sink = calvinsys.open(self, "data.sink")
@stateguard(lambda self: calvinsys.can_write(self.sink))
@condition(["data"], [])
def write(self, data):
calvinsys.write(self.sink, data)
action_priority = (write,)
requires = ['data.sink']
|
|
035fa5877a05bb70b8207693c547c97cfd104db3
|
libqtile/widget/textbox.py
|
libqtile/widget/textbox.py
|
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, name, text=" ", width=bar.CALCULATED, **config):
"""
- name: Name for this widget. Used to address the widget from
scripts, commands and qsh.
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
self.name = name
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
|
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, text=" ", width=bar.CALCULATED, **config):
"""
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
|
Remove positional argument "name" from Textbox
|
Remove positional argument "name" from Textbox
Users should just use the kwarg name; no sense in having both.
Closes #239
|
Python
|
mit
|
andrewyoung1991/qtile,StephenBarnes/qtile,de-vri-es/qtile,tych0/qtile,apinsard/qtile,nxnfufunezn/qtile,soulchainer/qtile,encukou/qtile,ramnes/qtile,jdowner/qtile,jdowner/qtile,soulchainer/qtile,de-vri-es/qtile,kopchik/qtile,tych0/qtile,qtile/qtile,kiniou/qtile,kynikos/qtile,kiniou/qtile,EndPointCorp/qtile,himaaaatti/qtile,zordsdavini/qtile,StephenBarnes/qtile,frostidaho/qtile,aniruddhkanojia/qtile,kseistrup/qtile,cortesi/qtile,xplv/qtile,qtile/qtile,nxnfufunezn/qtile,flacjacket/qtile,flacjacket/qtile,andrewyoung1991/qtile,ramnes/qtile,aniruddhkanojia/qtile,dequis/qtile,rxcomm/qtile,zordsdavini/qtile,farebord/qtile,EndPointCorp/qtile,kynikos/qtile,dequis/qtile,w1ndy/qtile,encukou/qtile,kopchik/qtile,rxcomm/qtile,xplv/qtile,farebord/qtile,frostidaho/qtile,w1ndy/qtile,cortesi/qtile,kseistrup/qtile,apinsard/qtile,himaaaatti/qtile
|
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, name, text=" ", width=bar.CALCULATED, **config):
"""
- name: Name for this widget. Used to address the widget from
scripts, commands and qsh.
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
self.name = name
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
Remove positional argument "name" from Textbox
Users should just use the kwarg name; no sense in having both.
Closes #239
|
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, text=" ", width=bar.CALCULATED, **config):
"""
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
|
<commit_before>from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, name, text=" ", width=bar.CALCULATED, **config):
"""
- name: Name for this widget. Used to address the widget from
scripts, commands and qsh.
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
self.name = name
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
<commit_msg>Remove positional argument "name" from Textbox
Users should just use the kwarg name; no sense in having both.
Closes #239<commit_after>
|
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, text=" ", width=bar.CALCULATED, **config):
"""
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
|
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, name, text=" ", width=bar.CALCULATED, **config):
"""
- name: Name for this widget. Used to address the widget from
scripts, commands and qsh.
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
self.name = name
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
Remove positional argument "name" from Textbox
Users should just use the kwarg name; no sense in having both.
Closes #239
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, text=" ", width=bar.CALCULATED, **config):
"""
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
|
<commit_before>from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, name, text=" ", width=bar.CALCULATED, **config):
"""
- name: Name for this widget. Used to address the widget from
scripts, commands and qsh.
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
self.name = name
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
<commit_msg>Remove positional argument "name" from Textbox
Users should just use the kwarg name; no sense in having both.
Closes #239<commit_after>
from .. import bar, manager
import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = manager.Defaults(
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None,
"font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("background", None, "Background colour."),
("foreground", "#ffffff", "Foreground colour.")
)
def __init__(self, text=" ", width=bar.CALCULATED, **config):
"""
- text: Initial widget text.
- width: An integer width, bar.STRETCH, or bar.CALCULATED .
"""
base._TextBox.__init__(self, text, width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
|
91ba03f978d61063c93da381c253992759ba26ad
|
portal/migrations/versions/d0b40bc8d7e6_.py
|
portal/migrations/versions/d0b40bc8d7e6_.py
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
Work around site_persistence fragility. Replace a couple names as delete and recreate on these fails due to FK constraints
|
Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
<commit_before><commit_msg>Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints<commit_after>
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
<commit_before><commit_msg>Work around site_persistence fragility. Replace a couple names
as delete and recreate on these fails due to FK constraints<commit_after>
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: d0b40bc8d7e6
Revises: 8ffec90e68a7
Create Date: 2017-09-20 05:59:45.168324
"""
# revision identifiers, used by Alembic.
revision = 'd0b40bc8d7e6'
down_revision = '8ffec90e68a7'
def upgrade():
# Work around site_persistence fragility. Replace a couple names
# as delete and recreate on these fails due to FK constraints
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN_baseline' "
" WHERE name = 'IRONMAN baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV_baseline' "
" WHERE name = 'CRV baseline'")
def downgrade():
op.execute("UPDATE questionnaire_banks SET name = 'IRONMAN baseline' "
" WHERE name = 'IRONMAN_baseline'")
op.execute("UPDATE questionnaire_banks SET name = 'CRV baseline' "
" WHERE name = 'CRV_baseline'")
|
|
ebebbd7a066863bc9ed19672e2a302920102254d
|
modules/msm_backup_restore/files/bin/msm-update-auth-lists.py
|
modules/msm_backup_restore/files/bin/msm-update-auth-lists.py
|
#!/usr/bin/env python
import os
import re
import requests
import sys
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Convert from Heroku style DATABASE_URL to Sqlalchemy style, if necessary
db_url = os.environ.get('DATABASE_URL')
DATABASE_URL = re.sub('^postgres:', 'postgresql:', db_url)
engine = create_engine(DATABASE_URL, echo=False)
Base = declarative_base(engine)
class User(Base):
""""""
__tablename__ = 'auth_user'
__table_args__ = {'autoload':True}
def loadSession():
""""""
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
return session
def get_players(session):
"""Return list of player names for whitelist.txt"""
query = (session.query(User.username)
.filter(User.is_active == True)
.order_by(User.username)
)
return [player[0].encode('utf-8') for player in query]
def get_ops(session):
"""Return list of player names for ops.txt."""
query = (session.query(User.username)
.filter(User.is_superuser == True)
.filter(User.is_active == True)
.order_by(User.username)
)
return [ops[0].encode('utf-8') for ops in query]
def write_names(names, filename):
"""Write list of names to filename."""
with open(filename, 'w') as f:
for name in names:
f.write("%s\n" % name)
def main():
if len(sys.argv) != 1:
print "Usage: %s" % sys.argv[0]
sys.exit(1)
session = loadSession()
whitelist_file = '/opt/msm/servers/default/white-list.txt'
ops_file = '/opt/msm/servers/default/ops.txt'
write_names(get_players(session), whitelist_file)
write_names(get_ops(session), ops_file)
session.close()
if __name__ == "__main__":
main()
|
Write 'white-list.txt' and 'ops.txt' auth files.
|
Write 'white-list.txt' and 'ops.txt' auth files.
* Add msm-update-auth-lists.py.
* Django admins (is_superuser) are Minecraft ops.
* Django users (is_active) are white-listed players.
|
Python
|
mit
|
toffer/minecloud-ami,toffer/minecloud-ami
|
Write 'white-list.txt' and 'ops.txt' auth files.
* Add msm-update-auth-lists.py.
* Django admins (is_superuser) are Minecraft ops.
* Django users (is_active) are white-listed players.
|
#!/usr/bin/env python
import os
import re
import requests
import sys
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Convert from Heroku style DATABASE_URL to Sqlalchemy style, if necessary
db_url = os.environ.get('DATABASE_URL')
DATABASE_URL = re.sub('^postgres:', 'postgresql:', db_url)
engine = create_engine(DATABASE_URL, echo=False)
Base = declarative_base(engine)
class User(Base):
""""""
__tablename__ = 'auth_user'
__table_args__ = {'autoload':True}
def loadSession():
""""""
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
return session
def get_players(session):
"""Return list of player names for whitelist.txt"""
query = (session.query(User.username)
.filter(User.is_active == True)
.order_by(User.username)
)
return [player[0].encode('utf-8') for player in query]
def get_ops(session):
"""Return list of player names for ops.txt."""
query = (session.query(User.username)
.filter(User.is_superuser == True)
.filter(User.is_active == True)
.order_by(User.username)
)
return [ops[0].encode('utf-8') for ops in query]
def write_names(names, filename):
"""Write list of names to filename."""
with open(filename, 'w') as f:
for name in names:
f.write("%s\n" % name)
def main():
if len(sys.argv) != 1:
print "Usage: %s" % sys.argv[0]
sys.exit(1)
session = loadSession()
whitelist_file = '/opt/msm/servers/default/white-list.txt'
ops_file = '/opt/msm/servers/default/ops.txt'
write_names(get_players(session), whitelist_file)
write_names(get_ops(session), ops_file)
session.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Write 'white-list.txt' and 'ops.txt' auth files.
* Add msm-update-auth-lists.py.
* Django admins (is_superuser) are Minecraft ops.
* Django users (is_active) are white-listed players.<commit_after>
|
#!/usr/bin/env python
import os
import re
import requests
import sys
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Convert from Heroku style DATABASE_URL to Sqlalchemy style, if necessary
db_url = os.environ.get('DATABASE_URL')
DATABASE_URL = re.sub('^postgres:', 'postgresql:', db_url)
engine = create_engine(DATABASE_URL, echo=False)
Base = declarative_base(engine)
class User(Base):
""""""
__tablename__ = 'auth_user'
__table_args__ = {'autoload':True}
def loadSession():
""""""
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
return session
def get_players(session):
"""Return list of player names for whitelist.txt"""
query = (session.query(User.username)
.filter(User.is_active == True)
.order_by(User.username)
)
return [player[0].encode('utf-8') for player in query]
def get_ops(session):
"""Return list of player names for ops.txt."""
query = (session.query(User.username)
.filter(User.is_superuser == True)
.filter(User.is_active == True)
.order_by(User.username)
)
return [ops[0].encode('utf-8') for ops in query]
def write_names(names, filename):
"""Write list of names to filename."""
with open(filename, 'w') as f:
for name in names:
f.write("%s\n" % name)
def main():
if len(sys.argv) != 1:
print "Usage: %s" % sys.argv[0]
sys.exit(1)
session = loadSession()
whitelist_file = '/opt/msm/servers/default/white-list.txt'
ops_file = '/opt/msm/servers/default/ops.txt'
write_names(get_players(session), whitelist_file)
write_names(get_ops(session), ops_file)
session.close()
if __name__ == "__main__":
main()
|
Write 'white-list.txt' and 'ops.txt' auth files.
* Add msm-update-auth-lists.py.
* Django admins (is_superuser) are Minecraft ops.
* Django users (is_active) are white-listed players.
#!/usr/bin/env python
import os
import re
import requests
import sys
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Convert from Heroku style DATABASE_URL to Sqlalchemy style, if necessary
db_url = os.environ.get('DATABASE_URL')
DATABASE_URL = re.sub('^postgres:', 'postgresql:', db_url)
engine = create_engine(DATABASE_URL, echo=False)
Base = declarative_base(engine)
class User(Base):
""""""
__tablename__ = 'auth_user'
__table_args__ = {'autoload':True}
def loadSession():
""""""
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
return session
def get_players(session):
"""Return list of player names for whitelist.txt"""
query = (session.query(User.username)
.filter(User.is_active == True)
.order_by(User.username)
)
return [player[0].encode('utf-8') for player in query]
def get_ops(session):
"""Return list of player names for ops.txt."""
query = (session.query(User.username)
.filter(User.is_superuser == True)
.filter(User.is_active == True)
.order_by(User.username)
)
return [ops[0].encode('utf-8') for ops in query]
def write_names(names, filename):
"""Write list of names to filename."""
with open(filename, 'w') as f:
for name in names:
f.write("%s\n" % name)
def main():
if len(sys.argv) != 1:
print "Usage: %s" % sys.argv[0]
sys.exit(1)
session = loadSession()
whitelist_file = '/opt/msm/servers/default/white-list.txt'
ops_file = '/opt/msm/servers/default/ops.txt'
write_names(get_players(session), whitelist_file)
write_names(get_ops(session), ops_file)
session.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Write 'white-list.txt' and 'ops.txt' auth files.
* Add msm-update-auth-lists.py.
* Django admins (is_superuser) are Minecraft ops.
* Django users (is_active) are white-listed players.<commit_after>
#!/usr/bin/env python
import os
import re
import requests
import sys
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# Convert from Heroku style DATABASE_URL to Sqlalchemy style, if necessary
db_url = os.environ.get('DATABASE_URL')
DATABASE_URL = re.sub('^postgres:', 'postgresql:', db_url)
engine = create_engine(DATABASE_URL, echo=False)
Base = declarative_base(engine)
class User(Base):
""""""
__tablename__ = 'auth_user'
__table_args__ = {'autoload':True}
def loadSession():
""""""
metadata = Base.metadata
Session = sessionmaker(bind=engine)
session = Session()
return session
def get_players(session):
"""Return list of player names for whitelist.txt"""
query = (session.query(User.username)
.filter(User.is_active == True)
.order_by(User.username)
)
return [player[0].encode('utf-8') for player in query]
def get_ops(session):
"""Return list of player names for ops.txt."""
query = (session.query(User.username)
.filter(User.is_superuser == True)
.filter(User.is_active == True)
.order_by(User.username)
)
return [ops[0].encode('utf-8') for ops in query]
def write_names(names, filename):
"""Write list of names to filename."""
with open(filename, 'w') as f:
for name in names:
f.write("%s\n" % name)
def main():
if len(sys.argv) != 1:
print "Usage: %s" % sys.argv[0]
sys.exit(1)
session = loadSession()
whitelist_file = '/opt/msm/servers/default/white-list.txt'
ops_file = '/opt/msm/servers/default/ops.txt'
write_names(get_players(session), whitelist_file)
write_names(get_ops(session), ops_file)
session.close()
if __name__ == "__main__":
main()
|
|
d22d3aebf818632de832adfba684e44603623e5b
|
raiden/network/transport/matrix/__init__.py
|
raiden/network/transport/matrix/__init__.py
|
from raiden.network.transport.matrix.transport import ( # noqa
MatrixTransport,
UserPresence,
_RetryQueue,
)
from raiden.network.transport.matrix.utils import ( # noqa
join_global_room,
login_or_register,
validate_userid_signature,
)
|
Move matrix transport to own module, pull in raiden_libs matrix classes
|
Move matrix transport to own module, pull in raiden_libs matrix classes
|
Python
|
mit
|
hackaugusto/raiden,hackaugusto/raiden
|
Move matrix transport to own module, pull in raiden_libs matrix classes
|
from raiden.network.transport.matrix.transport import ( # noqa
MatrixTransport,
UserPresence,
_RetryQueue,
)
from raiden.network.transport.matrix.utils import ( # noqa
join_global_room,
login_or_register,
validate_userid_signature,
)
|
<commit_before><commit_msg>Move matrix transport to own module, pull in raiden_libs matrix classes<commit_after>
|
from raiden.network.transport.matrix.transport import ( # noqa
MatrixTransport,
UserPresence,
_RetryQueue,
)
from raiden.network.transport.matrix.utils import ( # noqa
join_global_room,
login_or_register,
validate_userid_signature,
)
|
Move matrix transport to own module, pull in raiden_libs matrix classes
from raiden.network.transport.matrix.transport import ( # noqa
MatrixTransport,
UserPresence,
_RetryQueue,
)
from raiden.network.transport.matrix.utils import ( # noqa
join_global_room,
login_or_register,
validate_userid_signature,
)
|
<commit_before><commit_msg>Move matrix transport to own module, pull in raiden_libs matrix classes<commit_after>
from raiden.network.transport.matrix.transport import ( # noqa
MatrixTransport,
UserPresence,
_RetryQueue,
)
from raiden.network.transport.matrix.utils import ( # noqa
join_global_room,
login_or_register,
validate_userid_signature,
)
|
|
e73f621792db820a4d96ef63e53f63aa223133e8
|
modules/tests/test_twitter.py
|
modules/tests/test_twitter.py
|
import modules
import pytest
def test_twitter():
assert('twitter' == modules.process_query('twitter')[0])
assert('twitter' == modules.process_query('twitter hotnews')[0])
assert('twitter' == modules.process_query('paris twitter')[0])
assert('twitter' != modules.process_query('something random')[0])
intent, entities = modules.process_query('twitter @NBA')
assert('twitter' == intent)
assert('@nba' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter')
assert('twitter' == intent)
with pytest.raises(KeyError):
assert(entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter @POTUS')
assert('twitter' == intent)
assert('@potus' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter someone')
assert('twitter' == intent)
assert('someone' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter zidane')
assert('twitter' == intent)
assert('zidane' == entities['twitter'][0]['value'].lower())
|
Add tests for twitter module
|
Add tests for twitter module
|
Python
|
mit
|
ZuZuD/JARVIS-on-Messenger
|
Add tests for twitter module
|
import modules
import pytest
def test_twitter():
assert('twitter' == modules.process_query('twitter')[0])
assert('twitter' == modules.process_query('twitter hotnews')[0])
assert('twitter' == modules.process_query('paris twitter')[0])
assert('twitter' != modules.process_query('something random')[0])
intent, entities = modules.process_query('twitter @NBA')
assert('twitter' == intent)
assert('@nba' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter')
assert('twitter' == intent)
with pytest.raises(KeyError):
assert(entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter @POTUS')
assert('twitter' == intent)
assert('@potus' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter someone')
assert('twitter' == intent)
assert('someone' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter zidane')
assert('twitter' == intent)
assert('zidane' == entities['twitter'][0]['value'].lower())
|
<commit_before><commit_msg>Add tests for twitter module<commit_after>
|
import modules
import pytest
def test_twitter():
assert('twitter' == modules.process_query('twitter')[0])
assert('twitter' == modules.process_query('twitter hotnews')[0])
assert('twitter' == modules.process_query('paris twitter')[0])
assert('twitter' != modules.process_query('something random')[0])
intent, entities = modules.process_query('twitter @NBA')
assert('twitter' == intent)
assert('@nba' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter')
assert('twitter' == intent)
with pytest.raises(KeyError):
assert(entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter @POTUS')
assert('twitter' == intent)
assert('@potus' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter someone')
assert('twitter' == intent)
assert('someone' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter zidane')
assert('twitter' == intent)
assert('zidane' == entities['twitter'][0]['value'].lower())
|
Add tests for twitter module
import modules
import pytest
def test_twitter():
assert('twitter' == modules.process_query('twitter')[0])
assert('twitter' == modules.process_query('twitter hotnews')[0])
assert('twitter' == modules.process_query('paris twitter')[0])
assert('twitter' != modules.process_query('something random')[0])
intent, entities = modules.process_query('twitter @NBA')
assert('twitter' == intent)
assert('@nba' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter')
assert('twitter' == intent)
with pytest.raises(KeyError):
assert(entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter @POTUS')
assert('twitter' == intent)
assert('@potus' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter someone')
assert('twitter' == intent)
assert('someone' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter zidane')
assert('twitter' == intent)
assert('zidane' == entities['twitter'][0]['value'].lower())
|
<commit_before><commit_msg>Add tests for twitter module<commit_after>
import modules
import pytest
def test_twitter():
assert('twitter' == modules.process_query('twitter')[0])
assert('twitter' == modules.process_query('twitter hotnews')[0])
assert('twitter' == modules.process_query('paris twitter')[0])
assert('twitter' != modules.process_query('something random')[0])
intent, entities = modules.process_query('twitter @NBA')
assert('twitter' == intent)
assert('@nba' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter')
assert('twitter' == intent)
with pytest.raises(KeyError):
assert(entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter @POTUS')
assert('twitter' == intent)
assert('@potus' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter someone')
assert('twitter' == intent)
assert('someone' == entities['twitter'][0]['value'].lower())
intent, entities = modules.process_query('twitter zidane')
assert('twitter' == intent)
assert('zidane' == entities['twitter'][0]['value'].lower())
|
|
dde66a8f4a03400fbf292c1ddf03cf7c63e517b3
|
Clean_Energy_Outlook/Ridge_pred_nuclear.py
|
Clean_Energy_Outlook/Ridge_pred_nuclear.py
|
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict nuclear data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['NUETP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict NUETP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
Add ridge prediction code for nuclear
|
Add ridge prediction code for nuclear
|
Python
|
mit
|
uwkejia/Clean-Energy-Outlook
|
Add ridge prediction code for nuclear
|
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict nuclear data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['NUETP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict NUETP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
<commit_before><commit_msg>Add ridge prediction code for nuclear<commit_after>
|
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict nuclear data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['NUETP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict NUETP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
Add ridge prediction code for nuclear
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict nuclear data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['NUETP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict NUETP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
<commit_before><commit_msg>Add ridge prediction code for nuclear<commit_after>
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict nuclear data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['NUETP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict NUETP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
|
eb6b595c854192c085f635a5e489210a29dc335a
|
bin/data/gigaword_summarization.py
|
bin/data/gigaword_summarization.py
|
#! /usr/bin/env python
"""Processes the Gigaword Corpus (https://catalog.ldc.upenn.edu/LDC2011T07).
Generates source and target files where sources correspond to the first N
sentences of each article, and targets corresponds to article headlines.
Usage:
bin/data/gigaword_summary < GIGAWORD \
-f gigaword_data
-o output/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import argparse
from tensorflow.python.platform import gfile
from bs4 import BeautifulSoup
PARSER = argparse.ArgumentParser(
description="Processes the Gigaword corpus.")
PARSER.add_argument(
"-f", "--file", type=str, required=True,
help="path to the Gigaword SGML file")
PARSER.add_argument(
"-o", "--output_dir", type=str, required=True,
help="path to the output directory")
PARSER.add_argument(
"-n", "--num_sentences", type=str, required=False, default=2,
help="Use the first N sentences as source text")
ARGS = PARSER.parse_args()
def gigaword_iter(path, n_sentences=2):
"""Creates an iterator that yields (source, target) tuples.
"""
soup = BeautifulSoup(open(path))
for doc in soup.find_all("doc"):
# Skip docs without headline
if doc.headline is None:
continue
# Find first N sentences
sentences = doc.find_all("p")[:n_sentences]
if not sentences:
continue
sentences = [_.text.strip().replace("\n", "") for _ in sentences]
# Yield the sentences and the headline
yield " ".join(sentences), doc.headline.text.strip()
def main():
"""The entrypoint for the script"""
gfile.MakeDirs(ARGS.output_dir)
sources_filename = os.path.join(ARGS.output_dir, "sources.txt")
targets_filename = os.path.join(ARGS.output_dir, "targets.txt")
sources_file = gfile.GFile(sources_filename, "w")
targets_file = gfile.GFile(targets_filename, "w")
records = gigaword_iter(ARGS.file, ARGS.num_sentences)
for i, (source, target) in enumerate(records):
sources_file.write(source + "\n")
targets_file.write(target + "\n")
if i % 1000 == 0:
sys.stderr.write(".")
if i % 100000 == 0:
sys.stderr.write("\n")
sources_file.close()
targets_file.close()
if __name__ == "__main__":
main()
|
Add Gigaword summarization preprocessing script
|
Add Gigaword summarization preprocessing script
|
Python
|
apache-2.0
|
google/seq2seq,shashankrajput/seq2seq,liyi193328/seq2seq,liyi193328/seq2seq,kontact-chan/seq2seq,google/seq2seq,shashankrajput/seq2seq,shashankrajput/seq2seq,shashankrajput/seq2seq,chunfengh/seq2seq,chunfengh/seq2seq,liyi193328/seq2seq,kontact-chan/seq2seq,chunfengh/seq2seq,liyi193328/seq2seq,google/seq2seq,kontact-chan/seq2seq,chunfengh/seq2seq,liyi193328/seq2seq,kontact-chan/seq2seq,google/seq2seq
|
Add Gigaword summarization preprocessing script
|
#! /usr/bin/env python
"""Processes the Gigaword Corpus (https://catalog.ldc.upenn.edu/LDC2011T07).
Generates source and target files where sources correspond to the first N
sentences of each article, and targets correspond to article headlines.
Usage:
bin/data/gigaword_summary < GIGAWORD \
-f gigaword_data
-o output/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import argparse
from tensorflow.python.platform import gfile
from bs4 import BeautifulSoup
PARSER = argparse.ArgumentParser(
description="Processes the Gigaword corpus.")
PARSER.add_argument(
"-f", "--file", type=str, required=True,
help="path to the Gigaword SGML file")
PARSER.add_argument(
"-o", "--output_dir", type=str, required=True,
help="path to the output directory")
PARSER.add_argument(
"-n", "--num_sentences", type=str, required=False, default=2,
help="Use the first N sentences as source text")
ARGS = PARSER.parse_args()
def gigaword_iter(path, n_sentences=2):
"""Creates an iterator that yields (source, target) tuples.
"""
soup = BeautifulSoup(open(path))
for doc in soup.find_all("doc"):
# Skip docs without headline
if doc.headline is None:
continue
# Find first N sentences
sentences = doc.find_all("p")[:n_sentences]
if not sentences:
continue
sentences = [_.text.strip().replace("\n", "") for _ in sentences]
# Yield the sentences and the headline
yield " ".join(sentences), doc.headline.text.strip()
def main():
"""The entrypoint for the script"""
gfile.MakeDirs(ARGS.output_dir)
sources_filename = os.path.join(ARGS.output_dir, "sources.txt")
targets_filename = os.path.join(ARGS.output_dir, "targets.txt")
sources_file = gfile.GFile(sources_filename, "w")
targets_file = gfile.GFile(targets_filename, "w")
records = gigaword_iter(ARGS.file, ARGS.num_sentences)
for i, (source, target) in enumerate(records):
sources_file.write(source + "\n")
targets_file.write(target + "\n")
if i % 1000 == 0:
sys.stderr.write(".")
if i % 100000 == 0:
sys.stderr.write("\n")
sources_file.close()
targets_file.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Gigaword summarization preprocessing script<commit_after>
|
#! /usr/bin/env python
"""Processes the Gigaword Corpus (https://catalog.ldc.upenn.edu/LDC2011T07).
Generates source and target files where sources correspond to the first N
sentences of each article, and targets correspond to article headlines.
Usage:
bin/data/gigaword_summary < GIGAWORD \
-f gigaword_data
-o output/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import argparse
from tensorflow.python.platform import gfile
from bs4 import BeautifulSoup
PARSER = argparse.ArgumentParser(
description="Processes the Gigaword corpus.")
PARSER.add_argument(
"-f", "--file", type=str, required=True,
help="path to the Gigaword SGML file")
PARSER.add_argument(
"-o", "--output_dir", type=str, required=True,
help="path to the output directory")
PARSER.add_argument(
"-n", "--num_sentences", type=str, required=False, default=2,
help="Use the first N sentences as source text")
ARGS = PARSER.parse_args()
def gigaword_iter(path, n_sentences=2):
"""Creates an iterator that yields (source, target) tuples.
"""
soup = BeautifulSoup(open(path))
for doc in soup.find_all("doc"):
# Skip docs without headline
if doc.headline is None:
continue
# Find first N sentences
sentences = doc.find_all("p")[:n_sentences]
if not sentences:
continue
sentences = [_.text.strip().replace("\n", "") for _ in sentences]
# Yield the sentences and the headline
yield " ".join(sentences), doc.headline.text.strip()
def main():
"""The entrypoint for the script"""
gfile.MakeDirs(ARGS.output_dir)
sources_filename = os.path.join(ARGS.output_dir, "sources.txt")
targets_filename = os.path.join(ARGS.output_dir, "targets.txt")
sources_file = gfile.GFile(sources_filename, "w")
targets_file = gfile.GFile(targets_filename, "w")
records = gigaword_iter(ARGS.file, ARGS.num_sentences)
for i, (source, target) in enumerate(records):
sources_file.write(source + "\n")
targets_file.write(target + "\n")
if i % 1000 == 0:
sys.stderr.write(".")
if i % 100000 == 0:
sys.stderr.write("\n")
sources_file.close()
targets_file.close()
if __name__ == "__main__":
main()
|
Add Gigaword summarization preprocessing script#! /usr/bin/env python
"""Processes the Gigaword Corpus (https://catalog.ldc.upenn.edu/LDC2011T07).
Generates source and target files where sources correspond to the first N
sentences of each article, and targets correspond to article headlines.
Usage:
bin/data/gigaword_summary < GIGAWORD \
-f gigaword_data
-o output/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import argparse
from tensorflow.python.platform import gfile
from bs4 import BeautifulSoup
PARSER = argparse.ArgumentParser(
description="Processes the Gigaword corpus.")
PARSER.add_argument(
"-f", "--file", type=str, required=True,
help="path to the Gigaword SGML file")
PARSER.add_argument(
"-o", "--output_dir", type=str, required=True,
help="path to the output directory")
PARSER.add_argument(
"-n", "--num_sentences", type=str, required=False, default=2,
help="Use the first N sentences as source text")
ARGS = PARSER.parse_args()
def gigaword_iter(path, n_sentences=2):
"""Creates an iterator that yields (source, target) tuples.
"""
soup = BeautifulSoup(open(path))
for doc in soup.find_all("doc"):
# Skip docs without headline
if doc.headline is None:
continue
# Find first N sentences
sentences = doc.find_all("p")[:n_sentences]
if not sentences:
continue
sentences = [_.text.strip().replace("\n", "") for _ in sentences]
# Yield the sentences and the headline
yield " ".join(sentences), doc.headline.text.strip()
def main():
"""The entrypoint for the script"""
gfile.MakeDirs(ARGS.output_dir)
sources_filename = os.path.join(ARGS.output_dir, "sources.txt")
targets_filename = os.path.join(ARGS.output_dir, "targets.txt")
sources_file = gfile.GFile(sources_filename, "w")
targets_file = gfile.GFile(targets_filename, "w")
records = gigaword_iter(ARGS.file, ARGS.num_sentences)
for i, (source, target) in enumerate(records):
sources_file.write(source + "\n")
targets_file.write(target + "\n")
if i % 1000 == 0:
sys.stderr.write(".")
if i % 100000 == 0:
sys.stderr.write("\n")
sources_file.close()
targets_file.close()
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Gigaword summarization preprocessing script<commit_after>#! /usr/bin/env python
"""Processes the Gigaword Corpus (https://catalog.ldc.upenn.edu/LDC2011T07).
Generates source and target files where sources correspond to the first N
sentences of each article, and targets correspond to article headlines.
Usage:
bin/data/gigaword_summary < GIGAWORD \
-f gigaword_data
-o output/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
import argparse
from tensorflow.python.platform import gfile
from bs4 import BeautifulSoup
PARSER = argparse.ArgumentParser(
description="Processes the Gigaword corpus.")
PARSER.add_argument(
"-f", "--file", type=str, required=True,
help="path to the Gigaword SGML file")
PARSER.add_argument(
"-o", "--output_dir", type=str, required=True,
help="path to the output directory")
PARSER.add_argument(
"-n", "--num_sentences", type=str, required=False, default=2,
help="Use the first N sentences as source text")
ARGS = PARSER.parse_args()
def gigaword_iter(path, n_sentences=2):
"""Creates an iterator that yields (source, target) tuples.
"""
soup = BeautifulSoup(open(path))
for doc in soup.find_all("doc"):
# Skip docs without headline
if doc.headline is None:
continue
# Find first N sentences
sentences = doc.find_all("p")[:n_sentences]
if not sentences:
continue
sentences = [_.text.strip().replace("\n", "") for _ in sentences]
# Yield the sentences and the headline
yield " ".join(sentences), doc.headline.text.strip()
def main():
"""The entrypoint for the script"""
gfile.MakeDirs(ARGS.output_dir)
sources_filename = os.path.join(ARGS.output_dir, "sources.txt")
targets_filename = os.path.join(ARGS.output_dir, "targets.txt")
sources_file = gfile.GFile(sources_filename, "w")
targets_file = gfile.GFile(targets_filename, "w")
records = gigaword_iter(ARGS.file, ARGS.num_sentences)
for i, (source, target) in enumerate(records):
sources_file.write(source + "\n")
targets_file.write(target + "\n")
if i % 1000 == 0:
sys.stderr.write(".")
if i % 100000 == 0:
sys.stderr.write("\n")
sources_file.close()
targets_file.close()
if __name__ == "__main__":
main()
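# --- Editor's addition: a minimal usage sketch, not part of the original commit. ---
# The SGML path below is a placeholder; gigaword_iter() only needs a readable
# Gigaword file and the number of leading sentences to keep as the source text.
def _example_iterate_gigaword(path="gigaword_sample.sgml"):
    # Print each headline next to the first 80 characters of its source text.
    for source, target in gigaword_iter(path, n_sentences=2):
        print(target, "=>", source[:80])
# The full script would normally be run from the command line instead, e.g.:
#   python bin/data/gigaword_summarization.py -f gigaword.sgml -o output/ -n 2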
|
|
33d0129be8c82b9d3a114591a6055d972a342a7a
|
test/functionalities/abbreviation/TestCommonShortSpellings.py
|
test/functionalities/abbreviation/TestCommonShortSpellings.py
|
"""
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class CommonShortSpellingsTestCase(TestBase):
mydir = os.path.join("functionalities", "abbreviation")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym (self):
self.buildDsym ()
self.run_abbrevs2 ()
def test_with_dwarf (self):
self.buildDwarf ()
self.run_abbrevs2 ()
def run_abbrevs2 (self):
exe = os.path.join (os.getcwd(), "a.out")
self.expect("fil " + exe,
patterns = [ "Current executable set to .*a.out.*" ])
# br s -> breakpoint set
self.expect("br s -n sum",
startstr = "Breakpoint created: 1: name = 'sum', locations = 1")
# disp -> display
self.expect("disp a",
startstr = "target stop-hook add -o")
self.expect("disp b",
startstr = "target stop-hook add -o")
# di/dis -> disassemble
self.expect("help di",
substrs = ["disassemble"])
self.expect("help dis",
substrs = ["disassemble"])
# ta st a -> target stop-hook add
self.expect("help ta st a",
substrs = ["target stop-hook add"])
# fr v -> frame variable
self.expect("help fr v",
substrs = ["frame variable"])
# ta st li -> target stop-hook list
self.expect("ta st li",
substrs = ["Hook: 1", "Hook: 2"])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Test some lldb command abbreviations to make sure the common short spellings of many commands remain available even after we add/delete commands in the future.
|
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142857 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142857 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class CommonShortSpellingsTestCase(TestBase):
mydir = os.path.join("functionalities", "abbreviation")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym (self):
self.buildDsym ()
self.run_abbrevs2 ()
def test_with_dwarf (self):
self.buildDwarf ()
self.run_abbrevs2 ()
def run_abbrevs2 (self):
exe = os.path.join (os.getcwd(), "a.out")
self.expect("fil " + exe,
patterns = [ "Current executable set to .*a.out.*" ])
# br s -> breakpoint set
self.expect("br s -n sum",
startstr = "Breakpoint created: 1: name = 'sum', locations = 1")
# disp -> display
self.expect("disp a",
startstr = "target stop-hook add -o")
self.expect("disp b",
startstr = "target stop-hook add -o")
# di/dis -> disassemble
self.expect("help di",
substrs = ["disassemble"])
self.expect("help dis",
substrs = ["disassemble"])
# ta st a -> target stop-hook add
self.expect("help ta st a",
substrs = ["target stop-hook add"])
# fr v -> frame variable
self.expect("help fr v",
substrs = ["frame variable"])
# ta st li -> target stop-hook list
self.expect("ta st li",
substrs = ["Hook: 1", "Hook: 2"])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142857 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class CommonShortSpellingsTestCase(TestBase):
mydir = os.path.join("functionalities", "abbreviation")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym (self):
self.buildDsym ()
self.run_abbrevs2 ()
def test_with_dwarf (self):
self.buildDwarf ()
self.run_abbrevs2 ()
def run_abbrevs2 (self):
exe = os.path.join (os.getcwd(), "a.out")
self.expect("fil " + exe,
patterns = [ "Current executable set to .*a.out.*" ])
# br s -> breakpoint set
self.expect("br s -n sum",
startstr = "Breakpoint created: 1: name = 'sum', locations = 1")
# disp -> display
self.expect("disp a",
startstr = "target stop-hook add -o")
self.expect("disp b",
startstr = "target stop-hook add -o")
# di/dis -> disassemble
self.expect("help di",
substrs = ["disassemble"])
self.expect("help dis",
substrs = ["disassemble"])
# ta st a -> target stop-hook add
self.expect("help ta st a",
substrs = ["target stop-hook add"])
# fr v -> frame variable
self.expect("help fr v",
substrs = ["frame variable"])
# ta st li -> target stop-hook list
self.expect("ta st li",
substrs = ["Hook: 1", "Hook: 2"])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142857 91177308-0d34-0410-b5e6-96231b3b80d8"""
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class CommonShortSpellingsTestCase(TestBase):
mydir = os.path.join("functionalities", "abbreviation")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym (self):
self.buildDsym ()
self.run_abbrevs2 ()
def test_with_dwarf (self):
self.buildDwarf ()
self.run_abbrevs2 ()
def run_abbrevs2 (self):
exe = os.path.join (os.getcwd(), "a.out")
self.expect("fil " + exe,
patterns = [ "Current executable set to .*a.out.*" ])
# br s -> breakpoint set
self.expect("br s -n sum",
startstr = "Breakpoint created: 1: name = 'sum', locations = 1")
# disp -> display
self.expect("disp a",
startstr = "target stop-hook add -o")
self.expect("disp b",
startstr = "target stop-hook add -o")
# di/dis -> disassemble
self.expect("help di",
substrs = ["disassemble"])
self.expect("help dis",
substrs = ["disassemble"])
# ta st a -> target stop-hook add
self.expect("help ta st a",
substrs = ["target stop-hook add"])
# fr v -> frame variable
self.expect("help fr v",
substrs = ["frame variable"])
# ta st li -> target stop-hook list
self.expect("ta st li",
substrs = ["Hook: 1", "Hook: 2"])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142857 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""
Test some lldb command abbreviations to make sure the common short spellings of
many commands remain available even after we add/delete commands in the future.
"""
import os, time
import unittest2
import lldb
from lldbtest import *
class CommonShortSpellingsTestCase(TestBase):
mydir = os.path.join("functionalities", "abbreviation")
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym (self):
self.buildDsym ()
self.run_abbrevs2 ()
def test_with_dwarf (self):
self.buildDwarf ()
self.run_abbrevs2 ()
def run_abbrevs2 (self):
exe = os.path.join (os.getcwd(), "a.out")
self.expect("fil " + exe,
patterns = [ "Current executable set to .*a.out.*" ])
# br s -> breakpoint set
self.expect("br s -n sum",
startstr = "Breakpoint created: 1: name = 'sum', locations = 1")
# disp -> display
self.expect("disp a",
startstr = "target stop-hook add -o")
self.expect("disp b",
startstr = "target stop-hook add -o")
# di/dis -> disassemble
self.expect("help di",
substrs = ["disassemble"])
self.expect("help dis",
substrs = ["disassemble"])
# ta st a -> target stop-hook add
self.expect("help ta st a",
substrs = ["target stop-hook add"])
# fr v -> frame variable
self.expect("help fr v",
substrs = ["frame variable"])
# ta st li -> target stop-hook list
self.expect("ta st li",
substrs = ["Hook: 1", "Hook: 2"])
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
bb7a78662545312660dbddff806d58db8a393bf8
|
tools/ipython.py
|
tools/ipython.py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
from time import sleep
import progressbar as pb
from IPython.utils.io import rprint
def wait_interactive(job):
"""Same as IPython.parallel.client.view.LoadBalancedView.wait_interactive
but prints a Progressbar to both the Notebook and the stdout of the kernel
:param job: an IPython parallel job; should have members ready(), progress
and __len__
"""
widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
bar = pb.ProgressBar(maxval=len(job), widgets=widgets)
bar.start()
while not job.ready():
sleep(1)
bar.update(job.progress)
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
bar.finish()
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
|
Implement progress bar for parallel jobs
|
Implement progress bar for parallel jobs
|
Python
|
unlicense
|
dseuss/pythonlibs
|
Implement progress bar for parallel jobs
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
from time import sleep
import progressbar as pb
from IPython.utils.io import rprint
def wait_interactive(job):
"""Same as IPython.parallel.client.view.LoadBalancedView.wait_interactive
but prints a Progressbar to both the Notebook and the stdout of the kernel
:param job: an IPython parallel job; should have members ready(), progress
and __len__
"""
widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
bar = pb.ProgressBar(maxval=len(job), widgets=widgets)
bar.start()
while not job.ready():
sleep(1)
bar.update(job.progress)
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
bar.finish()
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
|
<commit_before><commit_msg>Implement progress bar for parallel jobs<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
from time import sleep
import progressbar as pb
from IPython.utils.io import rprint
def wait_interactive(job):
"""Same as IPython.parallel.client.view.LoadBalancedView.wait_interactive
but prints a Progressbar to both the Notebook and the stdout of the kernel
:param job: an IPython parallel job; should have members ready(), progress
and __len__
"""
widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
bar = pb.ProgressBar(maxval=len(job), widgets=widgets)
bar.start()
while not job.ready():
sleep(1)
bar.update(job.progress)
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
bar.finish()
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
|
Implement progress bar for parallel jobs#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
from time import sleep
import progressbar as pb
from IPython.utils.io import rprint
def wait_interactive(job):
"""Same as IPython.parallel.client.view.LoadBalancedView.wait_interactive
but prints a Progressbar to both the Notebook and the stdout of the kernel
:param job: an IPython parallel job; should have members ready(), progress
and __len__
"""
widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
bar = pb.ProgressBar(maxval=len(job), widgets=widgets)
bar.start()
while not job.ready():
sleep(1)
bar.update(job.progress)
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
bar.finish()
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
|
<commit_before><commit_msg>Implement progress bar for parallel jobs<commit_after>#!/usr/bin/env python
# encoding: utf-8
from __future__ import division, print_function
from time import sleep
import progressbar as pb
from IPython.utils.io import rprint
def wait_interactive(job):
"""Same as IPython.parallel.client.view.LoadBalancedView.wait_interactive
but prints a Progressbar to both the Notebook and the stdout of the kernel
:param job: an IPython parallel job; should have members ready(), progress
and __len__
"""
widgets = [pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
bar = pb.ProgressBar(maxval=len(job), widgets=widgets)
bar.start()
while not job.ready():
sleep(1)
bar.update(job.progress)
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
bar.finish()
rprint("\r\x1b[31m" + bar._format_line() + "\x1b[0m", end="")
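# --- Editor's addition: a hedged usage sketch, not part of the original module. ---
# Assumes an ipcluster is already running; the mapped function and job size are
# placeholders. Any IPython.parallel async result exposing ready(), progress and
# __len__ should work with wait_interactive().
def _example_wait_for_job():
    from IPython.parallel import Client
    rc = Client()                          # connect to the running cluster
    view = rc.load_balanced_view()
    job = view.map_async(abs, range(100))  # trivial stand-in workload
    wait_interactive(job)                  # progress bar in notebook and kernel stdout
    return job.get()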
|
|
36f81bbe28a9bf018aded659cbe2d59e200a25ca
|
nipap-cli/tests/cli-test.py
|
nipap-cli/tests/cli-test.py
|
#!/usr/bin/python
import unittest
import sys
sys.path.append('../')
from nipap_cli import nipap_cli
from nipap_cli.command import Command
class CliCheck(unittest.TestCase):
def cli_test(self, command):
"""
"""
cmd = Command(nipap_cli.cmds, command)
comp = cmd.complete()
return cmd.exe, cmd.arg, cmd.exe_options, comp
def test_basic(self):
""" Run some basic tests
"""
# top level completion of possible command branches
self.assertEqual(self.cli_test(('',)),
(None, None, {},
['address', 'pool', 'schema']
)
)
def test_cmd_stop(self):
""" should not return anything as we should already have found the command (view) and have an argument to it
"""
self.assertEqual(self.cli_test(('address', 'view', 'FOO', '',)),
(nipap_cli.view_prefix, 'FOO', {},
[]
)
)
if __name__ == '__main__':
unittest.main()
|
Add a very basic unittest for the CLI lib
|
Add a very basic unittest for the CLI lib
|
Python
|
mit
|
fredsod/NIPAP,garberg/NIPAP,bbaja42/NIPAP,SoundGoof/NIPAP,SoundGoof/NIPAP,plajjan/NIPAP,garberg/NIPAP,plajjan/NIPAP,bbaja42/NIPAP,SoundGoof/NIPAP,garberg/NIPAP,plajjan/NIPAP,plajjan/NIPAP,garberg/NIPAP,SpriteLink/NIPAP,bbaja42/NIPAP,ettrig/NIPAP,fredsod/NIPAP,bbaja42/NIPAP,garberg/NIPAP,SoundGoof/NIPAP,SpriteLink/NIPAP,plajjan/NIPAP,SpriteLink/NIPAP,ettrig/NIPAP,fredsod/NIPAP,ettrig/NIPAP,ettrig/NIPAP,SpriteLink/NIPAP,ettrig/NIPAP,fredsod/NIPAP,bbaja42/NIPAP,fredsod/NIPAP,SpriteLink/NIPAP,ettrig/NIPAP,SpriteLink/NIPAP,fredsod/NIPAP,bbaja42/NIPAP,SoundGoof/NIPAP,plajjan/NIPAP,SoundGoof/NIPAP,garberg/NIPAP
|
Add a very basic unittest for the CLI lib
|
#!/usr/bin/python
import unittest
import sys
sys.path.append('../')
from nipap_cli import nipap_cli
from nipap_cli.command import Command
class CliCheck(unittest.TestCase):
def cli_test(self, command):
"""
"""
cmd = Command(nipap_cli.cmds, command)
comp = cmd.complete()
return cmd.exe, cmd.arg, cmd.exe_options, comp
def test_basic(self):
""" Run some basic tests
"""
# top level completion of possible command branches
self.assertEqual(self.cli_test(('',)),
(None, None, {},
['address', 'pool', 'schema']
)
)
def test_cmd_stop(self):
""" should not return anything as we should already have found the command (view) and have an argument to it
"""
self.assertEqual(self.cli_test(('address', 'view', 'FOO', '',)),
(nipap_cli.view_prefix, 'FOO', {},
[]
)
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a very basic unittest for the CLI lib<commit_after>
|
#!/usr/bin/python
import unittest
import sys
sys.path.append('../')
from nipap_cli import nipap_cli
from nipap_cli.command import Command
class CliCheck(unittest.TestCase):
def cli_test(self, command):
"""
"""
cmd = Command(nipap_cli.cmds, command)
comp = cmd.complete()
return cmd.exe, cmd.arg, cmd.exe_options, comp
def test_basic(self):
""" Run some basic tests
"""
# top level completion of possible command branches
self.assertEqual(self.cli_test(('',)),
(None, None, {},
['address', 'pool', 'schema']
)
)
def test_cmd_stop(self):
""" should not return anything as we should already have found the command (view) and have an argument to it
"""
self.assertEqual(self.cli_test(('address', 'view', 'FOO', '',)),
(nipap_cli.view_prefix, 'FOO', {},
[]
)
)
if __name__ == '__main__':
unittest.main()
|
Add a very basic unittest for the CLI lib#!/usr/bin/python
import unittest
import sys
sys.path.append('../')
from nipap_cli import nipap_cli
from nipap_cli.command import Command
class CliCheck(unittest.TestCase):
def cli_test(self, command):
"""
"""
cmd = Command(nipap_cli.cmds, command)
comp = cmd.complete()
return cmd.exe, cmd.arg, cmd.exe_options, comp
def test_basic(self):
""" Run some basic tests
"""
# top level completion of possible command branches
self.assertEqual(self.cli_test(('',)),
(None, None, {},
['address', 'pool', 'schema']
)
)
def test_cmd_stop(self):
""" should not return anything as we should already have found the command (view) and have an argument to it
"""
self.assertEqual(self.cli_test(('address', 'view', 'FOO', '',)),
(nipap_cli.view_prefix, 'FOO', {},
[]
)
)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a very basic unittest for the CLI lib<commit_after>#!/usr/bin/python
import unittest
import sys
sys.path.append('../')
from nipap_cli import nipap_cli
from nipap_cli.command import Command
class CliCheck(unittest.TestCase):
def cli_test(self, command):
"""
"""
cmd = Command(nipap_cli.cmds, command)
comp = cmd.complete()
return cmd.exe, cmd.arg, cmd.exe_options, comp
def test_basic(self):
""" Run some basic tests
"""
# top level completion of possible command branches
self.assertEqual(self.cli_test(('',)),
(None, None, {},
['address', 'pool', 'schema']
)
)
def test_cmd_stop(self):
""" should not return anything as we should already have found the command (view) and have an argument to it
"""
self.assertEqual(self.cli_test(('address', 'view', 'FOO', '',)),
(nipap_cli.view_prefix, 'FOO', {},
[]
)
)
if __name__ == '__main__':
unittest.main()
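# --- Editor's addition: a hedged completion sketch, not part of the original tests. ---
# Modeled on the cases above; whether 'view' is offered for the partial input 'v'
# depends on the nipap_cli command tree and is an assumption here.
def _example_partial_completion():
    cmd = Command(nipap_cli.cmds, ('address', 'v',))
    assert 'view' in cmd.complete()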
|
|
6f882f92ad99376ddf147d9637d298601e4d1abf
|
Sketches/MPS/VideoRecorder/SimpleDiracPlayer.py
|
Sketches/MPS/VideoRecorder/SimpleDiracPlayer.py
|
#!/usr/bin/python
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Codec.Dirac import DiracDecoder
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Util.RateFilter import MessageRateLimit
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
import sys
if len(sys.argv) != 2:
sys.stderr.write("Usage:\n "+sys.argv[0]+" <dirac-file>\n\n")
sys.exit(1)
file = sys.argv[1]
framerate = 15
Pipeline(
ReadFileAdaptor(file, readmode="bitrate",
bitrate = 300000*8/5),
DiracDecoder(),
MessageRateLimit(framerate),
VideoOverlay(),
).run()
|
Copy of the player for convenience of testing
|
Copy of the player for convenience of testing
|
Python
|
apache-2.0
|
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
|
Copy of the player for convenience of testing
|
#!/usr/bin/python
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Codec.Dirac import DiracDecoder
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Util.RateFilter import MessageRateLimit
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
import sys
if len(sys.argv) != 2:
sys.stderr.write("Usage:\n "+sys.argv[0]+" <dirac-file>\n\n")
sys.exit(1)
file = sys.argv[1]
framerate = 15
Pipeline(
ReadFileAdaptor(file, readmode="bitrate",
bitrate = 300000*8/5),
DiracDecoder(),
MessageRateLimit(framerate),
VideoOverlay(),
).run()
|
<commit_before><commit_msg>Copy of the player for convenience of testing<commit_after>
|
#!/usr/bin/python
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Codec.Dirac import DiracDecoder
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Util.RateFilter import MessageRateLimit
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
import sys
if len(sys.argv) != 2:
sys.stderr.write("Usage:\n "+sys.argv[0]+" <dirac-file>\n\n")
sys.exit(1)
file = sys.argv[1]
framerate = 15
Pipeline(
ReadFileAdaptor(file, readmode="bitrate",
bitrate = 300000*8/5),
DiracDecoder(),
MessageRateLimit(framerate),
VideoOverlay(),
).run()
|
Copy of the player for convenience of testing#!/usr/bin/python
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Codec.Dirac import DiracDecoder
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Util.RateFilter import MessageRateLimit
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
import sys
if len(sys.argv) != 2:
sys.stderr.write("Usage:\n "+sys.argv[0]+" <dirac-file>\n\n")
sys.exit(1)
file = sys.argv[1]
framerate = 15
Pipeline(
ReadFileAdaptor(file, readmode="bitrate",
bitrate = 300000*8/5),
DiracDecoder(),
MessageRateLimit(framerate),
VideoOverlay(),
).run()
|
<commit_before><commit_msg>Copy of the player for convenience of testing<commit_after>#!/usr/bin/python
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Codec.Dirac import DiracDecoder
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
from Kamaelia.Util.RateFilter import MessageRateLimit
from Kamaelia.UI.Pygame.VideoOverlay import VideoOverlay
import sys
if len(sys.argv) != 2:
sys.stderr.write("Usage:\n "+sys.argv[0]+" <dirac-file>\n\n")
sys.exit(1)
file = sys.argv[1]
framerate = 15
Pipeline(
ReadFileAdaptor(file, readmode="bitrate",
bitrate = 300000*8/5),
DiracDecoder(),
MessageRateLimit(framerate),
VideoOverlay(),
).run()
|
|
4cd4db39976ae0695a202951b9db75cfc6b9b6b5
|
openquake/commands/webui.py
|
openquake/commands/webui.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6
Former-commit-id: 9e3d460d797b9ab3032a8be0c92b88efe136458b
|
Python
|
agpl-3.0
|
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6
Former-commit-id: 9e3d460d797b9ab3032a8be0c92b88efe136458b
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6
Former-commit-id: 9e3d460d797b9ab3032a8be0c92b88efe136458b<commit_after>
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6
Former-commit-id: 9e3d460d797b9ab3032a8be0c92b88efe136458b# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
Former-commit-id: 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly 5df6e6b3ec523376906abeb8a52eb6c5dc5d46ec [formerly bbfc5549fb632d535ed1934e0d2bd1226ccd4507]]
Former-commit-id: 97050796d5de66127acc07596b2f85c1544047f6
Former-commit-id: 9e3d460d797b9ab3032a8be0c92b88efe136458b<commit_after># -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in the foreground or perform other operations on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
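# --- Editor's addition: an illustrative sketch, not part of the original commit. ---
# The sap parser above is what `oq webui <cmd>` dispatches to; rundjango('runserver')
# is roughly equivalent to running `python -m openquake.server.manage runserver` by hand.
def _example_start_webui():
    webui('start')   # blocks in the foreground until the dev server is stopped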
|
|
cc19d21489ce0cea805b226f3f8122be321b5bf8
|
pylua/tests/test_repeat.py
|
pylua/tests/test_repeat.py
|
from .helpers import codetest
class TestRepeat(object):
"""
tests for the lua repeat/until loop
"""
def test_simple_repeat(self):
ret = codetest("""
x = 0
repeat
x = x + 1
until x == 10
return x
""")
assert ret.returnvalue == 10
def test_simple_repeat_false(self):
ret = codetest("""
x = 99
repeat
x = x + 1
until x > 0
return x
""")
assert ret.returnvalue == 100
def test_nested_repeat(self):
ret = codetest("""
i = 0
x = 0
repeat
i = i + 1
x = x + 1
j = 5
repeat
j = j - 1
x = x + 1
until j == 0
until i == 10
return x
""")
assert ret.returnvalue == 60
|
Add tests for Lua repeat until loop
|
Add tests for Lua repeat until loop
|
Python
|
bsd-3-clause
|
fhahn/luna,fhahn/luna
|
Add tests for Lua repeat until loop
|
from .helpers import codetest
class TestRepeat(object):
"""
tests for the lua repeat/until loop
"""
def test_simple_repeat(self):
ret = codetest("""
x = 0
repeat
x = x + 1
until x == 10
return x
""")
assert ret.returnvalue == 10
def test_simple_repeat_false(self):
ret = codetest("""
x = 99
repeat
x = x + 1
until x > 0
return x
""")
assert ret.returnvalue == 100
def test_nested_repeat(self):
ret = codetest("""
i = 0
x = 0
repeat
i = i + 1
x = x + 1
j = 5
repeat
j = j - 1
x = x + 1
until j == 0
until i == 10
return x
""")
assert ret.returnvalue == 60
|
<commit_before><commit_msg>Add tests for Lua repeat until loop<commit_after>
|
from .helpers import codetest
class TestRepeat(object):
"""
tests for the lua repeat/until loop
"""
def test_simple_repeat(self):
ret = codetest("""
x = 0
repeat
x = x + 1
until x == 10
return x
""")
assert ret.returnvalue == 10
def test_simple_repeat_false(self):
ret = codetest("""
x = 99
repeat
x = x + 1
until x > 0
return x
""")
assert ret.returnvalue == 100
def test_nested_repeat(self):
ret = codetest("""
i = 0
x = 0
repeat
i = i + 1
x = x + 1
j = 5
repeat
j = j - 1
x = x + 1
until j == 0
until i == 10
return x
""")
assert ret.returnvalue == 60
|
Add tests for Lua repeat until loopfrom .helpers import codetest
class TestRepeat(object):
"""
tests for the lua repeat/until loop
"""
def test_simple_repeat(self):
ret = codetest("""
x = 0
repeat
x = x + 1
until x == 10
return x
""")
assert ret.returnvalue == 10
def test_simple_repeat_false(self):
ret = codetest("""
x = 99
repeat
x = x + 1
until x > 0
return x
""")
assert ret.returnvalue == 100
def test_nested_repeat(self):
ret = codetest("""
i = 0
x = 0
repeat
i = i + 1
x = x + 1
j = 5
repeat
j = j - 1
x = x + 1
until j == 0
until i == 10
return x
""")
assert ret.returnvalue == 60
|
<commit_before><commit_msg>Add tests for Lua repeat until loop<commit_after>from .helpers import codetest
class TestRepeat(object):
"""
tests for the lua repeat/until loop
"""
def test_simple_repeat(self):
ret = codetest("""
x = 0
repeat
x = x + 1
until x == 10
return x
""")
assert ret.returnvalue == 10
def test_simple_repeat_false(self):
ret = codetest("""
x = 99
repeat
x = x + 1
until x > 0
return x
""")
assert ret.returnvalue == 100
def test_nested_repeat(self):
ret = codetest("""
i = 0
x = 0
repeat
i = i + 1
x = x + 1
j = 5
repeat
j = j - 1
x = x + 1
until j == 0
until i == 10
return x
""")
assert ret.returnvalue == 60
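# --- Editor's addition: a hedged extra case, not part of the original test file. ---
# A repeat body must execute at least once even when the condition is already true,
# so x is incremented exactly once here.
def _example_repeat_runs_once():
    ret = codetest("""
            x = 7
            repeat
                x = x + 1
            until true
            return x
            """)
    assert ret.returnvalue == 8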
|
|
5ac8e4619473275f2f0b26b8a9b64049d793a4ed
|
rmqid/__init__.py
|
rmqid/__init__.py
|
__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
|
__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
import logging
try:
from logging import NullHandler
except ImportError:
# Python 2.6 does not have a NullHandler
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
|
Add a NullHandler so logging warnings are not emitted if no logger is setup
|
Add a NullHandler so logging warnings are not emitted if no logger is setup
|
Python
|
bsd-3-clause
|
jonahbull/rabbitpy,gmr/rabbitpy,gmr/rabbitpy
|
__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
Add a NullHandler so logging warnings are not emitted if no logger is setup
|
__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
import logging
try:
from logging import NullHandler
except ImportError:
# Python 2.6 does not have a NullHandler
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
|
<commit_before>__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
<commit_msg>Add a NullHandler so logging warnings are not emitted if no logger is setup<commit_after>
|
__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
import logging
try:
from logging import NullHandler
except ImportError:
# Python 2.6 does not have a NullHandler
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
|
__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
Add a NullHandler so logging warnings are not emitted if no logger is setup__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
import logging
try:
from logging import NullHandler
except ImportError:
# Python 2.6 does not have a NullHandler
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
|
<commit_before>__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
<commit_msg>Add a NullHandler so logging warnings are not emitted if no logger is setup<commit_after>__version__ = '0.3.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
import logging
try:
from logging import NullHandler
except ImportError:
# Python 2.6 does not have a NullHandler
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
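# --- Editor's addition: an illustrative note, not part of the original commit. ---
# With the NullHandler attached, importing rmqid no longer triggers the
# "No handlers could be found" warning; applications that do want log output
# simply configure logging themselves, e.g.:
def _example_enable_logging():
    import logging
    logging.basicConfig(level=logging.DEBUG)   # replaces the silent default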
|
cc7ea622542337df088ffdb1960bcecc7c4508f1
|
darkopt/integration/xgboost.py
|
darkopt/integration/xgboost.py
|
import numpy as np
import xgboost.core
from darkopt import learning_curve
class XGBoostCallback(object):
def __init__(self, known_best_score, score_key=None,
pruning_prob_thresh=0.05,
maximize=False, learning_curve_predictor=None,
min_iters_before_prune=10, test_interval=10):
if maximize:
known_best_score = -known_best_score
self.known_best_score = known_best_score
if learning_curve_predictor is None:
learning_curve_predictor = learning_curve.EnsembleSamplingPredictor()
self.evals_result_key = score_key
self.pruning_prob_thresh = pruning_prob_thresh
self.maximize = maximize
self.learning_curve_predictor = learning_curve_predictor
self.min_iters_to_prune = min_iters_before_prune
self.test_interval = test_interval
self.history_iterations = []
self.history_scores = []
def __call__(self, env):
if np.isinf(self.known_best_score):
return
current_iteration = env.iteration
if self.evals_result_key is None:
current_score = env.evaluation_result_list[-1][1]
else:
current_score = dict(env.evaluation_result_list)[self.evals_result_key]
if self.maximize:
current_score = -current_score
self.history_iterations.append(current_iteration)
self.history_scores.append(current_score)
if current_iteration < self.min_iters_to_prune:
return
if (current_iteration - self.min_iters_to_prune) % self.test_interval != 0:
return
lcp = self.learning_curve_predictor
lcp.fit(self.history_iterations, self.history_scores)
prob_win = lcp.predict_proba_less_than(env.end_iteration, self.known_best_score)
print('Probability to beat the known best score:', prob_win)
if prob_win < self.pruning_prob_thresh:
raise xgboost.core.EarlyStopException(env.iteration)
|
Add XGBoost callback class for pruning
|
Add XGBoost callback class for pruning
|
Python
|
mit
|
iwiwi/darkopt
|
Add XGBoost callback class for pruning
|
import numpy as np
import xgboost.core
from darkopt import learning_curve
class XGBoostCallback(object):
def __init__(self, known_best_score, score_key=None,
pruning_prob_thresh=0.05,
maximize=False, learning_curve_predictor=None,
min_iters_before_prune=10, test_interval=10):
if maximize:
known_best_score = -known_best_score
self.known_best_score = known_best_score
if learning_curve_predictor is None:
learning_curve_predictor = learning_curve.EnsembleSamplingPredictor()
self.evals_result_key = score_key
self.pruning_prob_thresh = pruning_prob_thresh
self.maximize = maximize
self.learning_curve_predictor = learning_curve_predictor
self.min_iters_to_prune = min_iters_before_prune
self.test_interval = test_interval
self.history_iterations = []
self.history_scores = []
def __call__(self, env):
if np.isinf(self.known_best_score):
return
current_iteration = env.iteration
if self.evals_result_key is None:
current_score = env.evaluation_result_list[-1][1]
else:
current_score = dict(env.evaluation_result_list)[self.evals_result_key]
if self.maximize:
current_score = -current_score
self.history_iterations.append(current_iteration)
self.history_scores.append(current_score)
if current_iteration < self.min_iters_to_prune:
return
if (current_iteration - self.min_iters_to_prune) % self.test_interval != 0:
return
lcp = self.learning_curve_predictor
lcp.fit(self.history_iterations, self.history_scores)
prob_win = lcp.predict_proba_less_than(env.end_iteration, self.known_best_score)
print('Probability to beat the known best score:', prob_win)
if prob_win < self.pruning_prob_thresh:
raise xgboost.core.EarlyStopException(env.iteration)
|
<commit_before><commit_msg>Add XGBoost callback class for pruning<commit_after>
|
import numpy as np
import xgboost.core
from darkopt import learning_curve
class XGBoostCallback(object):
def __init__(self, known_best_score, score_key=None,
pruning_prob_thresh=0.05,
maximize=False, learning_curve_predictor=None,
min_iters_before_prune=10, test_interval=10):
if maximize:
known_best_score = -known_best_score
self.known_best_score = known_best_score
if learning_curve_predictor is None:
learning_curve_predictor = learning_curve.EnsembleSamplingPredictor()
self.evals_result_key = score_key
self.pruning_prob_thresh = pruning_prob_thresh
self.maximize = maximize
self.learning_curve_predictor = learning_curve_predictor
self.min_iters_to_prune = min_iters_before_prune
self.test_interval = test_interval
self.history_iterations = []
self.history_scores = []
def __call__(self, env):
if np.isinf(self.known_best_score):
return
current_iteration = env.iteration
if self.evals_result_key is None:
current_score = env.evaluation_result_list[-1][1]
else:
current_score = dict(env.evaluation_result_list)[self.evals_result_key]
if self.maximize:
current_score = -current_score
self.history_iterations.append(current_iteration)
self.history_scores.append(current_score)
if current_iteration < self.min_iters_to_prune:
return
if (current_iteration - self.min_iters_to_prune) % self.test_interval != 0:
return
lcp = self.learning_curve_predictor
lcp.fit(self.history_iterations, self.history_scores)
prob_win = lcp.predict_proba_less_than(env.end_iteration, self.known_best_score)
print('Probability to beat the known best score:', prob_win)
if prob_win < self.pruning_prob_thresh:
raise xgboost.core.EarlyStopException(env.iteration)
|
Add XGBoost callback class for pruningimport numpy as np
import xgboost.core
from darkopt import learning_curve
class XGBoostCallback(object):
def __init__(self, known_best_score, score_key=None,
pruning_prob_thresh=0.05,
maximize=False, learning_curve_predictor=None,
min_iters_before_prune=10, test_interval=10):
if maximize:
known_best_score = -known_best_score
self.known_best_score = known_best_score
if learning_curve_predictor is None:
learning_curve_predictor = learning_curve.EnsembleSamplingPredictor()
self.evals_result_key = score_key
self.pruning_prob_thresh = pruning_prob_thresh
self.maximize = maximize
self.learning_curve_predictor = learning_curve_predictor
self.min_iters_to_prune = min_iters_before_prune
self.test_interval = test_interval
self.history_iterations = []
self.history_scores = []
def __call__(self, env):
if np.isinf(self.known_best_score):
return
current_iteration = env.iteration
if self.evals_result_key is None:
current_score = env.evaluation_result_list[-1][1]
else:
current_score = dict(env.evaluation_result_list)[self.evals_result_key]
if self.maximize:
current_score = -current_score
self.history_iterations.append(current_iteration)
self.history_scores.append(current_score)
if current_iteration < self.min_iters_to_prune:
return
if (current_iteration - self.min_iters_to_prune) % self.test_interval != 0:
return
lcp = self.learning_curve_predictor
lcp.fit(self.history_iterations, self.history_scores)
prob_win = lcp.predict_proba_less_than(env.end_iteration, self.known_best_score)
print('Probability to beat the known best score:', prob_win)
if prob_win < self.pruning_prob_thresh:
raise xgboost.core.EarlyStopException(env.iteration)
|
<commit_before><commit_msg>Add XGBoost callback class for pruning<commit_after>import numpy as np
import xgboost.core
from darkopt import learning_curve
class XGBoostCallback(object):
def __init__(self, known_best_score, score_key=None,
pruning_prob_thresh=0.05,
maximize=False, learning_curve_predictor=None,
min_iters_before_prune=10, test_interval=10):
if maximize:
known_best_score = -known_best_score
self.known_best_score = known_best_score
if learning_curve_predictor is None:
learning_curve_predictor = learning_curve.EnsembleSamplingPredictor()
self.evals_result_key = score_key
self.pruning_prob_thresh = pruning_prob_thresh
self.maximize = maximize
self.learning_curve_predictor = learning_curve_predictor
self.min_iters_to_prune = min_iters_before_prune
self.test_interval = test_interval
self.history_iterations = []
self.history_scores = []
def __call__(self, env):
if np.isinf(self.known_best_score):
return
current_iteration = env.iteration
if self.evals_result_key is None:
current_score = env.evaluation_result_list[-1][1]
else:
current_score = dict(env.evaluation_result_list)[self.evals_result_key]
if self.maximize:
current_score = -current_score
self.history_iterations.append(current_iteration)
self.history_scores.append(current_score)
if current_iteration < self.min_iters_to_prune:
return
if (current_iteration - self.min_iters_to_prune) % self.test_interval != 0:
return
lcp = self.learning_curve_predictor
lcp.fit(self.history_iterations, self.history_scores)
prob_win = lcp.predict_proba_less_than(env.end_iteration, self.known_best_score)
print('Probability to beat the known best score:', prob_win)
if prob_win < self.pruning_prob_thresh:
raise xgboost.core.EarlyStopException(env.iteration)
|
|
ba4a3caef1f361992aa7887d1f434510060d434f
|
hackingignores.py
|
hackingignores.py
|
#!/usr/bin/python3
import collections
import glob
# Run from openstack git org directory
# Format
# Rule: [repo]
result = collections.defaultdict(list)
for file in glob.glob("*/tox.ini"):
repo = file.split('/')[0]
with open(file) as f:
for line in f.readlines():
if line.startswith("ignore"):
ignore = line.strip().split('=')[1].split(',')
for rule in ignore:
if "H" not in rule:
# We only care about hacking rules
continue
result[rule].append(repo)
print("rule: number of ignores")
for k in result:
print("%s: %s" % (k, len(result[k])))
print("-- %s" % result[k])
|
Add code to track which hacking rules are ignored
|
Add code to track which hacking rules are ignored
|
Python
|
apache-2.0
|
jogo/hackingignores
|
Add code to track which hacking rules are ignored
|
#!/usr/bin/python3
import collections
import glob
# Run from openstack git org directory
# Format
# Rule: [repo]
result = collections.defaultdict(list)
for file in glob.glob("*/tox.ini"):
repo = file.split('/')[0]
with open(file) as f:
for line in f.readlines():
if line.startswith("ignore"):
ignore = line.strip().split('=')[1].split(',')
for rule in ignore:
if "H" not in rule:
# We only care about hacking rules
continue
result[rule].append(repo)
print("rule: number of ignores")
for k in result:
print("%s: %s" % (k, len(result[k])))
print("-- %s" % result[k])
|
<commit_before><commit_msg>Add code to track which hacking rules are ignored<commit_after>
|
#!/usr/bin/python3
import collections
import glob
# Run from openstack git org directory
# Format
# Rule: [repo]
result = collections.defaultdict(list)
for file in glob.glob("*/tox.ini"):
repo = file.split('/')[0]
with open(file) as f:
for line in f.readlines():
if line.startswith("ignore"):
ignore = line.strip().split('=')[1].split(',')
for rule in ignore:
if "H" not in rule:
# We only care about hacking rules
continue
result[rule].append(repo)
print("rule: number of ignores")
for k in result:
print("%s: %s" % (k, len(result[k])))
print("-- %s" % result[k])
|
Add code to track which hacking rules are ignored
#!/usr/bin/python3
import collections
import glob
# Run from openstack git org directory
# Format
# Rule: [repo]
result = collections.defaultdict(list)
for file in glob.glob("*/tox.ini"):
repo = file.split('/')[0]
with open(file) as f:
for line in f.readlines():
if line.startswith("ignore"):
ignore = line.strip().split('=')[1].split(',')
for rule in ignore:
if "H" not in rule:
# We only care about hacking rules
continue
result[rule].append(repo)
print("rule: number of ignores")
for k in result:
print("%s: %s" % (k, len(result[k])))
print("-- %s" % result[k])
|
<commit_before><commit_msg>Add code to track which hacking rules are ignored<commit_after>#!/usr/bin/python3
import collections
import glob
# Run from openstack git org directory
# Format
# Rule: [repo]
result = collections.defaultdict(list)
for file in glob.glob("*/tox.ini"):
repo = file.split('/')[0]
with open(file) as f:
for line in f.readlines():
if line.startswith("ignore"):
ignore = line.strip().split('=')[1].split(',')
for rule in ignore:
if "H" not in rule:
# We only care about hacking rules
continue
result[rule].append(repo)
print("rule: number of ignores")
for k in result:
print("%s: %s" % (k, len(result[k])))
print("-- %s" % result[k])
|
|
c74426fc72f521bfcaa0e06922856a111e738a42
|
onadata/apps/logger/migrations/0019_purge_deleted_instances.py
|
onadata/apps/logger/migrations/0019_purge_deleted_instances.py
|
# Generated by Django 2.2.14 on 2021-06-30 19:00
from django.conf import settings
from django.db import migrations
def purge_deleted_instances(apps, schema_editor):
"""
Remove all submissions that have been already marked as deleted from both
PostgreSQL and MongoDB. If this is too slow, revert to a previous release
and run the code in
https://github.com/kobotoolbox/kobocat/issues/696#issuecomment-809622367
using `manage.py shell_plus`.
"""
Instance = apps.get_model('logger', 'Instance')
to_purge = Instance.objects.exclude(deleted_at=None).only('pk')
if not to_purge.exists():
return
print(
f'Purging {to_purge.count()} deleted instances...', end='', flush=True
)
for instance in to_purge.iterator():
# Manually delete from MongoDB because signals are not called in
# migrations (that would require freezing all application code, not
# just the models!)
settings.MONGO_DB.instances.delete_one({'_id': instance.pk})
instance.delete()
print('Done!', flush=True)
def do_nothing(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('logger', '0018_add_submission_counter'),
]
operations = [
migrations.RunPython(
purge_deleted_instances, reverse_code=do_nothing
),
]
|
Add data migration to purge deleted submissions
|
Add data migration to purge deleted submissions
WARNING: may be slow on large databases. Mention this in release notes.
Closes #733.
|
Python
|
bsd-2-clause
|
kobotoolbox/kobocat,kobotoolbox/kobocat,kobotoolbox/kobocat,kobotoolbox/kobocat
|
Add data migration to purge deleted submissions
WARNING: may be slow on large databases. Mention this in release notes.
Closes #733.
|
# Generated by Django 2.2.14 on 2021-06-30 19:00
from django.conf import settings
from django.db import migrations
def purge_deleted_instances(apps, schema_editor):
"""
Remove all submissions that have been already marked as deleted from both
PostgreSQL and MongoDB. If this is too slow, revert to a previous release
and run the code in
https://github.com/kobotoolbox/kobocat/issues/696#issuecomment-809622367
using `manage.py shell_plus`.
"""
Instance = apps.get_model('logger', 'Instance')
to_purge = Instance.objects.exclude(deleted_at=None).only('pk')
if not to_purge.exists():
return
print(
f'Purging {to_purge.count()} deleted instances...', end='', flush=True
)
for instance in to_purge.iterator():
# Manually delete from MongoDB because signals are not called in
# migrations (that would require freezing all application code, not
# just the models!)
settings.MONGO_DB.instances.delete_one({'_id': instance.pk})
instance.delete()
print('Done!', flush=True)
def do_nothing(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('logger', '0018_add_submission_counter'),
]
operations = [
migrations.RunPython(
purge_deleted_instances, reverse_code=do_nothing
),
]
|
<commit_before><commit_msg>Add data migration to purge deleted submissions
WARNING: may be slow on large databases. Mention this in release notes.
Closes #733.<commit_after>
|
# Generated by Django 2.2.14 on 2021-06-30 19:00
from django.conf import settings
from django.db import migrations
def purge_deleted_instances(apps, schema_editor):
"""
Remove all submissions that have been already marked as deleted from both
PostgreSQL and MongoDB. If this is too slow, revert to a previous release
and run the code in
https://github.com/kobotoolbox/kobocat/issues/696#issuecomment-809622367
using `manage.py shell_plus`.
"""
Instance = apps.get_model('logger', 'Instance')
to_purge = Instance.objects.exclude(deleted_at=None).only('pk')
if not to_purge.exists():
return
print(
f'Purging {to_purge.count()} deleted instances...', end='', flush=True
)
for instance in to_purge.iterator():
# Manually delete from MongoDB because signals are not called in
# migrations (that would require freezing all application code, not
# just the models!)
settings.MONGO_DB.instances.delete_one({'_id': instance.pk})
instance.delete()
print('Done!', flush=True)
def do_nothing(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('logger', '0018_add_submission_counter'),
]
operations = [
migrations.RunPython(
purge_deleted_instances, reverse_code=do_nothing
),
]
|
Add data migration to purge deleted submissions
WARNING: may be slow on large databases. Mention this in release notes.
Closes #733.
# Generated by Django 2.2.14 on 2021-06-30 19:00
from django.conf import settings
from django.db import migrations
def purge_deleted_instances(apps, schema_editor):
"""
Remove all submissions that have been already marked as deleted from both
PostgreSQL and MongoDB. If this is too slow, revert to a previous release
and run the code in
https://github.com/kobotoolbox/kobocat/issues/696#issuecomment-809622367
using `manage.py shell_plus`.
"""
Instance = apps.get_model('logger', 'Instance')
to_purge = Instance.objects.exclude(deleted_at=None).only('pk')
if not to_purge.exists():
return
print(
f'Purging {to_purge.count()} deleted instances...', end='', flush=True
)
for instance in to_purge.iterator():
# Manually delete from MongoDB because signals are not called in
# migrations (that would require freezing all application code, not
# just the models!)
settings.MONGO_DB.instances.delete_one({'_id': instance.pk})
instance.delete()
print('Done!', flush=True)
def do_nothing(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('logger', '0018_add_submission_counter'),
]
operations = [
migrations.RunPython(
purge_deleted_instances, reverse_code=do_nothing
),
]
|
<commit_before><commit_msg>Add data migration to purge deleted submissions
WARNING: may be slow on large databases. Mention this in release notes.
Closes #733.<commit_after># Generated by Django 2.2.14 on 2021-06-30 19:00
from django.conf import settings
from django.db import migrations
def purge_deleted_instances(apps, schema_editor):
"""
Remove all submissions that have been already marked as deleted from both
PostgreSQL and MongoDB. If this is too slow, revert to a previous release
and run the code in
https://github.com/kobotoolbox/kobocat/issues/696#issuecomment-809622367
using `manage.py shell_plus`.
"""
Instance = apps.get_model('logger', 'Instance')
to_purge = Instance.objects.exclude(deleted_at=None).only('pk')
if not to_purge.exists():
return
print(
f'Purging {to_purge.count()} deleted instances...', end='', flush=True
)
for instance in to_purge.iterator():
# Manually delete from MongoDB because signals are not called in
# migrations (that would require freezing all application code, not
# just the models!)
settings.MONGO_DB.instances.delete_one({'_id': instance.pk})
instance.delete()
print('Done!', flush=True)
def do_nothing(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('logger', '0018_add_submission_counter'),
]
operations = [
migrations.RunPython(
purge_deleted_instances, reverse_code=do_nothing
),
]
|
|
5487bc28d09ef9628cc192083f4e4dab3742e857
|
ehriportal/portal/templatetags/langname.py
|
ehriportal/portal/templatetags/langname.py
|
from urllib import quote
from django.template import Library
register = Library()
from ehriportal.portal import utils
@register.filter
def langname(code):
"""Creates a haystack facet parameter in format:
&selected_facets=<name>_exact:<value>"""
return utils.language_name_from_code(code)
|
Add a templatetag to convert language code to language name
|
Add a templatetag to convert language code to language name
|
Python
|
mit
|
mikesname/ehri-collections,mikesname/ehri-collections,mikesname/ehri-collections
|
Add a templatetag to convert language code to language name
|
from urllib import quote
from django.template import Library
register = Library()
from ehriportal.portal import utils
@register.filter
def langname(code):
"""Creates a haystack facet parameter in format:
&selected_facets=<name>_exact:<value>"""
return utils.language_name_from_code(code)
|
<commit_before><commit_msg>Add a templatetag to convert language code to language name<commit_after>
|
from urllib import quote
from django.template import Library
register = Library()
from ehriportal.portal import utils
@register.filter
def langname(code):
"""Creates a haystack facet parameter in format:
&selected_facets=<name>_exact:<value>"""
return utils.language_name_from_code(code)
|
Add a templatetag to convert language code to language name
from urllib import quote
from django.template import Library
register = Library()
from ehriportal.portal import utils
@register.filter
def langname(code):
"""Creates a haystack facet parameter in format:
&selected_facets=<name>_exact:<value>"""
return utils.language_name_from_code(code)
|
<commit_before><commit_msg>Add a templatetag to convert language code to language name<commit_after>from urllib import quote
from django.template import Library
register = Library()
from ehriportal.portal import utils
@register.filter
def langname(code):
"""Creates a haystack facet parameter in format:
&selected_facets=<name>_exact:<value>"""
return utils.language_name_from_code(code)
|
|
62d6568dc058f14a518516999dc3a22888593d0d
|
beatles/data/beatles/normalize_audio_cd_paths.py
|
beatles/data/beatles/normalize_audio_cd_paths.py
|
# Change the directory structure and file names of audio CD files so that they
# correspond to the naming in the isophonics annotation dataset.
import glob
import shutil
mkdir -p audio-cd/The_Beatles
echo mv audio-cd-raw/*/*/* audio-cd/The_Beatles
with open('album-mapping.csv', 'r') as file:
album_mapping = [line.strip().split('\t') for line in file.readlines()]
base_dir = 'audio-cd/The_Beatles/'
for src, dest in album_mapping:
shutil.move(base_dir + src, base_dir + dest)
with open('song-mapping.csv', 'r') as file:
song_mapping = [line.strip().split('\t') for line in file.readlines()]
for src, dest in song_mapping:
shutil.move(base_dir + src + '.wav', base_dir + dest + '.wav')
|
Add a script to normalize file names to match between isophonics annotations and audio CD.
|
Add a script to normalize file names to match between isophonics annotations and audio CD.
|
Python
|
mit
|
bzamecnik/ml-playground,bzamecnik/ml-playground,bzamecnik/ml,bzamecnik/ml,bzamecnik/ml
|
Add a script to normalize file names to match between isophonics annotations and audio CD.
|
# Change the directory structure and file names of audio CD files so that they
# correspond to the naming in the isophonics annotation dataset.
import glob
import shutil
mkdir -p audio-cd/The_Beatles
echo mv audio-cd-raw/*/*/* audio-cd/The_Beatles
with open('album-mapping.csv', 'r') as file:
album_mapping = [line.strip().split('\t') for line in file.readlines()]
base_dir = 'audio-cd/The_Beatles/'
for src, dest in album_mapping:
shutil.move(base_dir + src, base_dir + dest)
with open('song-mapping.csv', 'r') as file:
song_mapping = [line.strip().split('\t') for line in file.readlines()]
for src, dest in song_mapping:
shutil.move(base_dir + src + '.wav', base_dir + dest + '.wav')
|
<commit_before><commit_msg>Add a script to normalize file names to match between isophonics annotations and audio CD.<commit_after>
|
# Change the directory structure and file names of audio CD files so that they
# correspond to the naming in the isophonics annotation dataset.
import glob
import shutil
mkdir -p audio-cd/The_Beatles
echo mv audio-cd-raw/*/*/* audio-cd/The_Beatles
with open('album-mapping.csv', 'r') as file:
album_mapping = [line.strip().split('\t') for line in file.readlines()]
base_dir = 'audio-cd/The_Beatles/'
for src, dest in album_mapping:
shutil.move(base_dir + src, base_dir + dest)
with open('song-mapping.csv', 'r') as file:
song_mapping = [line.strip().split('\t') for line in file.readlines()]
for src, dest in song_mapping:
shutil.move(base_dir + src + '.wav', base_dir + dest + '.wav')
|
Add a script to normalize file names to match between isophonics annotations and audio CD.
# Change the directory structure and file names of audio CD files so that they
# correspond to the naming in the isophonics annotation dataset.
import glob
import shutil
mkdir -p audio-cd/The_Beatles
echo mv audio-cd-raw/*/*/* audio-cd/The_Beatles
with open('album-mapping.csv', 'r') as file:
album_mapping = [line.strip().split('\t') for line in file.readlines()]
base_dir = 'audio-cd/The_Beatles/'
for src, dest in album_mapping:
shutil.move(base_dir + src, base_dir + dest)
with open('song-mapping.csv', 'r') as file:
song_mapping = [line.strip().split('\t') for line in file.readlines()]
for src, dest in song_mapping:
shutil.move(base_dir + src + '.wav', base_dir + dest + '.wav')
|
<commit_before><commit_msg>Add a script to normalize file names to match between isophonics annotations and audio CD.<commit_after># Change the directory structure and file names of audio CD files so that they
# correspond to the naming in the isophonics annotation dataset.
import glob
import shutil
mkdir -p audio-cd/The_Beatles
echo mv audio-cd-raw/*/*/* audio-cd/The_Beatles
with open('album-mapping.csv', 'r') as file:
album_mapping = [line.strip().split('\t') for line in file.readlines()]
base_dir = 'audio-cd/The_Beatles/'
for src, dest in album_mapping:
shutil.move(base_dir + src, base_dir + dest)
with open('song-mapping.csv', 'r') as file:
song_mapping = [line.strip().split('\t') for line in file.readlines()]
for src, dest in song_mapping:
shutil.move(base_dir + src + '.wav', base_dir + dest + '.wav')
|
|
bcc69f77d8754161d5edd7ff35d583fb83c82b6b
|
samklang_utils/managers.py
|
samklang_utils/managers.py
|
from django.db.models import Manager
from django.db.models.query_utils import Q
class PermissionManager(Manager):
def for_user(self, user=None):
if user and user.is_authenticated():
return self.get_query_set().filter(Q(group=None) | Q(group__user=user))
else:
return self.get_query_set().filter(group=None)
|
Add new PermissionsManager that will be used by most Samklang modules
|
Add new PermissionsManager that will be used by most Samklang modules
|
Python
|
agpl-3.0
|
sigurdga/samklang-utils,sigurdga/samklang-utils
|
Add new PermissionsManager that will be used by most Samklang modules
|
from django.db.models import Manager
from django.db.models.query_utils import Q
class PermissionManager(Manager):
def for_user(self, user=None):
if user and user.is_authenticated():
return self.get_query_set().filter(Q(group=None) | Q(group__user=user))
else:
return self.get_query_set().filter(group=None)
|
<commit_before><commit_msg>Add new PermissionsManager that will be used by most Samklang modules<commit_after>
|
from django.db.models import Manager
from django.db.models.query_utils import Q
class PermissionManager(Manager):
def for_user(self, user=None):
if user and user.is_authenticated():
return self.get_query_set().filter(Q(group=None) | Q(group__user=user))
else:
return self.get_query_set().filter(group=None)
|
Add new PermissionsManager that will be used by most Samklang modules
from django.db.models import Manager
from django.db.models.query_utils import Q
class PermissionManager(Manager):
def for_user(self, user=None):
if user and user.is_authenticated():
return self.get_query_set().filter(Q(group=None) | Q(group__user=user))
else:
return self.get_query_set().filter(group=None)
|
<commit_before><commit_msg>Add new PermissionsManager that will be used by most Samklang modules<commit_after>from django.db.models import Manager
from django.db.models.query_utils import Q
class PermissionManager(Manager):
def for_user(self, user=None):
if user and user.is_authenticated():
return self.get_query_set().filter(Q(group=None) | Q(group__user=user))
else:
return self.get_query_set().filter(group=None)
|
|
15bd37c9042e2f5734e1123d28ec1544780c7f8f
|
Scripts/Make_Load_Maps/project_nodal_load.py
|
Scripts/Make_Load_Maps/project_nodal_load.py
|
import numpy as np
import scipy.sparse as sparse
import argparse, os
from itertools import izip as zip
# Uses the output of nodal_projection_matrix.py to aggregate signals
# into the nodal domain.
parser = argparse.ArgumentParser(description='Wind conversion options')
parser.add_argument('-r', '--indir', help='Input directory for forecast files', default='../../Data/Signal_Converted', metavar="load root")
parser.add_argument('-o', '--outdir', help='Output directory for forecast files', default='../../Data/Nodal_Signal', metavar="load outroot")
parser.add_argument('-f', '--first', help='First year to extract', default=2012, type=int, metavar="first year")
parser.add_argument('-l', '--last', help='Last year to extract', default=2014, type=int, metavar="last year")
parser.add_argument('-fm', help='First month to extract', default=1, type=int, metavar="first month")
parser.add_argument('-lm', help='Last month to extract', default=12, type=int, metavar="last month")
args = parser.parse_args()
loadtransfer = np.load('../../Data/Metadata/loadtransfercsr_ECMWF.npz')
loadtransfer = sparse.csr_matrix((loadtransfer['data'], loadtransfer['indices'], loadtransfer['indptr']), shape=loadtransfer['shape'])
loadname = 'load-{0:04d}{1:02d}.npz'
indirls = sorted(os.listdir(args.indir))
loadls = [x for x in indirls if 'load-' in x]
startidx = loadls.index(loadname.format(args.first, args.fm))
try:
stopidx = loadls.index(loadname.format(args.last+int(args.lm == 12), args.lm % 12+1))
loadls = loadls[startidx:stopidx]
except ValueError:
print 'Stopdate + 1 month not found - assuming we need to use all directories'
loadls = loadls[startidx:]
for loadfile in loadls:
print loadfile
dataf = np.load(args.indir + '/' + loadfile)
dates = dataf['dates']
data = dataf['data']
shape = data.shape
outdata = loadtransfer.dot(np.reshape(data, (shape[0], shape[1]*shape[2])).T).T
np.savez_compressed(args.outdir + '/' + loadfile, data=outdata, dates=dates)
|
Add projection to nodal load
|
Add projection to nodal load
|
Python
|
apache-2.0
|
DTU-ELMA/European_Dataset,DTU-ELMA/European_Dataset
|
Add projection to nodal load
|
import numpy as np
import scipy.sparse as sparse
import argparse, os
from itertools import izip as zip
# Uses the output of nodal_projection_matrix.py to aggregate signals
# into the nodal domain.
parser = argparse.ArgumentParser(description='Wind conversion options')
parser.add_argument('-r', '--indir', help='Input directory for forecast files', default='../../Data/Signal_Converted', metavar="load root")
parser.add_argument('-o', '--outdir', help='Output directory for forecast files', default='../../Data/Nodal_Signal', metavar="load outroot")
parser.add_argument('-f', '--first', help='First year to extract', default=2012, type=int, metavar="first year")
parser.add_argument('-l', '--last', help='Last year to extract', default=2014, type=int, metavar="last year")
parser.add_argument('-fm', help='First month to extract', default=1, type=int, metavar="first month")
parser.add_argument('-lm', help='Last month to extract', default=12, type=int, metavar="last month")
args = parser.parse_args()
loadtransfer = np.load('../../Data/Metadata/loadtransfercsr_ECMWF.npz')
loadtransfer = sparse.csr_matrix((loadtransfer['data'], loadtransfer['indices'], loadtransfer['indptr']), shape=loadtransfer['shape'])
loadname = 'load-{0:04d}{1:02d}.npz'
indirls = sorted(os.listdir(args.indir))
loadls = [x for x in indirls if 'load-' in x]
startidx = loadls.index(loadname.format(args.first, args.fm))
try:
stopidx = loadls.index(loadname.format(args.last+int(args.lm == 12), args.lm % 12+1))
loadls = loadls[startidx:stopidx]
except ValueError:
print 'Stopdate + 1 month not found - assuming we need to use all directories'
loadls = loadls[startidx:]
for loadfile in loadls:
print loadfile
dataf = np.load(args.indir + '/' + loadfile)
dates = dataf['dates']
data = dataf['data']
shape = data.shape
outdata = loadtransfer.dot(np.reshape(data, (shape[0], shape[1]*shape[2])).T).T
np.savez_compressed(args.outdir + '/' + loadfile, data=outdata, dates=dates)
|
<commit_before><commit_msg>Add projection to nodal load<commit_after>
|
import numpy as np
import scipy.sparse as sparse
import argparse, os
from itertools import izip as zip
# Uses the output of nodal_projection_matrix.py to aggregate signals
# into the nodal domain.
parser = argparse.ArgumentParser(description='Wind conversion options')
parser.add_argument('-r', '--indir', help='Input directory for forecast files', default='../../Data/Signal_Converted', metavar="load root")
parser.add_argument('-o', '--outdir', help='Output directory for forecast files', default='../../Data/Nodal_Signal', metavar="load outroot")
parser.add_argument('-f', '--first', help='First year to extract', default=2012, type=int, metavar="first year")
parser.add_argument('-l', '--last', help='Last year to extract', default=2014, type=int, metavar="last year")
parser.add_argument('-fm', help='First month to extract', default=1, type=int, metavar="first month")
parser.add_argument('-lm', help='Last month to extract', default=12, type=int, metavar="last month")
args = parser.parse_args()
loadtransfer = np.load('../../Data/Metadata/loadtransfercsr_ECMWF.npz')
loadtransfer = sparse.csr_matrix((loadtransfer['data'], loadtransfer['indices'], loadtransfer['indptr']), shape=loadtransfer['shape'])
loadname = 'load-{0:04d}{1:02d}.npz'
indirls = sorted(os.listdir(args.indir))
loadls = [x for x in indirls if 'load-' in x]
startidx = loadls.index(loadname.format(args.first, args.fm))
try:
stopidx = loadls.index(loadname.format(args.last+int(args.lm == 12), args.lm % 12+1))
loadls = loadls[startidx:stopidx]
except ValueError:
print 'Stopdate + 1 month not found - assuming we need to use all directories'
loadls = loadls[startidx:]
for loadfile in loadls:
print loadfile
dataf = np.load(args.indir + '/' + loadfile)
dates = dataf['dates']
data = dataf['data']
shape = data.shape
outdata = loadtransfer.dot(np.reshape(data, (shape[0], shape[1]*shape[2])).T).T
np.savez_compressed(args.outdir + '/' + loadfile, data=outdata, dates=dates)
|
Add projection to nodal load
import numpy as np
import scipy.sparse as sparse
import argparse, os
from itertools import izip as zip
# Uses the output of nodal_projection_matrix.py to aggregate signals
# into the nodal domain.
parser = argparse.ArgumentParser(description='Wind conversion options')
parser.add_argument('-r', '--indir', help='Input directory for forecast files', default='../../Data/Signal_Converted', metavar="load root")
parser.add_argument('-o', '--outdir', help='Output directory for forecast files', default='../../Data/Nodal_Signal', metavar="load outroot")
parser.add_argument('-f', '--first', help='First year to extract', default=2012, type=int, metavar="first year")
parser.add_argument('-l', '--last', help='Last year to extract', default=2014, type=int, metavar="last year")
parser.add_argument('-fm', help='First month to extract', default=1, type=int, metavar="first month")
parser.add_argument('-lm', help='Last month to extract', default=12, type=int, metavar="last month")
args = parser.parse_args()
loadtransfer = np.load('../../Data/Metadata/loadtransfercsr_ECMWF.npz')
loadtransfer = sparse.csr_matrix((loadtransfer['data'], loadtransfer['indices'], loadtransfer['indptr']), shape=loadtransfer['shape'])
loadname = 'load-{0:04d}{1:02d}.npz'
indirls = sorted(os.listdir(args.indir))
loadls = [x for x in indirls if 'load-' in x]
startidx = loadls.index(loadname.format(args.first, args.fm))
try:
stopidx = loadls.index(loadname.format(args.last+int(args.lm == 12), args.lm % 12+1))
loadls = loadls[startidx:stopidx]
except ValueError:
print 'Stopdate + 1 month not found - assuming we need to use all directories'
loadls = loadls[startidx:]
for loadfile in loadls:
print loadfile
dataf = np.load(args.indir + '/' + loadfile)
dates = dataf['dates']
data = dataf['data']
shape = data.shape
outdata = loadtransfer.dot(np.reshape(data, (shape[0], shape[1]*shape[2])).T).T
np.savez_compressed(args.outdir + '/' + loadfile, data=outdata, dates=dates)
|
<commit_before><commit_msg>Add projection to nodal load<commit_after>import numpy as np
import scipy.sparse as sparse
import argparse, os
from itertools import izip as zip
# Uses the output of nodal_projection_matrix.py to aggregate signals
# into the nodal domain.
parser = argparse.ArgumentParser(description='Wind conversion options')
parser.add_argument('-r', '--indir', help='Input directory for forecast files', default='../../Data/Signal_Converted', metavar="load root")
parser.add_argument('-o', '--outdir', help='Output directory for forecast files', default='../../Data/Nodal_Signal', metavar="load outroot")
parser.add_argument('-f', '--first', help='First year to extract', default=2012, type=int, metavar="first year")
parser.add_argument('-l', '--last', help='Last year to extract', default=2014, type=int, metavar="last year")
parser.add_argument('-fm', help='First month to extract', default=1, type=int, metavar="first month")
parser.add_argument('-lm', help='Last month to extract', default=12, type=int, metavar="last month")
args = parser.parse_args()
loadtransfer = np.load('../../Data/Metadata/loadtransfercsr_ECMWF.npz')
loadtransfer = sparse.csr_matrix((loadtransfer['data'], loadtransfer['indices'], loadtransfer['indptr']), shape=loadtransfer['shape'])
loadname = 'load-{0:04d}{1:02d}.npz'
indirls = sorted(os.listdir(args.indir))
loadls = [x for x in indirls if 'load-' in x]
startidx = loadls.index(loadname.format(args.first, args.fm))
try:
stopidx = loadls.index(loadname.format(args.last+int(args.lm == 12), args.lm % 12+1))
loadls = loadls[startidx:stopidx]
except ValueError:
print 'Stopdate + 1 month not found - assuming we need to use all directories'
loadls = loadls[startidx:]
for loadfile in loadls:
print loadfile
dataf = np.load(args.indir + '/' + loadfile)
dates = dataf['dates']
data = dataf['data']
shape = data.shape
outdata = loadtransfer.dot(np.reshape(data, (shape[0], shape[1]*shape[2])).T).T
np.savez_compressed(args.outdir + '/' + loadfile, data=outdata, dates=dates)
|
|
65fa05e6646487180c02b2628b53138a24fabf32
|
benchexec/tools/testcov.py
|
benchexec/tools/testcov.py
|
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2019 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for TestCov (https://gitlab.com/sosy-lab/software/test-suite-validator).
"""
REQUIRED_PATHS = ["suite_validation", "lib", "bin"]
def program_files(self, executable):
return self._program_files_from_executable(
executable, self.REQUIRED_PATHS, parent_dir=True
)
def executable(self):
return util.find_executable("testcov", "bin/testcov")
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
cmd = [executable] + options
if propertyfile:
cmd += ["--goal", propertyfile]
return cmd + tasks
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "TestCov"
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
"""
for line in reversed(output):
if line.startswith("ERROR:"):
if "timeout" in line.lower():
return "TIMEOUT"
else:
return "ERROR ({0})".format(returncode)
elif line.startswith("Result:") and "FALSE" in line:
return result.RESULT_FALSE_REACH
elif line.startswith("Result:") and "TRUE" in line:
return result.RESULT_TRUE_PROP
elif line.startswith("Result") and "DONE" in line:
return result.RESULT_DONE
return result.RESULT_UNKNOWN
def get_value_from_output(self, lines, identifier):
for line in reversed(lines):
if identifier in line:
start = line.find(":") + 1
end = line.find("(", start)
return line[start:end].strip()
return None
|
Add tool-info module for TestCov
|
Add tool-info module for TestCov
|
Python
|
apache-2.0
|
dbeyer/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,dbeyer/benchexec
|
Add tool-info module for TestCov
|
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2019 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for TestCov (https://gitlab.com/sosy-lab/software/test-suite-validator).
"""
REQUIRED_PATHS = ["suite_validation", "lib", "bin"]
def program_files(self, executable):
return self._program_files_from_executable(
executable, self.REQUIRED_PATHS, parent_dir=True
)
def executable(self):
return util.find_executable("testcov", "bin/testcov")
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
cmd = [executable] + options
if propertyfile:
cmd += ["--goal", propertyfile]
return cmd + tasks
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "TestCov"
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
"""
for line in reversed(output):
if line.startswith("ERROR:"):
if "timeout" in line.lower():
return "TIMEOUT"
else:
return "ERROR ({0})".format(returncode)
elif line.startswith("Result:") and "FALSE" in line:
return result.RESULT_FALSE_REACH
elif line.startswith("Result:") and "TRUE" in line:
return result.RESULT_TRUE_PROP
elif line.startswith("Result") and "DONE" in line:
return result.RESULT_DONE
return result.RESULT_UNKNOWN
def get_value_from_output(self, lines, identifier):
for line in reversed(lines):
if identifier in line:
start = line.find(":") + 1
end = line.find("(", start)
return line[start:end].strip()
return None
|
<commit_before><commit_msg>Add tool-info module for TestCov<commit_after>
|
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2019 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for TestCov (https://gitlab.com/sosy-lab/software/test-suite-validator).
"""
REQUIRED_PATHS = ["suite_validation", "lib", "bin"]
def program_files(self, executable):
return self._program_files_from_executable(
executable, self.REQUIRED_PATHS, parent_dir=True
)
def executable(self):
return util.find_executable("testcov", "bin/testcov")
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
cmd = [executable] + options
if propertyfile:
cmd += ["--goal", propertyfile]
return cmd + tasks
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "TestCov"
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
"""
for line in reversed(output):
if line.startswith("ERROR:"):
if "timeout" in line.lower():
return "TIMEOUT"
else:
return "ERROR ({0})".format(returncode)
elif line.startswith("Result:") and "FALSE" in line:
return result.RESULT_FALSE_REACH
elif line.startswith("Result:") and "TRUE" in line:
return result.RESULT_TRUE_PROP
elif line.startswith("Result") and "DONE" in line:
return result.RESULT_DONE
return result.RESULT_UNKNOWN
def get_value_from_output(self, lines, identifier):
for line in reversed(lines):
if identifier in line:
start = line.find(":") + 1
end = line.find("(", start)
return line[start:end].strip()
return None
|
Add tool-info module for TestCov
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2019 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for TestCov (https://gitlab.com/sosy-lab/software/test-suite-validator).
"""
REQUIRED_PATHS = ["suite_validation", "lib", "bin"]
def program_files(self, executable):
return self._program_files_from_executable(
executable, self.REQUIRED_PATHS, parent_dir=True
)
def executable(self):
return util.find_executable("testcov", "bin/testcov")
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
cmd = [executable] + options
if propertyfile:
cmd += ["--goal", propertyfile]
return cmd + tasks
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "TestCov"
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
"""
for line in reversed(output):
if line.startswith("ERROR:"):
if "timeout" in line.lower():
return "TIMEOUT"
else:
return "ERROR ({0})".format(returncode)
elif line.startswith("Result:") and "FALSE" in line:
return result.RESULT_FALSE_REACH
elif line.startswith("Result:") and "TRUE" in line:
return result.RESULT_TRUE_PROP
elif line.startswith("Result") and "DONE" in line:
return result.RESULT_DONE
return result.RESULT_UNKNOWN
def get_value_from_output(self, lines, identifier):
for line in reversed(lines):
if identifier in line:
start = line.find(":") + 1
end = line.find("(", start)
return line[start:end].strip()
return None
|
<commit_before><commit_msg>Add tool-info module for TestCov<commit_after>"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2019 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import benchexec.result as result
import benchexec.util as util
import benchexec.tools.template
class Tool(benchexec.tools.template.BaseTool):
"""
Tool info for TestCov (https://gitlab.com/sosy-lab/software/test-suite-validator).
"""
REQUIRED_PATHS = ["suite_validation", "lib", "bin"]
def program_files(self, executable):
return self._program_files_from_executable(
executable, self.REQUIRED_PATHS, parent_dir=True
)
def executable(self):
return util.find_executable("testcov", "bin/testcov")
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
cmd = [executable] + options
if propertyfile:
cmd += ["--goal", propertyfile]
return cmd + tasks
def version(self, executable):
return self._version_from_tool(executable)
def name(self):
return "TestCov"
def determine_result(self, returncode, returnsignal, output, isTimeout):
"""
Parse the output of the tool and extract the verification result.
This method always needs to be overridden.
If the tool gave a result, this method needs to return one of the
benchexec.result.RESULT_* strings.
Otherwise an arbitrary string can be returned that will be shown to the user
and should give some indication of the failure reason
(e.g., "CRASH", "OUT_OF_MEMORY", etc.).
"""
for line in reversed(output):
if line.startswith("ERROR:"):
if "timeout" in line.lower():
return "TIMEOUT"
else:
return "ERROR ({0})".format(returncode)
elif line.startswith("Result:") and "FALSE" in line:
return result.RESULT_FALSE_REACH
elif line.startswith("Result:") and "TRUE" in line:
return result.RESULT_TRUE_PROP
elif line.startswith("Result") and "DONE" in line:
return result.RESULT_DONE
return result.RESULT_UNKNOWN
def get_value_from_output(self, lines, identifier):
for line in reversed(lines):
if identifier in line:
start = line.find(":") + 1
end = line.find("(", start)
return line[start:end].strip()
return None
|
|
fde618591d6c3ac3503583da07f8be82f8d85793
|
scratchpad/captureFrame.py
|
scratchpad/captureFrame.py
|
import cv2
import time
import optparse
parser = optparse.OptionParser()
parser.add_option("-y", "--yPos", action="store", type="int", dest="yPos")
parser.add_option("-x", "--xPos", action="store", type="int", dest="xPos")
parser.add_option("-s", "--scale", action="store", type="int", dest="scale")
parser.add_option("-d", "--destonation", action="store", type="string", dest="path")
options, remainder = parser.parse_args()
content = cv2.VideoCapture(0)
#Takes five frames for exposure
for i in range(0,5):
frame = content.read()[1]
scale = options.scale
x = frame.shape[1]
y = frame.shape[0]
#Scale if scale is set
if(scale != None):
newX = (int)(x/scale)
newY = (int)(y/scale)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.xPos != None):
newX = options.xPos
newY = (int)((newX*y)/x)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.yPos != None):
newY = options.yPos
newX = (int)((newY*x)/y)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
#Figeurs out path
path = options.path
if path == None:
path = ("%s.png" %time.strftime("%H:%M:%S"))
cv2.imwrite(path, frame)
#cv2.imshow("Test", frame)
|
Add script found on roboRIO
|
Add script found on roboRIO
|
Python
|
apache-2.0
|
CarterFendley/2015-vision,CarterFendley/2015-vision,frc1418/2015-vision,frc1418/2015-vision
|
Add script found on roboRIO
|
import cv2
import time
import optparse
parser = optparse.OptionParser()
parser.add_option("-y", "--yPos", action="store", type="int", dest="yPos")
parser.add_option("-x", "--xPos", action="store", type="int", dest="xPos")
parser.add_option("-s", "--scale", action="store", type="int", dest="scale")
parser.add_option("-d", "--destonation", action="store", type="string", dest="path")
options, remainder = parser.parse_args()
content = cv2.VideoCapture(0)
#Takes five frames for exposure
for i in range(0,5):
frame = content.read()[1]
scale = options.scale
x = frame.shape[1]
y = frame.shape[0]
#Scale if scale is set
if(scale != None):
newX = (int)(x/scale)
newY = (int)(y/scale)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.xPos != None):
newX = options.xPos
newY = (int)((newX*y)/x)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.yPos != None):
newY = options.yPos
newX = (int)((newY*x)/y)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
#Figeurs out path
path = options.path
if path == None:
path = ("%s.png" %time.strftime("%H:%M:%S"))
cv2.imwrite(path, frame)
#cv2.imshow("Test", frame)
|
<commit_before><commit_msg>Add script found on roboRIO<commit_after>
|
import cv2
import time
import optparse
parser = optparse.OptionParser()
parser.add_option("-y", "--yPos", action="store", type="int", dest="yPos")
parser.add_option("-x", "--xPos", action="store", type="int", dest="xPos")
parser.add_option("-s", "--scale", action="store", type="int", dest="scale")
parser.add_option("-d", "--destonation", action="store", type="string", dest="path")
options, remainder = parser.parse_args()
content = cv2.VideoCapture(0)
#Takes five frames for exposure
for i in range(0,5):
frame = content.read()[1]
scale = options.scale
x = frame.shape[1]
y = frame.shape[0]
#Scale if scale is set
if(scale != None):
newX = (int)(x/scale)
newY = (int)(y/scale)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.xPos != None):
newX = options.xPos
newY = (int)((newX*y)/x)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.yPos != None):
newY = options.yPos
newX = (int)((newY*x)/y)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
#Figeurs out path
path = options.path
if path == None:
path = ("%s.png" %time.strftime("%H:%M:%S"))
cv2.imwrite(path, frame)
#cv2.imshow("Test", frame)
|
Add script found on roboRIO
import cv2
import time
import optparse
parser = optparse.OptionParser()
parser.add_option("-y", "--yPos", action="store", type="int", dest="yPos")
parser.add_option("-x", "--xPos", action="store", type="int", dest="xPos")
parser.add_option("-s", "--scale", action="store", type="int", dest="scale")
parser.add_option("-d", "--destonation", action="store", type="string", dest="path")
options, remainder = parser.parse_args()
content = cv2.VideoCapture(0)
#Takes five frames for exposure
for i in range(0,5):
frame = content.read()[1]
scale = options.scale
x = frame.shape[1]
y = frame.shape[0]
#Scale if scale is set
if(scale != None):
newX = (int)(x/scale)
newY = (int)(y/scale)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.xPos != None):
newX = options.xPos
newY = (int)((newX*y)/x)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.yPos != None):
newY = options.yPos
newX = (int)((newY*x)/y)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
#Figeurs out path
path = options.path
if path == None:
path = ("%s.png" %time.strftime("%H:%M:%S"))
cv2.imwrite(path, frame)
#cv2.imshow("Test", frame)
|
<commit_before><commit_msg>Add script found on roboRIO<commit_after>import cv2
import time
import optparse
parser = optparse.OptionParser()
parser.add_option("-y", "--yPos", action="store", type="int", dest="yPos")
parser.add_option("-x", "--xPos", action="store", type="int", dest="xPos")
parser.add_option("-s", "--scale", action="store", type="int", dest="scale")
parser.add_option("-d", "--destonation", action="store", type="string", dest="path")
options, remainder = parser.parse_args()
content = cv2.VideoCapture(0)
#Takes five frames for exposure
for i in range(0,5):
frame = content.read()[1]
scale = options.scale
x = frame.shape[1]
y = frame.shape[0]
#Scale if scale is set
if(scale != None):
newX = (int)(x/scale)
newY = (int)(y/scale)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.xPos != None):
newX = options.xPos
newY = (int)((newX*y)/x)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
elif(options.yPos != None):
newY = options.yPos
newX = (int)((newY*x)/y)
newSize = (newX, newY)
frame = cv2.resize(frame, newSize)
#Figeurs out path
path = options.path
if path == None:
path = ("%s.png" %time.strftime("%H:%M:%S"))
cv2.imwrite(path, frame)
#cv2.imshow("Test", frame)
|
|
8d84f59ae4ac2fd88b32f6059451652494556201
|
scripts/delete_restored_trashedfilenodes.py
|
scripts/delete_restored_trashedfilenodes.py
|
# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import logging
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.files.models.base import TrashedFileNode, StoredFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
stored = StoredFileNode.find().get_keys()
trashed = TrashedFileNode.find().get_keys()
stored_set = set(stored)
trashed_set = set(trashed)
intersection = trashed_set & stored_set
print('There are {} restored trashed file nodes'.format(len(intersection)))
for trash_id in intersection:
TrashedFileNode.remove_one(trash_id)
print('Removed TrashedFileNode {}'.format(trash_id))
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
Add migration script to delete restored trashedfilenodes
|
Add migration script to delete restored trashedfilenodes
|
Python
|
apache-2.0
|
adlius/osf.io,mattclark/osf.io,laurenrevere/osf.io,saradbowman/osf.io,chennan47/osf.io,sloria/osf.io,binoculars/osf.io,erinspace/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,chennan47/osf.io,cslzchen/osf.io,aaxelb/osf.io,Nesiehr/osf.io,baylee-d/osf.io,sloria/osf.io,Nesiehr/osf.io,felliott/osf.io,hmoco/osf.io,cwisecarver/osf.io,mfraezz/osf.io,mfraezz/osf.io,cslzchen/osf.io,hmoco/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,felliott/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,adlius/osf.io,mattclark/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,Johnetordoff/osf.io,cwisecarver/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,laurenrevere/osf.io,caneruguz/osf.io,TomBaxter/osf.io,chrisseto/osf.io,aaxelb/osf.io,aaxelb/osf.io,icereval/osf.io,crcresearch/osf.io,leb2dg/osf.io,erinspace/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,baylee-d/osf.io,chrisseto/osf.io,adlius/osf.io,icereval/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,icereval/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,felliott/osf.io,mfraezz/osf.io,caneruguz/osf.io,TomBaxter/osf.io,baylee-d/osf.io,crcresearch/osf.io,pattisdr/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,leb2dg/osf.io,pattisdr/osf.io,hmoco/osf.io,caneruguz/osf.io,laurenrevere/osf.io,mattclark/osf.io,aaxelb/osf.io,crcresearch/osf.io,binoculars/osf.io,hmoco/osf.io,cslzchen/osf.io,erinspace/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,mfraezz/osf.io,adlius/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,felliott/osf.io
|
Add migration script to delete restored trashedfilenodes
|
# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import logging
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.files.models.base import TrashedFileNode, StoredFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
stored = StoredFileNode.find().get_keys()
trashed = TrashedFileNode.find().get_keys()
stored_set = set(stored)
trashed_set = set(trashed)
intersection = trashed_set & stored_set
print('There are {} restored trashed file nodes'.format(len(intersection)))
for trash_id in intersection:
TrashedFileNode.remove_one(trash_id)
print('Removed TrashedFileNode {}'.format(trash_id))
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add migration script to delete restored trashedfilenodes<commit_after>
|
# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import logging
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.files.models.base import TrashedFileNode, StoredFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
stored = StoredFileNode.find().get_keys()
trashed = TrashedFileNode.find().get_keys()
stored_set = set(stored)
trashed_set = set(trashed)
intersection = trashed_set & stored_set
print('There are {} restored trashed file nodes'.format(len(intersection)))
for trash_id in intersection:
TrashedFileNode.remove_one(trash_id)
print('Removed TrashedFileNode {}'.format(trash_id))
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
Add migration script to delete restored trashedfilenodes# -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import logging
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.files.models.base import TrashedFileNode, StoredFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
stored = StoredFileNode.find().get_keys()
trashed = TrashedFileNode.find().get_keys()
stored_set = set(stored)
trashed_set = set(trashed)
intersection = trashed_set & stored_set
print('There are {} restored trashed file nodes'.format(len(intersection)))
for trash_id in intersection:
TrashedFileNode.remove_one(trash_id)
print('Removed TrashedFileNode {}'.format(trash_id))
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add migration script to delete restored trashedfilenodes<commit_after># -*- coding: utf-8 -*-
"""Restore a deleted StoredFileNode. If the file was reuploaded, renames the file
to <filename> (restored).<ext>. For example, README.rst would be renamed to README (restored).rst.
python -m scripts.restore_file 123ab --dry
python -m scripts.restore_file 123ab
"""
import sys
import logging
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.files.models.base import TrashedFileNode, StoredFileNode
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def main():
init_app(routes=False)
dry = '--dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
with TokuTransaction():
stored = StoredFileNode.find().get_keys()
trashed = TrashedFileNode.find().get_keys()
stored_set = set(stored)
trashed_set = set(trashed)
intersection = trashed_set & stored_set
print('There are {} restored trashed file nodes'.format(len(intersection)))
for trash_id in intersection:
TrashedFileNode.remove_one(trash_id)
print('Removed TrashedFileNode {}'.format(trash_id))
if dry:
raise RuntimeError('Dry run - rolling back transaction')
if __name__ == "__main__":
main()
|
|
17f4e610e272d24c9178e43caa79a6f7c17a568b
|
tests/test_orderbook.py
|
tests/test_orderbook.py
|
from src import orderbook as ob
def test_create_msg_incrementing_message_id():
first_message = ob.create_msg()
second_message = ob.create_msg()
assert first_message['message-id'] == 0, 'Expected 0, got {}'.format(first_message['message-id'])
assert second_message['message-id'] == 1, 'Expected 1, got {}'.format(second_message['message-id'])
def test_create_msg():
message = ob.create_msg()
assert type(message) == dict
def test_create_msg_passing_options():
options = {
'hello': 'world',
}
message = ob.create_msg(options=options)
assert 'hello' in message
assert message['hello'] == 'world'
def test_create_msg_passing_options_overriding_default():
options = {
'id': 1234,
}
message = ob.create_msg(options=options)
assert 'id' in message
assert message['id'] == 1234
def test_create_ask():
import datetime
ask = ob.create_ask(1, 1, datetime.datetime.now())
assert ask['type'] == 'ask'
assert ask['price'] == 1
assert ask['quantity'] == 1
assert len(ob.offers) == 1
|
Add some basic testing for orderbook.create_msg.
|
Add some basic testing for orderbook.create_msg.
|
Python
|
mit
|
Tribler/decentral-market
|
Add some basic testing for orderbook.create_msg.
|
from src import orderbook as ob
def test_create_msg_incrementing_message_id():
first_message = ob.create_msg()
second_message = ob.create_msg()
assert first_message['message-id'] == 0, 'Expected 0, got {}'.format(first_message['message-id'])
assert second_message['message-id'] == 1, 'Expected 1, got {}'.format(second_message['message-id'])
def test_create_msg():
message = ob.create_msg()
assert type(message) == dict
def test_create_msg_passing_options():
options = {
'hello': 'world',
}
message = ob.create_msg(options=options)
assert 'hello' in message
assert message['hello'] == 'world'
def test_create_msg_passing_options_overriding_default():
options = {
'id': 1234,
}
message = ob.create_msg(options=options)
assert 'id' in message
assert message['id'] == 1234
def test_create_ask():
import datetime
ask = ob.create_ask(1, 1, datetime.datetime.now())
assert ask['type'] == 'ask'
assert ask['price'] == 1
assert ask['quantity'] == 1
assert len(ob.offers) == 1
|
<commit_before><commit_msg>Add some basic testing for orderbook.create_msg.<commit_after>
|
from src import orderbook as ob
def test_create_msg_incrementing_message_id():
first_message = ob.create_msg()
second_message = ob.create_msg()
assert first_message['message-id'] == 0, 'Expected 0, got {}'.format(first_message['message-id'])
assert second_message['message-id'] == 1, 'Expected 1, got {}'.format(second_message['message-id'])
def test_create_msg():
message = ob.create_msg()
assert type(message) == dict
def test_create_msg_passing_options():
options = {
'hello': 'world',
}
message = ob.create_msg(options=options)
assert 'hello' in message
assert message['hello'] == 'world'
def test_create_msg_passing_options_overriding_default():
options = {
'id': 1234,
}
message = ob.create_msg(options=options)
assert 'id' in message
assert message['id'] == 1234
def test_create_ask():
import datetime
ask = ob.create_ask(1, 1, datetime.datetime.now())
assert ask['type'] == 'ask'
assert ask['price'] == 1
assert ask['quantity'] == 1
assert len(ob.offers) == 1
|
Add some basic testing for orderbook.create_msg.from src import orderbook as ob
def test_create_msg_incrementing_message_id():
first_message = ob.create_msg()
second_message = ob.create_msg()
assert first_message['message-id'] == 0, 'Expected 0, got {}'.format(first_message['message-id'])
assert second_message['message-id'] == 1, 'Expected 1, got {}'.format(second_message['message-id'])
def test_create_msg():
message = ob.create_msg()
assert type(message) == dict
def test_create_msg_passing_options():
options = {
'hello': 'world',
}
message = ob.create_msg(options=options)
assert 'hello' in message
assert message['hello'] == 'world'
def test_create_msg_passing_options_overriding_default():
options = {
'id': 1234,
}
message = ob.create_msg(options=options)
assert 'id' in message
assert message['id'] == 1234
def test_create_ask():
import datetime
ask = ob.create_ask(1, 1, datetime.datetime.now())
assert ask['type'] == 'ask'
assert ask['price'] == 1
assert ask['quantity'] == 1
assert len(ob.offers) == 1
|
<commit_before><commit_msg>Add some basic testing for orderbook.create_msg.<commit_after>from src import orderbook as ob
def test_create_msg_incrementing_message_id():
first_message = ob.create_msg()
second_message = ob.create_msg()
assert first_message['message-id'] == 0, 'Expected 0, got {}'.format(first_message['message-id'])
assert second_message['message-id'] == 1, 'Expected 1, got {}'.format(second_message['message-id'])
def test_create_msg():
message = ob.create_msg()
assert type(message) == dict
def test_create_msg_passing_options():
options = {
'hello': 'world',
}
message = ob.create_msg(options=options)
assert 'hello' in message
assert message['hello'] == 'world'
def test_create_msg_passing_options_overriding_default():
options = {
'id': 1234,
}
message = ob.create_msg(options=options)
assert 'id' in message
assert message['id'] == 1234
def test_create_ask():
import datetime
ask = ob.create_ask(1, 1, datetime.datetime.now())
assert ask['type'] == 'ask'
assert ask['price'] == 1
assert ask['quantity'] == 1
assert len(ob.offers) == 1
|
|
f5e9ab08d252c9a135a5b96ec157b74e92565f60
|
evalset/multiopt_test_funcs.py
|
evalset/multiopt_test_funcs.py
|
from evalset.test_funcs import TestFunction, lzip
import numpy
class MultioptTestFunction(TestFunction):
def __init__(self, dim):
super(MultioptTestFunction, self).__init__(dim)
self.local_minima = [] # Sorted in increasing order of function value at the local minima
class LowDMixtureOfGaussians(MultioptTestFunction):
def __init__(self, dim=2):
assert dim == 2
        super(LowDMixtureOfGaussians, self).__init__(dim)
self.bounds = lzip([-1] * self.dim, [1] * self.dim)
self.fmin = -0.502124885135
self.fmax = 0
self.local_minima = [(-0.2, -0.5), (0.8, 0.3)]
def do_evaluate(self, x):
x1, x2 = x
return -(
.5 * numpy.exp(-10 * (.8 * (x1 + .2) ** 2 + .7 * (x2 + .5) ** 2)) +
.5 * numpy.exp(-8 * (.3 * (x1 - .8) ** 2 + .6 * (x2 - .3) ** 2))
)
|
Add multiopt class for evalset
|
Add multiopt class for evalset
|
Python
|
mit
|
sigopt/evalset
|
Add multiopt class for evalset
|
from evalset.test_funcs import TestFunction, lzip
import numpy
class MultioptTestFunction(TestFunction):
def __init__(self, dim):
super(MultioptTestFunction, self).__init__(dim)
self.local_minima = [] # Sorted in increasing order of function value at the local minima
class LowDMixtureOfGaussians(MultioptTestFunction):
def __init__(self, dim=2):
assert dim == 2
        super(LowDMixtureOfGaussians, self).__init__(dim)
self.bounds = lzip([-1] * self.dim, [1] * self.dim)
self.fmin = -0.502124885135
self.fmax = 0
self.local_minima = [(-0.2, -0.5), (0.8, 0.3)]
def do_evaluate(self, x):
x1, x2 = x
return -(
.5 * numpy.exp(-10 * (.8 * (x1 + .2) ** 2 + .7 * (x2 + .5) ** 2)) +
.5 * numpy.exp(-8 * (.3 * (x1 - .8) ** 2 + .6 * (x2 - .3) ** 2))
)
|
<commit_before><commit_msg>Add multiopt class for evalset<commit_after>
|
from evalset.test_funcs import TestFunction, lzip
import numpy
class MultioptTestFunction(TestFunction):
def __init__(self, dim):
super(MultioptTestFunction, self).__init__(dim)
self.local_minima = [] # Sorted in increasing order of function value at the local minima
class LowDMixtureOfGaussians(MultioptTestFunction):
def __init__(self, dim=2):
assert dim == 2
        super(LowDMixtureOfGaussians, self).__init__(dim)
self.bounds = lzip([-1] * self.dim, [1] * self.dim)
self.fmin = -0.502124885135
self.fmax = 0
self.local_minima = [(-0.2, -0.5), (0.8, 0.3)]
def do_evaluate(self, x):
x1, x2 = x
return -(
.5 * numpy.exp(-10 * (.8 * (x1 + .2) ** 2 + .7 * (x2 + .5) ** 2)) +
.5 * numpy.exp(-8 * (.3 * (x1 - .8) ** 2 + .6 * (x2 - .3) ** 2))
)
|
Add multiopt class for evalsetfrom evalset.test_funcs import TestFunction, lzip
import numpy
class MultioptTestFunction(TestFunction):
def __init__(self, dim):
super(MultioptTestFunction, self).__init__(dim)
self.local_minima = [] # Sorted in increasing order of function value at the local minima
class LowDMixtureOfGaussians(MultioptTestFunction):
def __init__(self, dim=2):
assert dim == 2
        super(LowDMixtureOfGaussians, self).__init__(dim)
self.bounds = lzip([-1] * self.dim, [1] * self.dim)
self.fmin = -0.502124885135
self.fmax = 0
self.local_minima = [(-0.2, -0.5), (0.8, 0.3)]
def do_evaluate(self, x):
x1, x2 = x
return -(
.5 * numpy.exp(-10 * (.8 * (x1 + .2) ** 2 + .7 * (x2 + .5) ** 2)) +
.5 * numpy.exp(-8 * (.3 * (x1 - .8) ** 2 + .6 * (x2 - .3) ** 2))
)
|
<commit_before><commit_msg>Add multiopt class for evalset<commit_after>from evalset.test_funcs import TestFunction, lzip
import numpy
class MultioptTestFunction(TestFunction):
def __init__(self, dim):
super(MultioptTestFunction, self).__init__(dim)
self.local_minima = [] # Sorted in increasing order of function value at the local minima
class LowDMixtureOfGaussians(MultioptTestFunction):
def __init__(self, dim=2):
assert dim == 2
        super(LowDMixtureOfGaussians, self).__init__(dim)
self.bounds = lzip([-1] * self.dim, [1] * self.dim)
self.fmin = -0.502124885135
self.fmax = 0
self.local_minima = [(-0.2, -0.5), (0.8, 0.3)]
def do_evaluate(self, x):
x1, x2 = x
return -(
.5 * numpy.exp(-10 * (.8 * (x1 + .2) ** 2 + .7 * (x2 + .5) ** 2)) +
.5 * numpy.exp(-8 * (.3 * (x1 - .8) ** 2 + .6 * (x2 - .3) ** 2))
)
|
|
402315309ae576059014b8340db66477f9f1b0c5
|
app/grandchallenge/cases/migrations/0011_auto_20190311_1350.py
|
app/grandchallenge/cases/migrations/0011_auto_20190311_1350.py
|
# Generated by Django 2.1.7 on 2019-03-11 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0010_auto_20190219_1301'),
]
operations = [
migrations.AddField(
model_name='image',
name='field_of_view',
field=models.CharField(choices=[('F1M', 'F1M'), ('F2', 'F2'), ('F3M', 'F3M'), ('F4', 'F4'), ('F5', 'F5'), ('F6', 'F6'), ('F7', 'F7'), ('U', 'Unknown'), (None, 'Not applicable')], default=None, help_text='What is the field of view of this image?', max_length=3, null=True),
),
migrations.AddField(
model_name='image',
name='stereoscopic_choice',
field=models.CharField(choices=[('L', 'Left'), ('R', 'Right'), ('U', 'Unknown'), (None, 'Not applicable')], default='U', help_text='Is this the left or right image of a stereoscopic pair?', max_length=1, null=True),
),
]
|
Add migration for new attributes
|
Add migration for new attributes
|
Python
|
apache-2.0
|
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
|
Add migration for new attributes
|
# Generated by Django 2.1.7 on 2019-03-11 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0010_auto_20190219_1301'),
]
operations = [
migrations.AddField(
model_name='image',
name='field_of_view',
field=models.CharField(choices=[('F1M', 'F1M'), ('F2', 'F2'), ('F3M', 'F3M'), ('F4', 'F4'), ('F5', 'F5'), ('F6', 'F6'), ('F7', 'F7'), ('U', 'Unknown'), (None, 'Not applicable')], default=None, help_text='What is the field of view of this image?', max_length=3, null=True),
),
migrations.AddField(
model_name='image',
name='stereoscopic_choice',
field=models.CharField(choices=[('L', 'Left'), ('R', 'Right'), ('U', 'Unknown'), (None, 'Not applicable')], default='U', help_text='Is this the left or right image of a stereoscopic pair?', max_length=1, null=True),
),
]
|
<commit_before><commit_msg>Add migration for new attributes<commit_after>
|
# Generated by Django 2.1.7 on 2019-03-11 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0010_auto_20190219_1301'),
]
operations = [
migrations.AddField(
model_name='image',
name='field_of_view',
field=models.CharField(choices=[('F1M', 'F1M'), ('F2', 'F2'), ('F3M', 'F3M'), ('F4', 'F4'), ('F5', 'F5'), ('F6', 'F6'), ('F7', 'F7'), ('U', 'Unknown'), (None, 'Not applicable')], default=None, help_text='What is the field of view of this image?', max_length=3, null=True),
),
migrations.AddField(
model_name='image',
name='stereoscopic_choice',
field=models.CharField(choices=[('L', 'Left'), ('R', 'Right'), ('U', 'Unknown'), (None, 'Not applicable')], default='U', help_text='Is this the left or right image of a stereoscopic pair?', max_length=1, null=True),
),
]
|
Add migration for new attributes# Generated by Django 2.1.7 on 2019-03-11 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0010_auto_20190219_1301'),
]
operations = [
migrations.AddField(
model_name='image',
name='field_of_view',
field=models.CharField(choices=[('F1M', 'F1M'), ('F2', 'F2'), ('F3M', 'F3M'), ('F4', 'F4'), ('F5', 'F5'), ('F6', 'F6'), ('F7', 'F7'), ('U', 'Unknown'), (None, 'Not applicable')], default=None, help_text='What is the field of view of this image?', max_length=3, null=True),
),
migrations.AddField(
model_name='image',
name='stereoscopic_choice',
field=models.CharField(choices=[('L', 'Left'), ('R', 'Right'), ('U', 'Unknown'), (None, 'Not applicable')], default='U', help_text='Is this the left or right image of a stereoscopic pair?', max_length=1, null=True),
),
]
|
<commit_before><commit_msg>Add migration for new attributes<commit_after># Generated by Django 2.1.7 on 2019-03-11 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cases', '0010_auto_20190219_1301'),
]
operations = [
migrations.AddField(
model_name='image',
name='field_of_view',
field=models.CharField(choices=[('F1M', 'F1M'), ('F2', 'F2'), ('F3M', 'F3M'), ('F4', 'F4'), ('F5', 'F5'), ('F6', 'F6'), ('F7', 'F7'), ('U', 'Unknown'), (None, 'Not applicable')], default=None, help_text='What is the field of view of this image?', max_length=3, null=True),
),
migrations.AddField(
model_name='image',
name='stereoscopic_choice',
field=models.CharField(choices=[('L', 'Left'), ('R', 'Right'), ('U', 'Unknown'), (None, 'Not applicable')], default='U', help_text='Is this the left or right image of a stereoscopic pair?', max_length=1, null=True),
),
]
|
|
78bf7e2af689f189ae1e77007014421fd865c949
|
bin/clean_unused_headers.py
|
bin/clean_unused_headers.py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
from subprocess import check_output
IMAGE_PATTERN = re.compile(
'linux-image-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
HEADER_PATTERN = re.compile(
'linux-headers-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
def get_all_packages():
for line in check_output(['dpkg', '-l']).split('\n'):
if line.startswith('ii'):
# print(line.split(' '))
yield line.split()[1]
def find_group(pattern, text):
matched = pattern.match(text)
if matched:
return '{version}-{rev}'.format(
version=matched.group('version'),
rev=matched.group('rev'))
return None
def main():
packages = list(get_all_packages())
header_pkgs = filter(lambda x: HEADER_PATTERN.match(x), packages)
image_pkgs = filter(lambda x: IMAGE_PATTERN.match(x), packages)
header_versions = map(lambda x: find_group(HEADER_PATTERN, x), header_pkgs)
image_versions = map(lambda x: find_group(IMAGE_PATTERN, x), image_pkgs)
print(header_pkgs)
print(image_pkgs)
print(header_versions)
print(image_versions)
if __name__ == "__main__":
main()
|
Add script to clean unused linux-headers packages
|
Add script to clean unused linux-headers packages
|
Python
|
apache-2.0
|
elleryq/oh-my-home,elleryq/oh-my-home,elleryq/oh-my-home
|
Add script to clean unused linux-headers packages
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
from subprocess import check_output
IMAGE_PATTERN = re.compile(
'linux-image-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
HEADER_PATTERN = re.compile(
'linux-headers-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
def get_all_packages():
for line in check_output(['dpkg', '-l']).split('\n'):
if line.startswith('ii'):
# print(line.split(' '))
yield line.split()[1]
def find_group(pattern, text):
matched = pattern.match(text)
if matched:
return '{version}-{rev}'.format(
version=matched.group('version'),
rev=matched.group('rev'))
return None
def main():
packages = list(get_all_packages())
header_pkgs = filter(lambda x: HEADER_PATTERN.match(x), packages)
image_pkgs = filter(lambda x: IMAGE_PATTERN.match(x), packages)
header_versions = map(lambda x: find_group(HEADER_PATTERN, x), header_pkgs)
image_versions = map(lambda x: find_group(IMAGE_PATTERN, x), image_pkgs)
print(header_pkgs)
print(image_pkgs)
print(header_versions)
print(image_versions)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to clean unused linux-headers packages<commit_after>
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
from subprocess import check_output
IMAGE_PATTERN = re.compile(
'linux-image-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
HEADER_PATTERN = re.compile(
'linux-headers-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
def get_all_packages():
for line in check_output(['dpkg', '-l']).split('\n'):
if line.startswith('ii'):
# print(line.split(' '))
yield line.split()[1]
def find_group(pattern, text):
matched = pattern.match(text)
if matched:
return '{version}-{rev}'.format(
version=matched.group('version'),
rev=matched.group('rev'))
return None
def main():
packages = list(get_all_packages())
header_pkgs = filter(lambda x: HEADER_PATTERN.match(x), packages)
image_pkgs = filter(lambda x: IMAGE_PATTERN.match(x), packages)
header_versions = map(lambda x: find_group(HEADER_PATTERN, x), header_pkgs)
image_versions = map(lambda x: find_group(IMAGE_PATTERN, x), image_pkgs)
print(header_pkgs)
print(image_pkgs)
print(header_versions)
print(image_versions)
if __name__ == "__main__":
main()
|
Add script to clean unused linux-headers packages#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
from subprocess import check_output
IMAGE_PATTERN = re.compile(
'linux-image-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
HEADER_PATTERN = re.compile(
'linux-headers-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
def get_all_packages():
for line in check_output(['dpkg', '-l']).split('\n'):
if line.startswith('ii'):
# print(line.split(' '))
yield line.split()[1]
def find_group(pattern, text):
matched = pattern.match(text)
if matched:
return '{version}-{rev}'.format(
version=matched.group('version'),
rev=matched.group('rev'))
return None
def main():
packages = list(get_all_packages())
header_pkgs = filter(lambda x: HEADER_PATTERN.match(x), packages)
image_pkgs = filter(lambda x: IMAGE_PATTERN.match(x), packages)
header_versions = map(lambda x: find_group(HEADER_PATTERN, x), header_pkgs)
image_versions = map(lambda x: find_group(IMAGE_PATTERN, x), image_pkgs)
print(header_pkgs)
print(image_pkgs)
print(header_versions)
print(image_versions)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to clean unused linux-headers packages<commit_after>#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import re
from subprocess import check_output
IMAGE_PATTERN = re.compile(
'linux-image-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
HEADER_PATTERN = re.compile(
'linux-headers-(?P<version>[0-9\.]+)-(?P<rev>[0-9]{2})-generic')
def get_all_packages():
for line in check_output(['dpkg', '-l']).split('\n'):
if line.startswith('ii'):
# print(line.split(' '))
yield line.split()[1]
def find_group(pattern, text):
matched = pattern.match(text)
if matched:
return '{version}-{rev}'.format(
version=matched.group('version'),
rev=matched.group('rev'))
return None
def main():
packages = list(get_all_packages())
header_pkgs = filter(lambda x: HEADER_PATTERN.match(x), packages)
image_pkgs = filter(lambda x: IMAGE_PATTERN.match(x), packages)
header_versions = map(lambda x: find_group(HEADER_PATTERN, x), header_pkgs)
image_versions = map(lambda x: find_group(IMAGE_PATTERN, x), image_pkgs)
print(header_pkgs)
print(image_pkgs)
print(header_versions)
print(image_versions)
if __name__ == "__main__":
main()
|
|
559e24c1ec397c07bcbc02b395d70cb0383ed291
|
moksha/tests/test_entity.py
|
moksha/tests/test_entity.py
|
# -*- coding: utf-8 -*-
"""Test Moksha's Entity/Fact model"""
from datetime import datetime
from nose.tools import eq_, assert_true
from sqlalchemy import *
from moksha.model import DBSession, Entity, Fact, with_characteristic
from moksha.tests.test_models import TestModel
class TestEntity(TestModel):
"""Test case for the Entity model."""
def setUp(self):
super(TestEntity, self).setUp()
self.entity = Entity(u'lmacken')
self.entity[u'firstname'] = u'Luke'
self.entity[u'lastname'] = u'Macken'
self.entity[u'age'] = 24
self.entity[u'dob'] = datetime(1984, 11, 02)
self.entity[u'l33t'] = True
def test_entity_creation_name(self):
eq_(self.entity.name, u'lmacken')
def test_fact_types(self):
DBSession.add(self.entity)
DBSession.flush()
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me[u'lastname'], u'Macken')
eq_(me[u'age'], 24)
eq_(me[u'dob'], datetime(1984, 11, 2))
eq_(me[u'l33t'], True)
def test_getting_by_name(self):
""" Entities should be fetchable by their name """
DBSession.add(self.entity)
lmacken = Entity.by_name(u'lmacken')
eq_(lmacken, self.entity)
def test_filter_by_name(self):
DBSession.add(self.entity)
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me.name, u'lmacken')
eq_(me[u'firstname'], u'Luke')
def test_query_by_fact(self):
""" Query entities by facts """
DBSession.add(self.entity)
assert DBSession.query(Entity).filter(
Entity.facts.any(
and_(Fact.key == u'l33t',
Fact.value == True))).first()
def test_query_with_characteristic(self):
""" Query entities based on facts using with_characteristic """
DBSession.add(self.entity)
assert (DBSession.query(Entity).
filter(or_(Entity.facts.any(
with_characteristic(u'dob', datetime(1984, 11, 02))),
not_(Entity.facts.any(Fact.key == u'l33t'))))).first()
def test_query_facts_by_characteristic(self):
""" Query facts by certain characteristics """
DBSession.add(self.entity)
assert (DBSession.query(Fact).
filter(with_characteristic(u'l33t', True))).one()
|
Add test cases for our entity/fact model
|
Add test cases for our entity/fact model
|
Python
|
apache-2.0
|
lmacken/moksha,mokshaproject/moksha,pombredanne/moksha,mokshaproject/moksha,pombredanne/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha,pombredanne/moksha,ralphbean/moksha,pombredanne/moksha,lmacken/moksha,mokshaproject/moksha,ralphbean/moksha
|
Add test cases for our entity/fact model
|
# -*- coding: utf-8 -*-
"""Test Moksha's Entity/Fact model"""
from datetime import datetime
from nose.tools import eq_, assert_true
from sqlalchemy import *
from moksha.model import DBSession, Entity, Fact, with_characteristic
from moksha.tests.test_models import TestModel
class TestEntity(TestModel):
"""Test case for the Entity model."""
def setUp(self):
super(TestEntity, self).setUp()
self.entity = Entity(u'lmacken')
self.entity[u'firstname'] = u'Luke'
self.entity[u'lastname'] = u'Macken'
self.entity[u'age'] = 24
self.entity[u'dob'] = datetime(1984, 11, 02)
self.entity[u'l33t'] = True
def test_entity_creation_name(self):
eq_(self.entity.name, u'lmacken')
def test_fact_types(self):
DBSession.add(self.entity)
DBSession.flush()
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me[u'lastname'], u'Macken')
eq_(me[u'age'], 24)
eq_(me[u'dob'], datetime(1984, 11, 2))
eq_(me[u'l33t'], True)
def test_getting_by_name(self):
""" Entities should be fetchable by their name """
DBSession.add(self.entity)
lmacken = Entity.by_name(u'lmacken')
eq_(lmacken, self.entity)
def test_filter_by_name(self):
DBSession.add(self.entity)
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me.name, u'lmacken')
eq_(me[u'firstname'], u'Luke')
def test_query_by_fact(self):
""" Query entities by facts """
DBSession.add(self.entity)
assert DBSession.query(Entity).filter(
Entity.facts.any(
and_(Fact.key == u'l33t',
Fact.value == True))).first()
def test_query_with_characteristic(self):
""" Query entities based on facts using with_characteristic """
DBSession.add(self.entity)
assert (DBSession.query(Entity).
filter(or_(Entity.facts.any(
with_characteristic(u'dob', datetime(1984, 11, 02))),
not_(Entity.facts.any(Fact.key == u'l33t'))))).first()
def test_query_facts_by_characteristic(self):
""" Query facts by certain characteristics """
DBSession.add(self.entity)
assert (DBSession.query(Fact).
filter(with_characteristic(u'l33t', True))).one()
|
<commit_before><commit_msg>Add test cases for our entity/fact model<commit_after>
|
# -*- coding: utf-8 -*-
"""Test Moksha's Entity/Fact model"""
from datetime import datetime
from nose.tools import eq_, assert_true
from sqlalchemy import *
from moksha.model import DBSession, Entity, Fact, with_characteristic
from moksha.tests.test_models import TestModel
class TestEntity(TestModel):
"""Test case for the Entity model."""
def setUp(self):
super(TestEntity, self).setUp()
self.entity = Entity(u'lmacken')
self.entity[u'firstname'] = u'Luke'
self.entity[u'lastname'] = u'Macken'
self.entity[u'age'] = 24
self.entity[u'dob'] = datetime(1984, 11, 02)
self.entity[u'l33t'] = True
def test_entity_creation_name(self):
eq_(self.entity.name, u'lmacken')
def test_fact_types(self):
DBSession.add(self.entity)
DBSession.flush()
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me[u'lastname'], u'Macken')
eq_(me[u'age'], 24)
eq_(me[u'dob'], datetime(1984, 11, 2))
eq_(me[u'l33t'], True)
def test_getting_by_name(self):
""" Entities should be fetchable by their name """
DBSession.add(self.entity)
lmacken = Entity.by_name(u'lmacken')
eq_(lmacken, self.entity)
def test_filter_by_name(self):
DBSession.add(self.entity)
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me.name, u'lmacken')
eq_(me[u'firstname'], u'Luke')
def test_query_by_fact(self):
""" Query entities by facts """
DBSession.add(self.entity)
assert DBSession.query(Entity).filter(
Entity.facts.any(
and_(Fact.key == u'l33t',
Fact.value == True))).first()
def test_query_with_characteristic(self):
""" Query entities based on facts using with_characteristic """
DBSession.add(self.entity)
assert (DBSession.query(Entity).
filter(or_(Entity.facts.any(
with_characteristic(u'dob', datetime(1984, 11, 02))),
not_(Entity.facts.any(Fact.key == u'l33t'))))).first()
def test_query_facts_by_characteristic(self):
""" Query facts by certain characteristics """
DBSession.add(self.entity)
assert (DBSession.query(Fact).
filter(with_characteristic(u'l33t', True))).one()
|
Add test cases for our entity/fact model# -*- coding: utf-8 -*-
"""Test Moksha's Entity/Fact model"""
from datetime import datetime
from nose.tools import eq_, assert_true
from sqlalchemy import *
from moksha.model import DBSession, Entity, Fact, with_characteristic
from moksha.tests.test_models import TestModel
class TestEntity(TestModel):
"""Test case for the Entity model."""
def setUp(self):
super(TestEntity, self).setUp()
self.entity = Entity(u'lmacken')
self.entity[u'firstname'] = u'Luke'
self.entity[u'lastname'] = u'Macken'
self.entity[u'age'] = 24
self.entity[u'dob'] = datetime(1984, 11, 02)
self.entity[u'l33t'] = True
def test_entity_creation_name(self):
eq_(self.entity.name, u'lmacken')
def test_fact_types(self):
DBSession.add(self.entity)
DBSession.flush()
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me[u'lastname'], u'Macken')
eq_(me[u'age'], 24)
eq_(me[u'dob'], datetime(1984, 11, 2))
eq_(me[u'l33t'], True)
def test_getting_by_name(self):
""" Entities should be fetchable by their name """
DBSession.add(self.entity)
lmacken = Entity.by_name(u'lmacken')
eq_(lmacken, self.entity)
def test_filter_by_name(self):
DBSession.add(self.entity)
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me.name, u'lmacken')
eq_(me[u'firstname'], u'Luke')
def test_query_by_fact(self):
""" Query entities by facts """
DBSession.add(self.entity)
assert DBSession.query(Entity).filter(
Entity.facts.any(
and_(Fact.key == u'l33t',
Fact.value == True))).first()
def test_query_with_characteristic(self):
""" Query entities based on facts using with_characteristic """
DBSession.add(self.entity)
assert (DBSession.query(Entity).
filter(or_(Entity.facts.any(
with_characteristic(u'dob', datetime(1984, 11, 02))),
not_(Entity.facts.any(Fact.key == u'l33t'))))).first()
def test_query_facts_by_characteristic(self):
""" Query facts by certain characteristics """
DBSession.add(self.entity)
assert (DBSession.query(Fact).
filter(with_characteristic(u'l33t', True))).one()
|
<commit_before><commit_msg>Add test cases for our entity/fact model<commit_after># -*- coding: utf-8 -*-
"""Test Moksha's Entity/Fact model"""
from datetime import datetime
from nose.tools import eq_, assert_true
from sqlalchemy import *
from moksha.model import DBSession, Entity, Fact, with_characteristic
from moksha.tests.test_models import TestModel
class TestEntity(TestModel):
"""Test case for the Entity model."""
def setUp(self):
super(TestEntity, self).setUp()
self.entity = Entity(u'lmacken')
self.entity[u'firstname'] = u'Luke'
self.entity[u'lastname'] = u'Macken'
self.entity[u'age'] = 24
self.entity[u'dob'] = datetime(1984, 11, 02)
self.entity[u'l33t'] = True
def test_entity_creation_name(self):
eq_(self.entity.name, u'lmacken')
def test_fact_types(self):
DBSession.add(self.entity)
DBSession.flush()
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me[u'lastname'], u'Macken')
eq_(me[u'age'], 24)
eq_(me[u'dob'], datetime(1984, 11, 2))
eq_(me[u'l33t'], True)
def test_getting_by_name(self):
""" Entities should be fetchable by their name """
DBSession.add(self.entity)
lmacken = Entity.by_name(u'lmacken')
eq_(lmacken, self.entity)
def test_filter_by_name(self):
DBSession.add(self.entity)
me = DBSession.query(Entity).filter_by(name=u'lmacken').one()
eq_(me.name, u'lmacken')
eq_(me[u'firstname'], u'Luke')
def test_query_by_fact(self):
""" Query entities by facts """
DBSession.add(self.entity)
assert DBSession.query(Entity).filter(
Entity.facts.any(
and_(Fact.key == u'l33t',
Fact.value == True))).first()
def test_query_with_characteristic(self):
""" Query entities based on facts using with_characteristic """
DBSession.add(self.entity)
assert (DBSession.query(Entity).
filter(or_(Entity.facts.any(
with_characteristic(u'dob', datetime(1984, 11, 02))),
not_(Entity.facts.any(Fact.key == u'l33t'))))).first()
def test_query_facts_by_characteristic(self):
""" Query facts by certain characteristics """
DBSession.add(self.entity)
assert (DBSession.query(Fact).
filter(with_characteristic(u'l33t', True))).one()
|
|
7c1c53fc63866e0832d39236f04d0675505d4518
|
corehq/apps/reminders/management/commands/find_unsubscribed_projects_with_reminders.py
|
corehq/apps/reminders/management/commands/find_unsubscribed_projects_with_reminders.py
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reminders.models import (CaseReminderHandler,
REMINDER_TYPE_DEFAULT)
class Command(BaseCommand):
args = ""
help = ""
def domain_has_active_reminders(self, domain):
for handler in CaseReminderHandler.get_handlers(
domain.name,
reminder_type_filter=REMINDER_TYPE_DEFAULT
):
if handler.active:
return True
return False
def handle(self, *args, **options):
for domain in Domain.get_all():
if (
self.domain_has_active_reminders(domain) and
not domain_has_privilege(domain, privileges.REMINDERS_FRAMEWORK)
):
print "%s has active reminders without a subscription" % domain.name
|
Add script to find projects with active reminders but without appropriate subscription
|
Add script to find projects with active reminders but without appropriate subscription
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq
|
Add script to find projects with active reminders but without appropriate subscription
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reminders.models import (CaseReminderHandler,
REMINDER_TYPE_DEFAULT)
class Command(BaseCommand):
args = ""
help = ""
def domain_has_active_reminders(self, domain):
for handler in CaseReminderHandler.get_handlers(
domain.name,
reminder_type_filter=REMINDER_TYPE_DEFAULT
):
if handler.active:
return True
return False
def handle(self, *args, **options):
for domain in Domain.get_all():
if (
self.domain_has_active_reminders(domain) and
not domain_has_privilege(domain, privileges.REMINDERS_FRAMEWORK)
):
print "%s has active reminders without a subscription" % domain.name
|
<commit_before><commit_msg>Add script to find projects with active reminders but without appropriate subscription<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reminders.models import (CaseReminderHandler,
REMINDER_TYPE_DEFAULT)
class Command(BaseCommand):
args = ""
help = ""
def domain_has_active_reminders(self, domain):
for handler in CaseReminderHandler.get_handlers(
domain.name,
reminder_type_filter=REMINDER_TYPE_DEFAULT
):
if handler.active:
return True
return False
def handle(self, *args, **options):
for domain in Domain.get_all():
if (
self.domain_has_active_reminders(domain) and
not domain_has_privilege(domain, privileges.REMINDERS_FRAMEWORK)
):
print "%s has active reminders without a subscription" % domain.name
|
Add script to find projects with active reminders but without appropriate subscriptionfrom django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reminders.models import (CaseReminderHandler,
REMINDER_TYPE_DEFAULT)
class Command(BaseCommand):
args = ""
help = ""
def domain_has_active_reminders(self, domain):
for handler in CaseReminderHandler.get_handlers(
domain.name,
reminder_type_filter=REMINDER_TYPE_DEFAULT
):
if handler.active:
return True
return False
def handle(self, *args, **options):
for domain in Domain.get_all():
if (
self.domain_has_active_reminders(domain) and
not domain_has_privilege(domain, privileges.REMINDERS_FRAMEWORK)
):
print "%s has active reminders without a subscription" % domain.name
|
<commit_before><commit_msg>Add script to find projects with active reminders but without appropriate subscription<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
from corehq import privileges
from corehq.apps.accounting.utils import domain_has_privilege
from corehq.apps.reminders.models import (CaseReminderHandler,
REMINDER_TYPE_DEFAULT)
class Command(BaseCommand):
args = ""
help = ""
def domain_has_active_reminders(self, domain):
for handler in CaseReminderHandler.get_handlers(
domain.name,
reminder_type_filter=REMINDER_TYPE_DEFAULT
):
if handler.active:
return True
return False
def handle(self, *args, **options):
for domain in Domain.get_all():
if (
self.domain_has_active_reminders(domain) and
not domain_has_privilege(domain, privileges.REMINDERS_FRAMEWORK)
):
print "%s has active reminders without a subscription" % domain.name
|
|
b864a173a0fa863042bd21b6251d1e7aafa4ba29
|
kazoo/tests/test_exceptions.py
|
kazoo/tests/test_exceptions.py
|
import unittest
from nose.tools import eq_
import zookeeper
class TestExceptions(unittest.TestCase):
def _makeOne(self, *args, **kwargs):
from kazoo.exceptions import err_to_exception
return err_to_exception(*args, **kwargs)
def test_error_translate(self):
exc = self._makeOne(zookeeper.SYSTEMERROR)
assert isinstance(exc, zookeeper.SystemErrorException)
def test_error_with_message(self):
exc = self._makeOne(zookeeper.NONODE, msg="oops")
assert isinstance(exc, zookeeper.NoNodeException)
eq_(str(exc), "no node: oops")
def test_generic_error_code(self):
exc = self._makeOne(-200)
assert isinstance(exc, Exception)
def test_zookeeper_ok(self):
exc = self._makeOne(zookeeper.OK)
eq_(exc, None)
def test_not_error_code(self):
exc = self._makeOne("this needs to be an int")
assert isinstance(exc, Exception)
|
Add complete exception unit tests.
|
Add complete exception unit tests.
|
Python
|
apache-2.0
|
pombredanne/kazoo,rockerbox/kazoo,python-zk/kazoo,harlowja/kazoo,rackerlabs/kazoo,harlowja/kazoo,jacksontj/kazoo,rockerbox/kazoo,rgs1/kazoo,jacksontj/kazoo,AlexanderplUs/kazoo,rackerlabs/kazoo,rgs1/kazoo,pombredanne/kazoo,tempbottle/kazoo,bsanders/kazoo,kormat/kazoo,bsanders/kazoo,python-zk/kazoo,Asana/kazoo,max0d41/kazoo,tempbottle/kazoo,AlexanderplUs/kazoo,kormat/kazoo,max0d41/kazoo
|
Add complete exception unit tests.
|
import unittest
from nose.tools import eq_
import zookeeper
class TestExceptions(unittest.TestCase):
def _makeOne(self, *args, **kwargs):
from kazoo.exceptions import err_to_exception
return err_to_exception(*args, **kwargs)
def test_error_translate(self):
exc = self._makeOne(zookeeper.SYSTEMERROR)
assert isinstance(exc, zookeeper.SystemErrorException)
def test_error_with_message(self):
exc = self._makeOne(zookeeper.NONODE, msg="oops")
assert isinstance(exc, zookeeper.NoNodeException)
eq_(str(exc), "no node: oops")
def test_generic_error_code(self):
exc = self._makeOne(-200)
assert isinstance(exc, Exception)
def test_zookeeper_ok(self):
exc = self._makeOne(zookeeper.OK)
eq_(exc, None)
def test_not_error_code(self):
exc = self._makeOne("this needs to be an int")
assert isinstance(exc, Exception)
|
<commit_before><commit_msg>Add complete exception unit tests.<commit_after>
|
import unittest
from nose.tools import eq_
import zookeeper
class TestExceptions(unittest.TestCase):
def _makeOne(self, *args, **kwargs):
from kazoo.exceptions import err_to_exception
return err_to_exception(*args, **kwargs)
def test_error_translate(self):
exc = self._makeOne(zookeeper.SYSTEMERROR)
assert isinstance(exc, zookeeper.SystemErrorException)
def test_error_with_message(self):
exc = self._makeOne(zookeeper.NONODE, msg="oops")
assert isinstance(exc, zookeeper.NoNodeException)
eq_(str(exc), "no node: oops")
def test_generic_error_code(self):
exc = self._makeOne(-200)
assert isinstance(exc, Exception)
def test_zookeeper_ok(self):
exc = self._makeOne(zookeeper.OK)
eq_(exc, None)
def test_not_error_code(self):
exc = self._makeOne("this needs to be an int")
assert isinstance(exc, Exception)
|
Add complete exception unit tests.import unittest
from nose.tools import eq_
import zookeeper
class TestExceptions(unittest.TestCase):
def _makeOne(self, *args, **kwargs):
from kazoo.exceptions import err_to_exception
return err_to_exception(*args, **kwargs)
def test_error_translate(self):
exc = self._makeOne(zookeeper.SYSTEMERROR)
assert isinstance(exc, zookeeper.SystemErrorException)
def test_error_with_message(self):
exc = self._makeOne(zookeeper.NONODE, msg="oops")
assert isinstance(exc, zookeeper.NoNodeException)
eq_(str(exc), "no node: oops")
def test_generic_error_code(self):
exc = self._makeOne(-200)
assert isinstance(exc, Exception)
def test_zookeeper_ok(self):
exc = self._makeOne(zookeeper.OK)
eq_(exc, None)
def test_not_error_code(self):
exc = self._makeOne("this needs to be an int")
assert isinstance(exc, Exception)
|
<commit_before><commit_msg>Add complete exception unit tests.<commit_after>import unittest
from nose.tools import eq_
import zookeeper
class TestExceptions(unittest.TestCase):
def _makeOne(self, *args, **kwargs):
from kazoo.exceptions import err_to_exception
return err_to_exception(*args, **kwargs)
def test_error_translate(self):
exc = self._makeOne(zookeeper.SYSTEMERROR)
assert isinstance(exc, zookeeper.SystemErrorException)
def test_error_with_message(self):
exc = self._makeOne(zookeeper.NONODE, msg="oops")
assert isinstance(exc, zookeeper.NoNodeException)
eq_(str(exc), "no node: oops")
def test_generic_error_code(self):
exc = self._makeOne(-200)
assert isinstance(exc, Exception)
def test_zookeeper_ok(self):
exc = self._makeOne(zookeeper.OK)
eq_(exc, None)
def test_not_error_code(self):
exc = self._makeOne("this needs to be an int")
assert isinstance(exc, Exception)
|
|
268d825eb603fd6107fd663ac1e0df9e7037f0e9
|
dbaas/dbaas/management/commands/filer_migrate_by_infra_name.py
|
dbaas/dbaas/management/commands/filer_migrate_by_infra_name.py
|
# coding: utf-8
from django.core.management.base import BaseCommand
from maintenance.scripts.filer_migrate import FilerMigrate
from logical.models import Database
class Command(BaseCommand):
'''Migrate filer'''
def handle(self, *args, **options):
names = filter(lambda s: s, map(lambda s: s.strip(), args[0].split(',')))
dbs = Database.objects.filter(databaseinfra__name__in=names)
if not dbs:
return "Nenhum banco encontrado"
step = FilerMigrate(dbs)
print "{} banco(s) para a migração: {}".format(
len(dbs),
",".join(dbs.values_list('name', flat=True))
)
step.do()
print "DONE"
|
Create command for migrate filer
|
Create command for migrate filer
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Create command for migrate filer
|
# coding: utf-8
from django.core.management.base import BaseCommand
from maintenance.scripts.filer_migrate import FilerMigrate
from logical.models import Database
class Command(BaseCommand):
'''Migrate filer'''
def handle(self, *args, **options):
names = filter(lambda s: s, map(lambda s: s.strip(), args[0].split(',')))
dbs = Database.objects.filter(databaseinfra__name__in=names)
if not dbs:
return "Nenhum banco encontrado"
step = FilerMigrate(dbs)
print "{} banco(s) para a migração: {}".format(
len(dbs),
",".join(dbs.values_list('name', flat=True))
)
step.do()
print "DONE"
|
<commit_before><commit_msg>Create command for migrate filer<commit_after>
|
# coding: utf-8
from django.core.management.base import BaseCommand
from maintenance.scripts.filer_migrate import FilerMigrate
from logical.models import Database
class Command(BaseCommand):
'''Migrate filer'''
def handle(self, *args, **options):
names = filter(lambda s: s, map(lambda s: s.strip(), args[0].split(',')))
dbs = Database.objects.filter(databaseinfra__name__in=names)
if not dbs:
return "Nenhum banco encontrado"
step = FilerMigrate(dbs)
print "{} banco(s) para a migração: {}".format(
len(dbs),
",".join(dbs.values_list('name', flat=True))
)
step.do()
print "DONE"
|
Create command for migrate filer# coding: utf-8
from django.core.management.base import BaseCommand
from maintenance.scripts.filer_migrate import FilerMigrate
from logical.models import Database
class Command(BaseCommand):
'''Migrate filer'''
def handle(self, *args, **options):
names = filter(lambda s: s, map(lambda s: s.strip(), args[0].split(',')))
dbs = Database.objects.filter(databaseinfra__name__in=names)
if not dbs:
return "Nenhum banco encontrado"
step = FilerMigrate(dbs)
print "{} banco(s) para a migração: {}".format(
len(dbs),
",".join(dbs.values_list('name', flat=True))
)
step.do()
print "DONE"
|
<commit_before><commit_msg>Create command for migrate filer<commit_after># coding: utf-8
from django.core.management.base import BaseCommand
from maintenance.scripts.filer_migrate import FilerMigrate
from logical.models import Database
class Command(BaseCommand):
'''Migrate filer'''
def handle(self, *args, **options):
names = filter(lambda s: s, map(lambda s: s.strip(), args[0].split(',')))
dbs = Database.objects.filter(databaseinfra__name__in=names)
if not dbs:
return "Nenhum banco encontrado"
step = FilerMigrate(dbs)
print "{} banco(s) para a migração: {}".format(
len(dbs),
",".join(dbs.values_list('name', flat=True))
)
step.do()
print "DONE"
|
|
890f4a225d783c410de80eca7e2b6988c92d63c6
|
src/spot_test.py
|
src/spot_test.py
|
"""Spot Resolver Test"""
import spot
def test_to_msw_id() -> None:
"""Test if spot resolving works as expected"""
res = spot.to_msw_id("Carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
def test_to_msw_id_case_insensitive() -> None:
"""Test if resolving works even if case does not match"""
res = spot.to_msw_id("carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
|
Add tests for spot module
|
Add tests for spot module
|
Python
|
mit
|
Smotko/surfbot,Smotko/surfbot
|
Add tests for spot module
|
"""Spot Resolver Test"""
import spot
def test_to_msw_id() -> None:
"""Test if spot resolving works as expected"""
res = spot.to_msw_id("Carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
def test_to_msw_id_case_insensitive() -> None:
"""Test if resolving works even if case does not match"""
res = spot.to_msw_id("carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
|
<commit_before><commit_msg>Add tests for spot module<commit_after>
|
"""Spot Resolver Test"""
import spot
def test_to_msw_id() -> None:
"""Test if spot resolving works as expected"""
res = spot.to_msw_id("Carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
def test_to_msw_id_case_insensitive() -> None:
"""Test if resolving works even if case does not match"""
res = spot.to_msw_id("carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
|
Add tests for spot module"""Spot Resolver Test"""
import spot
def test_to_msw_id() -> None:
"""Test if spot resolving works as expected"""
res = spot.to_msw_id("Carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
def test_to_msw_id_case_insensitive() -> None:
"""Test if resolving works even if case does not match"""
res = spot.to_msw_id("carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
|
<commit_before><commit_msg>Add tests for spot module<commit_after>"""Spot Resolver Test"""
import spot
def test_to_msw_id() -> None:
"""Test if spot resolving works as expected"""
res = spot.to_msw_id("Carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
def test_to_msw_id_case_insensitive() -> None:
"""Test if resolving works even if case does not match"""
res = spot.to_msw_id("carcavelos")
assert res == 912, \
"to_msw_id() return expected {} but returned {}".format(912, res)
|